Processing images in various ways with Thumbnailator
2024-01-03 15:57:40
Adding the dependency
<!-- Java image utility https://mvnrepository.com/artifact/net.coobird/thumbnailator -->
<dependency>
<groupId>net.coobird</groupId>
<artifactId>thumbnailator</artifactId>
<version>0.4.12</version>
</dependency>
Methods
1. Reading the source images
Multiple files or directories can be processed in one batch;
Thumbnails.of(BufferedImage... images) reads the sources from BufferedImage objects;
Thumbnails.of(File... files) reads the sources from files or directories;
Thumbnails.of(InputStream... inputStreams) reads the sources from input streams;
Thumbnails.of(String... files) reads the sources from file paths;
Thumbnails.of(URL... urls) reads the sources from URLs;
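For example, a minimal sketch combining several input sources (the file names, paths, and URL below are placeholders; the snippets in this article assume the usual imports such as net.coobird.thumbnailator.Thumbnails, net.coobird.thumbnailator.name.Rename, java.io.File and java.net.URL, and a method that declares throws IOException):
// Each of(...) overload takes varargs, so several sources can be processed in one call
Thumbnails.of(new File("a.jpg"), new File("b.jpg"))
.scale(0.5)
.toFiles(Rename.PREFIX_DOT_THUMBNAIL);
// From a path string
Thumbnails.of("c:/images/photo.png")
.scale(0.5)
.toFile("c:/images/photo_small.png");
// From a URL
Thumbnails.of(new URL("https://example.com/logo.png"))
.scale(0.5)
.toFile("logo_small.png");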
2. Setting the size, proportionally or stretched
.size(int width, int height) scales proportionally so the image fits within the given width and height;
.width(int width) sets the width; the height follows proportionally;
.height(int height) sets the height; the width follows proportionally;
.forceSize(int width, int height) sets both width and height without keeping the aspect ratio, stretching the image to the given dimensions;
.scale(double scale) scales by a factor: 0-1 shrinks, 1 keeps the original size, greater than 1 enlarges;
.scale(double scaleWidth, double scaleHeight) applies separate factors to width and height, which stretches the image;
.scalingMode(ScalingMode config) sets the scaling mode (the ScalingMode enum: BICUBIC, BILINEAR, PROGRESSIVE_BILINEAR);
.keepAspectRatio(boolean keep) controls whether the aspect ratio is kept; false means it is not;
Note: size, width/height, scale, and forceSize cannot be combined with one another; size is equivalent to width + height; forceSize is equivalent to setting width and height plus keepAspectRatio(false), so forceSize cannot be combined with the other size-setting methods or with keepAspectRatio.
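A minimal sketch of the sizing options (file names are placeholders; ScalingMode comes from net.coobird.thumbnailator.resizers.configurations.ScalingMode):
// Fit within a 300x200 box, keeping the aspect ratio
Thumbnails.of("input.jpg").size(300, 200).toFile("out_size.jpg");
// Exactly 300x200, stretching the image if necessary
Thumbnails.of("input.jpg").forceSize(300, 200).toFile("out_force.jpg");
// Half the original dimensions, using bilinear resampling
Thumbnails.of("input.jpg").scale(0.5).scalingMode(ScalingMode.BILINEAR).toFile("out_half.jpg");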
3. Cropping
.sourceRegion(int x, int y, int width, int height) crops the source image, starting at coordinates (x, y) and taking a region of the given width and height; x grows to the right, y grows downward, and width (rightward) and height (downward) must be greater than 0;
.sourceRegion(Position position, int width, int height) crops the source image at the given position; position can be one of the nine values of the Positions enum or any implementation of the Position interface;
.sourceRegion(Position position, Size size)
.sourceRegion(Rectangle region)
.sourceRegion(Region sourceRegion)
.crop(Position position) crops the generated thumbnail so that it matches the dimensions given by size(), taking the region at the given position;
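A minimal sketch of the two cropping styles (file names and coordinates are placeholders; Positions comes from net.coobird.thumbnailator.geometry.Positions):
// Cut a 200x200 region out of the source image, starting at (50, 50), then output it at 200x200
Thumbnails.of("input.jpg").sourceRegion(50, 50, 200, 200).size(200, 200).toFile("out_region.jpg");
// Produce an exactly 300x300 thumbnail by cropping the scaled image around its center instead of distorting it
Thumbnails.of("input.jpg").size(300, 300).crop(Positions.CENTER).toFile("out_crop.jpg");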
4. Rotation
.rotate(double angle) rotates by the given angle in degrees; positive values rotate clockwise;
5. Watermarks
.watermark(BufferedImage image) places a watermark at the center with 50% opacity;
.watermark(BufferedImage image, float opacity) places a watermark at the center with the given opacity (0.0 <= opacity <= 1.0);
.watermark(Position position, BufferedImage image, float opacity) places a watermark at the given position with the given opacity;
.watermark(Watermark w)
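A minimal sketch combining rotation and a watermark (logo.png and the other file names are placeholders; Positions is from net.coobird.thumbnailator.geometry.Positions, ImageIO from javax.imageio):
BufferedImage logo = ImageIO.read(new File("logo.png"));
Thumbnails.of("input.jpg")
.size(800, 600)
.rotate(90) // 90 degrees clockwise
.watermark(Positions.BOTTOM_RIGHT, logo, 0.5f) // 50% opacity in the bottom-right corner
.toFile("out_watermark.jpg");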
6. Quality
.outputQuality(double quality) sets the output quality, 0.0 <= quality <= 1.0;
.outputQuality(float quality)
7. Output format
.outputFormat(String format) sets the output format (the supported formats can be obtained with ImageIO.getWriterFormatNames()), e.g. [JPG, jpg, bmp, BMP, gif, GIF, WBMP, png, PNG, wbmp, jpeg, JPEG];
.outputFormatType(String formatType)
.useOriginalFormat() keeps the original image format;
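A minimal sketch of re-encoding with a given quality and format (file names are placeholders):
// List the formats ImageIO can write on the current JVM
System.out.println(java.util.Arrays.toString(ImageIO.getWriterFormatNames()));
// Keep the original dimensions but re-encode as JPEG at 80% quality
Thumbnails.of("input.png").scale(1.0).outputFormat("jpg").outputQuality(0.8).toFile("out.jpg");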
8. Producing the output
.asBufferedImage() returns a BufferedImage;
.asBufferedImages() returns a list of BufferedImage objects;
.asFiles(Rename rename) returns a list of files, generated according to the given renaming rule (constants on the Rename abstract class: Rename.NO_CHANGE keeps the name unchanged, Rename.PREFIX_DOT_THUMBNAIL prefixes "thumbnail.", Rename.PREFIX_HYPHEN_THUMBNAIL prefixes "thumbnail-", Rename.SUFFIX_DOT_THUMBNAIL suffixes ".thumbnail", Rename.SUFFIX_HYPHEN_THUMBNAIL suffixes "-thumbnail");
.asFiles(File destinationDir, Rename rename) returns a list of files, generated into the given directory according to the renaming rule (the directory must exist);
.asFiles(Iterable<File> iterable)
.toFile(File outFile) returns nothing and writes to the given file (if the file name has no extension, one is appended automatically; the same applies below);
.toFile(String outFilepath)
.toFiles(File destinationDir, Rename rename) returns nothing and writes the files into the given directory according to the renaming rule;
.toFiles(Iterable<File> iterable)
.toFiles(Rename rename)
.toOutputStream(OutputStream os) returns nothing and writes to the given OutputStream;
.toOutputStreams(Iterable<? extends OutputStream> iterable)
.allowOverwrite(boolean allowOverwrite) controls whether existing files are overwritten (only affects toFile, toFiles, and asFiles);
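A minimal sketch of the output side (d:/thumbs and the file names are placeholders; the destination directory must already exist):
File destDir = new File("d:/thumbs");
// Writes a-thumbnail.jpg and b-thumbnail.jpg into destDir, overwriting existing files
Thumbnails.of(new File("a.jpg"), new File("b.jpg"))
.size(200, 200)
.allowOverwrite(true)
.toFiles(destDir, Rename.SUFFIX_HYPHEN_THUMBNAIL);
// Keep the result in memory instead of writing a file
BufferedImage thumb = Thumbnails.of("a.jpg").size(200, 200).asBufferedImage();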
Implementation code
import net.coobird.thumbnailator.Thumbnails;
import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.*;
import java.nio.charset.StandardCharsets;
public class ImageUtil {
// Threshold used when extracting line boundaries from an image
// The larger the threshold, the more blank area remains
// 3000000 works well for images with clearly separated colors, such as the cartoon character Crayon Shin-chan
// 1000000 works well for images with smooth color transitions, such as softly shaded 3D cartoon stills
private static final int threshold = 3000000;
// Width of the generated text output
// 100 works well for small images such as logos and avatars
// 400 works well for large, detail-rich images
private static final int picWidth = 500;
public static void main(String[] args) throws IOException {
String fromPic = "C:\\Users\\Public\\Pictures\\Sample Pictures\\QQ图片20201022091237.jpg";
String toTxt = "d:\\test5.txt";
BufferedImage bufferedImage = ImageIO.read(new File(fromPic));
// Resize the image
BufferedImage compactImage = Thumbnails.of(bufferedImage).size(picWidth, 2000).asBufferedImage();
// Grayscale
//BufferedImage grayImage = grayingImage(compactImage);
// Binarize
//BufferedImage binaryImage = binaryImage(grayImage);
// Extract the boundary
//BufferedImage borderImage = getImageBorder(compactImage);
// Remove noise
//BufferedImage noNoiseImage = clearNoise(borderImage);
// Sharpen
//BufferedImage sharpenImage = sharpenImage2(compactImage);
// Edge extraction (borrowed implementation)
//BufferedImage sobelProcessIamge = sobelProcess(sharpenImage);
// Mean filter
//BufferedImage meanValueProcessIamge = meanValueProcess(sharpenImage);
// Negative effect
//BufferedImage negativeImage = negativeProcess(bufferedImage);
// S-curve contrast boost
//BufferedImage curveImage = curveProcess(compactImage);
// USM sharpening
//BufferedImage usmSharpenImage = USMSharpen(compactImage);
// Anisotropic diffusion
/*BufferedImage anisotropyImage = anisotropyImage(compactImage);
for (int i = 1; i < 10; i++) {
anisotropyImage = anisotropyImage(anisotropyImage);
}*/
// 5. Write the result to a txt file
//writeToTxt(noNoiseImage, toTxt);
// Combination 1: extract the boundary of images with clearly separated colors, e.g. cartoon characters, logos, QR codes
BufferedImage curveImage = curveProcess(compactImage);
BufferedImage grayImage = grayingImage(curveImage);
BufferedImage binaryImage = binaryImage(grayImage);
BufferedImage borderImage = getImageBorder(binaryImage);
// Combination 2: cartoonize a portrait. Sharpening filter --> curve adjustment --> USM sharpening --> anisotropic diffusion --> color-noise reduction
//BufferedImage sharpenImage = sharpenImage2(compactImage);
/*BufferedImage curveImage = curveProcess(compactImage);
BufferedImage usmSharpenImage = USMSharpen(curveImage);
BufferedImage anisotropyImage = anisotropyImage(usmSharpenImage);
for (int i = 1; i < 10; i++) {
anisotropyImage = anisotropyImage(anisotropyImage);
}*/
/*BufferedImage curveImage = curveProcess(compactImage);*/
// Remove color noise
//BufferedImage noPartiColorImage = removePartiColor(compactImage);
// Combination 3: apply mean filtering to every pixel except the boundary pixels
/*BufferedImage curveImage = curveProcess(compactImage);
BufferedImage grayImage = grayingImage(curveImage);
BufferedImage binaryImage = binaryImage(grayImage);
BufferedImage borderImage = getImageBorder(binaryImage);
BufferedImage usmSharpenImage = USMSharpen(curveImage);
BufferedImage anisotropyImage = anisotropyImage(usmSharpenImage);
for (int i = 1; i < 10; i++) {
anisotropyImage = anisotropyImage(anisotropyImage);
}
BufferedImage noPartiColorImage = removePartiColor(anisotropyImage, borderImage);*/
//BufferedImage curveImage = curveProcess(compactImage);
//BufferedImage usmSharpenImage = USMSharpen(curveImage);
//BufferedImage meanValueProcessIamge = meanValueProcess(curveImage);
/*BufferedImage anisotropyImage = anisotropyImage(curveImage);
for (int i = 1; i < 10; i++) {
anisotropyImage = anisotropyImage(anisotropyImage);
}*/
//BufferedImage noNoiseImage = clearNoise(anisotropyImage);
// Histogram equalization - start
/*BufferedImage grayImage = grayingImage(compactImage);
BufferedImage histImage = histeq(grayImage);*/
// Histogram equalization - end
// Erosion - start
int type = 0;
int base = 30;
/*BufferedImage grayImage = grayingImage(compactImage);
BufferedImage binaryImage = binaryImage(grayImage);
BufferedImage corrodeImage = corrodeImage(binaryImage, type);*/
// Erosion - end
// Dilation - start
/*BufferedImage grayImage = grayingImage(compactImage);
BufferedImage binaryImage = binaryImage(grayImage);
BufferedImage expandImage = expandImage(binaryImage, type);*/
// Dilation - end
// Opening - start: erosion followed by dilation
/*BufferedImage grayImage = grayingImage(compactImage);
BufferedImage binaryImage = binaryImage(grayImage);
BufferedImage corrodeImage = corrodeImage(binaryImage, type);
BufferedImage expandImage = expandImage(corrodeImage, type);*/
// Opening - end
// Closing - start: dilation followed by erosion
/*BufferedImage grayImage = grayingImage(compactImage);
BufferedImage binaryImage = binaryImage(grayImage);
BufferedImage expandImage = expandImage(binaryImage, type);
BufferedImage corrodeImage = corrodeImage(expandImage, type);*/
// Closing - end
// Grayscale dilation - start
/*BufferedImage grayImage = grayingImage(compactImage);
BufferedImage expandImage = grayExpandImage(grayImage, base);*/
// Grayscale dilation - end
// Grayscale erosion - start
/*BufferedImage grayImage = grayingImage(compactImage);
BufferedImage corrodeImage = grayCorrodeImage(grayImage, base);*/
// Grayscale erosion - end
// Grayscale opening - start: grayscale erosion followed by grayscale dilation; base = 30 works better than 120
/*BufferedImage grayImage = grayingImage(compactImage);
BufferedImage corrodeImage = grayCorrodeImage(grayImage, base);
BufferedImage expandImage = grayExpandImage(corrodeImage, base);*/
// Grayscale opening - end
// Grayscale closing - start: grayscale dilation followed by grayscale erosion; base = 30 works better than 120
/*BufferedImage grayImage = grayingImage(compactImage);
BufferedImage expandImage = grayExpandImage(grayImage, base);
BufferedImage corrodeImage = grayCorrodeImage(expandImage, base);*/
// Grayscale closing - end
// Top-hat transform - start
/*BufferedImage grayImage = grayingImage(compactImage);
BufferedImage topHatImage = topHatImage(grayImage, base);*/
// Top-hat transform - end
// Combination 4: skin smoothing for portraits
BufferedImage contrastImage = contrastReserve(compactImage, 1);
BufferedImage linearImage = linearLighten(compactImage, contrastImage);
BufferedImage surfaceBlurImage = surfaceBlur(linearImage, 5);
BufferedImage surfaceBlurImage2 = surfaceBlur(surfaceBlurImage, 3);
BufferedImage surfaceBlurImage3 = surfaceBlur(surfaceBlurImage2, 1);
// Save the image
File newFile = new File("d:\\test8.jpg");
ImageIO.write(borderImage, "jpg", newFile);
}
/**
* Convert an image to grayscale
*
* @param bufferedImage source image
* @return the grayscale image
*/
private static BufferedImage grayingImage(BufferedImage bufferedImage) {
BufferedImage grayImage = new BufferedImage(bufferedImage.getWidth(), bufferedImage.getHeight(),
BufferedImage.TYPE_BYTE_GRAY);
// The top-left corner of the image is the coordinate origin
for (int i = 0; i < bufferedImage.getWidth(); i++) {
for (int j = 0; j < bufferedImage.getHeight(); j++) {
int color = bufferedImage.getRGB(i, j);
grayImage.setRGB(i, j, color);
}
}
return grayImage;
}
/**
* Binarize an image
*
* @param bufferedImage source image
* @return the binarized image
*/
private static BufferedImage binaryImage(BufferedImage bufferedImage) {
BufferedImage grayImage = new BufferedImage(bufferedImage.getWidth(), bufferedImage.getHeight(), bufferedImage.getType());
int threshold = getMeanThreshold(bufferedImage);
for (int i = 0; i < bufferedImage.getWidth(); i++) {
for (int j = 0; j < bufferedImage.getHeight(); j++) {
// According to the Javadoc, getRGB() returns a 32-bit int in ARGB format, 8 bits per component
int color = bufferedImage.getRGB(i, j);
int r = (color >> 16) & 0xff;
int g = (color >> 8) & 0xff;
int b = color & 0xff;
int gray = (int) (0.3 * r + 0.59 * g + 0.11 * b);
if (gray > threshold) {
// White
grayImage.setRGB(i, j, 0xFFFFFF);
} else {
// Black
grayImage.setRGB(i, j, 0);
}
}
}
return grayImage;
}
/**
* Compute the binarization threshold, based on the mean gray level of the image
*
* @param bufferedImage source image
* @return the binarization threshold
*/
private static int getMeanThreshold(BufferedImage bufferedImage) {
int w = bufferedImage.getWidth();
int h = bufferedImage.getHeight();
int num = 0;
int sum = 0;
for (int i = 0; i < w; i++) {
for (int j = 0; j < h; j++) {
int color = bufferedImage.getRGB(i, j);
int r = (color >> 16) & 0xff;
int g = (color >> 8) & 0xff;
int b = color & 0xff;
int gray = (int) (0.3 * r + 0.59 * g + 0.11 * b);
sum += gray;
num += 1;
}
}
// Testing shows that 1.2 times the mean value works best as the threshold.
int threshold = sum / num;
if (threshold * 1.2 < 255) {
threshold = (int) (1.2 * sum / num);
}
System.out.println("width: " + w + " height: " + h + " threshold: " + threshold);
return threshold;
}
/**
* Write the image out as a 0/1 text file
*/
public static void writeToTxt(BufferedImage bufferedImage, String toSaveFilePath) {
File file = new File(toSaveFilePath);
try {
Writer writer = new OutputStreamWriter(new FileOutputStream(file, true), StandardCharsets.UTF_8);
StringBuilder builder = new StringBuilder();
for (int j = 0; j < bufferedImage.getHeight(); j++) {
for (int i = 0; i < bufferedImage.getWidth(); i++) {
int color = bufferedImage.getRGB(i, j);
if (color == -1) {
builder.append(" ");
} else {
builder.append("0 ");
}
}
builder.append("\r\n");
}
writer.write(builder.toString());
writer.close();
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* Extract the boundary of an image
* Works remarkably well on QR codes
*
* @param bufferedImage source image
* @return the boundary image
*/
private static BufferedImage getImageBorder(BufferedImage bufferedImage) {
BufferedImage borderImage = new BufferedImage(bufferedImage.getWidth(), bufferedImage.getHeight(), bufferedImage.getType());
//List<String> toDealPoints = new ArrayList<>();
int imgWidth = bufferedImage.getWidth();
int imgHeight = bufferedImage.getHeight();
for (int i = 1; i < imgWidth - 1; i++) {
for (int j = 1; j < imgHeight - 1; j++) {
// Current pixel
int color = bufferedImage.getRGB(i, j);
// Pixel above
int upColor = bufferedImage.getRGB(i, j - 1);
// Pixel below
int downColor = bufferedImage.getRGB(i, j + 1);
// Pixel to the left
int leftColor = bufferedImage.getRGB(i - 1, j);
// Pixel to the right
int rightColor = bufferedImage.getRGB(i + 1, j);
// If a black pixel has black pixels above, below, left and right, it is not a boundary pixel, so set it to white
if (isQualified(color, upColor, downColor, leftColor, rightColor)) {
// White
borderImage.setRGB(i, j, 0xFFFFFF);
} else {
// Keep the original color
borderImage.setRGB(i, j, color);
}
}
}
return borderImage;
}
/**
* Decide, based on the configured threshold, whether the current pixel is a boundary pixel.
* Rules:
* If the current pixel is white, it is skipped.
* If the current pixel is not white and the differences between it and its four neighbors (above, below, left, right)
* are all within the threshold, it is considered not to be a boundary pixel and true is returned; otherwise false;
*
* @param color current pixel
* @param upColor pixel above
* @param downColor pixel below
* @param leftColor pixel to the left
* @param rightColor pixel to the right
* @return whether the pixel should be set to white
*/
public static boolean isQualified(int color, int upColor, int downColor, int leftColor, int rightColor) {
// color == -1 means white; a white pixel does not need to be set to white again
return color != -1 && (Math.abs(color - upColor) < threshold
&& Math.abs(color - downColor) < threshold
&& Math.abs(color - leftColor) < threshold
&& Math.abs(color - rightColor) < threshold);
}
/**
* Decide whether a pixel is noise.
* If at least 3 of its four neighbors (above, below, left, right) are white, it is treated as noise.
* Limitation: a thin diagonal line would be removed as noise. An extra condition could be added,
* e.g. also require that 3 or more of the four diagonal neighbors are white.
*
* @param color current pixel
* @param upColor pixel above
* @param downColor pixel below
* @param leftColor pixel to the left
* @param rightColor pixel to the right
* @return whether the pixel is noise
*/
public static boolean isNoise(int color, int upColor, int downColor, int leftColor, int rightColor) {
// color == -1 means white; a white pixel does not need to be set to white again
if (color != -1) {
int whiteCount = 0;
if (upColor == -1) {
whiteCount++;
}
if (downColor == -1) {
whiteCount++;
}
if (leftColor == -1) {
whiteCount++;
}
if (rightColor == -1) {
whiteCount++;
}
return whiteCount >= 3;
}
return false;
}
/**
* Remove noise
* If a black pixel has at least three white pixels among its four neighbors
* (above, below, left, right), it is treated as noise and turned white.
*
* @param bufferedImage source image
* @return the processed image
*/
private static BufferedImage clearNoise(BufferedImage bufferedImage) {
BufferedImage noNoiseImage = new BufferedImage(bufferedImage.getWidth(), bufferedImage.getHeight(), bufferedImage.getType());
int imgWidth = bufferedImage.getWidth();
int imgHeight = bufferedImage.getHeight();
for (int i = 1; i < imgWidth - 1; i++) {
for (int j = 1; j < imgHeight - 1; j++) {
// Current pixel
int color = bufferedImage.getRGB(i, j);
// Pixel above
int upColor = bufferedImage.getRGB(i, j - 1);
// Pixel below
int downColor = bufferedImage.getRGB(i, j + 1);
// Pixel to the left
int leftColor = bufferedImage.getRGB(i - 1, j);
// Pixel to the right
int rightColor = bufferedImage.getRGB(i + 1, j);
// If a black pixel has at least three white neighbors, treat it as noise and set it to white
if (isNoise(color, upColor, downColor, leftColor, rightColor)) {
// White
noNoiseImage.setRGB(i, j, 0xFFFFFF);
} else {
// Keep the original color
noNoiseImage.setRGB(i, j, color);
}
}
}
return noNoiseImage;
}
private static final int[][] sharpen = new int[][]{{-1, -1, -1}, {-1, 9, -1}, {-1, -1, -1}};
// Assumed 3x3 all-ones erosion kernel for corrodeImage(); it is referenced there but not defined in the listing,
// inferred from the checks newPixels == 0 (all neighbors black) and newPixels == 255 * 9 (all neighbors white)
private static final int[][] corrode = new int[][]{{1, 1, 1}, {1, 1, 1}, {1, 1, 1}};
/**
* Sharpen an image
* Adding the Laplacian image back onto the original produces the sharpened image.
* Sharpening kernel actually used here (the 8-neighbor variant):
* -1 -1 -1
* -1  9 -1
* -1 -1 -1
*
* @param bufferedImage source image
* @return the resulting image
*/
private static BufferedImage sharpenImage(BufferedImage bufferedImage) {
BufferedImage image = new BufferedImage(bufferedImage.getWidth(), bufferedImage.getHeight(), bufferedImage.getType());
int imgWidth = bufferedImage.getWidth();
int imgHeight = bufferedImage.getHeight();
int[][] pixels = new int[3][3];
for (int i = 1; i < imgWidth - 1; i++) {
for (int j = 1; j < imgHeight - 1; j++) {
pixels[0][0] = bufferedImage.getRGB(i - 1, j - 1);
pixels[0][1] = bufferedImage.getRGB(i, j - 1);
pixels[0][2] = bufferedImage.getRGB(i + 1, j - 1);
pixels[1][0] = bufferedImage.getRGB(i - 1, j);
pixels[1][1] = bufferedImage.getRGB(i, j);
pixels[1][2] = bufferedImage.getRGB(i + 1, j);
pixels[2][0] = bufferedImage.getRGB(i - 1, j + 1);
pixels[2][1] = bufferedImage.getRGB(i, j + 1);
pixels[2][2] = bufferedImage.getRGB(i + 1, j + 1);
int newPixels = pixels[0][0] * sharpen[0][0] + pixels[0][1] * sharpen[0][1] + pixels[0][2] * sharpen[0][2] +
pixels[1][0] * sharpen[1][0] + pixels[1][1] * sharpen[1][1] + pixels[1][2] * sharpen[1][2] +
pixels[2][0] * sharpen[2][0] + pixels[2][1] * sharpen[2][1] + pixels[2][2] * sharpen[2][2];
image.setRGB(i, j, newPixels);
}
}
return image;
}
/**
* https://www.cnblogs.com/wangyong/p/8367623.html
*
* @param src
* @return
*/
private static BufferedImage sharpenImage2(BufferedImage src) {
// Laplacian kernel
int[] LAPLACE = new int[]{0, -1, 0, -1, 4, -1, 0, -1, 0};
int width = src.getWidth();
int height = src.getHeight();
int[] pixels = new int[width * height];
int[] outPixels = new int[width * height];
int type = src.getType();
if (type == BufferedImage.TYPE_INT_ARGB
|| type == BufferedImage.TYPE_INT_RGB) {
src.getRaster().getDataElements(0, 0, width, height, pixels);
}
src.getRGB(0, 0, width, height, pixels, 0, width);
int k0 = 0, k1 = 0, k2 = 0;
int k3 = 0, k4 = 0, k5 = 0;
int k6 = 0, k7 = 0, k8 = 0;
k0 = LAPLACE[0];
k1 = LAPLACE[1];
k2 = LAPLACE[2];
k3 = LAPLACE[3];
k4 = LAPLACE[4];
k5 = LAPLACE[5];
k6 = LAPLACE[6];
k7 = LAPLACE[7];
k8 = LAPLACE[8];
int offset = 0;
int sr = 0, sg = 0, sb = 0;
int r = 0, g = 0, b = 0;
for (int row = 1; row < height - 1; row++) {
offset = row * width;
for (int col = 1; col < width - 1; col++) {
r = (pixels[offset + col] >> 16) & 0xff;
g = (pixels[offset + col] >> 8) & 0xff;
b = (pixels[offset + col]) & 0xff;
// red
sr = k0 * ((pixels[offset - width + col - 1] >> 16) & 0xff)
+ k1 * ((pixels[offset - width + col] >> 16) & 0xff)
+ k2
* ((pixels[offset - width + col + 1] >> 16) & 0xff)
+ k3 * ((pixels[offset + col - 1] >> 16) & 0xff) + k4
* ((pixels[offset + col] >> 16) & 0xff) + k5
* ((pixels[offset + col + 1] >> 16) & 0xff) + k6
* ((pixels[offset + width + col - 1] >> 16) & 0xff)
+ k7 * ((pixels[offset + width + col] >> 16) & 0xff)
+ k8
* ((pixels[offset + width + col + 1] >> 16) & 0xff);
// green
sg = k0 * ((pixels[offset - width + col - 1] >> 8) & 0xff) + k1
* ((pixels[offset - width + col] >> 8) & 0xff) + k2
* ((pixels[offset - width + col + 1] >> 8) & 0xff) + k3
* ((pixels[offset + col - 1] >> 8) & 0xff) + k4
* ((pixels[offset + col] >> 8) & 0xff) + k5
* ((pixels[offset + col + 1] >> 8) & 0xff) + k6
* ((pixels[offset + width + col - 1] >> 8) & 0xff) + k7
* ((pixels[offset + width + col] >> 8) & 0xff) + k8
* ((pixels[offset + width + col + 1] >> 8) & 0xff);
// blue
sb = k0 * (pixels[offset - width + col - 1] & 0xff) + k1
* (pixels[offset - width + col] & 0xff) + k2
* (pixels[offset - width + col + 1] & 0xff) + k3
* (pixels[offset + col - 1] & 0xff) + k4
* (pixels[offset + col] & 0xff) + k5
* (pixels[offset + col + 1] & 0xff) + k6
* (pixels[offset + width + col - 1] & 0xff) + k7
* (pixels[offset + width + col] & 0xff) + k8
* (pixels[offset + width + col + 1] & 0xff);
// Add the convolution result back onto the original pixel value
r += sr;
g += sg;
b += sb;
outPixels[offset + col] = (0xff << 24) | (clamp(r) << 16) | (clamp(g) << 8) | clamp(b);
}
}
BufferedImage dest = new BufferedImage(width, height, src.getType());
dest.setRGB(0, 0, width, height, outPixels, 0, width);
return dest;
}
/**
* Sobel edge extraction
*
* @param src
* @return
*/
public static BufferedImage sobelProcess(BufferedImage src) {
// Sobel kernels
int[] sobel_y = new int[]{-1, -2, -1, 0, 0, 0, 1, 2, 1};
int[] sobel_x = new int[]{-1, 0, 1, -2, 0, 2, -1, 0, 1};
int width = src.getWidth();
int height = src.getHeight();
int[] pixels = new int[width * height];
int[] outPixels = new int[width * height];
src.getRGB(0, 0, width, height, pixels, 0, width);
int offset = 0;
int x0 = sobel_x[0];
int x1 = sobel_x[1];
int x2 = sobel_x[2];
int x3 = sobel_x[3];
int x4 = sobel_x[4];
int x5 = sobel_x[5];
int x6 = sobel_x[6];
int x7 = sobel_x[7];
int x8 = sobel_x[8];
int k0 = sobel_y[0];
int k1 = sobel_y[1];
int k2 = sobel_y[2];
int k3 = sobel_y[3];
int k4 = sobel_y[4];
int k5 = sobel_y[5];
int k6 = sobel_y[6];
int k7 = sobel_y[7];
int k8 = sobel_y[8];
int yr = 0, yg = 0, yb = 0;
int xr = 0, xg = 0, xb = 0;
int r = 0, g = 0, b = 0;
for (int row = 1; row < height - 1; row++) {
offset = row * width;
for (int col = 1; col < width - 1; col++) {
// red
yr = k0 * ((pixels[offset - width + col - 1] >> 16) & 0xff)
+ k1 * ((pixels[offset - width + col] >> 16) & 0xff)
+ k2
* ((pixels[offset - width + col + 1] >> 16) & 0xff)
+ k3 * ((pixels[offset + col - 1] >> 16) & 0xff) + k4
* ((pixels[offset + col] >> 16) & 0xff) + k5
* ((pixels[offset + col + 1] >> 16) & 0xff) + k6
* ((pixels[offset + width + col - 1] >> 16) & 0xff)
+ k7 * ((pixels[offset + width + col] >> 16) & 0xff)
+ k8
* ((pixels[offset + width + col + 1] >> 16) & 0xff);
xr = x0 * ((pixels[offset - width + col - 1] >> 16) & 0xff)
+ x1 * ((pixels[offset - width + col] >> 16) & 0xff)
+ x2
* ((pixels[offset - width + col + 1] >> 16) & 0xff)
+ x3 * ((pixels[offset + col - 1] >> 16) & 0xff) + x4
* ((pixels[offset + col] >> 16) & 0xff) + x5
* ((pixels[offset + col + 1] >> 16) & 0xff) + x6
* ((pixels[offset + width + col - 1] >> 16) & 0xff)
+ x7 * ((pixels[offset + width + col] >> 16) & 0xff)
+ x8
* ((pixels[offset + width + col + 1] >> 16) & 0xff);
// green
yg = k0 * ((pixels[offset - width + col - 1] >> 8) & 0xff) + k1
* ((pixels[offset - width + col] >> 8) & 0xff) + k2
* ((pixels[offset - width + col + 1] >> 8) & 0xff) + k3
* ((pixels[offset + col - 1] >> 8) & 0xff) + k4
* ((pixels[offset + col] >> 8) & 0xff) + k5
* ((pixels[offset + col + 1] >> 8) & 0xff) + k6
* ((pixels[offset + width + col - 1] >> 8) & 0xff) + k7
* ((pixels[offset + width + col] >> 8) & 0xff) + k8
* ((pixels[offset + width + col + 1] >> 8) & 0xff);
xg = x0 * ((pixels[offset - width + col - 1] >> 8) & 0xff) + x1
* ((pixels[offset - width + col] >> 8) & 0xff) + x2
* ((pixels[offset - width + col + 1] >> 8) & 0xff) + x3
* ((pixels[offset + col - 1] >> 8) & 0xff) + x4
* ((pixels[offset + col] >> 8) & 0xff) + x5
* ((pixels[offset + col + 1] >> 8) & 0xff) + x6
* ((pixels[offset + width + col - 1] >> 8) & 0xff) + x7
* ((pixels[offset + width + col] >> 8) & 0xff) + x8
* ((pixels[offset + width + col + 1] >> 8) & 0xff);
// blue
yb = k0 * (pixels[offset - width + col - 1] & 0xff) + k1
* (pixels[offset - width + col] & 0xff) + k2
* (pixels[offset - width + col + 1] & 0xff) + k3
* (pixels[offset + col - 1] & 0xff) + k4
* (pixels[offset + col] & 0xff) + k5
* (pixels[offset + col + 1] & 0xff) + k6
* (pixels[offset + width + col - 1] & 0xff) + k7
* (pixels[offset + width + col] & 0xff) + k8
* (pixels[offset + width + col + 1] & 0xff);
xb = x0 * (pixels[offset - width + col - 1] & 0xff) + x1
* (pixels[offset - width + col] & 0xff) + x2
* (pixels[offset - width + col + 1] & 0xff) + x3
* (pixels[offset + col - 1] & 0xff) + x4
* (pixels[offset + col] & 0xff) + x5
* (pixels[offset + col + 1] & 0xff) + x6
* (pixels[offset + width + col - 1] & 0xff) + x7
* (pixels[offset + width + col] & 0xff) + x8
* (pixels[offset + width + col + 1] & 0xff);
// Sobel gradient magnitude
r = (int) Math.sqrt(yr * yr + xr * xr);
g = (int) Math.sqrt(yg * yg + xg * xg);
b = (int) Math.sqrt(yb * yb + xb * xb);
outPixels[offset + col] = (0xff << 24) | (clamp(r) << 16)
| (clamp(g) << 8) | clamp(b);
}
}
BufferedImage dest = new BufferedImage(width, height,
src.getType());
dest.setRGB(0, 0, width, height, outPixels, 0, width);
return dest;
}
/**
* Blur (mean filter)
* The blurred value of a pixel is the sum of all pixels in the 3x3 neighborhood centered on it, divided by 9.
* Doing the same for every pixel yields the blurred image.
*
* @param src source image
* @return the blurred image
*/
private static BufferedImage meanValueProcess(BufferedImage src) {
int width = src.getWidth();
int height = src.getHeight();
int[] pixels = new int[width * height];
int[] outPixels = new int[width * height];
src.getRGB(0, 0, width, height, pixels, 0, width);
// Radius of the mean-filter kernel; radius 1 gives a 3x3 neighborhood
int radius = 1;
int total = (2 * radius + 1) * (2 * radius + 1);
int r = 0, g = 0, b = 0;
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
int rSum = 0;
int gSum = 0;
int bSum = 0;
for (int i = -radius; i <= radius; i++) {
int roffset = row + i;
roffset = (roffset < 0) ? 0 : (roffset >= height ? height - 1 : roffset);
for (int j = -radius; j <= radius; j++) {
int coffset = col + j;
coffset = (coffset < 0) ? 0 : (coffset >= width ? width - 1 : coffset);
int pixel = pixels[roffset * width + coffset];
r = (pixel >> 16) & 0XFF;
g = (pixel >> 8) & 0xff;
b = pixel & 0xff;
rSum += r;
gSum += g;
bSum += b;
}
}
r = rSum / total;
g = gSum / total;
b = bSum / total;
outPixels[row * width + col] = (255 << 24) | (clamp(r) << 16) | (clamp(g) << 8) | clamp(b);
}
}
BufferedImage dest = new BufferedImage(width, height, src.getType());
dest.setRGB(0, 0, width, height, outPixels, 0, width);
return dest;
}
/**
* Negative effect
* Subtract each channel value from 255 to get the negative.
*
* @param src source image
* @return the processed image
*/
public static BufferedImage negativeProcess(BufferedImage src) {
int width = src.getWidth();
int height = src.getHeight();
int[] pixels = new int[width * height];
int[] outPixels = new int[width * height];
src.getRGB(0, 0, width, height, pixels, 0, width);
int offset = 0;
for (int row = 1; row < height - 1; row++) {
offset = row * width;
for (int col = 1; col < width - 1; col++) {
int r = (pixels[offset + col] >> 16) & 0xff;
int g = (pixels[offset + col] >> 8) & 0xff;
int b = (pixels[offset + col]) & 0xff;
// red
r = 255 - r;
g = 255 - g;
b = 255 - b;
outPixels[offset + col] = (0xff << 24) | (clamp(r) << 16)
| (clamp(g) << 8) | clamp(b);
}
}
BufferedImage dest = new BufferedImage(width, height, src.getType());
dest.setRGB(0, 0, width, height, outPixels, 0, width);
return dest;
}
/**
* Logistic curve
* S-curve contrast enhancement
* Effect: light colors become lighter, dark colors become darker
* Drawback: this curve cannot apply only a slight boost; a different function is needed for that
*
* @param src source image
* @return the processed image
*/
public static BufferedImage curveProcess(BufferedImage src) {
int width = src.getWidth();
int height = src.getHeight();
int[] pixels = new int[width * height];
int[] outPixels = new int[width * height];
src.getRGB(0, 0, width, height, pixels, 0, width);
int offset = 0;
for (int row = 1; row < height - 1; row++) {
offset = row * width;
for (int col = 1; col < width - 1; col++) {
int r = (pixels[offset + col] >> 16) & 0xff;
int g = (pixels[offset + col] >> 8) & 0xff;
int b = (pixels[offset + col]) & 0xff;
// red
r = getSCurve(r);
g = getSCurve(g);
b = getSCurve(b);
outPixels[offset + col] = (0xff << 24) | (clamp(r) << 16) | (clamp(g) << 8) | clamp(b);
}
}
BufferedImage dest = new BufferedImage(width, height, src.getType());
dest.setRGB(0, 0, width, height, outPixels, 0, width);
return dest;
}
/**
* USM (unsharp mask) sharpening
* Idea: blur the image with a mean filter, then add the difference between the original and the blurred image back onto the original; see the link below for details.
* Parameters (as in Photoshop):
* Amount: the sharpening strength.
* Radius: the scale of the detail to be emphasized.
* Threshold: in Photoshop, sharpening is skipped when the local contrast is below this value.
* USM explained: https://baijiahao.baidu.com/s?id=1651538317157891526&wfr=spider&for=pc
*
* @param src source image
* @return the processed image
*/
private static BufferedImage USMSharpen(BufferedImage src) {
int width = src.getWidth();
int height = src.getHeight();
int[] pixels = new int[width * height];
int[] outPixels = new int[width * height];
src.getRGB(0, 0, width, height, pixels, 0, width);
// Radius of the mean-filter kernel; a 5x5 mean is used here, so the radius is 2
int radius = 2;
int total = (2 * radius + 1) * (2 * radius + 1);
int r = 0, g = 0, b = 0;
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
int rSum = 0;
int gSum = 0;
int bSum = 0;
for (int i = -radius; i <= radius; i++) {
int roffset = row + i;
roffset = (roffset < 0) ? 0 : (roffset >= height ? height - 1 : roffset);
for (int j = -radius; j <= radius; j++) {
int coffset = col + j;
coffset = (coffset < 0) ? 0 : (coffset >= width ? width - 1 : coffset);
int pixel = pixels[roffset * width + coffset];
r = (pixel >> 16) & 0XFF;
g = (pixel >> 8) & 0xff;
b = pixel & 0xff;
rSum += r;
gSum += g;
bSum += b;
}
}
r = 2 * r - rSum / total;
g = 2 * g - gSum / total;
b = 2 * b - bSum / total;
outPixels[row * width + col] = (255 << 24) | (clamp(r) << 16) | (clamp(g) << 8) | clamp(b);
}
}
BufferedImage dest = new BufferedImage(width, height, src.getType());
dest.setRGB(0, 0, width, height, outPixels, 0, width);
return dest;
}
/**
* Diffusion filter (anisotropic)
* Based on a fluid analogy: pixel values only "flow" inside the regions enclosed by edges and never cross an edge,
* so the image is blurred while its boundaries are preserved.
* Well suited to smoothing blemishes on faces; works on JPG images, PNG images can be problematic.
* References: https://blog.csdn.net/qq_38784098/article/details/81605963
* https://blog.csdn.net/jia20003/article/details/78415384
*
* @param srcImage source JPG image
* @return the processed image
*/
public static BufferedImage anisotropyImage(BufferedImage srcImage) {
BufferedImage anisotropyImage = new BufferedImage(srcImage.getWidth(), srcImage.getHeight(), srcImage.getType());
// The formula needs three parameters chosen in advance: the number of iterations t (usually around 20),
// the conductivity-related constant k (larger k means smoother output and weaker edge preservation),
// and lambda (larger lambda also means smoother output).
// Idea: take the 8 neighbors of the current pixel, compute the differences of their red components,
// and feed those differences into the formula to obtain the final correction.
// Larger k preserves edges less
double k = 10;
// Larger lambda smooths more
double lambda = 0.25;
int width = srcImage.getWidth();
int height = srcImage.getHeight();
// Gradients toward the four direct neighbors
double rn = 0, rs = 0, re = 0, rw = 0;
// ... and toward the four diagonal neighbors
double rn2 = 0, rs2 = 0, re2 = 0, rw2 = 0;
// Coefficients for the four direct neighbors
double rnc = 0, rsc = 0, rec = 0, rwc = 0;
// ... and for the four diagonal neighbors
double rnc2 = 0, rsc2 = 0, rec2 = 0, rwc2 = 0;
double k2 = k * k;
for (int row = 1; row < width - 1; row++) {
for (int col = 1; col < height - 1; col++) {
// Current pixel
int color = srcImage.getRGB(row, col);
// Pixel above
int upColor = srcImage.getRGB(row - 1, col);
// Pixel below
int downColor = srcImage.getRGB(row + 1, col);
// Pixel to the left
int leftColor = srcImage.getRGB(row, col - 1);
// Pixel to the right
int rightColor = srcImage.getRGB(row, col + 1);
// Upper-left pixel
int upColor2 = srcImage.getRGB(row - 1, col - 1);
// Upper-right pixel
int downColor2 = srcImage.getRGB(row - 1, col + 1);
// Lower-left pixel
int leftColor2 = srcImage.getRGB(row + 1, col - 1);
// Lower-right pixel
int rightColor2 = srcImage.getRGB(row + 1, col + 1);
int r = (color >> 16) & 0xff;
int g = (color >> 8) & 0xff;
int b = color & 0xff;
// Red component
rn = ((upColor >> 16) & 0xff) - r;
rs = ((downColor >> 16) & 0xff) - r;
re = ((leftColor >> 16) & 0xff) - r;
rw = ((rightColor >> 16) & 0xff) - r;
rnc = Math.exp(-rn * rn / k2);
rsc = Math.exp(-rs * rs / k2);
rec = Math.exp(-re * re / k2);
rwc = Math.exp(-rw * rw / k2);
rn2 = ((upColor2 >> 16) & 0xff) - r;
rs2 = ((downColor2 >> 16) & 0xff) - r;
re2 = ((leftColor2 >> 16) & 0xff) - r;
rw2 = ((rightColor2 >> 16) & 0xff) - r;
rnc2 = Math.exp(-rn2 * rn2 / k2);
rsc2 = Math.exp(-rs2 * rs2 / k2);
rec2 = Math.exp(-re2 * re2 / k2);
rwc2 = Math.exp(-rw2 * rw2 / k2);
// Final result
int fr = r + (int) (lambda * (rn * rnc + rs * rsc + re * rec + rw * rwc + rn2 * rnc2 + rs2 * rsc2 + re2 * rec2 + rw2 * rwc2));
// The green and blue components change almost exactly like the red one, so the computation is not repeated for them
int fg = g + fr - r;
int fb = b + fr - r;
int rgb = (clamp(fr) << 16) | (clamp(fg) << 8) | clamp(fb);
anisotropyImage.setRGB(row, col, rgb);
}
}
return anisotropyImage;
}
/**
* Clamp a pixel component to the 0-255 range
*
* @param value input value
* @return clamped value
*/
private static int clamp(int value) {
return value > 255 ? 255 : (Math.max(value, 0));
}
/**
* S-shaped curve for boosting contrast; an inverse S-curve does the opposite.
* Function: y = 85*3/(1+100^(1.5-x/85));
* Meaning: x is the input value, y is the output value
* Range: 0 <= x <= 255, 0 <= y <= 255
*
* @param x input, 0-255
* @return output, 0-255
* Parameter tuning: https://zh.numberempire.com/graphingcalculator.php?functions=85*3%2F(1%2B100%5E(1.5-x%2F85))&xmin=-126.649422&xmax=381.57014&ymin=-40.111112&ymax=298.702136&var=x
* Curve reference: https://blog.csdn.net/c80486/article/details/52499919
* Function reference: https://blog.csdn.net/chengke1866/article/details/100695604
* Online function plotting: https://zh.numberempire.com/graphingcalculator.php?functions=85*3%2F(1%2B100%5E(1.5-x%2F85))&xmin=-126.649422&xmax=381.57014&ymin=-40.111112&ymax=298.702136&var=x
* https://zh.numberempire.com/graphingcalculator.php?functions=6%2F(1%2B3%5E(3.5-x))&xmin=-1.428779&xmax=13.935977&ymin=-1.186938&ymax=9.509463&var=x
*
* Gentler deepening function (the one used below): 255/(1+e^(5-x*2/51))
* Derived from f(x) = 1/(1+e^-x)
*/
private static int getSCurve(int x) {
//double j = 255 / (1 + Math.pow(100, 1.5 - x / 85.0));
double j = 255 / (1 + Math.exp(5 - x * 2/ 51.0));
return (int) Math.round(j);
}
/**
* Process a 3x3 pixel matrix and compute the value of its center pixel
* @param pixelMatrix input pixel matrix
* @return the resulting center pixel
*/
public static int dealWithPixel(int[] pixelMatrix) {
double lambda = 1;
int k = 5 * 5;
// Red component
int rn = ((pixelMatrix[1] >> 16) & 0xff) - ((pixelMatrix[4] >> 16) & 0xff);
int rs = ((pixelMatrix[7] >> 16) & 0xff) - ((pixelMatrix[4] >> 16) & 0xff);
int re = ((pixelMatrix[3] >> 16) & 0xff) - ((pixelMatrix[4] >> 16) & 0xff);
int rw = ((pixelMatrix[5] >> 16) & 0xff) - ((pixelMatrix[4] >> 16) & 0xff);
// Green component
int gn = ((pixelMatrix[1] >> 8) & 0xff) - ((pixelMatrix[4] >> 8) & 0xff);
int gs = ((pixelMatrix[7] >> 8) & 0xff) - ((pixelMatrix[4] >> 8) & 0xff);
int ge = ((pixelMatrix[3] >> 8) & 0xff) - ((pixelMatrix[4] >> 8) & 0xff);
int gw = ((pixelMatrix[5] >> 8) & 0xff) - ((pixelMatrix[4] >> 8) & 0xff);
// Blue component
int bn = (pixelMatrix[1] & 0xff) - (pixelMatrix[4] & 0xff);
int bs = (pixelMatrix[7] & 0xff) - (pixelMatrix[4] & 0xff);
int be = (pixelMatrix[3] & 0xff) - (pixelMatrix[4] & 0xff);
int bw = (pixelMatrix[5] & 0xff) - (pixelMatrix[4] & 0xff);
int rDifference = calculateAnisotropy(rn, rs, re, rw, lambda, k);
int gDifference = calculateAnisotropy(gn, gs, ge, gw, lambda, k);
int bDifference = calculateAnisotropy(bn, bs, be, bw, lambda, k);
int fr = (pixelMatrix[4] >> 16) & 0xff;
int fg = (pixelMatrix[4] >> 8) & 0xff;
int fb = pixelMatrix[4] & 0xff;
/*if (rDifference != 0 && gDifference != 0 && bDifference != 0) {
fr = ((pixelMatrix[4] >> 16) & 0xff) + rDifference;
fg = ((pixelMatrix[4] >> 8) & 0xff) + gDifference;
fb = (pixelMatrix[4] & 0xff) + bDifference;
}*/
/* if (rDifference != 0 && gDifference != 0 && bDifference != 0) {
// 30k 29k 35k 10k 70k/550k
System.out.println("didi");
}*/
/*int fr = (pixelMatrix[4] >> 16) & 0xff;
int fg = (pixelMatrix[4] >> 8) & 0xff;
int fb = pixelMatrix[4] & 0xff;
// 0 means the pixel is on a boundary or has the same color as its neighbors
// Non-zero means the pixel differs from its neighbors by more than the configured threshold
if (rDifference != 0) {
// Blur with a mean filter
fr = (((pixelMatrix[0] >> 16) & 0xFF) +
((pixelMatrix[1] >> 16) & 0xFF) +
((pixelMatrix[2] >> 16) & 0xFF) +
((pixelMatrix[3] >> 16) & 0xFF) +
((pixelMatrix[4] >> 16) & 0xFF) +
((pixelMatrix[5] >> 16) & 0xFF) +
((pixelMatrix[6] >> 16) & 0xFF) +
((pixelMatrix[7] >> 16) & 0xFF) +
((pixelMatrix[8] >> 16) & 0xFF))/9;
}
else {
//fr = 255;
}
if (gDifference != 0) {
fg = (((pixelMatrix[0] >> 8) & 0xFF) +
((pixelMatrix[1] >> 8) & 0xFF) +
((pixelMatrix[2] >> 8) & 0xFF) +
((pixelMatrix[3] >> 8) & 0xFF) +
((pixelMatrix[4] >> 8) & 0xFF) +
((pixelMatrix[5] >> 8) & 0xFF) +
((pixelMatrix[6] >> 8) & 0xFF) +
((pixelMatrix[7] >> 8) & 0xFF) +
((pixelMatrix[8] >> 8) & 0xFF))/9;
}
else {
//fg = 255;
}
if (bDifference != 0) {
fb = (
((pixelMatrix[0]) & 0xFF) +
((pixelMatrix[1]) & 0xFF) +
((pixelMatrix[2]) & 0xFF) +
((pixelMatrix[3]) & 0xFF) +
((pixelMatrix[4]) & 0xFF) +
((pixelMatrix[5]) & 0xFF) +
((pixelMatrix[6]) & 0xFF) +
((pixelMatrix[7]) & 0xFF) +
((pixelMatrix[8]) & 0xFF))/9;
}
else {
//fb = 255;
}*/
fr = (((pixelMatrix[0] >> 16) & 0xFF) +
((pixelMatrix[1] >> 16) & 0xFF) +
((pixelMatrix[2] >> 16) & 0xFF) +
((pixelMatrix[3] >> 16) & 0xFF) +
((pixelMatrix[4] >> 16) & 0xFF) +
((pixelMatrix[5] >> 16) & 0xFF) +
((pixelMatrix[6] >> 16) & 0xFF) +
((pixelMatrix[7] >> 16) & 0xFF) +
((pixelMatrix[8] >> 16) & 0xFF))/9;
fg = (((pixelMatrix[0] >> 8) & 0xFF) +
((pixelMatrix[1] >> 8) & 0xFF) +
((pixelMatrix[2] >> 8) & 0xFF) +
((pixelMatrix[3] >> 8) & 0xFF) +
((pixelMatrix[4] >> 8) & 0xFF) +
((pixelMatrix[5] >> 8) & 0xFF) +
((pixelMatrix[6] >> 8) & 0xFF) +
((pixelMatrix[7] >> 8) & 0xFF) +
((pixelMatrix[8] >> 8) & 0xFF))/9;
fb = (
((pixelMatrix[0]) & 0xFF) +
((pixelMatrix[1]) & 0xFF) +
((pixelMatrix[2]) & 0xFF) +
((pixelMatrix[3]) & 0xFF) +
((pixelMatrix[4]) & 0xFF) +
((pixelMatrix[5]) & 0xFF) +
((pixelMatrix[6]) & 0xFF) +
((pixelMatrix[7]) & 0xFF) +
((pixelMatrix[8]) & 0xFF))/9;
return (clamp(fr) << 16) | (clamp(fg) << 8) | clamp(fb);
}
private static int calculateAnisotropy(int rn, int rs, int re, int rw, double lambda, int k) {
//x*e^(-x*x/400)
return (int) (lambda * (rn * Math.exp(-rn * rn / k) + rs * Math.exp(-rs * rs / k) +
re * Math.exp(-re * re / k) + rw * Math.exp(-rw * rw / k)));
}
/**
* Histogram equalization
* Works well for images that are too bright or too dark (e.g. it can reveal the exhaust trail of a fighter jet)
* Histogram equalization (gray-level equalization) maps the gray levels so that the output image has roughly the same number of pixels at every gray level (i.e. a flat histogram).
* After equalization the pixels occupy as many gray levels as possible and are evenly distributed, so the image has higher contrast and a larger dynamic range.
* @param bufferedImage grayscale image
* @return the processed image
*/
private static BufferedImage histeq(BufferedImage bufferedImage) {
BufferedImage grayImage = new BufferedImage(bufferedImage.getWidth(), bufferedImage.getHeight(), bufferedImage.getType());
// Count the number of pixels at each gray level
int[] pdHist = new int[256];
// Total number of pixels
int total = bufferedImage.getWidth() * bufferedImage.getHeight();
for (int i = 0; i < bufferedImage.getWidth(); i++) {
for (int j = 0; j < bufferedImage.getHeight(); j++) {
// According to the Javadoc, getRGB() returns a 32-bit int in ARGB format, 8 bits per component
int color = bufferedImage.getRGB(i, j);
int b = color & 0xff;
pdHist[b] += 1;
}
}
// Compute the mapped gray value from the cumulative histogram
for (int i = 0; i < bufferedImage.getWidth(); i++) {
for (int j = 0; j < bufferedImage.getHeight(); j++) {
int color = bufferedImage.getRGB(i, j);
int b = color & 0xff;
// Accumulator
int temp = 0;
for (int k = 0; k < b; k++) {
temp += pdHist[k];
}
int result = 255 * temp / total;
if (result > 255) {
result = 255;
}
if (result < 0) {
result = 0;
}
int rgb = (clamp(result) << 16) | (clamp(result) << 8) | clamp(result);
grayImage.setRGB(i, j, rgb);
}
}
return grayImage;
}
/**
* Binary erosion
* Used to remove noise
* @param bufferedImage binarized image
* @param type 0 erodes black, 1 erodes white
* @return the eroded image
*/
private static BufferedImage corrodeImage(BufferedImage bufferedImage, int type) {
BufferedImage image = new BufferedImage(bufferedImage.getWidth(), bufferedImage.getHeight(), bufferedImage.getType());
int imgWidth = bufferedImage.getWidth();
int imgHeight = bufferedImage.getHeight();
int[][] pixels = new int[3][3];
for (int i = 1; i < imgWidth - 1; i++) {
for (int j = 1; j < imgHeight - 1; j++) {
pixels[0][0] = bufferedImage.getRGB(i - 1, j - 1) & 0xff;
pixels[0][1] = bufferedImage.getRGB(i, j - 1) & 0xff;
pixels[0][2] = bufferedImage.getRGB(i + 1, j - 1) & 0xff;
pixels[1][0] = bufferedImage.getRGB(i - 1, j) & 0xff;
pixels[1][1] = bufferedImage.getRGB(i, j) & 0xff;
pixels[1][2] = bufferedImage.getRGB(i + 1, j) & 0xff;
pixels[2][0] = bufferedImage.getRGB(i - 1, j + 1) & 0xff;
pixels[2][1] = bufferedImage.getRGB(i, j + 1) & 0xff;
pixels[2][2] = bufferedImage.getRGB(i + 1, j + 1) & 0xff;
int newPixels = pixels[0][0] * corrode[0][0] + pixels[0][1] * corrode[0][1] + pixels[0][2] * corrode[0][2] +
pixels[1][0] * corrode[1][0] + pixels[1][1] * corrode[1][1] + pixels[1][2] * corrode[1][2] +
pixels[2][0] * corrode[2][0] + pixels[2][1] * corrode[2][1] + pixels[2][2] * corrode[2][2];
if (type == 0) {
// Erode black
if (newPixels == 0) {
// Black
image.setRGB(i, j, 0);
} else {
// White
image.setRGB(i, j, 0xFFFFFF);
}
} else if (type == 1) {
// Erode white
if (newPixels == 255 * 9) {
// White
image.setRGB(i, j, 0xFFFFFF);
} else {
// Black
image.setRGB(i, j, 0);
}
}
}
}
return image;
}
/**
* Binary dilation
* Expands each pixel into a 3x3 block of the same color
* Used to thicken image features
* @param bufferedImage binarized image
* @param type 0 dilates black, 1 dilates white
* @return the dilated image
*/
private static BufferedImage expandImage(BufferedImage bufferedImage, int type) {
BufferedImage image = new BufferedImage(bufferedImage.getWidth(), bufferedImage.getHeight(), bufferedImage.getType());
int imgWidth = bufferedImage.getWidth();
int imgHeight = bufferedImage.getHeight();
for (int i = 1; i < imgWidth - 1; i++) {
for (int j = 1; j < imgHeight - 1; j++) {
int color = bufferedImage.getRGB(i, j) & 0xff;
if (type == 0) {
// Dilate black
if (color == 0) {
image.setRGB(i - 1, j - 1, 0);
image.setRGB(i, j - 1, 0);
image.setRGB(i + 1, j - 1, 0);
image.setRGB(i - 1, j, 0);
image.setRGB(i, j, 0);
image.setRGB(i + 1, j, 0);
image.setRGB(i - 1, j + 1, 0);
image.setRGB(i, j + 1, 0);
image.setRGB(i + 1, j + 1, 0);
} else {
image.setRGB(i, j, 0xFFFFFF);
}
} else if (type == 1) {
// Dilate white
if (color == 255) {
image.setRGB(i - 1, j - 1, 0xFFFFFF);
image.setRGB(i, j - 1, 0xFFFFFF);
image.setRGB(i + 1, j - 1, 0xFFFFFF);
image.setRGB(i - 1, j, 0xFFFFFF);
image.setRGB(i, j, 0xFFFFFF);
image.setRGB(i + 1, j, 0xFFFFFF);
image.setRGB(i - 1, j + 1, 0xFFFFFF);
image.setRGB(i, j + 1, 0xFFFFFF);
image.setRGB(i + 1, j + 1, 0xFFFFFF);
} else {
image.setRGB(i, j, 0);
}
}
}
}
return image;
}
/**
* Grayscale erosion
* Used to remove noise
* @param bufferedImage grayscale image
* @param base offset; the larger the value, the darker the image
* @return the eroded image
*/
private static BufferedImage grayCorrodeImage(BufferedImage bufferedImage, int base) {
BufferedImage image = new BufferedImage(bufferedImage.getWidth(), bufferedImage.getHeight(), bufferedImage.getType());
int imgWidth = bufferedImage.getWidth();
int imgHeight = bufferedImage.getHeight();
int[][] pixels = new int[3][3];
for (int i = 1; i < imgWidth - 1; i++) {
for (int j = 1; j < imgHeight - 1; j++) {
pixels[0][0] = bufferedImage.getRGB(i - 1, j - 1) & 0xff;
pixels[0][1] = bufferedImage.getRGB(i, j - 1) & 0xff;
pixels[0][2] = bufferedImage.getRGB(i + 1, j - 1) & 0xff;
pixels[1][0] = bufferedImage.getRGB(i - 1, j) & 0xff;
pixels[1][1] = bufferedImage.getRGB(i, j) & 0xff;
pixels[1][2] = bufferedImage.getRGB(i + 1, j) & 0xff;
pixels[2][0] = bufferedImage.getRGB(i - 1, j + 1) & 0xff;
pixels[2][1] = bufferedImage.getRGB(i, j + 1) & 0xff;
pixels[2][2] = bufferedImage.getRGB(i + 1, j + 1) & 0xff;
int minValue = 255;
for (int m = 0; m < pixels.length; m++) {
for (int n = 0; n < pixels.length; n++) {
if (pixels[m][n] < minValue) {
minValue = pixels[m][n];
}
}
}
int result = minValue - base;
if (result < 0) {
result = 0;
}
int rgb = (clamp(result) << 16) | (clamp(result) << 8) | clamp(result);
image.setRGB(i, j, rgb);
}
}
return image;
}
/**
* Grayscale dilation
* Takes the maximum gray value in the 3x3 neighborhood and adds the base value
* Used to thicken image features
* @param bufferedImage grayscale image
* @param base offset; the larger the value, the brighter the image
* @return the dilated image
*/
private static BufferedImage grayExpandImage(BufferedImage bufferedImage, int base) {
BufferedImage image = new BufferedImage(bufferedImage.getWidth(), bufferedImage.getHeight(), bufferedImage.getType());
int imgWidth = bufferedImage.getWidth();
int imgHeight = bufferedImage.getHeight();
int[][] pixels = new int[3][3];
for (int i = 1; i < imgWidth - 1; i++) {
for (int j = 1; j < imgHeight - 1; j++) {
pixels[0][0] = bufferedImage.getRGB(i - 1, j - 1) & 0xff;
pixels[0][1] = bufferedImage.getRGB(i, j - 1) & 0xff;
pixels[0][2] = bufferedImage.getRGB(i + 1, j - 1) & 0xff;
pixels[1][0] = bufferedImage.getRGB(i - 1, j) & 0xff;
pixels[1][1] = bufferedImage.getRGB(i, j) & 0xff;
pixels[1][2] = bufferedImage.getRGB(i + 1, j) & 0xff;
pixels[2][0] = bufferedImage.getRGB(i - 1, j + 1) & 0xff;
pixels[2][1] = bufferedImage.getRGB(i, j + 1) & 0xff;
pixels[2][2] = bufferedImage.getRGB(i + 1, j + 1) & 0xff;
int maxValue = 0;
for (int m = 0; m < pixels.length; m++) {
for (int n = 0; n < pixels.length; n++) {
if (pixels[m][n] > maxValue) {
maxValue = pixels[m][n];
}
}
}
int result = maxValue + base;
if (result > 255) {
result = 255;
}
int rgb = (clamp(result) << 16) | (clamp(result) << 8) | clamp(result);
image.setRGB(i, j, rgb);
}
}
return image;
}
/**
* Top-hat transform
* Fairly limited and in need of tuning. Suited to high-contrast images with uneven brightness, such as night scenes
* @param bufferedImage grayscale image
* @param base offset; the larger the value, the brighter the image
* @return the transformed image
*/
private static BufferedImage topHatImage(BufferedImage bufferedImage, int base) {
BufferedImage image = new BufferedImage(bufferedImage.getWidth(), bufferedImage.getHeight(), bufferedImage.getType());
int imgWidth = bufferedImage.getWidth();
int imgHeight = bufferedImage.getHeight();
BufferedImage corrodeImage = grayCorrodeImage(bufferedImage, base);
BufferedImage expandImage = grayExpandImage(corrodeImage, base);
for (int i = 1; i < imgWidth - 1; i++) {
for (int j = 1; j < imgHeight - 1; j++) {
int color = bufferedImage.getRGB(i, j) & 0xff;
int openColor = expandImage.getRGB(i, j) & 0xff;
// The top-hat transform is the original image minus its morphological opening
int result = color - openColor;
if (result < 0) {
result = 0;
}
// The result is too dark, so brighten it
result += 100;
if (result > 255) {
result = 255;
}
int rgb = (clamp(result) << 16) | (clamp(result) << 8) | clamp(result);
image.setRGB(i, j, rgb);
}
}
return image;
}
/**
* Surface blur
* Can be applied repeatedly with a gradually decreasing radius
* Blurs the surface of the image while preserving its edges; more effective than Gaussian blur for smoothing skin.
* Drawback: every pixel has its own convolution matrix, so it is more complex and slower than other convolutions
* Reference: https://blog.csdn.net/matrix_space/article/details/52818857
* @param src source image
* @param radius blur radius; values above 3 give a clearly visible effect
* @return the blurred image
*/
private static BufferedImage surfaceBlur(BufferedImage src, int radius) {
int width = src.getWidth();
int height = src.getHeight();
int[] pixels = new int[width * height];
int[] outPixels = new int[width * height];
src.getRGB(0, 0, width, height, pixels, 0, width);
// The neighborhood used below is (2*radius+1) x (2*radius+1)
int br = 0, bg = 0, bb = 0;
int r = 0, g = 0, b = 0;
double rw = 0, gw = 0, bw = 0;
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
int color = pixels[row * width + col];
br = (color >> 16) & 0XFF;
bg = (color >> 8) & 0xff;
bb = color & 0xff;
// Compute the r, g, b weights for each neighboring pixel
double rValue = 0;
double gValue = 0;
double bValue = 0;
double rTotalWeight = 0;
double gTotalWeight = 0;
double bTotalWeight = 0;
for (int i = -radius; i <= radius; i++) {
int roffset = row + i;
roffset = (roffset < 0) ? 0 : (roffset >= height ? height - 1 : roffset);
for (int j = -radius; j <= radius; j++) {
int coffset = col + j;
coffset = (coffset < 0) ? 0 : (coffset >= width ? width - 1 : coffset);
int pixel = pixels[roffset * width + coffset];
r = (pixel >> 16) & 0XFF;
g = (pixel >> 8) & 0xff;
b = pixel & 0xff;
rw = getSurfaceBlurWeight(r,br);
gw = getSurfaceBlurWeight(g,bg);
bw = getSurfaceBlurWeight(b,bb);
rTotalWeight += rw;
gTotalWeight += gw;
bTotalWeight += bw;
rValue += rw * r;
gValue += gw * g;
bValue += bw * b;
}
}
r = (int)(rValue / rTotalWeight);
g = (int)(gValue / gTotalWeight);
b = (int)(bValue / bTotalWeight);
outPixels[row * width + col] = (255 << 24) | (clamp(r) << 16) | (clamp(g) << 8) | clamp(b);
}
}
BufferedImage dest = new BufferedImage(width, height, src.getType());
dest.setRGB(0, 0, width, height, outPixels, 0, width);
return dest;
}
/**
* Linear light blend
* Formula: base + 2 * blend - 255 = result.
* When the blend color is the inverse of the base color: base + 2*(255 - base) - 255 = 255 - base = blend = result.
* (255 - blend) + 2*blend - 255 = blend = result.
* The "linear light" mode normally distinguishes values above and below level 128, but here it collapses into a single formula.
*
* Reference: https://baike.baidu.com/item/%E7%BA%BF%E6%80%A7%E5%85%89/18609981?fr=aladdin
* Cartoon effect in Photoshop: https://jingyan.baidu.com/article/454316ab85c8acb7a7c03ac8.html
* @param baseImage base color, e.g. the original image
* @param mixImage blend color, e.g. the high-pass image
* @return the blended image
*/
private static BufferedImage linearLighten(BufferedImage baseImage, BufferedImage mixImage) {
BufferedImage bufferedImage = new BufferedImage(baseImage.getWidth(), baseImage.getHeight(), baseImage.getType());
int imgWidth = bufferedImage.getWidth();
int imgHeight = bufferedImage.getHeight();
int r=0,g=0,b=0;
int gr=0,gg=0,gb=0;
for (int i = 1; i < imgWidth - 1; i++) {
for (int j = 1; j < imgHeight - 1; j++) {
int color = baseImage.getRGB(i, j);
int gColor = mixImage.getRGB(i, j);
r = (color >> 16) & 0XFF;
g = (color >> 8) & 0xff;
b = color & 0xff;
gr = (gColor >> 16) & 0XFF;
gg = (gColor >> 8) & 0xff;
gb = gColor & 0xff;
int rgb = (clamp(r+2*gr-255) << 16) | (clamp(g+2*gg-255) << 8) | clamp(b+2*gb-255);
bufferedImage.setRGB(i, j, rgb);
}
}
return bufferedImage;
}
/**
* High pass (high-contrast preserve)
* High pass = original image - Gaussian-blurred image + 127
* @param srcImage source image
* @param radius Gaussian blur radius
* @return the high-pass image
*/
private static BufferedImage contrastReserve(BufferedImage srcImage, int radius) {
BufferedImage bufferedImage = new BufferedImage(srcImage.getWidth(), srcImage.getHeight(), srcImage.getType());
BufferedImage gaussianImage = gaussianProcess(srcImage, radius);
int imgWidth = bufferedImage.getWidth();
int imgHeight = bufferedImage.getHeight();
int r=0,g=0,b=0;
int gr=0,gg=0,gb=0;
for (int i = 1; i < imgWidth - 1; i++) {
for (int j = 1; j < imgHeight - 1; j++) {
int color = srcImage.getRGB(i, j);
int gColor = gaussianImage.getRGB(i, j);
r = (color >> 16) & 0XFF;
g = (color >> 8) & 0xff;
b = color & 0xff;
gr = (gColor >> 16) & 0XFF;
gg = (gColor >> 8) & 0xff;
gb = gColor & 0xff;
int rgb = (clamp(r-gr+127) << 16) | (clamp(g-gg+127) << 8) | clamp(b-gb+127);
bufferedImage.setRGB(i, j, rgb);
}
}
return bufferedImage;
}
/**
* Gaussian blur
* Reference: https://www.cnblogs.com/invisible2/p/9177018.html
* @param src source image
* @param radius blur radius
* @return the blurred image
*/
private static BufferedImage gaussianProcess(BufferedImage src, int radius) {
int width = src.getWidth();
int height = src.getHeight();
int[] pixels = new int[width * height];
int[] outPixels = new int[width * height];
src.getRGB(0, 0, width, height, pixels, 0, width);
// The kernel used below is (2*radius+1) x (2*radius+1)
int r = 0, g = 0, b = 0;
// Gaussian weight for each position in the kernel
double[][] weights = getGaussianWeight(radius);
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
int rSum = 0;
int gSum = 0;
int bSum = 0;
for (int i = -radius; i <= radius; i++) {
int roffset = row + i;
roffset = (roffset < 0) ? 0 : (roffset >= height ? height - 1 : roffset);
for (int j = -radius; j <= radius; j++) {
int coffset = col + j;
coffset = (coffset < 0) ? 0 : (coffset >= width ? width - 1 : coffset);
int pixel = pixels[roffset * width + coffset];
r = (pixel >> 16) & 0XFF;
g = (pixel >> 8) & 0xff;
b = pixel & 0xff;
rSum += r * weights[i + radius][j + radius];
gSum += g * weights[i + radius][j + radius];
bSum += b * weights[i + radius][j + radius];
}
}
r = rSum;
g = gSum;
b = bSum;
outPixels[row * width + col] = (255 << 24) | (clamp(r) << 16) | (clamp(g) << 8) | clamp(b);
}
}
BufferedImage dest = new BufferedImage(width, height, src.getType());
dest.setRGB(0, 0, width, height, outPixels, 0, width);
return dest;
}
/**
* Compute the Gaussian weights for a (2*r+1) x (2*r+1) kernel (radius r)
* The weights are computed with the kernel center as the coordinate origin
* The closer to the center, the larger the weight
* @param r radius
* @return the weights for every position in the kernel
*/
public static double[][] getGaussianWeight(int r) {
int n = 2*r+1;
double[][] a = new double[n][n];
double totalWeight = 0;
// Compute the weight of each position
for (int i=0; i<n; i++) {
for (int j=0; j<n; j++) {
int x = j - r;
int y = i - r;
a[i][j] = getGaussianFunction(x, y);
totalWeight += a[i][j];
}
}
// Normalize so the weights sum to 1
for (int i=0; i<n; i++) {
for (int j=0; j<n; j++) {
a[i][j] = a[i][j] / totalWeight;
}
}
return a;
}
/**
* Gaussian function: computes the weight of a point from its coordinates; the origin is the center of the 3x3 kernel
* Reference: https://www.cnblogs.com/invisible2/p/9177018.html
* @param x x coordinate
* @param y y coordinate
* @return the weight
*/
public static double getGaussianFunction(int x, int y) {
double a = 1.5;
double b = -(x*x + y*y)/(2*a*a);
double c = Math.exp(b)/(2*Math.PI*a*a);
return c;
}
/**
* Surface-blur weight
* @param x neighbor component value
* @param y center component value
* @return the weight
*/
public static double getSurfaceBlurWeight(int x, int y) {
// Threshold
double t = 25;
double weight = 1 - Math.abs(x - y) / (t*2.5);
return weight < 0 ? 0 : weight;
}
}
Three ways to read an image in Java
import java.net.*; // for URL
import java.io.*; // for IOException, File, InputStream, BufferedInputStream, FileInputStream, etc.
import java.awt.Image;
import javax.imageio.ImageIO;
public class HelloJava{
public static void main (String[] args){
Image image = null;
try {
// Read from a file
File sourceimage = new File("source.gif"); // source.gif must be in the same directory as HelloJava.java
image = ImageIO.read(sourceimage);
// Read from an input stream
InputStream is = new BufferedInputStream(
new FileInputStream("mid.jpg")); // mid.jpg must be in the same directory as HelloJava.java
image = ImageIO.read(is);
// Read from a URL
URL url = new URL("http://www.javaworld.com/images/012407-tipsbox.jpg");
image = ImageIO.read(url);
} catch (IOException e) {
e.printStackTrace();
}
}
}
Reference implementation:
@Override
public void accessOpenThumbnailFile(String bucketName, String path, String fileName, HttpServletResponse response) throws IOException {
String logoImageUrl = "http://192.168.1.195:9000/hc-epqcp/public/0e73038cdb2946d38d0f3be32b7e10b3.jpg";
// This method simply reads the image from the given address
URL url = new URL(logoImageUrl);
// Write the result to the response output stream
Thumbnails.of(url).scale(1f).outputQuality(0.5f)
.toOutputStream(response.getOutputStream());
}
Source: https://blog.csdn.net/weixin_44824381/article/details/135355398