How to write an output file in Java using the GDAL library? - java

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Scanner;

import org.gdal.gdal.Band;
import org.gdal.gdal.Dataset;
import org.gdal.gdal.Driver;
import org.gdal.gdal.gdal;
import org.gdal.gdalconst.gdalconst;
import org.gdal.gdalconst.gdalconstConstants;

public class Learn {
    public static String getFilename() {
        Scanner scnr = new Scanner(System.in);
        System.out.print("Enter the file path: ");
        return scnr.next();
    }

    public static void main(String[] args) {
        gdal.AllRegister();
        Dataset inputdata = gdal.Open(getFilename(), gdalconstConstants.GA_ReadOnly);
        Driver driver = gdal.GetDriverByName("GTiff");
        driver.Register();

        int xsize = inputdata.getRasterXSize();
        int ysize = inputdata.getRasterYSize();
        int bandCount = inputdata.GetRasterCount();
        int pixels = xsize * ysize;
        ByteBuffer[] bands = new ByteBuffer[bandCount];

        String filename_out = getFilename();
        System.out.println(filename_out + " " + xsize + " " + ysize);

        // output dataset: one byte band, same size/projection/transform as the input
        Dataset dataset = driver.Create(filename_out, xsize, ysize, 1, gdalconst.GDT_Byte);
        dataset.SetGeoTransform(inputdata.GetGeoTransform());
        dataset.SetProjection(inputdata.GetProjection());
        Band band2 = dataset.GetRasterBand(1); // writable band

        for (int band = 0; band < bandCount; band++) {
            Band poBand = inputdata.GetRasterBand(band + 1);
            int buf_Type = poBand.getDataType();
            int buf_Size = pixels * gdal.GetDataTypeSize(buf_Type) / 8;
            System.out.println(buf_Type + "," + gdal.GetDataTypeName(buf_Type));
            ByteBuffer data = ByteBuffer.allocateDirect(buf_Size);
            data.order(ByteOrder.nativeOrder());
            // reading the whole band into the "data" buffer
            poBand.ReadRaster_Direct(0, 0, poBand.getXSize(), poBand.getYSize(),
                    xsize, ysize, buf_Type, data);
            bands[band] = data;
        }

        // generating indices
        float[] NDVI = new float[pixels];
        byte[] Binary_pixels = new byte[pixels];
        for (int i = 0; i < pixels; i++) {
            int Red = bands[2].get(i) & 0xFF; // band 3 of 4 (array indices are zero-based)
            int NIR = bands[3].get(i) & 0xFF; // band 4 of 4
            // NDVI
            if ((NIR + Red) != 0) {
                NDVI[i] = (float) (NIR - Red) / (NIR + Red);
            } else {
                NDVI[i] = 0;
            }
            // threshold NDVI into a binary mask for every pixel
            if (NDVI[i] > 0.3) {
                Binary_pixels[i] = 1;
            } else {
                Binary_pixels[i] = 0;
            }
        }
        // writing data into band2.
        // Here I want to write a raster file using the data in Binary_pixels[]
        // with the same projection and transform as the input file.
        // My input file is a ".tif" with 4 bands in it.
    }
}
I am new to Java coding, and newer still to using the GDAL library for remote sensing image processing. I need some help writing an image with the same dimensions, projection, and transform as the input image.
Thanks in advance.

Look at this example.
Dataset dataset = null;
Driver driver = null;
Band band = null;
int xsize = 4000;
int ysize = 400;
driver = gdal.GetDriverByName("GTiff");
ByteBuffer byteBuffer = ByteBuffer.allocateDirect(4 * xsize); // one row of floats
byteBuffer.order(ByteOrder.nativeOrder());
FloatBuffer floatBuffer = byteBuffer.asFloatBuffer();
int[] intArray = new int[xsize];
float[] floatArray = new float[xsize];
dataset = driver.Create(filename, xsize, ysize, 1, gdalconst.GDT_Float32);
band = dataset.GetRasterBand(1);
for (int iter = 0; iter < nbIters; iter++)
{
    if (method == METHOD_DBB)
    {
        // write row by row through a direct ByteBuffer
        for (int i = 0; i < ysize; i++)
        {
            for (int j = 0; j < xsize; j++)
            {
                floatBuffer.put(j, (float) (i + j));
            }
            band.WriteRaster_Direct(0, i, xsize, 1, gdalconst.GDT_Float32, byteBuffer);
        }
    }
    else
    {
        // write row by row from a plain float[]
        for (int i = 0; i < ysize; i++)
        {
            for (int j = 0; j < xsize; j++)
            {
                floatArray[j] = (float) (i + j);
            }
            band.WriteRaster(0, i, xsize, 1, floatArray);
        }
    }
}
dataset.delete(); // flushes and closes the file
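Adapted to the question above, a minimal sketch (untested; it reuses band2, dataset, Binary_pixels, xsize and ysize from the question's code, and assumes a byte[] overload of WriteRaster analogous to the float[] one shown here):

    // Write the whole binary mask into the single GDT_Byte band created
    // earlier, then close the dataset so the GeoTIFF is flushed to disk.
    // The projection and geotransform were already copied from the input.
    band2.WriteRaster(0, 0, xsize, ysize, Binary_pixels);
    band2.FlushCache();
    dataset.delete();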

Related

Convolution Kernel - image comes out as a mirror image

So I have some code for convolving a greyscale image in Java using a convolution kernel. It seems to work reasonably well, but the image comes out mirrored, as if copied from the end of each row rather than the start. Can anyone help me understand what's happening here?
The problem appears to be in the convertToArrayLocation() method: if I try to recreate an image from the array that this method produces, the image is mirrored.
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.io.File;
import java.io.IOException;
import javax.imageio.ImageIO;

public class GetTwoDimensionalPixelArray {
    public static BufferedImage inputImage, output;
    public static final int[][] IDENTITY = {{0, 0, 0}, {0, 1, 0}, {0, 0, 0}};
    public static final int[][] EDGE_DETECTION_1 = {{-1, -1, -1}, {-1, 8, -1}, {-1, -1, -1}};
    public static int[][] SHARPEN = {{0, -1, 0}, {-1, 5, -1}, {0, -1, 0}};
    public static int WIDTH, HEIGHT;
    public static int order = SHARPEN.length;

    public static void main(String[] args) throws IOException {
        System.out.println(WIDTH);
        BufferedImage inputImage = ImageIO.read(new File("it-gs.png")); // load the image from the current folder
        WIDTH = inputImage.getWidth();
        HEIGHT = inputImage.getHeight();
        int[][] result = convertToArrayLocation(inputImage); // pass the buffered image to the method and get back the result
        System.out.println("height" + result.length + "width" + result[0].length);
        int[][] outputarray = convolution2D(result, WIDTH, HEIGHT, EDGE_DETECTION_1,
                EDGE_DETECTION_1.length, EDGE_DETECTION_1.length);
        int opwidth = outputarray[0].length;
        int opheight = outputarray.length;
        System.out.println("W" + opwidth + "H" + opheight);
        BufferedImage img = new BufferedImage(opheight, opwidth, BufferedImage.TYPE_BYTE_GRAY);
        for (int r = 0; r < opheight; r++) {
            for (int t = 0; t < opwidth; t++) {
                img.setRGB(r, t, outputarray[r][t]);
            }
        }
        try {
            File imageFile = new File("C:\\Users\\ciara\\eclipse-workspace\\it.png");
            ImageIO.write(img, "png", imageFile);
        } catch (Exception e) {
            System.out.println(e);
        }
    }

    private static int[][] convertToArrayLocation(BufferedImage inputImage) {
        // get the pixel values as a single array from the buffered image
        final byte[] pixels = ((DataBufferByte) inputImage.getRaster().getDataBuffer()).getData();
        final int width = inputImage.getWidth();
        final int height = inputImage.getHeight();
        System.out.println("height" + height + " width" + width);
        int[][] result = new int[height][width];
        // copy the pixel values into the two-dimensional array
        for (int pixel = 0, row = 0, col = 0; pixel < pixels.length; pixel++) {
            int argb = (int) pixels[pixel];
            // bytes are signed in Java, so map negative values back into 0..255
            if (argb < 0) {
                argb += 256;
            }
            result[row][col] = argb;
            col++;
            if (col == width) {
                col = 0;
                row++;
            }
        }
        return result;
    }

    public static int[][] convolution2D(int[][] input, int width, int height,
                                        int[][] kernel, int kernelWidth, int kernelHeight) {
        int smallWidth = width - kernelWidth + 1;
        int smallHeight = height - kernelHeight + 1;
        int[][] output = new int[smallHeight][smallWidth];
        for (int i = 0; i < smallHeight; ++i) {
            for (int j = 0; j < smallWidth; ++j) {
                output[i][j] = singlePixelConvolution(input, i, j, kernel, kernelWidth, kernelHeight);
            }
        }
        return output;
    }

    public static int singlePixelConvolution(int[][] input, int x, int y, int[][] k,
                                             int kernelWidth, int kernelHeight) {
        int output = 0;
        for (int i = 0; i < kernelHeight; ++i) {
            for (int j = 0; j < kernelWidth; ++j) {
                try {
                    output = output + (input[x + i][y + j] * k[i][j]);
                } catch (Exception e) {
                    continue;
                }
            }
        }
        return output;
    }
}
As you probably already know by now, this is not an error but the expected result for convolution: convolution flips the kernel, unlike correlation, which does not. https://en.wikipedia.org/wiki/Convolution
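For reference, a sketch of the standard workaround when the kernel flip matters (flipKernel is a hypothetical helper; note that a symmetric kernel such as EDGE_DETECTION_1 is unchanged by the flip): flipping the kernel in both axes turns the convolution into a correlation.

    // Flipping the kernel horizontally and vertically converts convolution
    // into correlation, which applies the kernel without flipping it.
    public static int[][] flipKernel(int[][] k) {
        int h = k.length, w = k[0].length;
        int[][] flipped = new int[h][w];
        for (int i = 0; i < h; i++) {
            for (int j = 0; j < w; j++) {
                flipped[i][j] = k[h - 1 - i][w - 1 - j];
            }
        }
        return flipped;
    }
    // usage: convolution2D(result, WIDTH, HEIGHT, flipKernel(EDGE_DETECTION_1), 3, 3);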

TensorFlow in Android: java.nio.BufferOverflowException

Good morning. I'm a developer trying to put a TensorFlow model into Android.
While trying to fix several other errors, I've run into one I've never seen before.
The java.nio.BufferOverflowException I'm facing now didn't happen before; it started suddenly.
My code uses a byte array, but I cannot tell which part is the problem.
This source takes a float array as input and, after passing it through the model, returns an array with 10 classes.
The returned values are softmax values.
public float[] hypothesis(float[] inputFloats, int nFeatures, int nClasses, Context context)
{
    try {
        int nInstance = inputFloats.length / nFeatures;
        // FloatBuffer.wrap(inputFloats);
        Toast.makeText(context, "", Toast.LENGTH_LONG).show();
        inferenceInterface.feed(INPUT_NODE, FloatBuffer.wrap(inputFloats), INPUT_SIZE);
        inferenceInterface.run(OUTPUT_NODES_HYPO);
        float[] result = new float[nInstance * nClasses];
        inferenceInterface.fetch(OUTPUT_NODE_HYPO, result);
        return result;
    }
    catch (Exception e) {
        Toast.makeText(context, e + " ...", Toast.LENGTH_LONG).show();
        return null;
    }
}
The length of inputFloats is 720 and nFeatures is 720; nClasses is 10.
Although the value was not correct, it worked before.
The e in the catch statement prints java.nio.BufferOverflowException.
Could there be a problem in converting the byte array to a float array?
Related source:
public float[] bytetofloat(byte[] array) {
    int[] returnArr = new int[array.length / 4];
    float[] returnArr1 = new float[array.length / 4];
    for (int i = 0; i < returnArr.length; i++) {
        // take the first byte of each 4-byte pixel
        returnArr[i] = array[i * 4] & 0xFF;
        if (returnArr[i] < 0 || returnArr[i] > 255)
            Log.d("ARRAY", returnArr[i] + " ");
        returnArr1[i] = (float) returnArr[i];
    }
    return returnArr1;
}

public Bitmap RGB2GRAY(Bitmap image) {
    int width = image.getWidth();
    int height = image.getHeight();
    Bitmap bmOut = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_4444);
    for (int x = 0; x < width; x++) {
        for (int y = 0; y < height; y++) {
            int pixel = image.getPixel(x, y);
            int A = Color.alpha(pixel);
            int R = Color.red(pixel);
            int G = Color.green(pixel);
            int B = Color.blue(pixel);
            // luminance-weighted greyscale conversion
            R = G = B = (int) (0.2126 * R + 0.7152 * G + 0.0722 * B);
            bmOut.setPixel(x, y, Color.argb(A, R, G, B));
        }
    }
    return bmOut;
}

private void activityPrediction(float[] inputArray) {
    try {
        float[] result = activityInference.hypothesis(inputArray, 20 * 36, 10, getApplicationContext());
        predictionView.setText(Arrays.toString(result));
    }
    catch (Exception e) {
        Toast.makeText(getApplicationContext(), e.getMessage(), Toast.LENGTH_LONG).show();
    }
}

private byte[] bitmapToByteArray(Bitmap bitmap)
{
    int chunkNumbers = 10;
    int bitmapSize = bitmap.getRowBytes() * bitmap.getHeight();
    byte[] imageBytes = new byte[bitmapSize];
    int rows, cols;
    int chunkHeight, chunkWidth;
    rows = cols = (int) Math.sqrt(chunkNumbers);
    chunkHeight = bitmap.getHeight() / rows;
    chunkWidth = bitmap.getWidth() / cols;
    int yCoord = 0;
    int bitmapsSizes = 0;
    for (int x = 0; x < rows; x++)
    {
        int xCoord = 0;
        for (int y = 0; y < cols; y++)
        {
            Bitmap bitmapChunk = Bitmap.createBitmap(bitmap, xCoord, yCoord, chunkWidth, chunkHeight);
            byte[] bitmapArray = getBytesFromBitmapChunk(bitmapChunk);
            System.arraycopy(bitmapArray, 0, imageBytes, bitmapsSizes, bitmapArray.length);
            bitmapsSizes = bitmapsSizes + bitmapArray.length;
            xCoord += chunkWidth;
            bitmapChunk.recycle();
            bitmapChunk = null;
        }
        yCoord += chunkHeight;
    }
    return imageBytes;
}

private byte[] getBytesFromBitmapChunk(Bitmap bitmap)
{
    int bitmapSize = bitmap.getRowBytes() * bitmap.getHeight();
    ByteBuffer byteBuffer = ByteBuffer.allocate(bitmapSize);
    bitmap.copyPixelsToBuffer(byteBuffer);
    byteBuffer.rewind();
    return byteBuffer.array();
}
'e.printStackTrace()' result
at com.example.leehanbeen.platerecognize.ActivityInference.hypothesis(ActivityInference.java:58)
at com.example.leehanbeen.platerecognize.MainActivity.activityPrediction(MainActivity.java:148)
at com.example.leehanbeen.platerecognize.MainActivity.access$100(MainActivity.java:28)
at com.example.leehanbeen.platerecognize.MainActivity$2.onClick(MainActivity.java:69)
around MainActivity.java:69
byte[] byteArrayRes = bitmapToByteArray(image_bitmap);
float[] inputArray = bytetofloat(byteArrayRes);
activityPrediction(inputArray);
MainActivity.java:28
public class MainActivity extends AppCompatActivity {
MainActivity.java:148
float[] result = activityInference.hypothesis(inputArray, 20*36, 10, getApplicationContext());
around ActivityInference.java:58
float[] result = new float[nInstance * nClasses];
inferenceInterface.fetch(OUTPUT_NODE_HYPO, result);
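No answer is recorded here, but a minimal JDK-only demonstration of the usual failure mode may help (this is an assumed cause, not confirmed by the question): fetch() copies the output tensor into the supplied float[] through a FloatBuffer, and FloatBuffer.put throws java.nio.BufferOverflowException when the destination holds fewer elements than the source. So if the model's actual output has more than nInstance * nClasses elements, this exact exception appears.

    import java.nio.FloatBuffer;

    public class OverflowDemo {
        public static void main(String[] args) {
            float[] tensorData = new float[20]; // pretend the model outputs 20 values
            float[] result = new float[10];     // but the array was sized for 10
            FloatBuffer.wrap(result).put(tensorData); // throws java.nio.BufferOverflowException
        }
    }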

org.eclipse.swt.SWTException: Unsupported color depth

I have created a sample SWT application into which I upload a few images. I have to resize all images above 16x16 (width x height) resolution and save them to a separate location.
For this reason I scale each image and save the scaled image to my destination location. Below is the piece of code I use to do that.
I use getImageData() to get the image data, and the ImageLoader save() method to save it.
final Image mySampleImage = ImageResizer.scaleImage(img, 16, 16);
final ImageLoader imageLoader = new ImageLoader();
imageLoader.data = new ImageData[] { mySampleImage.getImageData() };
final String fileExtension = inputImagePath.substring(inputImagePath.lastIndexOf(".") + 1);
if ("GIF".equalsIgnoreCase(fileExtension)) {
    imageLoader.save(outputImagePath, SWT.IMAGE_GIF);
} else if ("PNG".equalsIgnoreCase(fileExtension)) {
    imageLoader.save(outputImagePath, SWT.IMAGE_PNG);
}
imageLoader.save(outputImagePath, SWT.IMAGE_GIF); throws the exception below when I try to save a few specific images (in GIF or PNG format).
org.eclipse.swt.SWTException: Unsupported color depth
at org.eclipse.swt.SWT.error(SWT.java:4533)
at org.eclipse.swt.SWT.error(SWT.java:4448)
at org.eclipse.swt.SWT.error(SWT.java:4419)
at org.eclipse.swt.internal.image.GIFFileFormat.unloadIntoByteStream(GIFFileFormat.java:427)
at org.eclipse.swt.internal.image.FileFormat.unloadIntoStream(FileFormat.java:124)
at org.eclipse.swt.internal.image.FileFormat.save(FileFormat.java:112)
at org.eclipse.swt.graphics.ImageLoader.save(ImageLoader.java:218)
at org.eclipse.swt.graphics.ImageLoader.save(ImageLoader.java:259)
at mainpackage.ImageResizer.resize(ImageResizer.java:55)
at mainpackage.ImageResizer.main(ImageResizer.java:110)
Let me know if there is any other way to do the same, or any way to resolve this issue.
I finally found a solution by referring to this existing Eclipse bug: Unsupported color depth eclipse bug.
In the code below I create a PaletteData with RGB values and update my ImageData.
My updateImagedata() method takes the scaled image and returns properly updated ImageData if the image depth is 32 or more.
private static ImageData updateImagedata(Image image) {
    ImageData data = image.getImageData();
    if (!data.palette.isDirect && data.depth <= 8)
        return data;
    // compute a histogram of color frequencies
    HashMap<RGB, ColorCounter> freq = new HashMap<>();
    int width = data.width;
    int[] pixels = new int[width];
    int[] maskPixels = new int[width];
    for (int y = 0, height = data.height; y < height; ++y) {
        data.getPixels(0, y, width, pixels, 0);
        for (int x = 0; x < width; ++x) {
            RGB rgb = data.palette.getRGB(pixels[x]);
            ColorCounter counter = (ColorCounter) freq.get(rgb);
            if (counter == null) {
                counter = new ColorCounter();
                counter.rgb = rgb;
                freq.put(rgb, counter);
            }
            counter.count++;
        }
    }
    // sort colors by most frequently used
    ColorCounter[] counters = new ColorCounter[freq.size()];
    freq.values().toArray(counters);
    Arrays.sort(counters);
    // pick the most frequently used 256 (or fewer), and make a palette
    ImageData mask = null;
    if (data.transparentPixel != -1 || data.maskData != null) {
        mask = data.getTransparencyMask();
    }
    int n = Math.min(256, freq.size());
    RGB[] rgbs = new RGB[n + (mask != null ? 1 : 0)];
    for (int i = 0; i < n; ++i)
        rgbs[i] = counters[i].rgb;
    if (mask != null) {
        rgbs[rgbs.length - 1] = data.transparentPixel != -1 ? data.palette.getRGB(data.transparentPixel)
                : new RGB(255, 255, 255);
    }
    PaletteData palette = new PaletteData(rgbs);
    ImageData newData = new ImageData(width, data.height, 8, palette);
    if (mask != null)
        newData.transparentPixel = rgbs.length - 1;
    for (int y = 0, height = data.height; y < height; ++y) {
        data.getPixels(0, y, width, pixels, 0);
        if (mask != null)
            mask.getPixels(0, y, width, maskPixels, 0);
        for (int x = 0; x < width; ++x) {
            if (mask != null && maskPixels[x] == 0) {
                pixels[x] = rgbs.length - 1;
            } else {
                RGB rgb = data.palette.getRGB(pixels[x]);
                pixels[x] = closest(rgbs, n, rgb);
            }
        }
        newData.setPixels(0, y, width, pixels, 0);
    }
    return newData;
}
To find the minimum index:
static int closest(RGB[] rgbs, int n, RGB rgb) {
    int minDist = 256 * 256 * 3;
    int minIndex = 0;
    for (int i = 0; i < n; ++i) {
        RGB rgb2 = rgbs[i];
        int da = rgb2.red - rgb.red;
        int dg = rgb2.green - rgb.green;
        int db = rgb2.blue - rgb.blue;
        int dist = da * da + dg * dg + db * db;
        if (dist < minDist) {
            minDist = dist;
            minIndex = i;
        }
    }
    return minIndex;
}
The ColorCounter class:
class ColorCounter implements Comparable<ColorCounter> {
    RGB rgb;
    int count;

    public int compareTo(ColorCounter o) {
        return o.count - count;
    }
}
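To use it, the saving code from the top of the question only needs to hand the converted ImageData to the ImageLoader (a short sketch combining the two snippets above):

    final Image mySampleImage = ImageResizer.scaleImage(img, 16, 16);
    final ImageLoader imageLoader = new ImageLoader();
    // save the palette-reduced ImageData instead of the raw image data
    imageLoader.data = new ImageData[] { updateImagedata(mySampleImage) };
    imageLoader.save(outputImagePath, SWT.IMAGE_GIF);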

Java BufferedImage write / read (different rgb values)

I wrote an image with this code:
BufferedImage newImage = new BufferedImage(width, height, BufferedImage.TYPE_3BYTE_BGR);
index = 0;
for (int i = 0; i < height; i++) {
    for (int j = 0; j < width; j++) {
        int r = ciphered[index++];
        int g = ciphered[index++];
        int b = ciphered[index++];
        Color newColor = new Color(r, g, b);
        newImage.setRGB(j, i, newColor.getRGB());
    }
}
File output = new File("/Users/newbie/Desktop/encrypted.jpg");
ImageIO.write(newImage, "jpg", output);
When I try to read the image ("encrypted.jpg") I get different RGB values. I read the image with the following code:
File input = new File("/Users/newbie/Desktop/encrypted.jpg");
BufferedImage image = ImageIO.read(input);
int[] t = new int[width * height * 3];
for (int i = 0; i < height; i++) {
    for (int j = 0; j < width; j++) {
        Color c = new Color(image.getRGB(j, i));
        int r = c.getRed();
        int g = c.getGreen();
        int b = c.getBlue();
        t[index++] = r;
        t[index++] = g;
        t[index++] = b;
    }
}
I don't understand what I'm doing wrong; I just get RGB values different from the ones I inserted.
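No answer is recorded here, but one relevant fact: JPEG is a lossy format, so pixel values are generally not preserved exactly across a write/read round trip. A minimal sketch (file name is hypothetical) showing that a lossless format such as PNG returns the exact values:

    import java.awt.image.BufferedImage;
    import java.io.File;
    import javax.imageio.ImageIO;

    public class RoundTrip {
        public static void main(String[] args) throws Exception {
            BufferedImage img = new BufferedImage(2, 2, BufferedImage.TYPE_3BYTE_BGR);
            img.setRGB(0, 0, 0xCAFE42);
            File f = new File("roundtrip.png");
            ImageIO.write(img, "png", f); // PNG is lossless, unlike JPEG
            BufferedImage back = ImageIO.read(f);
            // prints true: the pixel survives the round trip exactly
            System.out.println((back.getRGB(0, 0) & 0xFFFFFF) == (img.getRGB(0, 0) & 0xFFFFFF));
        }
    }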

Comparing 2 Images and marking differences with rectangles

Edit: Updated the code. The code below now correctly draws rectangles around multiple shapes, but it still sometimes creates multiple rectangles on one single shape.
I have two images that I compare pixel by pixel, and I want my program to draw rectangles around each area of difference (multiple rectangles for multiple instances of differences). So far I have managed to do this with a single rectangle, so if I had multiple instances, they would all end up in one big rectangle. Now I'm trying to make the program create multiple rectangles, but I run into an IndexOutOfBoundsException.
The program overlays the two images being compared with opacity and outputs the resulting overlaid image, along with the rectangles, into a new file. Both images being compared have the same width and height.
I call the rectangles I want drawn "regions" within the code.
The region list is continuously updated while the comparison is running.
The first question I asked myself was: when does a point of difference (pixel difference) belong to a region?
My attempt was to define a "tolerance": as long as the pixel being compared is within the tolerance of the last found point of difference, it belongs to the same region. I quickly realized that this breaks as soon as an image contains a shape like a giant U, with the top points far enough apart to fall outside the tolerance. Now I'm somewhat stuck, because I feel I'm on the wrong path.
Below is the code I have so far:
private void compareImages() throws IOException {
    BufferedImage img1;
    BufferedImage img2;
    try {
        img1 = ImageIO.read(new File(path_to_img1));
        img2 = ImageIO.read(new File(path_to_img2));
    } catch (Throwable e) {
        System.out.println("Unable to load the Images!");
        return;
    }
    BufferedImage dest = new BufferedImage(img1.getWidth(), img1.getHeight(), BufferedImage.TYPE_INT_ARGB);
    Graphics2D gfx = dest.createGraphics();
    gfx.setComposite(AlphaComposite.getInstance(AlphaComposite.SRC_OVER, 0.65f));
    // Compare images pixel by pixel
    int sX = 9999; // start X
    int sY = 9999; // start Y
    int eX = 0;    // end X
    int eY = 0;    // end Y
    boolean isDrawable = false;
    boolean loadedRegion = false;
    List<Rectangle> regions = new ArrayList<>();
    List<Rectangle> check_regions = new ArrayList<>();
    Rectangle tmp_comparison;
    int regionID = 0;
    int tolerance = 25;
    for (int i = 0; i < img1.getHeight(); i++) {
        for (int j = 0; j < img1.getWidth(); j++) {
            loadedRegion = false;
            regionID = 0;
            sX = 9999;
            sY = 9999;
            eX = 0;
            eY = 0;
            if (img1.getRGB(j, i) != img2.getRGB(j, i)) {
                isDrawable = true;
                if (regions.size() != 0) {
                    // Attempt to locate a matching existing region
                    tmp_comparison = new Rectangle(j, i, 1, 1);
                    for (int trID = 0; trID < regions.size(); trID++) {
                        if (tmp_comparison.intersects(check_regions.get(trID).getBounds())) {
                            // Region found
                            sX = (int) regions.get(trID).getX();
                            sY = (int) regions.get(trID).getY();
                            eX = (int) regions.get(trID).getWidth();
                            eY = (int) regions.get(trID).getHeight();
                            regionID = trID;
                            loadedRegion = true;
                            break;
                        }
                    }
                }
                // Update region dimensions
                if (j < sX) {
                    sX = j;
                }
                if (j > eX) {
                    eX = j;
                }
                if (i < sY) {
                    sY = i;
                }
                if (i > eY) {
                    eY = i;
                }
                if (regions.size() == 0 || loadedRegion == false) {
                    regions.add(new Rectangle(sX, sY, eX, eY));
                    check_regions.add(new Rectangle(sX - tolerance, sY - tolerance, eX - sX + (tolerance * 2), eY - sY + (tolerance * 2)));
                } else {
                    regions.set(regionID, new Rectangle(sX, sY, eX, eY));
                    check_regions.set(regionID, new Rectangle(sX - tolerance, sY - tolerance, eX - sX + (tolerance * 2), eY - sY + (tolerance * 2)));
                }
            }
        }
    }
    // If there are any differences, draw the regions.
    // Regions are 10px bigger in all directions than the actual rectangles of difference.
    if (isDrawable) {
        gfx.setPaint(Color.red);
        for (int i = 0; i < regions.size(); i++) {
            int dsX = 0;
            int dsY = 0;
            int deX = 0;
            int deY = 0;
            sX = (int) regions.get(i).getX();
            sY = (int) regions.get(i).getY();
            eX = (int) regions.get(i).getWidth();
            eY = (int) regions.get(i).getHeight();
            if (sX >= 10) { dsX = sX - 10; }
            if (eX <= img1.getWidth() - 10) { deX = eX - sX + 20; }
            if (sY >= 10) { dsY = sY - 10; }
            if (eY <= img1.getHeight() - 10) { deY = eY - sY + 20; }
            gfx.draw(new Rectangle2D.Double(dsX, dsY, deX, deY));
        }
    }
    gfx.drawImage(img1, 0, 0, null);
    gfx.drawImage(img2, 0, 0, null);
    gfx.dispose();
    File out = new File("C:\\output.png");
    ImageIO.write(dest, "PNG", out);
}
Below is the code that creates one big rectangle around all the differences found in the images being compared.
private void oneRectangle() throws IOException {
    BufferedImage img1;
    BufferedImage img2;
    try {
        img1 = ImageIO.read(new File(path_to_img1));
        img2 = ImageIO.read(new File(path_to_img2));
    } catch (Throwable e) {
        System.out.println("Unable to load the Images!");
        return;
    }
    BufferedImage dest = new BufferedImage(img1.getWidth(), img1.getHeight(), BufferedImage.TYPE_INT_ARGB);
    Graphics2D gfx = dest.createGraphics();
    gfx.setComposite(AlphaComposite.getInstance(AlphaComposite.SRC_OVER, 0.65f));
    // Compare images pixel by pixel
    boolean isDrawable = false;
    int sX = 9999;
    int sY = 9999;
    int eX = 0;
    int eY = 0;
    for (int i = 0; i < img1.getHeight(); i++) {
        for (int j = 0; j < img1.getWidth(); j++) {
            if (img1.getRGB(j, i) != img2.getRGB(j, i)) {
                isDrawable = true;
                if (j < sX) { sX = j; }
                if (j > eX) { eX = j; }
                if (i < sY) { sY = i; }
                if (i > eY) { eY = i; }
            }
        }
    }
    // Draw the rectangle if there are any differences
    if (isDrawable) {
        gfx.setPaint(Color.red);
        int dsX = 0;
        int dsY = 0;
        int deX = 0;
        int deY = 0;
        if (sX >= 10) { dsX = sX - 10; }
        if (eX <= img1.getWidth() - 10) { deX = eX - sX + 20; }
        if (sY >= 10) { dsY = sY - 10; }
        if (eY <= img1.getHeight() - 10) { deY = eY - sY + 20; }
        gfx.fill(new Rectangle2D.Double(dsX, dsY, deX, deY));
    }
    gfx.drawImage(img1, 0, 0, null);
    gfx.drawImage(img2, 0, 0, null);
    gfx.dispose();
    File out = new File("C:\\output.png");
    ImageIO.write(dest, "PNG", out);
}
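The remaining issue of several rectangles landing on one shape could plausibly be handled with a post-processing pass (a sketch of an assumed extra step, not part of the code above) that merges intersecting regions using java.awt.Rectangle's intersects() and union():

    import java.awt.Rectangle;
    import java.util.ArrayList;
    import java.util.List;

    class RegionMerger {
        // Repeatedly merge any two intersecting rectangles until no overlaps
        // remain, leaving one rectangle per connected cluster of differences.
        static List<Rectangle> mergeRegions(List<Rectangle> regions) {
            List<Rectangle> merged = new ArrayList<>(regions);
            boolean changed = true;
            while (changed) {
                changed = false;
                outer:
                for (int a = 0; a < merged.size(); a++) {
                    for (int b = a + 1; b < merged.size(); b++) {
                        if (merged.get(a).intersects(merged.get(b))) {
                            Rectangle union = merged.get(a).union(merged.get(b));
                            merged.remove(b);
                            merged.set(a, union);
                            changed = true;
                            break outer;
                        }
                    }
                }
            }
            return merged;
        }
    }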
