I am currently working on a project using Processing, and I need to do image processing inside it. Initially I thought of using OpenCV, but unfortunately I found out that OpenCV for Processing is not the complete version of the original library.
How can I start doing image processing in Processing? Since Processing is a wrapper around Java, Java code is accepted. Can I use JavaCV inside Processing? If so, how?
Here is the sample code:
import gab.opencv.*;
import org.opencv.imgproc.Imgproc;
import org.opencv.core.Core;
import org.opencv.highgui.Highgui;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.CvType;
import org.opencv.core.Point;
import org.opencv.core.Size;
import org.opencv.core.Core.MinMaxLocResult;
PImage imgBack, rightSection, leftSection;
PImage img;
void setup(){
imgBack=loadImage("tk100backback.jpg");
leftSection=imgBack.get(0,0,14,200);
rightSection=imgBack.get(438,0,32,200);
img=createImage(46,200,RGB);
img.set(0,0,rightSection);
img.set(32,0,leftSection);
size(46,200);
Mat src= Highgui.imread(img.toString());
Mat tmp=Highgui.imread("templateStarMatching.jpg");
int result_cols=src.cols()-tmp.cols()+1;
int result_rows=src.rows()-tmp.rows()+1;
Mat result = new Mat(result_rows, result_cols, CvType.CV_32FC1);
Imgproc.matchTemplate(src, tmp, result, Imgproc.TM_CCOEFF_NORMED);
MatOfPoint minLoc = new MatOfPoint();
MatOfPoint maxLoc = new MatOfPoint();
MinMaxLocResult mrec=new MinMaxLocResult();
mrec=Core.minMaxLoc(result,null);
System.out.println(mrec.minVal);
System.out.println(mrec.maxVal);
Point point = new Point(mrec.maxLoc.x+tmp.width(), mrec.maxLoc.y+tmp.height());
// cvRectangle(src, maxLoc, point, CvScalar.WHITE, 2, 8, 0);//Draw a Rectangle for Matched Region
}
void draw(){
image(img,0,0);
}
It is continuously giving me errors that Core doesn't exist and that Highgui is not properly installed, but they are properly installed.
It looks like you're trying to do template matching with OpenCV. It sounds like your errors come from the OpenCV install, not your code.
OpenCV Issues
1. Did you install OpenCV separately before? A previous system-wide install can cause issues with the Processing version.
2. If not, try reducing your code until you reach the line that first causes an error. If it's one of the import statements, you know it's an OpenCV install issue (see the minimal test sketch after this list).
3. You don't list your OS, but if you're on a Mac you can follow my detailed instructions for installing it.
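As a quick check for step 2, a minimal sketch like this (just the imports plus a print statement, nothing else) should run cleanly when the library is installed correctly; if it already fails on the import lines, the problem is the OpenCV install rather than your matching code:
import gab.opencv.*;
import org.opencv.core.Core;
import org.opencv.imgproc.Imgproc;

void setup() {
  // Nothing OpenCV-specific is executed here; this only verifies that the
  // imported classes can be found by Processing.
  println("OpenCV classes resolved");
}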
Can I Do It With Another Library?
Yes, see some of the comments to your question. But the specific process I think you are trying to do will be difficult. I think you're better off getting OpenCV working.
The code below reads an image in Processing and then applies a filter (in this case it pixelates it).
Note that img.pixels is a one-dimensional array, while pictures are two-dimensional. The trick for accessing a 2D location is img.pixels[r*img.width + c], where r and c are the pixel's row and column, respectively.
// Declaring a variable of type PImage
PImage img;
void setup() {
// Make a new instance of a PImage by loading an image file
img = loadImage("http://www.washingtonpost.com/wp-srv/special/lifestyle/the-age-of-obama/img/obama-v2/obama09.jpg");
//img = loadImage("background.jpg");
size(img.width, img.height);
img.loadPixels(); // make sure the pixels[] array is populated before reading it
int uberPixel = 25;
for(int row = 0; row < img.height; row+= uberPixel){
for(int col = 0; col < img.width; col+= uberPixel){
int colo[] = new int[3];
int cnt = 0;
for(int r = row; r < row + uberPixel; r++){
for(int c = col; c < col + uberPixel; c++){
if(c < img.width && r*img.width + c < img.pixels.length){
colo[0] += red(img.pixels[r*img.width + c]);
colo[1] += green(img.pixels[r*img.width + c]);
colo[2] += blue(img.pixels[r*img.width + c]);
cnt++;
}
}
}
//average color
for(int i = 0; i < 3; i ++){
colo[i] /= cnt;
}
//change pixel
for(int r = row; r < row + uberPixel; r++){
for(int c = col; c < col + uberPixel; c++){
if(c < img.width && r*img.width + c < img.pixels.length){
img.pixels[r*img.width+c] = color(colo[0],colo[1],colo[2]);
}
}
}
}
}
img.updatePixels(); // push the modified pixels[] back into the image
image(img,0,0);
}
void draw() {
image(img,0,0);
}
Result:
Here is an example of using JavaCV in Processing:
import static com.googlecode.javacv.cpp.opencv_core.*;
import static com.googlecode.javacv.cpp.opencv_imgproc.*;
import static com.googlecode.javacv.cpp.opencv_highgui.*;
void setup ()
{
size( 256, 256 );
String fn = sketchPath("data/lena.jpg");
IplImage ip= cvLoadImage(fn);
if ( ip != null )
{
cvSmooth( ip, ip, CV_GAUSSIAN, 3 );
PImage im = ipToPImage(ip);
image( im, 0, 0 );
cvReleaseImage(ip);
}
}
PImage ipToPImage ( IplImage ip )
{
java.awt.image.BufferedImage bImg = ip.getBufferedImage();
PImage im = new PImage( bImg.getWidth(), bImg.getHeight(), ARGB );
bImg.getRGB( 0, 0, im.width, im.height, im.pixels, 0, im.width );
im.updatePixels();
return im;
}
Related
I have a problem getting the gray scale of a .jpg file. I am trying to create a new .jpg file that is gray scaled, but I am just copying the image, nothing more. Here is my code:
package training01;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.io.*;
import javax.imageio.ImageIO;
import javax.swing.JFrame;
public class GrayScale {
BufferedImage image;
int width;
int height;
public GrayScale() {
try {
File input = new File("digital_image_processing.jpg");
image = ImageIO.read(input);
width = image.getWidth();
height = image.getHeight();
for(int i = width;i < width;i++) {
for(int j = height;j < height;j++) {
Color c = new Color(image.getRGB(i, j));
int red = c.getRed();
int green = c.getGreen();
int blue = c.getBlue();
int val = (red+green+blue)/3;
Color temp = new Color(val,val,val);
image.setRGB(i, j, temp.getRGB());
}
}
File output = new File("digital_image_processing1.jpg");
ImageIO.write(image, "jpg", output);
}catch(Exception e) {
System.out.println(e);
}
}
public static void main(String[] args) {
GrayScale gs = new GrayScale();
}
}
You need to change the following lines so that i and j start at 0:
for(int i = width;i < width;i++) {
for(int j = height;j < height;j++) {
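That is, the loop headers should read:
for(int i = 0; i < width; i++) {
for(int j = 0; j < height; j++) {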
However, here is a faster way to do it. Write it to a new BufferedImage object that is set for gray scale.
image = ImageIO.read(input);
width = image.getWidth();
height = image.getHeight();
bwImage = new BufferedImage(width,
height, BufferedImage.TYPE_BYTE_GRAY);
Graphics g = bwImage.getGraphics();
g.drawImage(image,0,0,null);
Then save the bwImage.
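For example (a minimal completion, reusing the output file name from the question):
g.dispose();                        // release the Graphics object once drawing is done
File output = new File("digital_image_processing1.jpg");
ImageIO.write(bwImage, "jpg", output);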
The main problem with your code, is that it won't loop, because you initialize i, j to width, height which is already greater than the exit condition of the for loops (i < width, j < height). Start iterating at 0 by initializing i and j to 0, and your code will work as intended.
For better performance, you also want to change the order of the loops. As a BufferedImage is stored as a contiguous array, row by row, you will utilize the CPU cache much better if you loop over the x axis (along the row) in the inner loop.
Side note: I also suggest renaming i and j to x and y for better readability.
Finally, your method of converting RGB to gray by averaging the colors will work, but it is not the most common way to convert to gray scale, as the human eye does not perceive the intensities of the three colors as equal. See Wikipedia on gray scale conversion for a better understanding of correct conversion and the theory behind it.
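For example, a luma-weighted conversion (the common ITU-R BT.601 coefficients) would replace the averaging line from the question with something like this:
// weights reflect the eye's higher sensitivity to green and lower sensitivity to blue
int val = (int) (0.299 * red + 0.587 * green + 0.114 * blue);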
However, all that said, for JPEG images stored as YCbCr (the most common way to store JPEGs), there is a much faster, more memory-efficient and simpler way of converting the image to gray scale: simply read the Y (luminance) channel of the JPEG and use that as gray scale directly.
Using Java and ImageIO, you can do it like this:
import java.awt.color.ColorSpace;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import javax.imageio.ImageIO;
import javax.imageio.ImageReadParam;
import javax.imageio.ImageReader;
import javax.imageio.ImageTypeSpecifier;
import javax.imageio.stream.ImageInputStream;
public class GrayJPEG {
public static void main(String[] args) throws IOException {
try (ImageInputStream stream = ImageIO.createImageInputStream(new File(args[0]))) {
ImageReader reader = ImageIO.getImageReaders(stream).next(); // Will throw exception if no reader available
try {
reader.setInput(stream);
ImageReadParam param = reader.getDefaultReadParam();
// The QnD way, just specify the gray type directly
//param.setDestinationType(ImageTypeSpecifier.createFromBufferedImageType(BufferedImage.TYPE_BYTE_GRAY));
// The very correct way, query the reader if it supports gray, and use that
Iterator<ImageTypeSpecifier> types = reader.getImageTypes(0);
while (types.hasNext()) {
ImageTypeSpecifier type = types.next();
if (type.getColorModel().getColorSpace().getType() == ColorSpace.TYPE_GRAY) {
param.setDestinationType(type);
break;
}
}
BufferedImage image = reader.read(0, param);
ImageIO.write(image, "JPEG", new File(args[0] + "_gray.jpg"));
}
finally {
reader.dispose();
}
}
}
}
So today I started with a new project. I want to make a simple heightmap generator in java, so I tried the following:
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import javax.imageio.ImageIO;
public class Heightmap {
public static int width = 200;
public static int height = 200;
public static void main(String[] args) {
BufferedImage bufferedImage = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY );
for(int x = 0; x < width; x++){
for(int y = 0; y < height; y++){
bufferedImage.setRGB(x, y, (byte )(Math.random() * 256 + 128) ); // + 128 because byte goes from -128 to 127
}
}
File outputFile = new File("heightmap.png");
try {
ImageIO.write(bufferedImage, "png", outputFile);
}catch (IOException ioex){
ioex.printStackTrace();
}
}
}
The code is very simple; I plan to try Perlin noise as the next step. But first I need to resolve this problem:
Generated Heightmap
The pixels in heightmap.png are either completely white or completely black. There are no grays in the image, which are of course necessary in a heightmap. Does anyone know what I did wrong?
Is it the BufferedImage.TYPE_BYTE_GRAY part? If so, what should I use instead?
After a friend set me on the right track, I found the solution.
Instead of BufferedImage.TYPE_BYTE_GRAY I used BufferedImage.TYPE_INT_RGB, so this is indeed where I went wrong. I also added a Color object, randomColor, in which the R, G and B components all share the same integer value from 0 to 255. Then in BufferedImage.setRGB I use the color code of randomColor (so R,G,B = 255 gives #FFFFFF, which is white) as the value of pixel (x,y):
import java.awt.Color;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import javax.imageio.ImageIO;
public class Heightmap {
public static int width = 200;
public static int height = 200;
public static void main(String[] args) {
BufferedImage bufferedImage = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB );
for(int x = 0; x < width; x++){
for(int y = 0; y < height; y++){
int randomValue = (int)(Math.random() * 256);
Color randomColor = new Color( randomValue, randomValue, randomValue);
bufferedImage.setRGB(x, y, randomColor.getRGB());
}
}
File outputFile = new File("heightmap.png");
try {
ImageIO.write(bufferedImage, "png", outputFile);
}catch (IOException ioex){
ioex.printStackTrace();
}
}
}
Now the heightmap.png gives what I expected: Heightmap.png
I want to transfer my byte array into a binary image, but I don't know how to do it.
The array values are only 0 and 1, where 0 = black and 1 = white:
byte [] arr = new byte[32*32];
for(int i=0;i<arr.length;i++){
arr[i] = i%2==0 ? (byte)0 : (byte)1;
}
Please help me, thanks.
It depends on what you are going to do with that binary image.
If you only need it for your computation, your array may do the job for you better,
although a 2-dimensional array may be more convenient to use.
If you want to construct a BufferedImage object, you can specify it to be a 1-bit-per-pixel type (see below) and fill its content using the setRGB() method.
Such an image can then be saved to a file or shown in a GUI, or accessed with the getRGB() method.
Here is a working example (GenerateChecker.java):
import java.awt.image.BufferedImage;
import javax.imageio.ImageIO;
import java.io.IOException;
import java.io.File;
public class GenerateChecker
{
private static final int width = 32;
private static final int height = 32;
public static void main(String args[]) throws IOException
{
BufferedImage im = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_BINARY);
int white = (255 << 16) | (255 << 8) | 255;
int black = 0;
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++)
im.setRGB(x, y, (((x + y)&1) == 0) ? black : white);
File outputfile = new File("checker.png");
ImageIO.write(im, "png", outputfile);
}
}
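To fill the image from the 0/1 byte array in the question instead of the checker pattern, a sketch along these lines should work (assuming arr is laid out row by row as a 32×32 grid):
// arr is the 32x32 byte array from the question, stored row by row
BufferedImage im = new BufferedImage(32, 32, BufferedImage.TYPE_BYTE_BINARY);
int white = (255 << 16) | (255 << 8) | 255;
for (int y = 0; y < 32; y++) {
    for (int x = 0; x < 32; x++) {
        im.setRGB(x, y, arr[y * 32 + x] == 0 ? 0 : white);   // 0 = black, 1 = white
    }
}
ImageIO.write(im, "png", new File("array.png"));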
I am trying to get every single color of every single pixel of an image.
My idea was following:
int[] pixels;
BufferedImage image;
image = ImageIO.read(this.getClass().getResource("image.png"));
pixels = ((DataBufferInt) image.getRaster().getDataBuffer()).getData();
Is that right? I can't even check what the pixels array contains, because I get the following error:
java.awt.image.DataBufferByte cannot be cast to java.awt.image.DataBufferInt
I would just like to get the color of every pixel into an array. How do I achieve that?
import java.io.*;
import java.awt.*;
import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
public class GetPixelColor {
public static void main(String args[]) throws IOException {
File file = new File("your_file.jpg");
BufferedImage image = ImageIO.read(file);
// Getting pixel color by position x and y (example coordinates; loop over all pixels in practice)
int x = 0, y = 0;
int clr = image.getRGB(x, y);
int red = (clr & 0x00ff0000) >> 16;
int green = (clr & 0x0000ff00) >> 8;
int blue = clr & 0x000000ff;
System.out.println("Red Color value = " + red);
System.out.println("Green Color value = " + green);
System.out.println("Blue Color value = " + blue);
}
}
Of course, you have to add a for loop over all pixels.
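For example, to collect the color of every pixel into an array, as asked, the loop could look like this (a sketch; image is the BufferedImage loaded above):
int width = image.getWidth();
int height = image.getHeight();
int[] pixels = new int[width * height];
for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
        pixels[y * width + x] = image.getRGB(x, y);   // packed ARGB value of pixel (x, y)
    }
}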
The problem (also with the answer that was linked from the first answer) is that you hardly ever know what exact type your buffered image will be after reading it with ImageIO. It could contain a DataBufferByte or a DataBufferInt. You may deduce it in some cases via BufferedImage#getType(), but in the worst case, it has type TYPE_CUSTOM, and then you can only fall back to some instanceof tests.
However, you can convert your image into a BufferedImage that is guaranteed to have a DataBufferInt with ARGB values - namely with something like
public static BufferedImage convertToARGB(BufferedImage image)
{
BufferedImage newImage = new BufferedImage(
image.getWidth(), image.getHeight(),
BufferedImage.TYPE_INT_ARGB);
Graphics2D g = newImage.createGraphics();
g.drawImage(image, 0, 0, null);
g.dispose();
return newImage;
}
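With this conversion in place, the cast from the question should then succeed, for example:
BufferedImage argbImage = convertToARGB(image);
int[] pixels = ((DataBufferInt) argbImage.getRaster().getDataBuffer()).getData();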
Otherwise, you can call image.getRGB(x,y), which may perform the required conversions on the fly.
BTW: Note that obtaining the data buffer of a BufferedImage may degrade painting performance, because the image can no longer be "managed" and kept in VRAM internally.
import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
public class Main {
public static void main(String[] args) throws IOException {
BufferedImage bufferedImage = ImageIO.read(new File("norris.jpg"));
int height = bufferedImage.getHeight(), width = bufferedImage.getWidth();
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
int RGBA = bufferedImage.getRGB(x, y);
int alpha = (RGBA >> 24) & 255;
int red = (RGBA >> 16) & 255;
int green = (RGBA >> 8) & 255;
int blue = RGBA & 255;
}
}
}
}
Assuming the buffered image represents an image with 8-bit RGBA color components packed into integer pixels, I searched for "RGBA color space" on Wikipedia and found the following:
In the byte-order scheme, "RGBA" is understood to mean a byte R,
followed by a byte G, followed by a byte B, and followed by a byte A.
This scheme is commonly used for describing file formats or network
protocols, which are both byte-oriented.
With simple bitwise AND and bit-shift operations you can get the value of each color and the alpha value of the pixel.
Also interesting is the other ordering scheme of RGBA:
In the word-order scheme, "RGBA" is understood to represent a complete
32-bit word, where R is more significant than G, which is more
significant than B, which is more significant than A. This scheme can
be used to describe the memory layout on a particular system. Its
meaning varies depending on the endianness of the system.
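Conversely, the separate 8-bit components can be packed back into a single pixel value by shifting in the other direction, for example:
// reassemble a packed ARGB int from the individual components
int packedARGB = (alpha << 24) | (red << 16) | (green << 8) | blue;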
The image is backed by byte[] pixels, not int[] pixels, which is why the cast to DataBufferInt fails.
Try this: Java - get pixel array from image
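In other words, for an image whose raster is backed by a DataBufferByte, the raw bytes can be obtained like this (note that the channel order is then typically BGR or ABGR, depending on the image type):
byte[] pixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();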
import java.awt.Color;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import javax.imageio.ImageIO;
public class ImageUtil {
public static Color[][] loadPixelsFromImage(File file) throws IOException {
BufferedImage image = ImageIO.read(file);
Color[][] colors = new Color[image.getWidth()][image.getHeight()];
for (int x = 0; x < image.getWidth(); x++) {
for (int y = 0; y < image.getHeight(); y++) {
colors[x][y] = new Color(image.getRGB(x, y));
}
}
return colors;
}
public static void main(String[] args) throws IOException {
Color[][] colors = loadPixelsFromImage(new File("image.png"));
System.out.println("Color[0][0] = " + colors[0][0]);
}
}
I know this has already been answered, but the answers given are a bit convoluted and could use improvement.
The simple idea is to just loop through every (x,y) pixel in the image, and get the color of that pixel.
BufferedImage image = MyImageLoader.getSomeImage();
for ( int x = 0; x < image.getWidth(); x++ ) {
for( int y = 0; y < image.getHeight(); y++ ) {
Color pixel = new Color( image.getRGB( x, y ) );
// Do something with pixel color here :)
}
}
You could then perhaps wrap this method in a class, and implement Java's Iterable API.
import java.awt.Color;
import java.awt.image.BufferedImage;
import java.util.Iterator;
class IterableImage implements Iterable<Color> {
private BufferedImage image;
public IterableImage( BufferedImage image ) {
this.image = image;
}
@Override
public Iterator<Color> iterator() {
return new Itr();
}
private final class Itr implements Iterator<Color> {
private int x = 0, y = 0;
@Override
public boolean hasNext() {
return x < image.getWidth() && y < image.getHeight();
}
@Override
public Color next() {
// read the color at the current position first, then advance to the next pixel
Color color = new Color( image.getRGB( x, y ) );
x += 1;
if ( x >= image.getWidth() ) {
x = 0;
y += 1;
}
return color;
}
}
}
The usage of which might look something like the following
BufferedImage image = MyImageLoader.getSomeImage();
for ( Color color : new IterableImage( image ) ) {
// Do something with color here :)
}
I'm using the ImageJ library to read a .tiff image file, but when I try to read the pixels of the image into variable c, I get an error saying "incompatible types: required int, found int[]".
I'm quite new to Java, so can somebody tell me how to get around this problem? The code otherwise works fine with other image formats.
import java.awt.Color;
import java.awt.image.BufferedImage;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import javax.imageio.ImageIO;
import ij.ImagePlus;
public class GetPixelCoordinates {
//int y, x, tofind, col;
/**
* @param args the command line arguments
* @throws IOException
*/
public static void main(String args[]) throws IOException {
try {
//read image file
ImagePlus img = new ImagePlus("E:\\abc.tiff");
//write file
FileWriter fstream = new FileWriter("E:\\log.txt");
BufferedWriter out = new BufferedWriter(fstream);
//find cyan pixels
for (int y = 0; y < img.getHeight(); y++) {
for (int x = 0; x < img.getWidth(); x++) {
int c = img.getPixel(x,y);
Color color = new Color(c);
if (color.getRed() < 30 && color.getGreen() >= 225 && color.getBlue() >= 225) {
out.write("CyanPixel found at=" + x + "," + y);
out.newLine();
}
}
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
If you look at the documentation for getPixel(int,int) in ImagePlus you'll see that it returns an array of ints rather than a single int:
Returns the pixel value at (x,y) as a 4 element array. Grayscale values are returned in the first element. RGB values are returned in the first 3 elements. For indexed color images, the RGB values are returned in the first 3 elements and the index (0-255) is returned in the last.
It looks as if you're dealing with an RGB image, so you should be able to do the following instead:
int[] colorArray = img.getPixel(x, y);
int redValue = colorArray[0];
int greenValue = colorArray[1];
int blueValue = colorArray[2];
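Plugged into the loop from the question, that would look roughly like this:
int[] colorArray = img.getPixel(x, y);
int red = colorArray[0];
int green = colorArray[1];
int blue = colorArray[2];
if (red < 30 && green >= 225 && blue >= 225) {
    out.write("CyanPixel found at=" + x + "," + y);
    out.newLine();
}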