How to get the chain code from OpenCV's findContours method - Java

I had already spent some time trying to implement a chain code from the contour of a plant's leaf when I came across this question, which claimed that the OpenCV method findContours can be used to obtain the chain code.
So far I have this code:
public static List<MatOfPoint> chainCode;

public static void main(String[] args) {
    // load the core OpenCV library by name
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    // create a display window
    Imshow window1 = new Imshow("My Image - original");
    // load an image from file (read and decode JPEG file)
    Mat inputImg = Highgui.imread("files/11. Acer palmaturu/iPAD2_C11_EX01.JPG");
    Mat grayImg = new Mat(inputImg.height(), inputImg.width(), CvType.CV_8UC1);
    // turn into a binary image and invert
    Imgproc.cvtColor(inputImg, grayImg, Imgproc.COLOR_RGB2GRAY);
    Imgproc.threshold(grayImg, grayImg, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
    Mat invertcolormatrix = new Mat(grayImg.rows(), grayImg.cols(), grayImg.type(), new Scalar(255, 255, 255));
    Core.subtract(invertcolormatrix, grayImg, grayImg);
    // find the contours (with CHAIN_APPROX_NONE every boundary pixel is kept)
    chainCode = new ArrayList<MatOfPoint>();
    Imgproc.findContours(grayImg, chainCode, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
    // loop through the individual contours to find the largest one
    double largest_area = 0;
    int largest_contour_index = 0;
    for (int i = 0; i < chainCode.size(); i++) {
        // area of the current contour
        double a = Imgproc.contourArea(chainCode.get(i), false);
        // keep the largest contour and its index
        if (a > largest_area) {
            largest_area = a;
            largest_contour_index = i;
        }
    }
    // draw the largest contour
    if (chainCode.size() > 0) {
        Imgproc.drawContours(inputImg, chainCode, largest_contour_index, new Scalar(0, 0, 255), 1);
        System.out.println("chain code at " + largest_contour_index + "; " + chainCode.get(largest_contour_index));
    }
    // show the image
    window1.showImage(inputImg);
}
Right now I am stuck on how to get from these contour points to the actual chain code, as in V = 000567454....
Or should I try a completely different approach? Can anybody give me some help on this please, as I am really stuck!
I want to use the chain code for further processing and image recognition with a Fourier transform later on.
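For what it's worth, since findContours was called with CHAIN_APPROX_NONE, every boundary pixel of the contour is stored and each pair of successive points is 8-connected. A Freeman chain code can therefore be derived from the deltas between successive points. Below is a minimal sketch of such a helper (my own code, not an OpenCV API; the direction numbering, 0 = east counting counter-clockwise, is one common convention, and note that y grows downwards in image coordinates):

// Derive a Freeman chain code from a contour produced with CHAIN_APPROX_NONE.
public static List<Integer> freemanChainCode(MatOfPoint contour) {
    Point[] pts = contour.toArray();
    List<Integer> code = new ArrayList<Integer>();
    for (int i = 0; i < pts.length; i++) {
        Point a = pts[i];
        Point b = pts[(i + 1) % pts.length]; // the contour is closed, so wrap around
        code.add(directionOf((int) (b.x - a.x), (int) (b.y - a.y)));
    }
    return code;
}

// Map an 8-connected step (dx, dy) to a Freeman direction 0..7.
private static int directionOf(int dx, int dy) {
    if (dx == 1 && dy == 0) return 0;
    if (dx == 1 && dy == -1) return 1;
    if (dx == 0 && dy == -1) return 2;
    if (dx == -1 && dy == -1) return 3;
    if (dx == -1 && dy == 0) return 4;
    if (dx == -1 && dy == 1) return 5;
    if (dx == 0 && dy == 1) return 6;
    if (dx == 1 && dy == 1) return 7;
    throw new IllegalArgumentException("points are not 8-connected");
}

Calling freemanChainCode(chainCode.get(largest_contour_index)) and concatenating the digits should give a string like the V = 000567454... example, which can then feed the Fourier-descriptor step later on.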

Related

Find rectangle with inner rectangle using OpenCV

I am trying to extract a table row containing a filled rectangle from an image file using OpenCV. I have used findContours and boundingRect. Disclaimer: I am completely new to OpenCV and image processing, so this might not be an optimal solution.
This is what I have done so far. It gets me all the tables in the image, including the row I want. How can I filter down to just that row?
Mat sourceMat = Imgcodecs.imread("image.png");
Mat grayMat = new Mat();
Mat threshold = new Mat();
Mat threshold1 = new Mat();
Imgproc.cvtColor(sourceMat, grayMat, Imgproc.COLOR_BGR2GRAY);
Imgproc.threshold(grayMat, threshold, 70, 255, Imgproc.THRESH_BINARY_INV);
Imgproc.threshold(grayMat, threshold1, 270, 255, Imgproc.THRESH_BINARY);
Core.bitwise_not(grayMat, threshold);
Imgcodecs.imwrite("imagethreshold.png", threshold);
List<MatOfPoint> whiteContours = new ArrayList<>();
Mat hierarchy = new Mat();
Imgproc.findContours(threshold.clone(), whiteContours, hierarchy, Imgproc.RETR_CCOMP, Imgproc.CHAIN_APPROX_SIMPLE);
int count = 0;
// find appropriate bounding rectangles
for (int i = 0; i < whiteContours.size(); i++) {
    RotatedRect boundingRect = Imgproc.minAreaRect(new MatOfPoint2f(whiteContours.get(i).toArray()));
    Point[] rotated_rect_points = new Point[4];
    boundingRect.points(rotated_rect_points);
    Rect rect = Imgproc.boundingRect(new MatOfPoint(rotated_rect_points));
    Mat roiMat = sourceMat.submat(rect);
    if (rect.area() > 15000 && hierarchy.get(0, i) != null) {
        // keep contours whose hierarchy entry has a parent and a next contour, but no previous contour
        if (hierarchy.get(0, i)[3] != -1 && hierarchy.get(0, i)[0] != -1 && hierarchy.get(0, i)[1] == -1) {
            // write to image file
            Imgcodecs.imwrite("image" + count + ".png", roiMat);
            count++;
        }
    }
}
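One way to narrow this down to just the row (a hedged suggestion on my part, not something from the original post): a table row is typically much wider than it is tall, so inside the loop above you could additionally filter on the bounding rectangle's shape before writing it out. The thresholds here are assumptions to tune:

// Sketch: keep only row-shaped regions, i.e. much wider than tall.
double aspect = (double) rect.width / rect.height;
if (rect.area() > 15000 && aspect > 5.0 && rect.height < sourceMat.rows() / 10) {
    Imgcodecs.imwrite("row" + count + ".png", roiMat);
    count++;
}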

OpenCV Java: Card Extraction from Image

I am trying to implement some image processing using OpenCV and Java to extract a card out of an image.
Following is my approach:
1. Convert to BGR image
2. Convert to GRAY image
3. Apply GaussianBlur
4. Apply Canny edge detection
5. Dilate
6. Find contours
7. Find the largest contour
8. Find the corners of the largest contour using approxPolyDP
9. Get a top-down view of the cropped image along the largest contour
At step 8 I am facing some issues, as I am not getting the appropriate corners/vertices. The following sample images show the scenario:
The original image
After edge detection and dilation. (What should be done to get proper edges? Here I've got broken edges, and I could not get the Hough transform working.)
After finding vertices (shown in green)
Following is the code:
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
// load image
File input = new File("card4.png");
BufferedImage image = ImageIO.read(input);
byte[] data = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
// put the read image into a Mat
mat = new Mat(image.getHeight(), image.getWidth(), CvType.CV_8UC3); // original Mat
mat.put(0, 0, data);
mat_f = new Mat(image.getHeight(), image.getWidth(), CvType.CV_8UC3); // for storing the manipulated Mat
// conversion to grayscale, blurring and edge detection
Imgproc.cvtColor(mat, mat_f, Imgproc.COLOR_RGB2BGR);
Imgproc.cvtColor(mat_f, mat_f, Imgproc.COLOR_RGB2GRAY);
Imgproc.GaussianBlur(mat_f, mat_f, new Size(13, 13), 0);
Imgproc.Canny(mat_f, mat_f, 300, 600, 5, true);
Imgproc.dilate(mat_f, mat_f, new Mat(), new Point(-1, -1), 2);
Imgcodecs.imwrite("D:\\JAVA\\Image_Proc\\CVTest1.jpg", mat_f);
// finding contours
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(mat_f, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
double maxArea = 0;
int maxAreaIdx = 0;
// finding the largest contour
for (int idx = 0; idx != contours.size(); ++idx) {
    Mat contour = contours.get(idx);
    double contourarea = Imgproc.contourArea(contour);
    if (contourarea > maxArea) {
        maxArea = contourarea;
        maxAreaIdx = idx;
    }
}
//Rect rect = Imgproc.boundingRect(contours.get(maxAreaIdx));
//Imgproc.rectangle(mat, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(0, 0, 255), 7);
//mat = mat.submat(rect.y, rect.y + rect.height, rect.x, rect.x + rect.width);
// polygon approximation
MatOfPoint2f approxCurve = new MatOfPoint2f();
MatOfPoint2f oriCurve = new MatOfPoint2f(contours.get(maxAreaIdx).toArray());
Imgproc.approxPolyDP(oriCurve, approxCurve, 6.0, true);
// drawing green markers at the vertices
Point[] array = approxCurve.toArray();
for (int i = 0; i < array.length; i++) {
    Imgproc.circle(mat, array[i], 2, new Scalar(0, 255, 0), 5);
}
Imgcodecs.imwrite("D:\\JAVA\\Image_Proc\\CVTest.jpg", mat);
Seeking help in getting the appropriate corner vertices...
Thanks in advance.
In order to achieve a good result with your approach, your cards have to contain 4 corners. But I prefer to use the HoughLine approach for this task.
Step 1: Resize the image for higher performance
Step 2: Edge detection
Convert the image to grayscale
Blur the image to remove noise
Detect edges using the Canny filter
You can use dilation to make the white regions bigger for the next step
Step 3: Find the card's corners
Find the contours of the image
From the list of contours, get the largest contour
Get the convex hull of it
Use approxPolyDP to simplify the convex hull (this should give a quadrilateral)
From here you can draw the contour to get the rectangle after restoring the scale
From the quadrilateral you can get the 4 corners.
Find the homography
Warp the input image using the computed homography matrix
Here is sample code in Java:
// STEP 1: Resize the input image to img_proc to reduce computation
double ratio = DOWNSCALE_IMAGE_SIZE / Math.max(frame.width(), frame.height());
Size downscaledSize = new Size(frame.width() * ratio, frame.height() * ratio);
Mat dst = new Mat(downscaledSize, frame.type());
Imgproc.resize(frame, dst, downscaledSize);
Mat grayImage = new Mat();
Mat detectedEdges = new Mat();
// STEP 2: convert to grayscale
Imgproc.cvtColor(dst, grayImage, Imgproc.COLOR_BGR2GRAY);
// STEP 3: try to filter out text inside the document
Imgproc.medianBlur(grayImage, detectedEdges, 9);
// STEP 4: Edge detection
Mat edges = new Mat();
// Imgproc.erode(edges, edges, new Mat());
// Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 1); // 1
// Canny detector, with a lower:upper threshold ratio of 1:3
Imgproc.Canny(detectedEdges, edges, this.threshold.getValue(), this.threshold.getValue() * 3, 3, true);
// STEP 5: make the white objects bigger to join nearby lines
Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 1); // 1
Image imageToShow = Utils.mat2Image(edges);
updateImageView(cannyFrame, imageToShow);
// STEP 6: Compute the contours
List<MatOfPoint> contours = new ArrayList<>();
Imgproc.findContours(edges, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
// STEP 7: Sort the contours by length and only keep the largest one
MatOfPoint largestContour = getMaxContour(contours);
// STEP 8: Generate the convex hull of this contour
Mat convexHullMask = Mat.zeros(frame.rows(), frame.cols(), frame.type());
MatOfInt hullInt = new MatOfInt();
Imgproc.convexHull(largestContour, hullInt);
MatOfPoint hullPoint = OpenCVUtil.getNewContourFromIndices(largestContour, hullInt);
// STEP 9: Use approxPolyDP to simplify the convex hull (this should give a quadrilateral)
MatOfPoint2f polygon = new MatOfPoint2f();
Imgproc.approxPolyDP(OpenCVUtil.convert(hullPoint), polygon, 20, true);
List<MatOfPoint> tmp = new ArrayList<>();
tmp.add(OpenCVUtil.convert(polygon));
restoreScaleMatOfPoint(tmp, ratio);
Imgproc.drawContours(convexHullMask, tmp, 0, new Scalar(25, 25, 255), 2);
// Image extractImageToShow = Utils.mat2Image(convexHullMask);
// updateImageView(extractFrame, extractImageToShow);
MatOfPoint2f finalCorners = new MatOfPoint2f();
Point[] tmpPoints = polygon.toArray();
for (Point point : tmpPoints) {
    point.x = point.x / ratio;
    point.y = point.y / ratio;
}
finalCorners.fromArray(tmpPoints);
boolean clockwise = true;
double currentThreshold = this.threshold.getValue();
if (finalCorners.toArray().length == 4) {
    Size size = getRectangleSize(finalCorners);
    Mat result = Mat.zeros(size, frame.type());
    // STEP 10: Homography: use findHomography to find the perspective transformation of your paper sheet
    Mat homography = new Mat();
    MatOfPoint2f dstPoints = new MatOfPoint2f();
    Point[] arrDstPoints = { new Point(result.cols(), result.rows()), new Point(0, result.rows()), new Point(0, 0), new Point(result.cols(), 0) };
    dstPoints.fromArray(arrDstPoints);
    homography = Calib3d.findHomography(finalCorners, dstPoints);
    // STEP 11: Warp the input image using the computed homography matrix
    Imgproc.warpPerspective(frame, result, homography, size);
}
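As a side note, the snippet references several helpers (getMaxContour, getRectangleSize, restoreScaleMatOfPoint and the OpenCVUtil conversions) that are not shown. Below are hedged sketches of what they might look like; these are my assumptions about the author's utility code, not the original implementations:

// Assumed: pick the largest contour by area (arcLength would also work).
private static MatOfPoint getMaxContour(List<MatOfPoint> contours) {
    MatOfPoint largest = null;
    double maxArea = 0;
    for (MatOfPoint c : contours) {
        double area = Imgproc.contourArea(c);
        if (area > maxArea) {
            maxArea = area;
            largest = c;
        }
    }
    return largest;
}

// Assumed: average the opposite side lengths of the quadrilateral
// (corner order as produced by approxPolyDP) to get the output size.
private static Size getRectangleSize(MatOfPoint2f corners) {
    Point[] p = corners.toArray();
    double top = distance(p[0], p[1]);
    double right = distance(p[1], p[2]);
    double bottom = distance(p[2], p[3]);
    double left = distance(p[3], p[0]);
    return new Size((top + bottom) / 2.0, (left + right) / 2.0);
}

private static double distance(Point a, Point b) {
    return Math.hypot(a.x - b.x, a.y - b.y);
}

// Assumed: build a MatOfPoint containing only the hull points selected by index.
static MatOfPoint getNewContourFromIndices(MatOfPoint contour, MatOfInt indices) {
    Point[] pts = contour.toArray();
    int[] idx = indices.toArray();
    Point[] hull = new Point[idx.length];
    for (int i = 0; i < idx.length; i++) {
        hull[i] = pts[idx[i]];
    }
    return new MatOfPoint(hull);
}

// Assumed: scale contour points back up to original image coordinates.
private static void restoreScaleMatOfPoint(List<MatOfPoint> contours, double ratio) {
    for (MatOfPoint c : contours) {
        Point[] pts = c.toArray();
        for (Point p : pts) {
            p.x /= ratio;
            p.y /= ratio;
        }
        c.fromArray(pts);
    }
}

// Assumed: simple conversions between MatOfPoint and MatOfPoint2f.
static MatOfPoint2f convert(MatOfPoint m) { return new MatOfPoint2f(m.toArray()); }
static MatOfPoint convert(MatOfPoint2f m) { return new MatOfPoint(m.toArray()); }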

OpenCV - Closing contours (Java)

I'm currently trying to close the contours on the right of this picture:
Sample.
The reason for the open contour lies in kabeja, a library to convert DXF files to images. It seems that on some images it doesn't convert the last pixel column (or row), and that's why the sample picture is open.
I had the idea to use Core.copyMakeBorder() in OpenCV to add some space to the picture. After that I tried to use Imgproc.approxPolyDP() to close the contour, but this doesn't work. I tried this with different epsilon values: Pics EDIT: Can't post more than 2 links
The reason for that is maybe that the contour surrounds the line. It never closes the contour where I want it to.
I tried another method using Imgproc.convexHull(), which delivers this one: ConvexHull.
This could be useful for me, but I have no idea how to take out the part of the convex hull I need and merge it together with the contour to close it.
I hope that someone has an idea.
Here is my method for Imgproc.approxPolyDP()
public static ArrayList<MatOfPoint> makeComplete(Mat mat) {
    System.out.println("makeComplete: START");
    Mat dst = new Mat();
    Core.copyMakeBorder(mat, dst, 10, 10, 10, 10, Core.BORDER_CONSTANT);
    ArrayList<MatOfPoint> cnts = Tools.getContours(dst);
    ArrayList<MatOfPoint2f> opened = new ArrayList<>();
    // convert to MatOfPoint2f to use approxPolyDP
    for (MatOfPoint m : cnts) {
        MatOfPoint2f temp = new MatOfPoint2f(m.toArray());
        opened.add(temp);
        System.out.println("First loop runs");
    }
    ArrayList<MatOfPoint> closed = new ArrayList<>();
    for (MatOfPoint2f conts : opened) {
        MatOfPoint2f temp = new MatOfPoint2f();
        Imgproc.approxPolyDP(conts, temp, 3, true);
        MatOfPoint closedTemp = new MatOfPoint(temp.toArray());
        closed.add(closedTemp);
        System.out.println("Second loop runs");
    }
    System.out.println("makeComplete: END");
    return closed;
}
And here is the code for Imgproc.convexHull():
public static ArrayList<MatOfPoint> getConvexHull(Mat mat) {
    Mat dst = new Mat();
    Core.copyMakeBorder(mat, dst, 10, 10, 10, 10, Core.BORDER_CONSTANT);
    ArrayList<MatOfPoint> cnts = Tools.getContours(dst);
    ArrayList<MatOfPoint> out = new ArrayList<MatOfPoint>();
    MatOfPoint mopIn = cnts.get(0);
    MatOfInt hull = new MatOfInt();
    Imgproc.convexHull(mopIn, hull, false);
    MatOfPoint mopOut = new MatOfPoint();
    mopOut.create((int) hull.size().height, 1, CvType.CV_32SC2);
    for (int i = 0; i < hull.size().height; i++) {
        int index = (int) hull.get(i, 0)[0];
        double[] point = new double[]{
            mopIn.get(index, 0)[0], mopIn.get(index, 0)[1]
        };
        mopOut.put(i, 0, point);
    }
    out.add(mopOut);
    return out;
}
Best regards,
Brk
Assuming the diagnosis is correct and the last row really is missing (the column case works the same way), try the following. Let x go from left to right and y from top to bottom. We add one row of empty (white?) pixels at the bottom of the image and then walk along it from left to right. Below is pseudocode; a hedged Java sketch follows after it.
// EMPTY - value of the background, e.g. white for the sample image
PixelType curPixel = EMPTY;
int y = height - 1; // last row, the one we added
for (int x = 0; x < width; ++x)
{
    // img(y, x)   - the current pixel (in the added row, initially empty)
    // img(y-1, x) - the pixel above the current one
    if (img(y-1, x) != img(y, x))
    {
        // the pixel above isn't empty, so we make the current pixel non-empty
        img(y, x) = img(y-1, x);
        // if we were drawing, then stop; otherwise - start
        if (curPixel == EMPTY)
            curPixel = img(y-1, x);
        else
            curPixel = EMPTY;
    }
    else
    {
        img(y, x) = curPixel;
    }
}
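And a hedged Java translation of the pseudocode (my own sketch; it assumes an 8-bit single-channel Mat whose background value is 255, and that the extra bottom row has already been added with Core.copyMakeBorder):

// Sketch: close an open contour along the bottom row that was added
// with copyMakeBorder. EMPTY = 255 is an assumption; adjust to your image.
static void closeAlongBottomRow(Mat img) {
    final double EMPTY = 255;
    double curPixel = EMPTY;
    int y = img.rows() - 1; // last row, the one we added
    for (int x = 0; x < img.cols(); x++) {
        double above = img.get(y - 1, x)[0];
        if (above != img.get(y, x)[0]) {
            // the pixel above is not empty: copy it down and toggle drawing
            img.put(y, x, above);
            curPixel = (curPixel == EMPTY) ? above : EMPTY;
        } else {
            img.put(y, x, curPixel);
        }
    }
}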

OpenCV detecting largest rectangle yields puzzling results

My aim is to detect the largest rectangle in an image, whether it is skewed or not. After some research and googling I came up with code that should work in theory, but in half of the cases I see puzzling results.
I used OpenCV for Android; here is the code:
private void find_parallels() {
    Utils.bitmapToMat(selectedPicture, img);
    Mat temp = new Mat();
    Imgproc.resize(img, temp, new Size(640, 480));
    img = temp.clone();
    Mat imgGray = new Mat();
    Imgproc.cvtColor(img, imgGray, Imgproc.COLOR_BGR2GRAY);
    Imgproc.GaussianBlur(imgGray, imgGray, new Size(5, 5), 0);
    Mat threshedImg = new Mat();
    Imgproc.adaptiveThreshold(imgGray, threshedImg, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 11, 2);
    List<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();
    Mat imageContours = imgGray.clone();
    Imgproc.cvtColor(imageContours, imageContours, Imgproc.COLOR_GRAY2BGR);
    Imgproc.findContours(threshedImg, contours, hierarchy, Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_SIMPLE);
    max_area = 0;
    int num = 0;
    for (int i = 0; i < contours.size(); i++) {
        area = Imgproc.contourArea(contours.get(i));
        if (area > 100) {
            MatOfPoint2f mop = new MatOfPoint2f(contours.get(i).toArray());
            peri = Imgproc.arcLength(mop, true);
            Imgproc.approxPolyDP(mop, approx, 0.02 * peri, true);
            if (area > max_area && approx.toArray().length == 4) {
                biggest = approx;
                num = i;
                max_area = area;
            }
        }
    }
    selectedPicture = Bitmap.createBitmap(640, 480, Bitmap.Config.ARGB_8888);
    Imgproc.drawContours(img, contours, num, new Scalar(0, 0, 255));
    Utils.matToBitmap(img, selectedPicture);
    imageView1.setImageBitmap(selectedPicture);
}
In some cases it works excellently, as can be seen in this image (see the white line between the monitor bezel and the screen; sorry for the color):
Example that works:
However, in this image, and in most images where the screen is greyish, it gives a crazy result.
Example that doesn't work:
Try using morphology: a dilate followed by an erode with the same kernel (a morphological closing) should make it better.
Or use pyrDown + pyrUp, or just blur the image.
In short, use the low-pass-filter class of methods, because your object of interest is much larger than the noise.
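For reference, a minimal sketch of that suggestion applied to the code above (the kernel size is an assumption to tune):

// Morphological closing: dilate then erode with the same kernel,
// which fills small gaps in the thresholded image before findContours.
Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(5, 5));
Imgproc.morphologyEx(threshedImg, threshedImg, Imgproc.MORPH_CLOSE, kernel);
// Alternatively, suppress fine detail with pyrDown + pyrUp before thresholding:
// Mat small = new Mat();
// Imgproc.pyrDown(imgGray, small);
// Imgproc.pyrUp(small, imgGray);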

Illumination Normalization not returning expected results

I am using OpenCV4Android to process my images. I wanted to perform illumination normalization, which was linked to me with this work:
http://lear.inrialpes.fr/pubs/2007/TT07/Tan-amfg07a.pdf
Furthermore, I was given a COMPLETE IMPLEMENTATION in C++ (OpenCV):
https://github.com/bytefish/opencv/blob/master/misc/tan_triggs.cpp
I tried to rewrite this code to Java, but I think there might be a mistake somewhere. What I get from this algorithm is close, but not good enough. Check the expected results in the PDF above, for example on page 12. And this is what I get:
https://dl.dropboxusercontent.com/u/108321090/a1.png
https://dl.dropboxusercontent.com/u/108321090/Screenshot_2013-12-31-14-09-25.png
So there is still too much noise between the background and the face features, but I think it's my fault here. This is my code:
// GET IMAGE URI
Uri selectedImage = imageReturnedIntent.getData();
// CREATE BITMAP FROM IT
BitmapFactory.Options bmpFactoryOptions = new BitmapFactory.Options();
bmpFactoryOptions.inPreferredConfig = Bitmap.Config.ARGB_8888;
Bitmap bmp = BitmapFactory.decodeStream(getContentResolver().openInputStream(selectedImage),
        null, bmpFactoryOptions);
// CREATE OPENCV MAT OBJECT
Mat imageMat = new Mat();
Utils.bitmapToMat(bmp, imageMat);
// CONVERT TO GRAYSCALE
Mat grayMat = new Mat();
Imgproc.cvtColor(imageMat, grayMat, Imgproc.COLOR_BGR2GRAY);
// CUT OUT FACE FROM WHOLE IMAGE
// (...) face detection cascades localize the face and write the region where the
// face is located into an array; then I create a Mat with only the face in it:
Mat cleanFaceMatGRAY = new Mat();
cleanFaceMatGRAY = new Mat(faceDetectMatGRAY, facesArray[0]);
// PROCESSING OF MAT WITH FACE (algorithm from the PDF & .cpp file)
Mat I = tan_triggs_preprocessing(cleanFaceMatGRAY);
Core.normalize(I, I, 0, 255, Core.NORM_MINMAX, CvType.CV_8UC1);
// DISPLAY MAT IN IMAGEVIEW
ivPickedPhoto.setImageBitmap(AppTools.createBitmapFromMat(I, Bitmap.Config.ARGB_8888));
And here is the method with the algorithm (as you can see, it is a direct copy of the .cpp file, with the calls rewritten for OpenCV4Android):
private Mat tan_triggs_preprocessing(Mat image) {
    float alpha = 0.1f;
    float tau = 10.0f;
    float gamma = 0.2f;
    int sigma0 = 1;
    int sigma1 = 2;
    // Convert to floating point:
    Mat X = image;
    X.convertTo(X, CvType.CV_32FC1);
    // Start preprocessing:
    Mat I = new Mat();
    Core.pow(X, gamma, I);
    // Calculate the DoG image:
    {
        Mat gaussian0 = new Mat();
        Mat gaussian1 = new Mat();
        // Kernel size:
        int kernel_sz0 = (3 * sigma0);
        int kernel_sz1 = (3 * sigma1);
        // Make them odd for OpenCV:
        kernel_sz0 += ((kernel_sz0 % 2) == 0) ? 1 : 0;
        kernel_sz1 += ((kernel_sz1 % 2) == 0) ? 1 : 0;
        Size ksize1 = new Size(kernel_sz0, kernel_sz0);
        Size ksize2 = new Size(kernel_sz1, kernel_sz1);
        Imgproc.GaussianBlur(I, gaussian0, ksize1, sigma0, sigma0, Imgproc.BORDER_CONSTANT);
        Imgproc.GaussianBlur(I, gaussian1, ksize2, sigma1, sigma1, Imgproc.BORDER_CONSTANT);
        Core.subtract(gaussian0, gaussian1, I);
    }
    {
        double meanI = 0.0;
        {
            Mat tmp = new Mat();
            Mat abstmp = new Mat();
            Core.absdiff(I, new Scalar(0), abstmp);
            Core.pow(abstmp, alpha, tmp);
            meanI = Core.mean(tmp).val[0];
        }
        Core.divide(Math.pow(meanI, 1.0 / alpha), I, I);
    }
    {
        double meanI = 0.0;
        {
            Mat tmp = new Mat();
            Mat abstmp = new Mat();
            Mat mintmp = new Mat();
            Core.absdiff(I, new Scalar(0), abstmp);
            Core.min(abstmp, new Scalar(tau), mintmp);
            Core.pow(mintmp, alpha, tmp);
            meanI = Core.mean(tmp).val[0];
        }
        Core.divide(Math.pow(meanI, 1.0 / alpha), I, I);
    }
    // Squash into the tanh:
    {
        for (int r = 0; r < I.rows(); r++) {
            for (int c = 0; c < I.cols(); c++) {
                I.get(r, c)[0] = Math.tanh(I.get(r, c)[0]) / tau;
            }
        }
        Core.multiply(I, new Scalar(tau), I);
    }
    return I;
}
And what I didn't understand while rewriting this code was the iteration over the matrix. In the .cpp there was
I.at<float>(r,c)
which I replaced with just:
I.get(r,c)[0]
Do you think I might have lost some data here, and that's why the image is shady?
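For what it's worth, one detail of the Java bindings seems relevant here (my observation, not from the original post): Mat.get(r, c) returns a fresh double[] copy of the pixel, so assigning to I.get(r,c)[0] only modifies that temporary array and never writes anything back into the Mat. Unlike the C++ I.at<float>(r,c), which is an lvalue, a write in Java needs Mat.put. A sketch of the squash loop with the write-back (note the C++ original applies tanh to x / tau, so the division likely belongs inside Math.tanh rather than outside it):

// Sketch: the tanh squash with an explicit write-back via put().
for (int r = 0; r < I.rows(); r++) {
    for (int c = 0; c < I.cols(); c++) {
        double v = I.get(r, c)[0];
        I.put(r, c, Math.tanh(v / tau)); // write the result back into the Mat
    }
}
Core.multiply(I, new Scalar(tau), I);

Per-pixel get/put is also slow; bulk access via I.get(0, 0, buffer) and I.put(0, 0, buffer) over a float[] of size rows*cols is much faster for a CV_32FC1 Mat.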
