I am trying to extract a table row containing a filled rectangle from an image file using OpenCV. I have used findContours and boundingRect. Disclaimer: I am completely new to OpenCV and image processing, so this might not be an optimal solution.
This is what I have done so far. It gets me all the tables in the image, including the row I want. How can I filter down to just that row?
Mat sourceMat = Imgcodecs.imread("image.png");
Mat grayMat = new Mat();
Mat threshold = new Mat();
Mat threshold1 = new Mat();
Imgproc.cvtColor(sourceMat, grayMat, Imgproc.COLOR_BGR2GRAY);
Imgproc.threshold(grayMat, threshold, 70, 255, Imgproc.THRESH_BINARY_INV);
// note: a threshold of 270 can never be exceeded on an 8-bit image; threshold1 is not used below
Imgproc.threshold(grayMat, threshold1, 270, 255, Imgproc.THRESH_BINARY);
// invert the grayscale image (this overwrites the thresholded result above)
Core.bitwise_not(grayMat, threshold);
Imgcodecs.imwrite("imagethreshold.png", threshold);
List<MatOfPoint> whiteContours = new ArrayList<>();
Mat hierarchy = new Mat();
Imgproc.findContours(threshold.clone(), whiteContours, hierarchy, Imgproc.RETR_CCOMP, Imgproc.CHAIN_APPROX_SIMPLE);
int count = 0;
// find appropriate bounding rectangles
for (int i = 0; i < whiteContours.size(); i++) {
RotatedRect boundingRect = Imgproc.minAreaRect(new MatOfPoint2f(whiteContours.get(i).toArray()));
Point rotated_rect_points[] = new Point[4];
boundingRect.points(rotated_rect_points);
Rect rect = Imgproc.boundingRect(new MatOfPoint(rotated_rect_points));
Mat roiMat = sourceMat.submat(rect);
if (rect.area() > 15000 && hierarchy.get(0, i) != null) {
// keep contours that have a parent and a next sibling, but no previous sibling
if (hierarchy.get(0, i)[3] != -1 && hierarchy.get(0, i)[0] != -1 && hierarchy.get(0, i)[1] == -1) {
// write to image file
Imgcodecs.imwrite("image" + count + ".png", roiMat);
count++;
}
}
}
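One possible way to narrow this down (a sketch meant to sit inside the loop above, reusing grayMat, rect, sourceMat and count, and assuming the filled rectangle is darker than a normal table row) is to keep only the candidates whose region is mostly dark:
// Sketch: keep only candidate rows whose ROI is mostly dark (filled) pixels.
double meanGray = Core.mean(grayMat.submat(rect)).val[0];
// a filled rectangle row is much darker on average than a normal table row;
// 100 is an arbitrary cut-off to tune for your images
if (meanGray < 100) {
Imgcodecs.imwrite("filledRow" + count + ".png", sourceMat.submat(rect));
}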
I have to develop a similar algorithm to the one in Remove top section of image above border line to detect text document, but in Java 1.8 using JavaCV.
The method signature in Python is
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
However, in Java it appears to be:
MatVector mt = new MatVector();
findContours(dst, mt, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
I'm stuck on finding the contours and sorting them from biggest to smallest. How do I go about sorting the contours from biggest to smallest?
My code:
Mat image = imread(imagePath);
Mat gray = new Mat();
cvtColor(image, gray, COLOR_BGR2GRAY);
GaussianBlur(gray, gray, new Size(3, 3), 0);
Mat dst = new Mat();
threshold(gray, dst, 0, 255, THRESH_BINARY + THRESH_OTSU);
// Find contours and sort for largest contour
MatVector mt = new MatVector();
findContours(dst, mt, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
A suggestion on how to access the contours, from https://github.com/bytedeco/javacv/issues/1270:
// accessing contours
MatVector contours = ...
for (int i = 0; i < contours.size(); ++i) {
IntIndexer points = contours.get(i).createIndexer();
int size = (int) points.size(0); // points are stored in a Mat with a single column and multiple rows (hence size(0)); each element has two channels - x and y - so the type is CV_32SC2 for integer points
for (int j = 0; j < size; ++j) {
int x = points.get(2 * j);
int y = points.get(2 * j + 1);
// do something with x and y
}
}
Thank you
As @fmw42 suggested, I refactored the code to use contourArea().
See below:
Mat mask = new Mat();
Mat gray = new Mat();
Mat denoised = new Mat();
Mat bin = new Mat();
Mat hierarchy = new Mat();
MatVector contours = new MatVector();
cvtColor(mat, gray, COLOR_BGR2GRAY);
//Normalize
GaussianBlur(gray, denoised, new Size(5, 5), 0);
threshold(denoised, mask, 0, 255, THRESH_BINARY_INV | THRESH_OTSU);
normalize(gray, gray, 0, 255, NORM_MINMAX, -1, mask);
// Convert image to binary
threshold(gray, bin, 150, 255, THRESH_BINARY);
// Find contours
findContours(bin, contours, hierarchy, RETR_TREE, CHAIN_APPROX_NONE);
long contourCount = contours.size();
System.out.println("Countour count " + contourCount);
double maxArea = 0;
int maxAreaId = 0;
for (int i = 0; i < contourCount; ++i) {
// Calculate the area of each contour
Mat contour = contours.get(i);
double area = contourArea(contour);
if(area > maxArea){
maxAreaId = i;
maxArea = area;
}
}
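If a full descending sort is needed rather than just the maximum, one option (a sketch reusing the same JavaCV calls and the contours MatVector from above) is to sort the contour indices by their areas:
// Sketch: sort contour indices from largest to smallest area.
long n = contours.size();
List<Integer> indices = new ArrayList<>();
double[] areas = new double[(int) n];
for (int i = 0; i < n; i++) {
indices.add(i);
areas[i] = contourArea(contours.get(i));
}
indices.sort((a, b) -> Double.compare(areas[b], areas[a]));
// indices.get(0) now points at the biggest contour, indices.get(1) at the next biggest, and so on
Mat biggest = contours.get(indices.get(0));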
I have a text image and I have found the contours for every word. Now I want to crop all the contours in the order they appear in the text. I have tried to sort the contours from left to right but did not succeed. How do I do this?
What I get as a result is the word images in random order, but I want them in the same order as they appear in the image.
Mat finalMat = mats[1];
Mat mat2 = mats[0];
Mat gray = new Mat(finalMat.rows(), finalMat.cols(), CvType.CV_8UC1);
Imgproc.cvtColor(finalMat,gray,Imgproc.COLOR_BGR2GRAY);
Mat mat = new Mat();
Imgproc.dilate(mat2, mat, Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(12, 5)));
Mat hierarchy = new Mat();
List<MatOfPoint> contours = new ArrayList<>();
Imgproc.findContours(mat,contours,hierarchy,Imgproc.RETR_EXTERNAL,
Imgproc.CHAIN_APPROX_SIMPLE);
Collections.sort(contours, new Comparator<MatOfPoint>() {
@Override
public int compare(MatOfPoint o1, MatOfPoint o2) {
Rect rect1 = Imgproc.boundingRect(o1);
Rect rect2 = Imgproc.boundingRect(o2);
int result = 0;
double total = rect1.tl().y/rect2.tl().y;
if (total>=0.9 && total<=1.4 ){
result = Double.compare(rect1.tl().x, rect2.tl().x);
}
return result;
}
});
for (int contourIdx = 0; contourIdx < contours.size(); contourIdx++) {
if (Imgproc.contourArea(contours.get(contourIdx)) > 50 ){
Rect rect = Imgproc.boundingRect(contours.get(contourIdx));
if (rect.height > 28){
Mat crop;
if((rect.y + rect.height+15)<gray.rows() && rect.y-3>1)
{
Imgproc.rectangle(finalMat, new Point(rect.x,rect.y-3), new Point(rect.x+rect.width,rect.y-3+rect.height+15),new Scalar(255,0,0));
crop = gray.submat(rect.y-3, rect.y-3 + rect.height+15, rect.x, rect.x + rect.width);
}
else {
Imgproc.rectangle(finalMat, new Point(rect.x,rect.y), new Point(rect.x+rect.width,rect.y+rect.height),new Scalar(255,0,0));
crop = gray.submat(rect.y, rect.y + rect.height, rect.x, rect.x + rect.width);
}
Bitmap bmp = Bitmap.createBitmap(crop.width(), crop.height(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(crop, bmp);
MediaStore.Images.Media.insertImage(getContentResolver(), bmp, contourIdx+".jpg" , null);
}
}
}
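A sketch of one way to get a stable reading order (assuming roughly horizontal lines of text, and reusing the contours list from findContours above): sort the bounding boxes top-to-bottom, group them into lines with a y-tolerance, then sort each line left-to-right.
// Sketch: sort word boxes into reading order - top-to-bottom, then left-to-right within a line.
List<Rect> boxes = new ArrayList<>();
for (MatOfPoint contour : contours) {
boxes.add(Imgproc.boundingRect(contour));
}
boxes.sort(Comparator.comparingInt(r -> r.y));   // rough top-to-bottom order
List<List<Rect>> textLines = new ArrayList<>();
for (Rect box : boxes) {
List<Rect> lastLine = textLines.isEmpty() ? null : textLines.get(textLines.size() - 1);
// start a new line when the box sits clearly below the first box of the current line
if (lastLine == null || box.y > lastLine.get(0).y + lastLine.get(0).height / 2) {
List<Rect> newLine = new ArrayList<>();
newLine.add(box);
textLines.add(newLine);
} else {
lastLine.add(box);
}
}
for (List<Rect> line : textLines) {
line.sort(Comparator.comparingInt(r -> r.x));  // left-to-right within each line
}
Cropping the boxes line by line from textLines then gives the words in the same order as they appear in the image.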
How can I detect the four corner points of the biggest square (at the center of the image) using OpenCV in Java?
I have solved this using findContours.
Original Image
Output Image
Please find the code below. I don't know how to detect the end points of the center square. I tried to detect lines using HoughLinesP, but it returns only 1 vertical line instead of all 4 lines.
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
String path = "/Users/saurabhsaluja/Desktop/cimg.jpg";
Mat img = Imgcodecs.imread(path);
Mat destination = new Mat(img.rows(),img.cols(),img.type());
Core.addWeighted(img, 1.3, destination, -0.7, 0, destination);
Mat cannyOutput = new Mat();
int threshold = 15;
Mat srcGray = new Mat();
Imgproc.cvtColor(destination, srcGray, Imgproc.COLOR_BGR2GRAY);
Imgproc.Canny(srcGray, cannyOutput, threshold, threshold* 4);
Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(10,10));
Mat element2 = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(10,10));
Imgproc.dilate(cannyOutput, cannyOutput, element);
Imgproc.dilate(cannyOutput, cannyOutput, element2);
element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9,9));
element2 = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9,9));
Imgproc.erode(cannyOutput, cannyOutput, element);
Imgproc.erode(cannyOutput, cannyOutput, element2);
Imgcodecs.imwrite("/Users/saurabhsaluja/Desktop/cannyOutput.jpg", cannyOutput); //THE IMAGE YOU ARE LOOKING AT
Mat lines = new Mat();
Imgproc.HoughLinesP(cannyOutput, lines, 1, Math.PI / 180, 50, 20, 20);
for(int i = 0; i < lines.cols(); i++) {
double[] val = lines.get(0, i);
Imgproc.line(img, new Point(val[0], val[1]), new Point(val[2], val[3]), new Scalar(0, 0, 255), 2);
}
Imgcodecs.imwrite("/Users/saurabhsaluja/Desktop/finalimg.jpg", img);
Solution:
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(cannyOutput, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
double inf = 0;
Rect max_rect = null;
for(int i=0; i< contours.size();i++){
Rect rect = Imgproc.boundingRect(contours.get(i));
double area = rect.area();
if(inf < area) {
max_rect = rect;
inf = area;
//Imgcodecs.imwrite("/Users/saurabhsaluja/Desktop/input"+i+".jpg", img);
}
if(area > 50000) {
System.out.println(area);
Imgproc.rectangle(img, new Point(rect.x,rect.y), new Point(rect.x+rect.width,rect.y+rect.height),new Scalar(0,0,0),5);
}
}
Now just get the biggest one by looking at the area of each contour.
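If the four corner points themselves are needed rather than just the bounding box, one option (a sketch; maxContourIdx is a hypothetical index of the largest contour, tracked the same way as max_rect above) is to approximate that contour with approxPolyDP and check for four vertices:
// Sketch: approximate the largest contour and read out its corner points.
MatOfPoint biggest = contours.get(maxContourIdx); // maxContourIdx: hypothetical index of the largest contour
MatOfPoint2f curve = new MatOfPoint2f(biggest.toArray());
MatOfPoint2f approx = new MatOfPoint2f();
double epsilon = 0.02 * Imgproc.arcLength(curve, true);
Imgproc.approxPolyDP(curve, approx, epsilon, true);
if (approx.total() == 4) {
for (Point corner : approx.toArray()) {
System.out.println("corner: " + corner);
}
}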
Thanks.
Solution Image:
Output:
I'm trying to get this rectangle from that image:
I found this solution using OpenCV:
private Bitmap findRectangle(Bitmap src) throws Exception {
Mat imageMat = new Mat();
Utils.bitmapToMat(src, imageMat);
Mat imgSource=imageMat.clone();
Imgproc.cvtColor(imgSource, imageMat, Imgproc.COLOR_BGR2GRAY);
//find the contours
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(imageMat, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_NONE);
Imgproc.Canny(imageMat,imageMat,0,255);
Bitmap canny=Bitmap.createBitmap(imageMat.cols(),imageMat.rows(),Bitmap.Config.ARGB_8888);
Utils.matToBitmap(imageMat,canny);
Imgproc.GaussianBlur(imageMat, imageMat, new org.opencv.core.Size(1, 1), 2, 2);
Bitmap blur=Bitmap.createBitmap(imageMat.cols(),imageMat.rows(),Bitmap.Config.ARGB_8888);
Utils.matToBitmap(imageMat,blur);
MatOfPoint temp_contour = contours.get(0); //the largest is at the index 0 for starting point
for (int idx = 0; idx < contours.size(); idx++) {
temp_contour = contours.get(idx);
//check if this contour is a square
MatOfPoint2f new_mat = new MatOfPoint2f( temp_contour.toArray() );
int contourSize = (int)temp_contour.total();
MatOfPoint2f approxCurve_temp = new MatOfPoint2f();
Imgproc.approxPolyDP(new_mat, approxCurve_temp, contourSize*0.05, true);
if (approxCurve_temp.total() == 4) {
MatOfPoint points = new MatOfPoint( approxCurve_temp.toArray() );
Rect rect = Imgproc.boundingRect(points);
Imgproc.rectangle(imgSource, new Point(rect.x,rect.y), new Point(rect.x+rect.width,rect.y+rect.height), new Scalar(255, 0, 0, 255), 3);
}
}
Bitmap analyzed=Bitmap.createBitmap(imgSource.cols(),imgSource.rows(),Bitmap.Config.ARGB_8888);
Utils.matToBitmap(imgSource,analyzed);
return analyzed;
}
The best I got was this:
The problem is that the rectangle isn't perfect. Maybe finding the white numbers inside it would be a better option, but I don't know much about OpenCV.
Original image:
This is a very simple C++ implementation which tries to search for the text box. The accuracy of the detection depends on three parameters:
1. The thresh value provided to the cv::threshold function to convert the gray image to binary.
2. The height/width ratio, since the height of the text box is relatively smaller than the width.
3. The area of the text box.
Mat img = imread("image.jpg",-1), gray, binary;
/*pre-processing steps*/
uchar thresh = 80;
cvtColor(img, gray, cv::COLOR_BGR2GRAY);
GaussianBlur(gray, gray, Size(7,7), 0);
// change the thresh value to fine tune this program for your images
threshold(gray, binary, thresh, 255, cv::THRESH_BINARY_INV);
/*contour searching*/
std::vector<std::vector<Point>> contours;
std::vector<Vec4i> hierarchy;
findContours(binary, contours, hierarchy, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);
/*Filtering contours based on height/width ratio and bounding box area*/
std::vector<Rect> boxes;
double box_ratio = 0.3;
int box_area = 20000;
for(auto& cnt : contours)
{
auto box = minAreaRect(cnt).boundingRect();
// we are searching for a rectangle which has a relatively large area,
// and whose height is smaller than its width, so the
// height/width ratio should be small. Change these two values for fine tuning
if((min(box.width,box.height)/double(max(box.width,box.height)) < box_ratio) && box.area() > box_area )
{
boxes.push_back(box);
}
}
Mat txt_box = img(boxes.at(0));
Here is almost the same solution in Java:
private Bitmap findRoi(Bitmap sourceBitmap) {
Mat sourceMat = new Mat(sourceBitmap.getWidth(), sourceBitmap.getHeight(), CV_8UC3);
Utils.bitmapToMat(sourceBitmap, sourceMat);
Mat grayMat = new Mat(sourceBitmap.getWidth(), sourceBitmap.getHeight(), CV_8UC3);
Imgproc.cvtColor(sourceMat, grayMat, Imgproc.COLOR_BGR2GRAY);
Imgproc.threshold(grayMat, grayMat, 125, 200, Imgproc.THRESH_BINARY);
// find contours
List<MatOfPoint> whiteContours = new ArrayList<>();
Rect largestRect = null;
Imgproc.findContours(grayMat, whiteContours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
// find appropriate bounding rectangles
for (MatOfPoint contour : whiteContours) {
RotatedRect boundingRect = Imgproc.minAreaRect(new MatOfPoint2f(contour.toArray()));
double rectangleArea = boundingRect.size.area();
// test min ROI area in pixels
if (rectangleArea > 10000) {
Point rotated_rect_points[] = new Point[4];
boundingRect.points(rotated_rect_points);
Rect rect = Imgproc.boundingRect(new MatOfPoint(rotated_rect_points));
// test horizontal ROI orientation and aspect ratio
if (rect.width > 3 * rect.height) {
if (largestRect == null) {
largestRect = rect;
} else {
if (rect.width > largestRect.width) {
largestRect = rect;
}
}
}
}
}
Mat roiMat = new Mat(sourceMat, largestRect);
Bitmap bitmap = Bitmap.createBitmap(roiMat.cols(), roiMat.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(roiMat, bitmap);
return bitmap;
}
Also, you can use additional information: the red numbers placed on the right.
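For example, a sketch of using that red region as an extra anchor (assuming sourceMat is a 3-channel BGR image and that the red marks are saturated enough to isolate in HSV; the range bounds are rough guesses to tune):
// Sketch: build a mask of strongly red pixels as an additional anchor for the ROI search.
Mat hsv = new Mat();
Imgproc.cvtColor(sourceMat, hsv, Imgproc.COLOR_BGR2HSV);
Mat redLow = new Mat();
Mat redHigh = new Mat();
// red wraps around the hue axis, so combine two hue ranges
Core.inRange(hsv, new Scalar(0, 100, 100), new Scalar(10, 255, 255), redLow);
Core.inRange(hsv, new Scalar(170, 100, 100), new Scalar(180, 255, 255), redHigh);
Mat redMask = new Mat();
Core.bitwise_or(redLow, redHigh, redMask);
// collect the red pixel coordinates and take their bounding box
MatOfPoint redPoints = new MatOfPoint();
Core.findNonZero(redMask, redPoints);
Rect redBox = Imgproc.boundingRect(redPoints);
// candidate rectangles sitting directly to the left of redBox are then the most likely ROI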
I am using OpenCV4Android to process my images. I wanted to perform illumination normalization, which was linked to me in this work:
http://lear.inrialpes.fr/pubs/2007/TT07/Tan-amfg07a.pdf
Furthermore, I was given a complete implementation in C++ (OpenCV):
https://github.com/bytefish/opencv/blob/master/misc/tan_triggs.cpp
I tried to rewrite this code in Java, but I think there might be a mistake somewhere. What I get from this algorithm is close, but not good enough. Check the expected results in the PDF linked above, for example on page 12. And this is what I get:
https://dl.dropboxusercontent.com/u/108321090/a1.png
https://dl.dropboxusercontent.com/u/108321090/Screenshot_2013-12-31-14-09-25.png
So there is still too much noise between the background and the face features, but I think it's my fault here. This is my code:
//GET IMAGE URI
Uri selectedImage = imageReturnedIntent.getData();
//CREATE BITMAP FROM IT
BitmapFactory.Options bmpFactoryOptions = new BitmapFactory.Options();
bmpFactoryOptions.inPreferredConfig = Bitmap.Config.ARGB_8888;
Bitmap bmp = BitmapFactory.decodeStream(getContentResolver().openInputStream(selectedImage),
null, bmpFactoryOptions);
//CREATE OPENCV MAT OBJECT
Mat imageMat = new Mat();
Utils.bitmapToMat(bmp, imageMat);
//CONVERT TO GRAYSCALE
Mat grayMat = new Mat();
Imgproc.cvtColor(imageMat, grayMat, Imgproc.COLOR_BGR2GRAY);
//CUT OUT FACE FROM WHOLE IMAGE
(...) face detection cascades localize the face and write the region where the face is located
into an array; then I create a Mat with only the face in it:
Mat cleanFaceMatGRAY = new Mat();
cleanFaceMatGRAY = new Mat(faceDetectMatGRAY, facesArray[0]);
//PROCESSING OF MAT WITH FACE (alghorithm from PDF & .cpp file)
Mat I = tan_triggs_preprocessing(cleanFaceMatGRAY);
Core.normalize(I, I,0, 255, Core.NORM_MINMAX, CvType.CV_8UC1);
//DISPLAY MAT IN IMAGEVIEW
ivPickedPhoto.setImageBitmap(AppTools.createBitmapFromMat(I, Bitmap.Config.ARGB_8888));
And the method with the algorithm (as you can see, it is essentially a copy-paste from the .cpp file, with the calls rewritten for OpenCV4Android):
private Mat tan_triggs_preprocessing(Mat image) {
float alpha = 0.1f;
float tau = 10.0f;
float gamma = 0.2f;
int sigma0 = 1;
int sigma1 = 2;
// Convert to floating point:
Mat X = image;
X.convertTo(X, CvType.CV_32FC1);
// Start preprocessing:
Mat I = new Mat();
Core.pow(X, gamma, I);
// Calculate the DOG Image:
{
Mat gaussian0 = new Mat();
Mat gaussian1 = new Mat();
// Kernel Size:
int kernel_sz0 = (3*sigma0);
int kernel_sz1 = (3*sigma1);
// Make them odd for OpenCV:
kernel_sz0 += ((kernel_sz0 % 2) == 0) ? 1 : 0;
kernel_sz1 += ((kernel_sz1 % 2) == 0) ? 1 : 0;
Size ksize1 = new Size(kernel_sz0,kernel_sz0);
Size ksize2 = new Size(kernel_sz1,kernel_sz1);
Imgproc.GaussianBlur(I, gaussian0, ksize1, sigma0, sigma0, Imgproc.BORDER_CONSTANT);
Imgproc.GaussianBlur(I, gaussian1, ksize2, sigma1, sigma1, Imgproc.BORDER_CONSTANT);
Core.subtract(gaussian0, gaussian1, I);
}
{
double meanI = 0.0;
{
Mat tmp = new Mat();
Mat abstmp = new Mat();
Core.absdiff(I, new Scalar(0), abstmp);
Core.pow(abstmp, alpha, tmp);
meanI = Core.mean(tmp).val[0];
}
Core.divide( Math.pow(meanI, 1.0/alpha), I, I);
}
{
double meanI = 0.0;
{
Mat tmp = new Mat();
Mat abstmp = new Mat();
Mat mintmp = new Mat();
Core.absdiff(I, new Scalar(0), abstmp);
Core.min(abstmp, new Scalar(tau), mintmp);
Core.pow(mintmp, alpha, tmp);
meanI = Core.mean(tmp).val[0];
}
Core.divide( Math.pow(meanI, 1.0/alpha), I, I);
}
// Squash into the tanh:
{
for(int r = 0; r < I.rows(); r++) {
for(int c = 0; c < I.cols(); c++) {
I.get(r,c)[0] = Math.tanh(I.get(r,c)[0]) / tau;
}
}
Core.multiply(I,new Scalar(tau), I);
}
return I;
}
And what I didn't understand while I was rewriting this code was the iteration over the matrix. In the .cpp file there was
I.at<float>(r,c)
Where I have replaced it with just:
I.get(r,c)[0]
Do you think I might have lost some data here, and that's why the image is shady?
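A note on that last point: in the OpenCV Java bindings, Mat.get(r, c) returns a copy of the element values, so assigning to the returned array does not change the Mat itself; the value has to be written back with Mat.put. A minimal sketch of the same tanh loop with an explicit write-back (keeping the expression from the code above):
// Sketch: the same loop, but writing the result back into the Mat with put()
for (int r = 0; r < I.rows(); r++) {
for (int c = 0; c < I.cols(); c++) {
double[] value = I.get(r, c);           // get() returns a copy of the element
value[0] = Math.tanh(value[0]) / tau;   // same expression as above
I.put(r, c, value);                     // write the modified value back
}
}
Core.multiply(I, new Scalar(tau), I);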