To find the homography between two pictures I'm using OpenCV 2.4.8 (Java version), but I've run into the following problem:
I can find more than two thousand matches, but the findHomography function returns some negative values. The statement for the homography is the following:
Mat homography = Calib3d.findHomography(goodReferencePoints, goodScenePoints);
As you can see in my terminal output, the homography contains these negative values:
H= [0.0728467050846091, -1.778341899752422, 523.4269328629535;
0.05293440263461851, -1.283649810597391, 377.7784434651413;
0.0001384401936905583, -0.003397194330027168, 1]
The 1 at the end seems correct to me, but it is the only value in this matrix that looks right :/
Any idea why? How can I get a better matrix?
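Would the overload that takes a robust estimation method help here? A minimal sketch of that call (assuming the OpenCV 2.4.x Java bindings, where the extra arguments are the method flag and the RANSAC reprojection threshold in pixels):
Mat homography = Calib3d.findHomography(
        goodReferencePoints, goodScenePoints,
        Calib3d.RANSAC, 3.0); // matches with reprojection error above 3 px are treated as outliers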
Thank you in advance
My code:
private void findCorners() {
    Mat img_matches = new Mat();
    List<DMatch> matchesList = matches.toList();
    if (matchesList.size() < 4) {
        // There are too few matches to find the homography.
        System.err.println("There are too few matches to find the homography.");
        return;
    }
    List<KeyPoint> object_keypoints_list = object_keypoints.toList();
    List<KeyPoint> scene_keypoints_list = scene_keypoints.toList();
    this.object_good_keypoints = new ArrayList<KeyPoint>();
    this.scene_good_keypoints = new ArrayList<KeyPoint>();
    // Calculate the max and min distances between keypoints.
    double maxDist = 0.0;
    double minDist = Double.MAX_VALUE;
    for (DMatch match : matchesList) {
        double dist = match.distance;
        if (dist < minDist) {
            minDist = dist;
        }
        if (dist > maxDist) {
            maxDist = dist;
        }
    }
    // The thresholds for minDist are chosen subjectively
    // based on testing. The unit is not related to pixel
    // distances; it is related to the number of failed tests
    // for similarity between the matched descriptors.
    if (minDist > 50.0) {
        // The target is completely lost.
        // Discard any previously found corners.
        scene_corners.create(0, 0, scene_corners.type());
        return;
    } else if (minDist > 20.0) {
        // The target is lost but maybe it is still close.
        // Keep any previously found corners.
        System.err.println("The target is lost but maybe it is still close. Keep any previously found corners.");
        return;
    }
    // Identify "good" keypoints based on match distance.
    ArrayList<Point> goodReferencePointsList = new ArrayList<Point>();
    ArrayList<Point> goodScenePointsList = new ArrayList<Point>();
    double maxGoodMatchDist = 1.75 * minDist;
    for (DMatch match : matchesList) {
        if (match.distance < maxGoodMatchDist) {
            goodReferencePointsList.add(
                    object_keypoints_list.get(match.queryIdx).pt);
            goodScenePointsList.add(
                    scene_keypoints_list.get(match.trainIdx).pt);
            this.object_good_keypoints.add(object_keypoints_list.get(match.queryIdx));
            this.scene_good_keypoints.add(scene_keypoints_list.get(match.trainIdx));
        }
    }
    if (goodReferencePointsList.size() < 4 ||
            goodScenePointsList.size() < 4) {
        // There are too few good points to find the homography.
        System.err.println("There are too few good points to find the homography.");
        return;
    }
    System.out.println("Match found");
    MatOfPoint2f goodReferencePoints = new MatOfPoint2f();
    goodReferencePoints.fromList(goodReferencePointsList);
    MatOfPoint2f goodScenePoints = new MatOfPoint2f();
    goodScenePoints.fromList(goodScenePointsList);
    System.out.println("goodReferencePoints size =" + goodReferencePoints.size());
    System.out.println("goodScenePoints size =" + goodScenePoints.size());
    Mat homography = Calib3d.findHomography(goodReferencePoints, goodScenePoints);
    System.out.println("homography = " + homography.dump());
    object_corners = new Mat(4, 1, CvType.CV_32FC2);
    scene_corners = new Mat(4, 1, CvType.CV_32FC2);
    object_corners.put(0, 0, new double[] { 0, 0 });
    object_corners.put(1, 0, new double[] { object_image.cols(), 0 });
    object_corners.put(2, 0, new double[] { object_image.cols(), object_image.rows() });
    object_corners.put(3, 0, new double[] { 0, object_image.rows() });
    Core.perspectiveTransform(object_corners, scene_corners, homography);
    print_corners();
    Point p0 = new Point(scene_corners.get(0, 0));
    Point p1 = new Point(scene_corners.get(1, 0));
    Point p2 = new Point(scene_corners.get(2, 0));
    Point p3 = new Point(scene_corners.get(3, 0));
    int offset = (int) new Point(object_corners.get(1, 0)).x;
    System.out.println("Matches size = " + matches.size());
    MatOfKeyPoint object_good_kp = new MatOfKeyPoint();
    object_good_kp.fromList(this.object_good_keypoints);
    MatOfKeyPoint scene_good_kp = new MatOfKeyPoint();
    scene_good_kp.fromList(this.scene_good_keypoints);
    System.out.println("object kps = " + this.object_keypoints.size());
    System.out.println("object good kps = " + this.object_good_keypoints.size());
    System.out.println("scene kps = " + this.scene_keypoints.size());
    System.out.println("scene good kps = " + this.scene_good_keypoints.size());
    Features2d.drawMatches(object_image, object_keypoints, scene_image, scene_keypoints, matches, img_matches,
            new Scalar(255, 0, 0), new Scalar(0, 0, 255), new MatOfByte(), 2);
    Core.line(img_matches, new Point(p0.x + offset, p0.y), new Point(p1.x + offset, p1.y), COLOR_GREEN, 4);
    Core.line(img_matches, new Point(p1.x + offset, p1.y), new Point(p2.x + offset, p2.y), COLOR_GREEN, 4);
    Core.line(img_matches, new Point(p2.x + offset, p2.y), new Point(p3.x + offset, p3.y), COLOR_GREEN, 4);
    Core.line(img_matches, new Point(p3.x + offset, p3.y), new Point(p0.x + offset, p0.y), COLOR_GREEN, 4);
    System.out.println(String.format("-- Writing result Mat in %s", pathResult));
    Highgui.imwrite(pathResult, img_matches);
}
And here is the terminal output where we can see that the homography matrix has some negative values:
Match found
goodReferencePoints size =1x10
goodScenePoints size =1x10
homography = [0.0728467050846091, -1.778341899752422, 523.4269328629535;
0.05293440263461851, -1.283649810597391, 377.7784434651413;
0.0001384401936905583, -0.003397194330027168, 1]
-- Object-Point 0 [0,0]
-- Object-Point 1 [259,0]
-- Object-Point 2 [259,878]
-- Object-Point 3 [0,878]
-- Scene-Point 0 [523.43,377.78]
-- Scene-Point 1 [523.52,377.94]
-- Scene-Point 2 [523.45,377.81]
-- Scene-Point 3 [523.5,377.89]
Matches size = 1x2341
object kps = 1x2341
object good kps = 10
scene kps = 1x2576
scene good kps = 10
In the picture you can see that the matches are drawn, but only one green point appears. It should be the contour drawn in green, not just one point.
And the picture:
Related
I'm currently trying to close the contours on the right of this picture:
Sample.
The reason for the open contour lies in kabeja, a library that converts DXF files to images. It seems that on some images it doesn't convert the last pixel column (or row), and that's why the Sample picture is open.
I had the idea to use Core.copyMakeBorder() in OpenCV to add some space to the picture. After that I tried to use Imgproc.approxPolyDP() to close the contour, but this doesn't work. I tried it with different epsilon values: Pics EDIT: Can't post more than 2 links
The reason is maybe that the contour surrounds the line; it never closes the contour where I want it to.
I tried another method using Imgproc.convexHull(), which delivers this one: ConvexHull.
This could be useful for me, but I have no idea how to take out the part of the convex hull I need and merge it together with the contour to close it.
I hope that someone has an idea.
Here is my method for Imgproc.approxPolyDP()
public static ArrayList<MatOfPoint> makeComplete(Mat mat) {
    System.out.println("makeComplete: START");
    Mat dst = new Mat();
    Core.copyMakeBorder(mat, dst, 10, 10, 10, 10, Core.BORDER_CONSTANT);
    ArrayList<MatOfPoint> cnts = Tools.getContours(dst);
    ArrayList<MatOfPoint2f> opened = new ArrayList<>();
    // convert to MatOfPoint2f to use approxPolyDP
    for (MatOfPoint m : cnts) {
        MatOfPoint2f temp = new MatOfPoint2f(m.toArray());
        opened.add(temp);
        System.out.println("First loop runs");
    }
    ArrayList<MatOfPoint> closed = new ArrayList<>();
    for (MatOfPoint2f conts : opened) {
        MatOfPoint2f temp = new MatOfPoint2f();
        Imgproc.approxPolyDP(conts, temp, 3, true);
        MatOfPoint closedTemp = new MatOfPoint(temp.toArray());
        closed.add(closedTemp);
        System.out.println("Second loop runs");
    }
    System.out.println("makeComplete: END");
    return closed;
}
And here is the code for Imgproc.convexHull():
public static ArrayList<MatOfPoint> getConvexHull(Mat mat) {
    Mat dst = new Mat();
    Core.copyMakeBorder(mat, dst, 10, 10, 10, 10, Core.BORDER_CONSTANT);
    ArrayList<MatOfPoint> cnts = Tools.getContours(dst);
    ArrayList<MatOfPoint> out = new ArrayList<MatOfPoint>();
    MatOfPoint mopIn = cnts.get(0);
    MatOfInt hull = new MatOfInt();
    Imgproc.convexHull(mopIn, hull, false);
    MatOfPoint mopOut = new MatOfPoint();
    mopOut.create((int) hull.size().height, 1, CvType.CV_32SC2);
    for (int i = 0; i < hull.size().height; i++) {
        int index = (int) hull.get(i, 0)[0];
        double[] point = new double[]{
                mopIn.get(index, 0)[0], mopIn.get(index, 0)[1]
        };
        mopOut.put(i, 0, point);
    }
    out.add(mopOut);
    return out;
}
Best regards,
Brk
Assuming the diagnosis is correct and the last row isn't converted, i.e. it is missing (the column case is similar), try the following. Let x go from left to right and y from top to bottom. We add one row of empty (white?) pixels at the bottom of the image and then scan from left to right. Below is pseudocode:
// EMPTY - value of the background, e.g. white for the sample image
PixelType curPixel = EMPTY;
int y = height - 1; // last row, the one we added
for (int x = 0; x < width; ++x)
{
    // img(y, x)   - current pixel, is "empty"
    // img(y-1, x) - pixel above the current
    if (img(y-1, x) != img(y, x))
    {
        // pixel above isn't empty, so we make the current pixel non-empty
        img(y, x) = img(y-1, x);
        // if we were drawing, then stop; otherwise - start
        if (curPixel == EMPTY)
            curPixel = img(y-1, x);
        else
            curPixel = EMPTY;
    }
    else
    {
        img(y, x) = curPixel;
    }
}
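For reference, here is a rough Java translation of that pseudocode. It is a sketch only: it assumes an 8-bit single-channel Mat named dst where the background (EMPTY) is white, 255, and where the extra bottom row has already been added, e.g. with Core.copyMakeBorder():
final double EMPTY = 255; // background value
double curPixel = EMPTY;
int y = dst.rows() - 1; // last row, the one we added
for (int x = 0; x < dst.cols(); x++) {
    double above = dst.get(y - 1, x)[0];
    if (above != dst.get(y, x)[0]) {
        // the pixel above isn't empty: copy it down and toggle
        // between "drawing" and "not drawing"
        dst.put(y, x, above);
        curPixel = (curPixel == EMPTY) ? above : EMPTY;
    } else {
        dst.put(y, x, curPixel);
    }
}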
I am trying to find the location of one image inside a specific area of another. I am using JavaCV for this, but my code gives an error when executing the cvMatchTemplate function. I think I am misusing cvSetImageROI.
This is how I am using it:
public static void main(String c[]) {
    IplImage src = cvLoadImage("test.jpg", 0);
    IplImage tmp = cvLoadImage("tmp.png", 0);
    IplImage result = cvCreateImage(cvSize(src.width() - tmp.width() + 1, src.height() - tmp.height() + 1),
            IPL_DEPTH_32F, 1);
    cvZero(result);
    cvSetImageROI(src, new CvRect(22, 50, 30, 30));
    cvSetImageROI(result, new CvRect(22, 50, 30, 30));
    // Match Template Function from OpenCV
    cvMatchTemplate(src, tmp, result, CV_TM_CCORR_NORMED);
    double[] min_val = new double[2];
    double[] max_val = new double[2];
    CvPoint minLoc = new CvPoint();
    CvPoint maxLoc = new CvPoint();
    cvMinMaxLoc(result, min_val, max_val, minLoc, maxLoc, null);
    CvPoint point = new CvPoint();
    point.x(maxLoc.x() + tmp.width());
    point.y(maxLoc.y() + tmp.height());
    cvRectangle(src, maxLoc, point, CvScalar.RED, 2, 8, 0);
    cvShowImage("Lena Image", src);
    cvWaitKey(0);
    cvReleaseImage(src);
    cvReleaseImage(tmp);
    cvReleaseImage(result);
}
This is the error:
OpenCV Error: Assertion failed (result.size() == cv::Size(std::abs(img.cols - templ.cols)
+ 1,std::abs(img.rows - templ.rows) + 1) && result.type() == CV_32F) in unknown function,
file ..\..\..\src\opencv\modules\imgproc\src\templmatch.cpp, line 384
Any help?
Hey, see this example that I did with JavaCV:
public class Test {
    static Image image;

    public static void main(String[] args) throws Exception {
        int width = Integer.parseInt(args[3]);
        int height = Integer.parseInt(args[4]);
        IplImage src = cvLoadImage(args[0], 0);
        IplImage tmp = cvLoadImage(args[1], 0);
        IplImage result = cvCreateImage(
                cvSize(src.width() - tmp.width() + 1,
                        src.height() - tmp.height() + 1), IPL_DEPTH_32F, src.nChannels());
        cvZero(result);
        // Match Template Function from OpenCV
        cvMatchTemplate(src, tmp, result, CV_TM_CCORR_NORMED);
        // double[] min_val = new double[2];
        // double[] max_val = new double[2];
        DoublePointer min_val = new DoublePointer();
        DoublePointer max_val = new DoublePointer();
        CvPoint minLoc = new CvPoint();
        CvPoint maxLoc = new CvPoint();
        cvMinMaxLoc(result, min_val, max_val, minLoc, maxLoc, null);
        // Get the Max or Min Correlation Value
        // System.out.println(Arrays.toString(min_val));
        // System.out.println(Arrays.toString(max_val));
        CvPoint point = new CvPoint();
        point.x(maxLoc.x() + tmp.width());
        point.y(maxLoc.y() + tmp.height());
        // cvMinMaxLoc(src, min_val, max_val, 0, 0, result);
        // Draw a rectangle around the matched region
        cvRectangle(src, maxLoc, point, CvScalar.RED, 2, 8, 0);
        CvRect rect = new CvRect();
        rect.x(maxLoc.x());
        rect.y(maxLoc.y());
        rect.width(tmp.width() + width);
        rect.height(tmp.height() + height);
        cvSetImageROI(src, rect);
        IplImage imageNew = cvCreateImage(cvGetSize(src), src.depth(), src.nChannels());
        cvCopy(src, imageNew);
        cvSaveImage(args[2], imageNew);
        cvShowImage("Lena Image", src);
        cvWaitKey(0);
        cvReleaseImage(src);
        cvReleaseImage(tmp);
        cvReleaseImage(result);
    }
}
Full reference here.
We need five command-line arguments, like this:
"C:\Users\Waldema\Desktop\bg.jpg" "C:\Users\Waldema\Desktop\logosiemens.jpg" "C:\Users\Waldema\Desktop\imageToFind.jpg" 100 200
configurable in the run configurations of common IDEs.
I think that will help.
I found the problem: it was in setting the ROI of the result image. This is the wrong line:
cvSetImageROI(result, new CvRect(22, 50, 30, 30));
It should be like this:
cvSetImageROI(result, new CvRect(22, 50, 30 - tmp.width() + 1, 30 - tmp.height() + 1));
I am not sure why, but I think it is because cvMatchTemplate expects the result dimensions to equal the source dimensions minus the template dimensions plus one pixel, in each direction.
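As a sketch of that sizing rule (resultRoiFor is a hypothetical helper, not from the original post):
// Template matching over a srcW x srcH area with a tmpW x tmpH template
// always produces a (srcW - tmpW + 1) x (srcH - tmpH + 1) result,
// and the same rule applies to ROIs.
static CvRect resultRoiFor(CvRect srcRoi, IplImage tmp) {
    return new CvRect(srcRoi.x(), srcRoi.y(),
            srcRoi.width() - tmp.width() + 1,
            srcRoi.height() - tmp.height() + 1);
}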
I am writing a function to find rectangles in a Mat, but I am getting an exception at the mixChannels() function. My code is below. Can someone check it and tell me what could be wrong? I would also like to know how I can implement gray = gray0 >= (l+1)*255/N; in Java or Android.
private void findSqaures(Mat sourceImage) {
    Vector<Point> sqares;
    Mat pyr, timing, gry = new Mat();
    pyr = new Mat(sourceImage.size(), CvType.CV_8U);
    timing = new Mat(sourceImage.size(), CvType.CV_8U);
    int thresh = 50, N = 11;
    List<Mat> grayO = new ArrayList<Mat>();
    List<Mat> timing1 = new ArrayList<Mat>();
    Imgproc.pyrDown(sourceImage, pyr, new Size(sourceImage.cols() / 2.0, sourceImage.rows() / 2));
    Imgproc.pyrUp(pyr, timing, sourceImage.size());
    // Vector<Point> contours = new Vector<Point>();
    timing1.add(0, pyr);
    grayO.add(0, timing);
    // grayO.add(0, timing);
    for (int c = 0; c < 3; c++) {
        int ch[] = {1, 0};
        MatOfInt fromto = new MatOfInt(ch);
        Core.mixChannels(timing1, grayO, fromto); // Getting Exception here
        // Core.mixChannels(src, dst, fromTo)
        for (int i = 0; i < N; i++) {
            Mat output = grayO.get(0);
            if (i == 0) {
                Imgproc.Canny(output, gry, 5, thresh);
                Imgproc.dilate(gry, gry, new Mat(), new Point(-1, -1), 1);
            } else {
                // output = output >= (i+1)*255/N;
            }
            // sourceImage = gry;
            contours = new ArrayList<MatOfPoint>();
            Imgproc.findContours(gry, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
            MatOfPoint2f approxCurve = new MatOfPoint2f();
            mDrawnContours.clear();
            Log.i(TAG, "::findSqaures:" + "contours.size():" + contours.size());
            for (int j = 0; j < contours.size(); j++) {
                MatOfPoint tempContour = contours.get(j);
                MatOfPoint2f newMat = new MatOfPoint2f(tempContour.toArray());
                int contourSize = (int) tempContour.total();
                Imgproc.approxPolyDP(newMat, approxCurve, contourSize * 0.02, true);
                MatOfPoint points = new MatOfPoint(approxCurve.toArray());
                // if( approx.size() == 4 && fabs(contourArea(cv::Mat(approx))) > 1000 && cv::isContourConvex(cv::Mat(approx))) {
                if (points.toArray().length == 4 && (Math.abs(approxCurve.total()) > 1000) && Imgproc.isContourConvex(points)) {
                    double maxCosine = 0;
                    int k;
                    for (k = 2; k < 5; k++) {
                        double cosine = Math.abs(angle(points.toArray()[k % 4], points.toArray()[k - 2], points.toArray()[k - 1]));
                        if (cosine > maxCosine) {
                            maxCosine = cosine;
                        }
                    }
                    Log.i(TAG, "::findSqaures:" + "maxCosine:" + maxCosine);
                    if (maxCosine < 0.3) {
                        DrawnContours drawnContours = new DrawnContours();
                        drawnContours.setIndex(k);
                        mDrawnContours.add(drawnContours);
                    }
                }
            }
            Log.i(TAG, "::findSqaures:" + "mDrawnContours.size():" + mDrawnContours.size());
        }
    }
    // Core.mixChannels(src, dst, fromTo)
}
The exception is: CvException [org.opencv.core.CvException: /home/reports/ci/slave_desktop/50-SDK/opencv/modules/core/src/matrix.cpp:3210: error: (-215) A.size == arrays[i0]->size in function void cv::NAryMatIterator::init(const cv::Mat**, cv::Mat*, uchar**, int)]
Instead of the following
timing1.add(0,pyr);
grayO.add(0,timing);
Try this
timing1.add(pyr);
grayO.add(timing);
Instead of the line below
gry = output >= (i+1)*255/N;
you could use
Imgproc.threshold(output, gry, (i + 1) * 255 / N, 255, Imgproc.THRESH_BINARY);
Also, instead of using pyr as the source, blur the image with the medianBlur function; you will get better rectangle identification.
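For example (a sketch; the kernel size of 9 and the reuse of the question's variable names are my assumptions):
// Median-blur the full-resolution source and feed that to mixChannels
// instead of the half-size pyramid output.
Mat blurred = new Mat();
Imgproc.medianBlur(sourceImage, blurred, 9);
timing1.clear();
timing1.add(blurred);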
In
Core.mixChannels(timing1, grayO, fromto);
the elements of the grayO list and of the timing1 list must have the same size,
but pyr is half the size of timing, so you get an error.
Look again at the squares.cpp sample:
the source of the mixChannels function should be the result Mat of the pyrUp function;
the destination of the mixChannels function should be a new empty Mat of the same size.
So, correct it with:
timing1.add(0,timing); // or timing1.add(timing)
grayO.add(0, new Mat(timing.size(), timing.type()) );
Regards,
Louis
I'm trying to automate a process where someone manually converts a code to a digital one.
Then I started reading about OCR, so I installed Tesseract OCR and tried it on some images. It doesn't detect anything even close to the code.
After reading some questions on Stack Overflow, I figured that the images need some preprocessing, such as deskewing the image to a horizontal one, which can be done with OpenCV for example.
Now my questions are:
What kind of preprocessing or other methods should be used in a case like the above image?
Secondly, can I rely on the output? Will it always work in cases like the above image?
I hope someone can help me!
I have decided to capture the whole card instead of the code only. By capturing the whole card, it is possible to transform it to a plain perspective, after which I can easily get the "code" region.
Along the way I learned a lot, especially regarding speed. This function is slow on high-resolution images; it can take up to 10 seconds at a size of 3264 x 1836.
What I did to speed things up is resize the input matrix by a factor of 1/4. This makes detection 4^2 = 16 times faster, with a minimal loss of precision. The next step is scaling the found quadrangle back to the original size, so that we can transform the quadrangle to a plain perspective using the original source.
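A minimal sketch of that resize-then-rescale step, using the find() method shown below (the other names here are my own, not from the original code):
// Detect on a quarter-size copy, then map the quadrangle's points
// back into the coordinate system of the original image.
double scale = 4.0;
Mat small = new Mat();
Imgproc.resize(src, small, new Size(src.cols() / scale, src.rows() / scale));
MatOfPoint quad = find(small);
if (quad != null) {
    Point[] pts = quad.toArray();
    for (Point p : pts) {
        p.x *= scale;
        p.y *= scale;
    }
    quad.fromArray(pts); // coordinates are now valid for the full-resolution src
}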
The code I created for detecting the largest area is heavily based on code I found on Stack Overflow. Unfortunately, those snippets didn't work as expected for me, so I combined several of them and modified a lot.
This is what I got:
private static double angle(Point p1, Point p2, Point p0) {
    double dx1 = p1.x - p0.x;
    double dy1 = p1.y - p0.y;
    double dx2 = p2.x - p0.x;
    double dy2 = p2.y - p0.y;
    return (dx1 * dx2 + dy1 * dy2) / Math.sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}

private static MatOfPoint find(Mat src) throws Exception {
    Mat blurred = src.clone();
    Imgproc.medianBlur(src, blurred, 9);
    Mat gray0 = new Mat(blurred.size(), CvType.CV_8U), gray = new Mat();
    List<MatOfPoint> contours = new ArrayList<>();
    List<Mat> blurredChannel = new ArrayList<>();
    blurredChannel.add(blurred);
    List<Mat> gray0Channel = new ArrayList<>();
    gray0Channel.add(gray0);
    MatOfPoint2f approxCurve;
    double maxArea = 0;
    int maxId = -1;
    for (int c = 0; c < 3; c++) {
        int ch[] = {c, 0};
        Core.mixChannels(blurredChannel, gray0Channel, new MatOfInt(ch));
        int thresholdLevel = 1;
        for (int t = 0; t < thresholdLevel; t++) {
            if (t == 0) {
                Imgproc.Canny(gray0, gray, 10, 20, 3, true); // true ?
                Imgproc.dilate(gray, gray, new Mat(), new Point(-1, -1), 1); // 1 ?
            } else {
                Imgproc.adaptiveThreshold(gray0, gray, thresholdLevel, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, (src.width() + src.height()) / 200, t);
            }
            Imgproc.findContours(gray, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
            for (MatOfPoint contour : contours) {
                MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
                double area = Imgproc.contourArea(contour);
                approxCurve = new MatOfPoint2f();
                Imgproc.approxPolyDP(temp, approxCurve, Imgproc.arcLength(temp, true) * 0.02, true);
                if (approxCurve.total() == 4 && area >= maxArea) {
                    double maxCosine = 0;
                    List<Point> curves = approxCurve.toList();
                    for (int j = 2; j < 5; j++) {
                        double cosine = Math.abs(angle(curves.get(j % 4), curves.get(j - 2), curves.get(j - 1)));
                        maxCosine = Math.max(maxCosine, cosine);
                    }
                    if (maxCosine < 0.3) {
                        maxArea = area;
                        maxId = contours.indexOf(contour);
                        // contours.set(maxId, getHull(contour));
                    }
                }
            }
        }
    }
    if (maxId >= 0) {
        return contours.get(maxId);
        // Imgproc.drawContours(src, contours, maxId, new Scalar(255, 0, 0, .8), 8);
    }
    return null;
}
You can call it like so:
MatOfPoint contour = find(src);
See this answer for quadrangle detection from a contour and transforming it to a plain perspective:
Java OpenCV deskewing a contour
I went through many questions on Stack Overflow and was able to develop a small program to detect squares and rectangles correctly. This is my sample code:
public static CvSeq findSquares(final IplImage src, CvMemStorage storage) {
    CvSeq squares = new CvContour();
    squares = cvCreateSeq(0, sizeof(CvContour.class), sizeof(CvSeq.class), storage);
    IplImage pyr = null, timg = null, gray = null, tgray;
    timg = cvCloneImage(src);
    CvSize sz = cvSize(src.width(), src.height());
    tgray = cvCreateImage(sz, src.depth(), 1);
    gray = cvCreateImage(sz, src.depth(), 1);
    // cvCvtColor(gray, src, 1);
    pyr = cvCreateImage(cvSize(sz.width() / 2, sz.height() / 2), src.depth(), src.nChannels());
    // down-scale and upscale the image to filter out the noise
    // cvPyrDown(timg, pyr, CV_GAUSSIAN_5x5);
    // cvPyrUp(pyr, timg, CV_GAUSSIAN_5x5);
    // cvSaveImage("ha.jpg", timg);
    CvSeq contours = new CvContour();
    // request closing of the application when the image window is closed
    // show image on window
    // find squares in every color plane of the image
    for (int c = 0; c < 3; c++) {
        IplImage channels[] = { cvCreateImage(sz, 8, 1), cvCreateImage(sz, 8, 1), cvCreateImage(sz, 8, 1) };
        channels[c] = cvCreateImage(sz, 8, 1);
        if (src.nChannels() > 1) {
            cvSplit(timg, channels[0], channels[1], channels[2], null);
        } else {
            tgray = cvCloneImage(timg);
        }
        tgray = channels[c];
        // try several threshold levels
        for (int l = 0; l < N; l++) {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if (l == 0) {
                // apply Canny. Take the upper threshold from the slider
                // and set the lower to 0 (which forces edges merging)
                cvCanny(tgray, gray, 0, thresh, 5);
                // dilate the Canny output to remove potential
                // holes between edge segments
                cvDilate(gray, gray, null, 1);
            } else {
                // apply threshold if l != 0:
                cvThreshold(tgray, gray, (l + 1) * 255 / N, 255, CV_THRESH_BINARY);
            }
            // find contours and store them all as a list
            cvFindContours(gray, storage, contours, sizeof(CvContour.class), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
            CvSeq approx;
            // test each contour
            while (contours != null && !contours.isNull()) {
                if (contours.elem_size() > 0) {
                    approx = cvApproxPoly(contours, Loader.sizeof(CvContour.class), storage, CV_POLY_APPROX_DP, cvContourPerimeter(contours) * 0.02, 0);
                    if (approx.total() == 4 && Math.abs(cvContourArea(approx, CV_WHOLE_SEQ, 0)) > 1000 && cvCheckContourConvexity(approx) != 0) {
                        double maxCosine = 0;
                        for (int j = 2; j < 5; j++) {
                            // find the maximum cosine of the angle between joint edges
                            double cosine = Math.abs(angle(
                                    new CvPoint(cvGetSeqElem(approx, j % 4)),
                                    new CvPoint(cvGetSeqElem(approx, j - 2)),
                                    new CvPoint(cvGetSeqElem(approx, j - 1))));
                            maxCosine = Math.max(maxCosine, cosine);
                        }
                        if (maxCosine < 0.2) {
                            CvRect x = cvBoundingRect(approx, l);
                            if ((x.width() * x.height()) < 50000) {
                                System.out.println("Width : " + x.width() + " Height : " + x.height());
                                cvSeqPush(squares, approx);
                            }
                        }
                    }
                }
                contours = contours.h_next();
            }
            contours = new CvContour();
        }
    }
    return squares;
}
I use this image to detect rectangles and squares
I need to identify the following output
and
But when I run the above code, it detects only the following rectangles, and I don't know the reason for that. Can someone please explain why?
This is the output that I got.
Please be kind enough to explain the problem in the above code and give some suggestions for detecting these squares and rectangles.
Given a mask image (a binary image, like your second figure), cvFindContours() gives you the contours (several lists of points).
Look at this link: http://dasl.mem.drexel.edu/~noahKuntz/openCVTut7.html
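A minimal sketch of that call with JavaCV's old C API (mask is a hypothetical 8-bit binary image; note that cvFindContours modifies its input, so clone it first):
CvMemStorage storage = cvCreateMemStorage(0);
CvSeq contours = new CvContour();
IplImage work = cvCloneImage(mask); // cvFindContours destroys its input
cvFindContours(work, storage, contours, sizeof(CvContour.class),
        CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
while (contours != null && !contours.isNull()) {
    // each node of the sequence is one contour (a list of points)
    System.out.println("contour with " + contours.total() + " points");
    contours = contours.h_next();
}
cvReleaseMemStorage(storage);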