OpenCV detecting largest rectangle yields puzzling results - java

My aim is to detect the largest rectangle in an image, whether it's skewed or not. After some research and googling I came up with code that should theoretically work, but in half of the cases I see puzzling results.
I am using OpenCV for Android; here is the code:
private void find_parallels() {
    // img, max_area, area, peri, approx and biggest are class fields
    Utils.bitmapToMat(selectedPicture, img);
    Mat temp = new Mat();
    Imgproc.resize(img, temp, new Size(640, 480));
    img = temp.clone();
    // Grayscale + blur to suppress noise before thresholding
    Mat imgGray = new Mat();
    Imgproc.cvtColor(img, imgGray, Imgproc.COLOR_BGR2GRAY);
    Imgproc.GaussianBlur(imgGray, imgGray, new Size(5, 5), 0);
    Mat threshedImg = new Mat();
    Imgproc.adaptiveThreshold(imgGray, threshedImg, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, 11, 2);
    List<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();
    Mat imageContours = imgGray.clone();
    Imgproc.cvtColor(imageContours, imageContours, Imgproc.COLOR_GRAY2BGR);
    Imgproc.findContours(threshedImg, contours, hierarchy, Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_SIMPLE);
    // Keep the largest contour whose polygon approximation has exactly 4 corners
    max_area = 0;
    int num = 0;
    for (int i = 0; i < contours.size(); i++) {
        area = Imgproc.contourArea(contours.get(i));
        if (area > 100) {
            MatOfPoint2f mop = new MatOfPoint2f(contours.get(i).toArray());
            peri = Imgproc.arcLength(mop, true);
            Imgproc.approxPolyDP(mop, approx, 0.02 * peri, true);
            if (area > max_area && approx.toArray().length == 4) {
                biggest = approx;
                num = i;
                max_area = area;
            }
        }
    }
    selectedPicture = Bitmap.createBitmap(640, 480, Bitmap.Config.ARGB_8888);
    Imgproc.drawContours(img, contours, num, new Scalar(0, 0, 255));
    Utils.matToBitmap(img, selectedPicture);
    imageView1.setImageBitmap(selectedPicture);
}
In some cases it works excellently, as can be seen in this image (see the white line between the monitor bezel and the screen; sorry for the color):
Example that works:
However, in this image, and in most images where the screen is greyish, it gives a crazy result.
Example that doesn't work:

Try using morphology: a dilate followed by an erode with the same kernel (a morphological closing) should make it better.
Or use pyrDown + pyrUp, or just blur it.
In short, use the low-pass class of methods, because your object of interest is much larger than the noise.
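For example, the closing could look like this (a minimal sketch reusing the threshedImg Mat from the question; the 5x5 kernel size is an assumption you would tune):
Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(5, 5));
// dilate then erode with the same kernel = morphological closing,
// which fills small holes and noise inside the large rectangle
Imgproc.dilate(threshedImg, threshedImg, kernel);
Imgproc.erode(threshedImg, threshedImg, kernel);
// or equivalently in one call:
// Imgproc.morphologyEx(threshedImg, threshedImg, Imgproc.MORPH_CLOSE, kernel);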

Related

OpenCV image matching (regardless of color)

I am kind of hopeless in my quest to write a screenshot reader for a game I am addicted to.
We take a screenshot, regardless of size/coloring (so custom in-game settings), and I have a library of images I want to check it against. I am using OpenCV.
Example screenshot:
now I have a library of all ingame materials, for example this one
I already know how to rescale and such, but I just know too little for it to find a decent match. I am quite new to image filtering/matching, so if you have any ideas/tips, please let me know. My code so far:
public void scan2(String image, String template) {
    Mat iterateImg = Imgcodecs.imread(image, Imgcodecs.IMREAD_COLOR);
    Mat templ2 = Imgcodecs.imread(template, Imgcodecs.IMREAD_COLOR);
    Mat templ2resized = new Mat();
    double templx = templ2.size().width;
    double temply = templ2.size().height;
    System.out.println(templx + "-" + temply);
    // vals is presumably a class field: List<MatchResultWrapper>
    for (double scale = 1; scale < 2; scale += 0.01) {
        Imgproc.resize(templ2, templ2resized, new Size(templx / scale, temply / scale));
        MatchResultWrapper match = match(iterateImg, templ2resized);
        match.setScaledx((int) (templx / scale));
        match.setScaledy((int) (temply / scale));
        vals.add(match);
    }
    double[] results = new double[vals.size()];
    for (int i = 0; i < vals.size(); i++) {
        results[i] = vals.get(i).getMatch();
    }
    // Pick the result closest to zero (TM_SQDIFF: smaller is better)
    double diff = Integer.MAX_VALUE;
    int closestIndex = 0;
    for (int i = 0; i < results.length; ++i) {
        double abs = Math.abs(results[i]);
        if (abs < diff) {
            closestIndex = i;
            diff = abs;
        } else if (abs == diff && results[i] > 0 && results[closestIndex] < 0) {
            // same distance to zero but positive
            closestIndex = i;
        }
    }
    System.out.println(vals.get(closestIndex));
}
private MatchResultWrapper match(Mat source, Mat template) {
    Mat result = new Mat();
    Mat img_display = new Mat();
    source.copyTo(img_display);
    int result_cols = source.cols() - template.cols() + 1;
    int result_rows = source.rows() - template.rows() + 1;
    result.create(result_rows, result_cols, CvType.CV_32FC1);
    Imgproc.matchTemplate(source, template, result, Imgproc.TM_SQDIFF);
    Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat());
    Core.MinMaxLocResult mmr = Core.minMaxLoc(result);
    MatchResultWrapper wrapper = new MatchResultWrapper();
    wrapper.setMatch(mmr.minVal);
    wrapper.setX((int) mmr.minLoc.x);
    wrapper.setY((int) mmr.minLoc.y);
    return wrapper;
}
Thanks to @christoph-rackwitz we now have the following result, which sadly does not work either :(
Probably you need a mask for your template. The documentation of the match modes is not 100% clear on mask support, but TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, and TM_CCORR_NORMED seem to support masks. I generally prefer the normed match modes, but you can try both to see what gives you the best results. If you do try TM_CCORR_NORMED, be sure to use the max instead of the min.
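A minimal sketch of masked matching, assuming the irrelevant pixels of the template are black (the mask construction is an assumption; the Java bindings take the mask as an extra argument to matchTemplate):
// Build a mask that is non-zero wherever the template carries information.
Mat templGray = new Mat();
Imgproc.cvtColor(templ2resized, templGray, Imgproc.COLOR_BGR2GRAY);
Mat mask = new Mat();
Imgproc.threshold(templGray, mask, 0, 255, Imgproc.THRESH_BINARY);
// Masked match; with TM_CCORR_NORMED the best match is the MAXIMUM.
Mat result = new Mat();
Imgproc.matchTemplate(iterateImg, templ2resized, result, Imgproc.TM_CCORR_NORMED, mask);
Core.MinMaxLocResult mmr = Core.minMaxLoc(result);
Point bestMatch = mmr.maxLoc;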

How to find contours in algorithm conversion from Python 3.6 to Java 1.8 JavaCV

I have to develop an algorithm similar to the one in Remove top section of image above border line to detect text document, but in Java 1.8 using JavaCV.
The method signature in Python is
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
However in Java it appears to be:
MatVector mt = new MatVector();
findContours(dst, mt, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
I'm stuck on finding the contours and sorting them. How do I go about sorting the contours from biggest to smallest?
My code:
Mat image = imread(imagePath);
Mat gray = new Mat();
cvtColor(image, gray, COLOR_BGR2GRAY);
Mat grayImg = convertToGray(image);
GaussianBlur(grayImg, grayImg, new Size(3, 3), 0);
Mat dst = new Mat();
threshold(grayImg, dst, 0, 255, THRESH_BINARY + THRESH_OTSU);
// Find contours and sort for largest contour
MatVector mt = new MatVector();
findContours(dst, mt, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
A suggestion on how to access the contours, from https://github.com/bytedeco/javacv/issues/1270:
// accessing contours
MatVector contours = ...
for (int i = 0; i < contours.size(); ++i) {
    IntIndexer points = contours.get(i).createIndexer();
    // each contour is a Mat with a single column and size(0) rows;
    // every element has two channels (x and y), i.e. CV_32SC2 for integer points
    int size = (int) points.size(0);
    for (int j = 0; j < size; ++j) {
        int x = points.get(2 * j);
        int y = points.get(2 * j + 1);
        // do something with x and y
    }
}
Thank you
As @fmw42 said, I refactored the code to use contourArea().
See below:
Mat mask = new Mat();
Mat gray = new Mat();
Mat denoised = new Mat();
Mat bin = new Mat();
Mat hierarchy = new Mat();
MatVector contours = new MatVector();
cvtColor(mat, gray, COLOR_BGR2GRAY);
// Normalize
GaussianBlur(gray, denoised, new Size(5, 5), 0);
threshold(denoised, mask, 0, 255, THRESH_BINARY_INV | THRESH_OTSU);
normalize(gray, gray, 0, 255, NORM_MINMAX, -1, mask);
// Convert image to binary
threshold(gray, bin, 150, 255, THRESH_BINARY);
// Find contours
findContours(bin, contours, hierarchy, RETR_TREE, CHAIN_APPROX_NONE);
long contourCount = contours.size();
System.out.println("Contour count " + contourCount);
double maxArea = 0;
int maxAreaId = 0;
for (int i = 0; i < contourCount; ++i) {
    // Calculate the area of each contour
    Mat contour = contours.get(i);
    double area = contourArea(contour);
    if (area > maxArea) {
        maxAreaId = i;
        maxArea = area;
    }
}
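To answer the original sorting question: one way (a sketch, untested) is to copy the MatVector into a java.util.List and sort it by contourArea in descending order:
// Copy the contours out of the MatVector so List.sort can be used,
// then order by area, biggest first.
List<Mat> sorted = new ArrayList<>();
for (long i = 0; i < contours.size(); i++) {
    sorted.add(contours.get(i));
}
sorted.sort((a, b) -> Double.compare(contourArea(b), contourArea(a)));
// sorted.get(0) is now the largest contour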

OpenCV: How to draw connectedComponentsWithStats labels in Java

I am trying to mask my binary image in a way that it only shows certain labels (selected by me).
This piece of code does what I want, but can someone please tell me a better way to achieve it? I suppose you can do it with masks without iterating over each pixel, but the code examples I find are always in C++ and I can't figure out how to do it in Java.
Mat labels = new Mat();
Mat stats = new Mat();
Mat centroids = new Mat();
List<Integer> labelsToKeep = new ArrayList<>();
Imgproc.connectedComponentsWithStats(binary, labels, stats, centroids, 4);
for (int i = 0; i < stats.height(); i++) {
    // for each label (someCondition is a placeholder for my selection logic)
    if (someCondition)
        labelsToKeep.add(i);
}
Mat mask = new Mat(binary.rows(), binary.cols(), binary.type());
for (int i = 0; i < binary.rows(); i++) {
    for (int j = 0; j < binary.cols(); j++) {
        // for each pixel
        double[] data = new double[1];
        if (labelsToKeep.contains((int) labels.get(i, j)[0]))
            data[0] = 255;
        else
            data[0] = 0;
        mask.put(i, j, data);
    }
}
Mat masked = new Mat();
binary.copyTo(masked, mask);
Regards
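One mask-based way to avoid the per-pixel Java loop (a sketch I have not run; it relies on Core.compare and Core.bitwise_or doing the looping natively):
// Build the mask with one compare per kept label instead of per-pixel loops.
// Core.compare writes 255 where labels == label and 0 elsewhere (CV_8UC1).
Mat mask = Mat.zeros(labels.size(), CvType.CV_8UC1);
for (int label : labelsToKeep) {
    Mat labelMask = new Mat();
    Core.compare(labels, new Scalar(label), labelMask, Core.CMP_EQ);
    Core.bitwise_or(mask, labelMask, mask);
}
Mat masked = new Mat();
binary.copyTo(masked, mask);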

How to make K-mean image processing algorithm faster in my android application

I want a faster way to apply k-means to an image and display it on the screen. I am looking for an OpenCV for Android solution. My code takes about 30 s to run on a smartphone; I want it to run in about 1-2 s.
I already have code that runs k-means and displays the result using OpenCV, but I need it to be faster. I think the way it labels the image and displays it takes too much time.
public void k_Mean() {
    Mat rgba = new Mat();
    Mat mHSV = new Mat();
    Bitmap bitmap = BitmapFactory.decodeResource(getResources(), images[current_image]);
    Bitmap outputBitmap = Bitmap.createBitmap(bitmap.getWidth(), bitmap.getHeight(), Bitmap.Config.RGB_565);
    Utils.bitmapToMat(bitmap, rgba);
    // must convert to a 3-channel image before RGB2HSV
    Imgproc.cvtColor(rgba, mHSV, Imgproc.COLOR_RGBA2RGB, 3);
    Imgproc.cvtColor(mHSV, mHSV, Imgproc.COLOR_RGB2HSV, 3);
    Mat clusters = cluster(mHSV, 3).get(0);
    Utils.matToBitmap(clusters, outputBitmap);
    imageView.setImageBitmap(outputBitmap);
}
public List<Mat> cluster(Mat cutout, int k) {
    // one row per pixel, one column per channel
    Mat samples = cutout.reshape(1, cutout.cols() * cutout.rows());
    Mat samples32f = new Mat();
    samples.convertTo(samples32f, CvType.CV_32F, 1.0 / 255.0);
    Mat labels = new Mat();
    // criteria caps the number of iterations
    TermCriteria criteria = new TermCriteria(TermCriteria.COUNT, 20, 1);
    Mat centers = new Mat();
    Core.kmeans(samples32f, k, labels, criteria, 1, Core.KMEANS_PP_CENTERS, centers);
    return showClusters(cutout, labels, centers);
}
private static List<Mat> showClusters(Mat cutout, Mat labels, Mat centers) {
    centers.convertTo(centers, CvType.CV_8UC1, 255.0);
    centers.reshape(3); // note: the reshaped Mat is discarded; centers stays k x 3, 1-channel
    System.out.println(labels + "labels");
    List<Mat> clusters = new ArrayList<Mat>();
    for (int i = 0; i < centers.rows(); i++) {
        clusters.add(Mat.zeros(cutout.size(), cutout.type()));
    }
    Map<Integer, Integer> counts = new HashMap<Integer, Integer>();
    for (int i = 0; i < centers.rows(); i++) counts.put(i, 0);
    int rows = 0;
    for (int y = 0; y < cutout.rows(); y++) {
        for (int x = 0; x < cutout.cols(); x++) {
            int label = (int) labels.get(rows, 0)[0];
            int r = (int) centers.get(label, 2)[0];
            int g = (int) centers.get(label, 1)[0];
            int b = (int) centers.get(label, 0)[0];
            counts.put(label, counts.get(label) + 1);
            clusters.get(label).put(y, x, b, g, r);
            rows++;
        }
    }
    System.out.println(counts);
    return clusters;
}
My output is correct, but I wonder if there is a faster way to do this. My other image processing algorithms run in less than 1 s.
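Most of the time here is probably spent in the per-pixel labels.get()/put() calls, which cross the Java/native boundary millions of times. A sketch of the usual fix (paintClusters is a hypothetical replacement for showClusters that returns one recolored Mat instead of k cluster Mats; it assumes a 3-channel 8-bit input, as in the question):
private static Mat paintClusters(Mat image, Mat labels, Mat centers) {
    centers.convertTo(centers, CvType.CV_8UC1, 255.0);
    // Pull centers and labels into Java arrays with one bulk get() each.
    byte[] centerData = new byte[centers.rows() * centers.cols()];
    centers.get(0, 0, centerData);
    int[] labelData = new int[(int) labels.total()];
    labels.get(0, 0, labelData);
    // Recolor every pixel in a plain Java array, then write it back in one put().
    byte[] pixels = new byte[(int) image.total() * 3];
    for (int i = 0; i < labelData.length; i++) {
        int label = labelData[i];
        pixels[3 * i]     = centerData[3 * label];
        pixels[3 * i + 1] = centerData[3 * label + 1];
        pixels[3 * i + 2] = centerData[3 * label + 2];
    }
    Mat painted = new Mat(image.size(), CvType.CV_8UC3);
    painted.put(0, 0, pixels);
    return painted;
}
Running k-means on a pyrDown'd copy of the image would cut the clustering time roughly fourfold per pyramid level as well, at some cost in cluster accuracy.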

Illumination Normalization not returning expected results

I am using OpenCV4Android to process my images. I wanted to perform the Illumination Normalization described in this work, which was linked to me:
http://lear.inrialpes.fr/pubs/2007/TT07/Tan-amfg07a.pdf
Furthermore, I was given a complete implementation in C++ (OpenCV):
https://github.com/bytefish/opencv/blob/master/misc/tan_triggs.cpp
I tried to rewrite this code to Java, but I think there might be a mistake somewhere. What I get from this algorithm is close, but not good enough. Check the expected results in the PDF above, for example on page 12. And this is what I get:
https://dl.dropboxusercontent.com/u/108321090/a1.png
https://dl.dropboxusercontent.com/u/108321090/Screenshot_2013-12-31-14-09-25.png
So there is still too much noise between the background and the face features, but I think it's my fault here. This is my code:
//GET IMAGE URI
Uri selectedImage = imageReturnedIntent.getData();
//CREATE BITMAP FROM IT
BitmapFactory.Options bmpFactoryOptions = new BitmapFactory.Options();
bmpFactoryOptions.inPreferredConfig = Bitmap.Config.ARGB_8888;
Bitmap bmp = BitmapFactory.decodeStream(getContentResolver().openInputStream(selectedImage),
        null, bmpFactoryOptions);
//CREATE OPENCV MAT OBJECT
Mat imageMat = new Mat();
Utils.bitmapToMat(bmp, imageMat);
//CONVERT TO GRAYSCALE
Mat grayMat = new Mat();
Imgproc.cvtColor(imageMat, grayMat, Imgproc.COLOR_BGR2GRAY);
//CUT OUT FACE FROM WHOLE IMAGE
(...) // face detection cascades localize the face and write the region
      // into facesArray; then I create a Mat with only the face in it:
Mat cleanFaceMatGRAY = new Mat(faceDetectMatGRAY, facesArray[0]);
//PROCESS THE MAT WITH THE FACE (algorithm from the PDF & .cpp file)
Mat I = tan_triggs_preprocessing(cleanFaceMatGRAY);
Core.normalize(I, I, 0, 255, Core.NORM_MINMAX, CvType.CV_8UC1);
//DISPLAY MAT IN IMAGEVIEW
ivPickedPhoto.setImageBitmap(AppTools.createBitmapFromMat(I, Bitmap.Config.ARGB_8888));
And the method with the algorithm (as you can see, it's nearly a copy-paste of the .cpp file, with the methods rewritten for OpenCV4Android):
private Mat tan_triggs_preprocessing(Mat image) {
    float alpha = 0.1f;
    float tau = 10.0f;
    float gamma = 0.2f;
    int sigma0 = 1;
    int sigma1 = 2;
    // Convert to floating point:
    Mat X = image;
    X.convertTo(X, CvType.CV_32FC1);
    // Start preprocessing:
    Mat I = new Mat();
    Core.pow(X, gamma, I);
    // Calculate the DoG image:
    {
        Mat gaussian0 = new Mat();
        Mat gaussian1 = new Mat();
        // Kernel size:
        int kernel_sz0 = (3 * sigma0);
        int kernel_sz1 = (3 * sigma1);
        // Make them odd for OpenCV:
        kernel_sz0 += ((kernel_sz0 % 2) == 0) ? 1 : 0;
        kernel_sz1 += ((kernel_sz1 % 2) == 0) ? 1 : 0;
        Size ksize1 = new Size(kernel_sz0, kernel_sz0);
        Size ksize2 = new Size(kernel_sz1, kernel_sz1);
        Imgproc.GaussianBlur(I, gaussian0, ksize1, sigma0, sigma0, Imgproc.BORDER_CONSTANT);
        Imgproc.GaussianBlur(I, gaussian1, ksize2, sigma1, sigma1, Imgproc.BORDER_CONSTANT);
        Core.subtract(gaussian0, gaussian1, I);
    }
    {
        double meanI = 0.0;
        {
            Mat tmp = new Mat();
            Mat abstmp = new Mat();
            Core.absdiff(I, new Scalar(0), abstmp);
            Core.pow(abstmp, alpha, tmp);
            meanI = Core.mean(tmp).val[0];
        }
        Core.divide(Math.pow(meanI, 1.0 / alpha), I, I);
    }
    {
        double meanI = 0.0;
        {
            Mat tmp = new Mat();
            Mat abstmp = new Mat();
            Mat mintmp = new Mat();
            Core.absdiff(I, new Scalar(0), abstmp);
            Core.min(abstmp, new Scalar(tau), mintmp);
            Core.pow(mintmp, alpha, tmp);
            meanI = Core.mean(tmp).val[0];
        }
        Core.divide(Math.pow(meanI, 1.0 / alpha), I, I);
    }
    // Squash into the tanh:
    {
        for (int r = 0; r < I.rows(); r++) {
            for (int c = 0; c < I.cols(); c++) {
                I.get(r, c)[0] = Math.tanh(I.get(r, c)[0]) / tau;
            }
        }
        Core.multiply(I, new Scalar(tau), I);
    }
    return I;
}
And what I didn't understand while rewriting this code was the iteration over the matrix. In the .cpp there is
I.at<float>(r,c)
which I have replaced with just:
I.get(r,c)[0]
Do you think I might have lost some data here, and that's why the image is shady?
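Two things in the port look suspect. First, in the Java bindings Mat.get(r, c) returns a copy of the element, so I.get(r,c)[0] = ... writes into a temporary array and never touches the Mat; the whole tanh loop is a no-op. Second, the C++ computes tanh(I / tau) * tau, while your loop applies tanh first and divides afterwards. Also note that the Core.divide(double, Mat, Mat) overload computes scale / src element-wise, whereas the C++ does I / pow(meanI, 1/alpha); Core.divide(I, new Scalar(Math.pow(meanI, 1.0/alpha)), I) would match the original. A bulk-array sketch of the tanh step that matches the C++ (it replaces both the loop and the Core.multiply):
// Read the whole CV_32F Mat into a float[], apply tau * tanh(v / tau),
// and write it back with a single put() - Mat.get(r, c) alone cannot write.
float[] data = new float[(int) I.total()];
I.get(0, 0, data);
for (int i = 0; i < data.length; i++) {
    data[i] = (float) (tau * Math.tanh(data[i] / tau));
}
I.put(0, 0, data);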
