How to do skew angle correction using Java

I read a tutorial on this site about correcting skew angle, but I don't understand how to convert the following C++ code to Java.
std::vector<cv::Point> points;
cv::Mat_<uchar>::iterator it = img.begin<uchar>();
cv::Mat_<uchar>::iterator end = img.end<uchar>();
for (; it != end; ++it)
    if (*it)                        // what is the meaning of this code (1)
        points.push_back(it.pos()); // what is the meaning of this code (2)
Please help me understand this code.
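To answer the two inline questions: the iterator walks over every pixel of the binary image; (1) if (*it) is true for any non-zero (white) pixel, and (2) it.pos() returns that pixel's coordinates, so the loop collects the locations of all white pixels. The Java bindings have no pixel iterator, but Core.findNonZero does the same job; a minimal sketch, assuming img is already a binarized single-channel Mat:
Mat whitePixels = new Mat();
Core.findNonZero(img, whitePixels);              // one row per non-zero pixel
MatOfPoint points = new MatOfPoint(whitePixels); // the Java counterpart of the C++ points vector
The full translation below uses exactly this approach.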

For OpenCV 3.2.0, here is a full Java translation of the C++ deskew code at https://felix.abecassis.me/2011/10/opencv-bounding-box-skew-angle/ (with small modifications):
public Mat deskew(Mat src, double angle) {
    Point center = new Point(src.width() / 2, src.height() / 2);
    Mat rotImage = Imgproc.getRotationMatrix2D(center, angle, 1.0); // 1.0 means 100% scale
    Size size = new Size(src.width(), src.height());
    Imgproc.warpAffine(src, src, rotImage, size, Imgproc.INTER_LINEAR + Imgproc.CV_WARP_FILL_OUTLIERS);
    return src;
}
public void computeSkew(String inFile) {
    // Load the image in grayscale
    Mat img = Imgcodecs.imread(inFile, Imgcodecs.IMREAD_GRAYSCALE);
    // Binarize it; use adaptive threshold if necessary:
    // Imgproc.adaptiveThreshold(img, img, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY, 15, 40);
    Imgproc.threshold(img, img, 200, 255, Imgproc.THRESH_BINARY);
    // Invert the colors (objects are represented as white pixels, the background as black pixels)
    Core.bitwise_not(img, img);
    // Declare a rectangle-shaped structuring element and perform the erosion
    Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
    Imgproc.erode(img, img, element);
    // Create an empty Mat and pass it to the function to find all white pixels
    Mat wLocMat = Mat.zeros(img.size(), img.type());
    Core.findNonZero(img, wLocMat);
    MatOfPoint matOfPoint = new MatOfPoint(wLocMat);
    // Translate MatOfPoint to MatOfPoint2f so it can be used in the next step
    MatOfPoint2f mat2f = new MatOfPoint2f();
    matOfPoint.convertTo(mat2f, CvType.CV_32FC2);
    // Get the rotated rect of the white pixels
    RotatedRect rotatedRect = Imgproc.minAreaRect(mat2f);
    Point[] vertices = new Point[4];
    rotatedRect.points(vertices);
    List<MatOfPoint> boxContours = new ArrayList<>();
    boxContours.add(new MatOfPoint(vertices));
    Imgproc.drawContours(img, boxContours, 0, new Scalar(128, 128, 128), -1);
    double resultAngle = rotatedRect.angle; // note: unused; the adjusted rotatedRect.angle is used below
    if (rotatedRect.size.width > rotatedRect.size.height) {
        rotatedRect.angle += 90.f;
    }
    // Or:
    // rotatedRect.angle = rotatedRect.angle < -45 ? rotatedRect.angle + 90.f : rotatedRect.angle;
    Mat result = deskew(Imgcodecs.imread(inFile), rotatedRect.angle);
    Imgcodecs.imwrite(outputFile, result); // outputFile is assumed to be defined elsewhere
}
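A note on the width/height branch above: for these OpenCV versions, minAreaRect reports an angle in [-90, 0), measured against whichever side it happens to call the width, which is why 90 degrees is added when the box is wider than tall. The commented-out one-liner is the compact form of the same normalization; written out on its own it reads:
double angle = rotatedRect.angle;   // in [-90, 0) for OpenCV versions up to 4.x
if (angle < -45) {
    angle += 90;                    // the box was measured against the other side
}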

import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class ValidateDocumentAlignment {

    public ValidateDocumentAlignment() {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    }

    public boolean isDocumentTiltAngleWithinThresholdLimit(File scannedDoc, int thresholdAngle) {
        int kernelSize = 3;
        int cannyLowerThreshold = 25;
        int cannyUpperThreshold = 50;
        Mat image = new Mat();
        Mat blur = new Mat();
        Mat edged = new Mat();
        Mat dilate = new Mat();
        Mat erode = new Mat();
        int maxValIdx = 0;
        double area = 0;
        List<MatOfPoint> contours = new ArrayList<>();
        Mat sourceImage = Imgcodecs.imread(scannedDoc.getPath(), Imgcodecs.IMREAD_GRAYSCALE);
        Imgproc.adaptiveThreshold(sourceImage, sourceImage, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY, 15, 40);
        Core.bitwise_not(sourceImage, sourceImage);
        Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(kernelSize, kernelSize));
        Imgproc.morphologyEx(sourceImage, image, Imgproc.MORPH_CLOSE, kernel);
        Imgproc.GaussianBlur(image, blur, new Size(7, 7), 0);
        Imgproc.Canny(blur, edged, cannyLowerThreshold, cannyUpperThreshold);
        Imgproc.dilate(edged, dilate, kernel, new Point(-1, -1), 6);
        Imgproc.erode(dilate, erode, kernel, new Point(-1, -1), 3);
        Imgproc.findContours(erode, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
        for (int contourIdx = 0; contourIdx < contours.size(); contourIdx++) {
            Rect rect = Imgproc.boundingRect(contours.get(contourIdx));
            if ((rect.height * rect.width) > area) {
                area = rect.height * rect.width;
                maxValIdx = contourIdx;
            }
        }
        RotatedRect rotatedRect = Imgproc.minAreaRect(new MatOfPoint2f(contours.get(maxValIdx).toArray()));
        double skewAngle = rotatedRect.angle;
        int acuteAngle = (int) (skewAngle % 90);
        boolean isProperlyAligned = true;
        if (Math.abs(acuteAngle) > thresholdAngle && Math.abs(acuteAngle) < (90 - thresholdAngle)) {
            isProperlyAligned = false;
        }
        return isProperlyAligned;
    }
}
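A minimal usage sketch for the class above (the file path and threshold value are assumptions):
ValidateDocumentAlignment validator = new ValidateDocumentAlignment();
boolean aligned = validator.isDocumentTiltAngleWithinThresholdLimit(new File("/tmp/scan.png"), 5); // allow up to 5 degrees of tilt
System.out.println(aligned ? "within threshold" : "needs deskewing");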

private fun main() {
    // Assumes android.graphics.{Bitmap, PointF}, org.opencv.* and kotlin.math.{sqrt, pow} are imported
    val bmp: Bitmap? = null // any bitmap (if you are working with bitmaps)
    var mRgba = Mat() // else you can use a Mat directly in onCameraFrame
    val mGray = Mat()
    val bmp32: Bitmap = bmp!!.copy(Bitmap.Config.ARGB_8888, true) // !! added: bmp must be non-null here
    Utils.bitmapToMat(bmp32, mRgba)
    Imgproc.cvtColor(mRgba, mGray, Imgproc.COLOR_BGR2GRAY)
    mRgba = makeOrientationCorrection(mRgba, mGray) // here the actual magic starts
    Imgproc.cvtColor(mRgba, mGray, Imgproc.COLOR_BGR2GRAY)
    val bmpOutX = Bitmap.createBitmap(
        mRgba.cols(),
        mRgba.rows(),
        Bitmap.Config.ARGB_8888
    )
    Utils.matToBitmap(mRgba, bmpOutX)
    binding.imagePreview.setImageBitmap(bmpOutX) // binding: Android ViewBinding, assumed available in this scope
}

private fun makeOrientationCorrection(mRGBA: Mat, mGRAY: Mat): Mat {
    val dst = Mat()
    val cdst = Mat()
    val cdstP: Mat
    Imgproc.Canny(mGRAY, dst, 50.0, 200.0, 3, false)
    Imgproc.cvtColor(dst, cdst, Imgproc.COLOR_GRAY2BGR)
    cdstP = cdst.clone()
    val linesP = Mat()
    Imgproc.HoughLinesP(dst, linesP, 1.0, Math.PI / 180, 50, 50.0, 10.0)
    var biggestLineX1 = 0.0
    var biggestLineY1 = 0.0
    var biggestLineX2 = 0.0
    var biggestLineY2 = 0.0
    var biggestLine = 0.0
    for (x in 0 until linesP.rows()) {
        val l = linesP[x, 0]
        Imgproc.line(
            cdstP, org.opencv.core.Point(l[0], l[1]),
            org.opencv.core.Point(l[2], l[3]),
            Scalar(0.0, 0.0, 255.0), 3, Imgproc.LINE_AA, 0)
    }
    for (x in 0 until linesP.rows()) {
        val l = linesP[x, 0]
        val x1 = l[0]
        val y1 = l[1]
        val x2 = l[2]
        val y2 = l[3]
        val lineHeight = sqrt(((x2 - x1).pow(2.0)) + ((y2 - y1).pow(2.0)))
        if (biggestLine < lineHeight) {
            val angleOfRotationX1 = angleOf(PointF(x1.toFloat(), y1.toFloat()), PointF(x2.toFloat(), y2.toFloat()))
            Log.e("angleOfRotationX1", "$angleOfRotationX1")
            if (angleOfRotationX1 < 45.0 || angleOfRotationX1 > 270.0) {
                biggestLine = lineHeight
                if (angleOfRotationX1 < 45.0) {
                    biggestLineX1 = x1
                    biggestLineY1 = y1
                    biggestLineX2 = x2
                    biggestLineY2 = y2
                }
                if (angleOfRotationX1 > 270.0) {
                    biggestLineX1 = x2
                    biggestLineY1 = y2
                    biggestLineX2 = x1
                    biggestLineY2 = y1
                }
            }
        }
        if (x == linesP.rows() - 1) {
            Imgproc.line(
                cdstP, org.opencv.core.Point(biggestLineX1, biggestLineY1),
                org.opencv.core.Point(biggestLineX2, biggestLineY2),
                Scalar(255.0, 0.0, 0.0), 3, Imgproc.LINE_AA, 0)
        }
    }
    var angle = angleOf(PointF(biggestLineX1.toFloat(), biggestLineY1.toFloat()), PointF(biggestLineX2.toFloat(), biggestLineY2.toFloat()))
    Log.e("angleOfRotationX2", "$angle")
    angle -= (angle * 2) // same as angle = -angle: rotate in the opposite direction
    return deskew(mRGBA, angle)
}

fun angleOf(p1: PointF, p2: PointF): Double {
    val deltaY = (p1.y - p2.y).toDouble()
    val deltaX = (p2.x - p1.x).toDouble()
    val result = Math.toDegrees(Math.atan2(deltaY, deltaX))
    return if (result < 0) 360.0 + result else result
}

private fun deskew(src: Mat, angle: Double): Mat {
    val center = org.opencv.core.Point((src.width() / 2).toDouble(), (src.height() / 2).toDouble())
    val scaleBy = if (angle < 0) {
        1.0 + ((0.5 * angle) / 45) // max scale down by 0.50 (50%) based on angle
    } else {
        1.0 - ((0.3 * angle) / 45) // max scale down by 0.30 (30%) based on angle
    }
    Log.e("scaleBy", "" + scaleBy)
    val rotImage = Imgproc.getRotationMatrix2D(center, angle, scaleBy)
    val size = Size(src.width().toDouble(), src.height().toDouble())
    Imgproc.warpAffine(src, src, rotImage, size, Imgproc.INTER_LINEAR + Imgproc.CV_WARP_FILL_OUTLIERS)
    return src
}

Related

Android Canny, Hough transform: I want to go back to the original image

On Android, I used Canny and the Hough transform to detect lines on the phone screen. However, I can't find a way to get back to the original image (I mean HSV to BGR). I want to keep the detected lines on the BGR image. Thank you!
My code is below; please let me know what I need to change.
public class CameraActivity extends Activity implements CameraBridgeViewBase.CvCameraViewListener2 {
    private static final String TAG = "MainActivity";
    private Mat mRgba;
    private Mat mGray;
    Scalar scalarLow, scalarHigh; ///
    Mat mat1, mat2; ////
    Rect rect; ////
    Rect roi_rect; ///
    private Mat m_matRoi; ////
    Point rect1, rect2; ///////

    public void onCameraViewStarted(int width, int height) {
        mRgba = new Mat(height, width, CvType.CV_8UC4);
        mGray = new Mat(height, width, CvType.CV_8UC1);
        mat1 = new Mat(height, width, CvType.CV_8UC4);
        mat2 = new Mat(height, width, CvType.CV_8UC4);
    }

    public void onCameraViewStopped() {
        mRgba.release();
    }

    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
        mRgba = inputFrame.rgba(); // mRgba = image, plays the same role as matInput
        mGray = inputFrame.gray();
        double m_dWscale = (double) 1 / 3;
        double m_dHscale = (double) 1 / 4;
        int mRoiWidth = (int) (mRgba.size().width * m_dWscale);
        int mRoiHeight = (int) (mRgba.size().height * m_dHscale);
        int mRoiX = (int) (mRgba.size().width - mRoiWidth);
        int mRoiY = (int) (mRgba.size().height - mRoiHeight);
        //rect = new Rect(mRoiX, mRoiY, mRoiWidth, mRoiHeight);
        rect1 = new Point(mRoiX, mRoiY);
        rect2 = new Point(mRoiWidth, mRoiHeight);
        Imgproc.rectangle(mRgba, rect1, rect2, new Scalar(0, 255, 0, 255), 5);
        roi_rect = new Rect(mRoiX + 4, mRoiY + 4, mRoiWidth - 8, mRoiHeight - 8);
        m_matRoi = mRgba.submat(roi_rect);
        Mat temp = new Mat();
        Imgproc.cvtColor(m_matRoi, temp, Imgproc.COLOR_BGRA2GRAY, 0);
        Mat temp_rgba = new Mat();
        Imgproc.cvtColor(temp, temp_rgba, Imgproc.COLOR_GRAY2BGRA, 0);
        temp_rgba.copyTo(m_matRoi);
        scalarLow = new Scalar(0, 0, 200);
        scalarHigh = new Scalar(180, 255, 255);
        Imgproc.cvtColor(mRgba, mat1, Imgproc.COLOR_BGR2HSV);
        Core.inRange(mat1, scalarLow, scalarHigh, mat2);
        Core.bitwise_and(mRgba, mRgba, mat1, mat2);
        mRgba = mat1;
        Imgproc.dilate(mRgba, mRgba, new Mat(), new Point(1, 2), 2); //
        Mat edges = new Mat();
        Imgproc.Canny(mRgba, edges, 90, 150);
        Mat lines = new Mat();
        Point p1 = new Point();
        Point p2 = new Point();
        double a, b;
        double x0, y0;
        Imgproc.HoughLinesP(edges, lines, 1.0, Math.PI / 180.0, 50, 100.0, 10.0); //
        for (int i = 0; i < lines.rows(); i++) {
            double[] l = lines.get(i, 0);
            Imgproc.line(mRgba, new Point(l[0], l[1]), new Point(l[2], l[3]), new Scalar(0, 0, 255.0), 3);
        }
        return mRgba;
    }
}
I split it into two parts because the code between them isn't relevant.
Thank you.
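One hedged suggestion for getting back to the original colors: the inRange masking is what throws the colors away, not the HSV conversion itself, so either convert back with the inverse cvtColor code, or do the detection on copies and draw onto the untouched frame. A sketch (variable names follow the code above; note that onCameraFrame delivers an RGBA frame, so it is safer to drop the alpha channel before the HSV round trip):
Mat rgb = new Mat();
Imgproc.cvtColor(mRgba, rgb, Imgproc.COLOR_RGBA2RGB);  // drop alpha
Mat hsv = new Mat();
Imgproc.cvtColor(rgb, hsv, Imgproc.COLOR_RGB2HSV);     // forward conversion
Core.inRange(hsv, scalarLow, scalarHigh, mat2);        // mat2 is the binary mask
Mat back = new Mat();
Imgproc.cvtColor(hsv, back, Imgproc.COLOR_HSV2RGB);    // inverse conversion restores the colors
Alternatively, keep mRgba untouched, run Canny/HoughLinesP on the mask, and draw the detected lines directly on mRgba as in the loop above.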
Edit1)
To @Jeru Luke: I changed the code as you suggested, and marked every changed line with runs of slashes.
public void onCameraViewStarted(int width, int height) {
    mRgba = new Mat(height, width, CvType.CV_8UC4);
    mGray = new Mat(height, width, CvType.CV_8UC1);
    mat1 = new Mat(height, width, CvType.CV_8UC4);
    mat2 = new Mat(height, width, CvType.CV_8UC4); //////////
}

public void onCameraViewStopped() {
    mRgba.release();
}

public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba(); // mRgba = image, plays the same role as matInput
    mGray = inputFrame.gray();
    double m_dWscale = (double) 1 / 3;
    double m_dHscale = (double) 1 / 4;
    int mRoiWidth = (int) (mRgba.size().width * m_dWscale);
    int mRoiHeight = (int) (mRgba.size().height * m_dHscale);
    int mRoiX = (int) (mRgba.size().width - mRoiWidth);
    int mRoiY = (int) (mRgba.size().height - mRoiHeight);
    //rect = new Rect(mRoiX, mRoiY, mRoiWidth, mRoiHeight);
    rect1 = new Point(mRoiX, mRoiY);
    rect2 = new Point(mRoiWidth, mRoiHeight);
    Imgproc.rectangle(mRgba, rect1, rect2, new Scalar(0, 255, 0, 255), 5);
    roi_rect = new Rect(mRoiX + 4, mRoiY + 4, mRoiWidth - 8, mRoiHeight - 8);
    m_matRoi = mRgba.submat(roi_rect);
    Mat temp = new Mat();
    Imgproc.cvtColor(m_matRoi, temp, Imgproc.COLOR_BGRA2GRAY, 0);
    Mat temp_rgba = new Mat();
    Imgproc.cvtColor(temp, temp_rgba, Imgproc.COLOR_GRAY2BGRA, 0);
    temp_rgba.copyTo(m_matRoi);
    scalarLow = new Scalar(0, 0, 200);
    scalarHigh = new Scalar(180, 255, 255);
    Imgproc.cvtColor(mRgba, mat1, Imgproc.COLOR_BGR2HSV);
    Core.inRange(mat1, scalarLow, scalarHigh, mat2);
    Core.bitwise_and(mRgba, mRgba, mat1, mat2);
    mRgba = mat2; ////////////////////////////////////
    Imgproc.dilate(mat1, mRgba, new Mat(), new Point(1, 2), 2); /////////////
    Mat edges = new Mat();
    Imgproc.Canny(mat1, edges, 90, 150); //////////////////////////
    Mat lines = new Mat();
    Point p1 = new Point();
    Point p2 = new Point();
    double a, b;
    double x0, y0;
    Imgproc.HoughLinesP(edges, lines, 1.0, Math.PI / 180.0, 50, 100.0, 10.0);
    for (int i = 0; i < lines.rows(); i++) {
        double[] l = lines.get(i, 0);
        Imgproc.line(mRgba, new Point(l[0], l[1]), new Point(l[2], l[3]), new Scalar(0, 0, 255.0), 3);
    }
    return mRgba;
}
}
Edit2) My final objective in this project is to draw lines only in the ROI (Region of Interest) area. The code below is about the ROI, but the problem is that it only shows the rectangular box while lines are drawn everywhere on the phone screen.
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    mat3 = inputFrame.rgba();
    double m_dWscale = (double) 1 / 3;
    double m_dHscale = (double) 1 / 4;
    int mRoiWidth = (int) (mRgba.size().width * m_dWscale);
    int mRoiHeight = (int) (mRgba.size().height * m_dHscale);
    int mRoiX = (int) (mRgba.size().width - mRoiWidth);
    int mRoiY = (int) (mRgba.size().height - mRoiHeight);
    //rect = new Rect(mRoiX, mRoiY, mRoiWidth, mRoiHeight);
    rect1 = new Point(mRoiX, mRoiY);
    rect2 = new Point(mRoiWidth, mRoiHeight);
    Imgproc.rectangle(mRgba, rect1, rect2, new Scalar(0, 255, 0, 255), 5);
    roi_rect = new Rect(mRoiX + 4, mRoiY + 4, mRoiWidth - 8, mRoiHeight - 8);
    m_matRoi = mRgba.submat(roi_rect);
    //Mat temp = new Mat();
    //Imgproc.cvtColor(m_matRoi, temp, Imgproc.COLOR_BGRA2GRAY, 0);
    //Mat temp_rgba = new Mat();
    //Imgproc.cvtColor(temp, temp_rgba, Imgproc.COLOR_GRAY2BGRA, 0);
    //temp_rgba.copyTo(m_matRoi);
This part is about the ROI.
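A hedged sketch of one way to keep the lines inside the ROI: run Canny and HoughLinesP on the submat rather than the full frame, and draw into that same submat. Because a submat shares memory with mRgba, anything drawn on it lands inside the rectangle automatically (names follow the code above):
Mat roi = mRgba.submat(roi_rect);                  // view into the full frame
Mat roiGray = new Mat();
Imgproc.cvtColor(roi, roiGray, Imgproc.COLOR_RGBA2GRAY);
Mat roiEdges = new Mat();
Imgproc.Canny(roiGray, roiEdges, 90, 150);
Mat roiLines = new Mat();
Imgproc.HoughLinesP(roiEdges, roiLines, 1.0, Math.PI / 180.0, 50, 100.0, 10.0);
for (int i = 0; i < roiLines.rows(); i++) {
    double[] l = roiLines.get(i, 0);
    // coordinates are relative to the submat, so drawing into roi keeps them inside the box
    Imgproc.line(roi, new Point(l[0], l[1]), new Point(l[2], l[3]), new Scalar(255, 0, 0, 255), 3);
}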
Edit3) The code below is about making a mask, I guess.
roi_rect = new Rect(mRoiX, mRoiY, mRoiWidth, mRoiHeight);
mat5 = mRgba.submat(roi_rect);
//Mat temp = new Mat();
//Mat temp_rgba = new Mat();
Imgproc.cvtColor(mRgba, mRgba, Imgproc.COLOR_BGR2GRAY);
Imgproc.cvtColor(mat5, mat5, Imgproc.COLOR_GRAY2BGR, 0); //
//Imgproc.cvtColor(temp, temp_rgba, Imgproc.COLOR_GRAY2BGRA, 0);
mat5.copyTo(mRgba);
roi_rect's parameters are all int, and mat5 is
mat5 = new Mat(height, width, CvType.CV_8UC4);
I wrote this code to first make the whole frame dark (grayscale), then use roi_rect so that only that part (mat5) is converted GRAY2BGR and pasted back onto mRgba with mat5.copyTo(mRgba). But the app shuts down when I launch it.
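A hedged guess at the crash: after Imgproc.cvtColor(mRgba, mRgba, COLOR_BGR2GRAY) the frame becomes single-channel, while mat5 is a submat of the original 4-channel frame, so the GRAY2BGR conversion and the final copyTo both hit channel/size mismatches and the assert kills the app. A sketch that keeps the channel counts consistent (gray everywhere, original colors only in the ROI):
Mat gray = new Mat();
Imgproc.cvtColor(mRgba, gray, Imgproc.COLOR_RGBA2GRAY);   // 1-channel copy
Imgproc.cvtColor(gray, gray, Imgproc.COLOR_GRAY2RGBA);    // back to 4 channels so types match
Mat roiColor = mRgba.submat(roi_rect).clone();            // keep the original colors of the ROI
gray.copyTo(mRgba);                                       // whole frame is now "dark"/gray
roiColor.copyTo(mRgba.submat(roi_rect));                  // paste the color ROI back on top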

Detect Biggest Rectangle in the Image using Java OpenCV [SOLVED]

How can I detect the four corner points of the biggest square (at the center of the image) using OpenCV in Java?
I have solved this using findContours.
Original Image
Output Image
Please find the code below. I don't know how to detect the end points of the center square. I tried to detect lines using HoughLinesP, but it returns only 1 vertical line instead of all 4.
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

String path = "/Users/saurabhsaluja/Desktop/cimg.jpg";
Mat img = Imgcodecs.imread(path);
Mat destination = new Mat(img.rows(), img.cols(), img.type());
Core.addWeighted(img, 1.3, destination, -0.7, 0, destination);
Mat cannyOutput = new Mat();
int threshold = 15;
Mat srcGray = new Mat();
Imgproc.cvtColor(destination, srcGray, Imgproc.COLOR_BGR2GRAY);
Imgproc.Canny(srcGray, cannyOutput, threshold, threshold * 4);
Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(10, 10));
Mat element2 = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(10, 10));
Imgproc.dilate(cannyOutput, cannyOutput, element);
Imgproc.dilate(cannyOutput, cannyOutput, element2);
element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9, 9));
element2 = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9, 9));
Imgproc.erode(cannyOutput, cannyOutput, element);
Imgproc.erode(cannyOutput, cannyOutput, element2);
Imgcodecs.imwrite("/Users/saurabhsaluja/Desktop/cannyOutput.jpg", cannyOutput); // THE IMAGE YOU ARE LOOKING AT
Mat lines = new Mat();
Imgproc.HoughLinesP(cannyOutput, lines, 1, Math.PI / 180, 50, 20, 20);
// note: HoughLinesP returns one line per row; the original iterated lines.cols() with
// lines.get(0, i), which is presumably why only a single line was ever drawn
for (int i = 0; i < lines.rows(); i++) {
    double[] val = lines.get(i, 0);
    Imgproc.line(img, new Point(val[0], val[1]), new Point(val[2], val[3]), new Scalar(0, 0, 255), 2);
}
Imgcodecs.imwrite("/Users/saurabhsaluja/Desktop/finalimg.jpg", img);
Solution:
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(cannyOutput, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
double inf = 0;
Rect max_rect = null;
for (int i = 0; i < contours.size(); i++) {
    Rect rect = Imgproc.boundingRect(contours.get(i));
    double area = rect.area();
    if (inf < area) {
        max_rect = rect;
        inf = area;
        //Imgcodecs.imwrite("/Users/saurabhsaluja/Desktop/input" + i + ".jpg", img);
    }
    if (area > 50000) {
        System.out.println(area);
        Imgproc.rectangle(img, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(0, 0, 0), 5);
    }
}
Now just get the biggest by looking at the area of each contour.
Thanks.
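To recover the four corner points the question originally asked for, a hedged extension of the accepted approach: keep the index of the largest contour in the loop above (call it maxIdx; the posted code only keeps max_rect, so this index is an assumption), then approximate that contour with approxPolyDP and read off its vertices:
MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(maxIdx).toArray());
MatOfPoint2f approx = new MatOfPoint2f();
double epsilon = 0.02 * Imgproc.arcLength(contour2f, true); // tolerance of ~2% of the perimeter
Imgproc.approxPolyDP(contour2f, approx, epsilon, true);
if (approx.total() == 4) {                                  // a clean quadrilateral
    for (Point corner : approx.toArray()) {
        System.out.println("corner: " + corner);            // the four corner points
    }
}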
Solution Image:
Output:

Java OCR Not reading clearly obvious numbers

picture of processed image + original
I'm working on a project where I take Smash Bros. game output, take a screenshot, and process it to detect the percentages the characters are sitting at.
The program I wrote detects the 57 as a 55 and the 11 (which I let settle to its normal position) as a 51. And while gameplay is running, the numbers jump around.
The program uses Tess4J, and I've configured everything correctly. I have trained Tesseract with a custom font that I made from the game's percentage numbers, and I've tried multiple other fonts as well. What would make it more accurate?
I've thought about detecting when the characters take damage instead of calculating percentages, but I'm still figuring that out.
This is the code I use to process images:
public static Mat blur(Mat input, int numberOfTimes) {
    Mat sourceImage = new Mat();
    Mat destImage = input.clone();
    for (int i = 0; i < numberOfTimes; i++) {
        sourceImage = destImage.clone();
        Imgproc.blur(sourceImage, destImage, new Size(3.0, 3.0));
    }
    return destImage;
}

public static BufferedImage purify(BufferedImage image) {
    BufferedImage image2 = ImageHelper.convertImageToGrayscale(image);
    Mat mat = BufferedImage2Mat(image2, -1);
    Mat resizedMat = new Mat();
    double width = mat.cols();
    double height = mat.rows();
    double aspect = width / height;
    Size sz = new Size(width * aspect * 1.4, height * aspect * 1.4);
    Imgproc.resize(mat, resizedMat, sz);
    double thresh = Imgproc.threshold(resizedMat, resizedMat, 23, 255, Imgproc.THRESH_BINARY_INV);
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
    Imgproc.dilate(resizedMat, resizedMat, kernel, new Point(0, 0), 9);
    return toBufferedImage(HighGui.toBufferedImage(blur(resizedMat, 0)));
}

public static BufferedImage toBufferedImage(Image img) {
    if (img instanceof BufferedImage) {
        return (BufferedImage) img;
    }
    BufferedImage bimage = new BufferedImage(img.getWidth(null), img.getHeight(null), BufferedImage.TYPE_INT_ARGB);
    Graphics2D bGr = bimage.createGraphics();
    bGr.drawImage(img, 0, 0, null);
    bGr.dispose();
    return bimage;
}

public static Image denoise(BufferedImage img) {
    Mat image = BufferedImage2Mat(img, 0);
    Mat out = new Mat();
    Mat tmp = new Mat();
    Mat kernel = new Mat(new Size(3, 3), CvType.CV_8UC1, new Scalar(255));
    Imgproc.morphologyEx(image, tmp, Imgproc.MORPH_OPEN, kernel);
    Imgproc.morphologyEx(tmp, out, Imgproc.MORPH_CLOSE, kernel);
    return HighGui.toBufferedImage(out);
}

public static Mat BufferedImage2Mat(BufferedImage image, int filter) {
    try {
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        ImageIO.write(image, "jpg", byteArrayOutputStream);
        byteArrayOutputStream.flush();
        return Imgcodecs.imdecode(new MatOfByte(byteArrayOutputStream.toByteArray()), filter);
    } catch (IOException e) {
        return null;
    }
}

public static Image clean(BufferedImage image) {
    Mat og = BufferedImage2Mat(image, Imgcodecs.IMREAD_UNCHANGED);
    Mat im = BufferedImage2Mat(image, 0);
    Mat bw = new Mat(im.size(), CvType.CV_8U);
    Imgproc.threshold(im, bw, 0, 255, Imgproc.THRESH_BINARY_INV | Imgproc.THRESH_OTSU);
    Mat dist = new Mat(im.size(), CvType.CV_32F);
    Imgproc.distanceTransform(bw, dist, Imgproc.CV_DIST_L2, Imgproc.CV_DIST_MASK_PRECISE);
    Mat dibw32f = new Mat(im.size(), CvType.CV_32F);
    final double SWTHRESH = 8.0; // stroke width threshold
    Imgproc.threshold(dist, dibw32f, SWTHRESH / 2.0, 255, Imgproc.THRESH_BINARY);
    Mat dibw8u = new Mat(im.size(), CvType.CV_8U);
    dibw32f.convertTo(dibw8u, CvType.CV_8U);
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
    Mat cont = new Mat(im.size(), CvType.CV_8U);
    Imgproc.morphologyEx(dibw8u, cont, Imgproc.MORPH_OPEN, kernel);
    final double HTHRESH = im.rows() * 0.5;
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    List<Point> digits = new ArrayList<Point>();
    Mat hierchy = new Mat();
    Imgproc.findContours(cont, contours, hierchy, Imgproc.RETR_CCOMP, Imgproc.CHAIN_APPROX_SIMPLE, new Point(0, 0));
    List<Mat> cleanedMatList = new ArrayList<Mat>();
    int c = 0;
    // note: the original condition was (i >= hierchy.cols()), which never runs;
    // iterating over the contours is presumably what was meant
    for (int i = 0; i < contours.size(); i++) {
        Rect rect = Imgproc.boundingRect(contours.get(i));
        if (rect.height > HTHRESH) {
            Mat binary = new Mat();
            Imgproc.rectangle(binary, new Point(rect.x, rect.y), new Point(rect.x + rect.width - 1, rect.y + rect.height - 1), new Scalar(0, 0, 255), 3);
            cleanedMatList.add(c, binary);
            c++;
        }
    }
    List<MatOfInt> digitsHull = new ArrayList<MatOfInt>();
    for (int i = 0; i < contours.size(); i++) {
        digitsHull.add(new MatOfInt());
    }
    for (int i = 0; i < contours.size(); i++) {
        Imgproc.convexHull(contours.get(i), digitsHull.get(i));
    }
    List<MatOfPoint> digitRegions = new ArrayList<MatOfPoint>();
    // note: digitRegions is empty here, so this loop never executes
    for (int i = 0; i < digitRegions.size(); i++) {
        MatOfPoint dr = digitRegions.get(i);
        dr.push_back(digitsHull.get(i));
    }
    Mat digitsMask = new Mat(og.rows(), og.cols(), CvType.CV_8U);
    Imgproc.drawContours(digitsMask, digitRegions, 0, new Scalar(255, 255, 255), -1);
    Imgproc.morphologyEx(digitsMask, digitsMask, Imgproc.MORPH_DILATE, kernel);
    Mat cleaned = new Mat(og.rows(), og.cols(), CvType.CV_8U);
    dibw8u.copyTo(cleaned, digitsMask);
    return HighGui.toBufferedImage(dibw8u);
}
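On the recognition side, a hedged suggestion that often helps more than further image cleanup: tell Tesseract what it is allowed to see. With Tess4J that looks roughly like this (the datapath, language name and processedImage variable are assumptions):
Tesseract tess = new Tesseract();                               // net.sourceforge.tess4j.Tesseract
tess.setDatapath("/usr/share/tessdata");                        // assumed tessdata location
tess.setLanguage("eng");                                        // or your custom-trained data
tess.setTessVariable("tessedit_char_whitelist", "0123456789%"); // digits and percent sign only
tess.setPageSegMode(7);                                         // PSM 7: treat the image as a single text line
String text = tess.doOCR(processedImage);                       // processedImage: the BufferedImage from purify()
Constraining the character set and the page segmentation mode removes most of the letter/digit confusions (5 vs S, 1 vs l) that jump around between frames.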

OpenCV Java: Compare Bounding Rect's y value, clear unwanted

In the picture below, the application detected multiple "black" regions and drew a bounding rectangle around each of them. Now I want to compare the rect3.tl().y values of the rectangles and keep only the lowest one, deleting the other bounding rectangles, but I'm not sure how to go about doing that.
Code:
Rect rectBlack = new Rect();
Bitmap roiBitmap = null;
Scalar green = new Scalar(0, 255, 0, 255);
Mat sourceMat = new Mat(sourceBitmap.getWidth(), sourceBitmap.getHeight(), CvType.CV_8UC3);
Utils.bitmapToMat(sourceBitmap, sourceMat);
Mat roiTmp = sourceMat.clone();
bitmapWidth = sourceBitmap.getWidth();
Log.e("bitmapWidth", "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
final Mat hsvMat = new Mat();
sourceMat.copyTo(hsvMat);
// convert mat to HSV format for Core.inRange()
Imgproc.cvtColor(hsvMat, hsvMat, Imgproc.COLOR_RGB2HSV);
Scalar lowerb = new Scalar(85, 50, 40);        // lower color border for BLUE
Scalar upperb = new Scalar(135, 255, 255);     // upper color border for BLUE
Scalar lowerblack = new Scalar(0, 0, 0);       // lower color border for BLACK
Scalar upperblack = new Scalar(180, 255, 40);  // upper color border for BLACK
Scalar testRunL = new Scalar(60, 50, 40);      // lower Green 83 100 51
Scalar testRunU = new Scalar(90, 255, 255);    // upper Green
Core.inRange(hsvMat, lowerblack, upperblack, roiTmp); // select only black pixels
// find contours
List<MatOfPoint> contours = new ArrayList<>();
List<RotatedRect> boundingRects = new ArrayList<>();
Imgproc.findContours(roiTmp, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
// find appropriate bounding rectangles
for (MatOfPoint contour : contours) {
    MatOfPoint2f areaPoints = new MatOfPoint2f(contour.toArray());
    RotatedRect boundingRect = Imgproc.minAreaRect(areaPoints);
    double rectangleArea = boundingRect.size.area();
    // test min ROI area in pixels
    if (rectangleArea > 1300) {
        Point rotated_rect_points[] = new Point[4];
        boundingRect.points(rotated_rect_points);
        Rect rect3 = Imgproc.boundingRect(new MatOfPoint(rotated_rect_points));
        // test horizontal ROI orientation
        if (rect3.height > rect3.width) {
            Log.e("w,h", String.valueOf(rect3.width) + " h " + String.valueOf(rect3.height));
            double w = rect3.width;
            double h = rect3.height;
            double ratio = h / w;
            Log.e("h:w ratio", String.valueOf(ratio));
            Log.e("Black Area", String.valueOf(rect3.area()));
            Imgproc.rectangle(sourceMat, rect3.tl(), rect3.br(), green, 3);
            rectBlack = rect3;
            Log.e("blackArea", String.valueOf(rect3.area()));
            xBlack = rect3.br().x;
            xBlackCenter = (rect3.br().x + rect3.tl().x) / 2;
            yBlack = rect3.br().y; // bottom
            battHeight = (rect3.br().y - rect3.tl().y); // battery height in pixels
        }
    }
}
You can create a list of rects:
List<Rect> rects = new ArrayList<>();
then, in your for (MatOfPoint contour : contours) loop, add each found rectangle to that list:
// find appropriate bounding rectangles
for (MatOfPoint contour : contours) {
    ...
    // test horizontal ROI orientation
    if (rect3.height > rect3.width) {
        ...
        rects.add(rect3);
    }
}
then use a method to find the rectangle with the lowest tl().y value, like this:
public static Rect getBottomMostRect(List<Rect> rects) {
    Rect bottomMostRect = null;
    if (rects != null && rects.size() >= 1) {
        Rect rect;
        double minY;
        int ixMinY = 0;
        rect = rects.get(ixMinY);
        minY = rect.tl().y;
        for (int ix = 1; ix < rects.size(); ix++) {
            rect = rects.get(ix);
            if (rect.tl().y < minY) {
                minY = rect.tl().y;
                ixMinY = ix;
            }
        }
        bottomMostRect = rects.get(ixMinY);
    }
    return bottomMostRect;
}
and call it this way:
Rect bottomMostRect = getBottomMostRect(rects);
Or add the getBottomMostRect() logic directly into your for (MatOfPoint contour : contours) loop.
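A hedged alternative to the helper method: with Java 8 streams the same rectangle (the one with the smallest tl().y) can be picked in a single expression (requires java.util.Comparator):
Rect result = rects.stream()
        .min(Comparator.comparingDouble(r -> r.tl().y)) // smallest top-left y value
        .orElse(null);                                  // null when the list is empty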

Comparing an Image to a background image to find an object using OpenCV with Java

I have a project where I am trying to track a person as they move throughout a room. I am using an Arduino, some servo motors and an Xbox Kinect for my camera.
I have a vision of allowing the project some training time where it can scan the room and make a database of images of the empty room. Then, when a person enters the room, the program can do a simple difference image to create a white blob for the person. Using this white blob I would be able to calculate the centre of mass of the person and compare it to the centre of the image frame in order to tell the Arduino how far and in which direction to move the servo motors. I am using Eclipse, writing in Java and using OpenCV 2.4.6.
I am stuck on getting a clear white blob. I have already written my methods to calculate the distance from the centre of mass of the blob to the centre of the frame, but without a clearly defined blob this is useless. I have been trying to get my program to work by taking a snapshot of the background of my room, changing the image to binary, then subtracting it from a binary image of my room with me in it. This has not worked. Is my vision of training the system and then comparing with these trained images valid, or should I go about detecting an object a different way?
I have tried implementing opticalFlow(), but it seems erratic and not very accurate.
Any information on the topic would be extremely helpful. I thank you in advance for reading my question.
-Trent
Edit: I have attached my code. The area in question is the training() and findDiff() methods.
package testingV1;

// OpenCV + OpenNI + Java libraries
import java.awt.FlowLayout;
import java.util.ArrayList;
import java.util.List;
import java.awt.image.BufferedImage;
import java.awt.image.DataBuffer;
import java.awt.image.DataBufferByte;
import java.io.*;
import java.nio.ByteBuffer;
import javax.imageio.ImageIO;
import javax.swing.*;
import org.opencv.core.*;
import org.opencv.imgproc.*;
import org.opencv.objdetect.CascadeClassifier;
import org.opencv.video.BackgroundSubtractorMOG;
import org.opencv.video.Video;
import org.opencv.highgui.*;
import org.opencv.*;
import org.OpenNI.*;

public class TestV1 {
    static int imWidth = 640, imHeight = 480;
    static ImageGenerator imageGen;
    static Context context;
    static int flag = CvType.CV_8UC3;
    static int flag2 = CvType.CV_8UC1;
    static Mat background;

    public static void main(String[] args) throws GeneralException {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // We create a new "context" of the Kinect
        context = new Context();
        JFrame canvas = new JFrame("Optical Flow");
        // need to create and add a license to our "context"
        License license = new License("PrimeSense", "0KOIk2JeIBYClPWVnMoRKn5cdY4=");
        context.addLicense(license);
        // defining the data we are taking from the Kinect
        MapOutputMode mapMode = null; // initialize it to null
        mapMode = new MapOutputMode(imWidth, imHeight, 30); // create a 640x480 30fps feed definition
        imageGen = ImageGenerator.create(context); // RGB camera
        imageGen.setMapOutputMode(mapMode); // change our feed to 640x480 30 fps
        imageGen.setPixelFormat(PixelFormat.RGB24); // pixel format: RGB 8-bit, 3 channels
        context.setGlobalMirror(true); // mirrors our feed to make it more intuitive
        BufferedImage rgbImage = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_INT_RGB);
        BufferedImage prevImg = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_BYTE_GRAY);
        BufferedImage currImg = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_BYTE_GRAY);
        BufferedImage diffImg = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_BYTE_GRAY);
        BufferedImage paintedImg = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_INT_RGB);
        BufferedImage facesImg = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_INT_RGB);
        Mat paintedMat = new Mat(imHeight, imWidth, flag);
        Mat facesMat = new Mat(imHeight, imWidth, flag);
        Mat currMat = new Mat(imHeight, imWidth, flag2);
        Mat prevMat = new Mat(imHeight, imWidth, flag2);
        Mat diffMat = new Mat(imHeight, imWidth, flag2);
        Mat paintedMatg = new Mat(imHeight, imWidth, flag2);
        ByteBuffer imageBB;
        // First frame
        canvas.getContentPane().setLayout(new FlowLayout());
        Icon video = new ImageIcon(rgbImage);
        JLabel panel = new JLabel(video);
        //Icon video2 = new ImageIcon(paintedImg);
        //JLabel panel2 = new JLabel(video2);
        //Icon video3 = new ImageIcon(facesImg);
        //JLabel panel3 = new JLabel(video3);
        Icon video4 = new ImageIcon(diffImg);
        JLabel panel4 = new JLabel(video4);
        canvas.getContentPane().add(panel);
        //canvas.getContentPane().add(panel2);
        //canvas.getContentPane().add(panel3);
        canvas.getContentPane().add(panel4);
        canvas.pack();
        canvas.setVisible(true);
        canvas.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        CascadeClassifier faceDetectorAlg = new CascadeClassifier("C:/Users/Trent/Desktop/Capstone"
                + "/ComputerVisionCode/November16/testingV1/src/testingV1/haarcascade_frontalface_alt.xml");
        boolean firstTime = true;
        imageGen.startGenerating();
        while (true) {
            context.waitOneUpdateAll(imageGen);
            imageBB = imageGen.getImageMap().createByteBuffer(); // get Kinect data
            rgbImage = bufToImage(imageBB); // take data from the Kinect and put it in a BufferedImage
            prevMat = currMat;
            currMat = img2Mat(rgb2Gray(rgbImage));
            if (firstTime) {
                training(rgbImage);
                firstTime = false;
            } else {
                diffMat = findDiff(currMat);
                diffImg = mat2Img(diffMat);
            }
            // optical flow - inaccurate
            //paintedMatg = opticalFlow(img2Mat(prevImg), img2Mat(currImg), 300, 0.01, 10);
            //Imgproc.cvtColor(paintedMatg, paintedMat, Imgproc.COLOR_GRAY2RGB); // change from gray to color
            //paintedImg = mat2Img(paintedMat);
            // face detection - extremely resource intensive
            //facesMat = faceDetector(img2Mat(rgbImage), faceDetectorAlg);
            //facesImg = mat2Img(facesMat);
            panel.setIcon(new ImageIcon(rgbImage));
            //panel2.setIcon(new ImageIcon(paintedImg));
            //panel3.setIcon(new ImageIcon(facesImg));
            panel4.setIcon(new ImageIcon(diffImg));
            canvas.repaint();
            canvas.revalidate();
        }
    }

    // establishes a background for better diff images
    private static void training(BufferedImage in) {
        background = new Mat(imHeight, imWidth, flag2);
        background = img2Mat(rgb2Gray(in));
        System.out.println("Training Complete");
    }

    private static Mat findDiff(Mat in) {
        Mat output = new Mat(imHeight, imWidth, flag2);
        Core.absdiff(background, in, output);
        Imgproc.threshold(output, output, 20, 255, Imgproc.THRESH_BINARY);
        return output;
    }

    // face detection
    private static Mat faceDetector(Mat in, CascadeClassifier Alg) {
        Mat output = in;
        MatOfRect faceDetections = new MatOfRect();
        if (Alg.empty()) {
            System.out.println("didn't load");
            return output;
        }
        Alg.detectMultiScale(in, faceDetections);
        for (Rect rect : faceDetections.toArray()) {
            Core.rectangle(output, new Point(rect.x, rect.y),
                    new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(0, 255, 0), 2);
        }
        return output;
    }

    // returns an image with vectors painted to show movement
    private static Mat opticalFlow(Mat curr, Mat prev, int maxDetectionCount, double qualityLevel, double minDistance) {
        List<MatOfPoint2f> trackedPoints = new ArrayList<MatOfPoint2f>();
        MatOfPoint initial = new MatOfPoint();
        MatOfFloat err = new MatOfFloat();
        MatOfByte status = new MatOfByte();
        MatOfPoint2f initial2f = new MatOfPoint2f();
        MatOfPoint2f next2f = new MatOfPoint2f();
        double[] temp;
        Point p1 = new Point();
        Point p2 = new Point();
        Mat output = new Mat(imHeight, imWidth, flag);
        Scalar red = new Scalar(255, 0, 0);
        // finds tracking points
        if (trackedPoints.size() < 1) {
            Imgproc.goodFeaturesToTrack(curr, initial, maxDetectionCount, qualityLevel, minDistance);
            initial.convertTo(initial2f, CvType.CV_32FC2);
            trackedPoints.add(initial2f);
        }
        // catches the first frame
        if (prev.empty())
            curr.copyTo(prev);
        // find points in the current image
        if (trackedPoints.get(0).total() > 0) {
            Video.calcOpticalFlowPyrLK(prev, curr, trackedPoints.get(0), next2f, status, err);
            trackedPoints.add(next2f);
        }
        output = curr;
        // draw red lines
        for (int i = 0; i < trackedPoints.get(0).cols(); i++) {
            for (int j = 0; j < trackedPoints.get(0).rows(); j++) {
                temp = trackedPoints.get(0).get(j, i);
                p1.set(temp);
                temp = trackedPoints.get(1).get(j, i);
                p2.set(temp);
                Core.line(output, p1, p2, red);
            }
        }
        return output;
    }

    // returns a vector indicating the magnitude of movement
    private static double[] opticalFlowAnalysis(Mat curr, Mat prev, int maxDetectionCount, double qualityLevel, double minDistance) {
        List<MatOfPoint2f> trackedPoints = new ArrayList<MatOfPoint2f>();
        MatOfPoint initial = new MatOfPoint();
        MatOfFloat err = new MatOfFloat();
        MatOfByte status = new MatOfByte();
        MatOfPoint2f initial2f = new MatOfPoint2f();
        MatOfPoint2f next2f = new MatOfPoint2f();
        double[] total = new double[2];
        total[0] = 0;
        total[1] = 0;
        double[] point1;
        double[] point2;
        double[] output = new double[2];
        // finds tracking points
        if (trackedPoints.size() < 1) {
            Imgproc.goodFeaturesToTrack(curr, initial, maxDetectionCount, qualityLevel, minDistance);
            initial.convertTo(initial2f, CvType.CV_32FC2);
            trackedPoints.add(initial2f);
        }
        // catches the first frame
        if (prev.empty())
            curr.copyTo(prev);
        // find points in the current image
        if (trackedPoints.get(0).total() > 0) {
            Video.calcOpticalFlowPyrLK(prev, curr, trackedPoints.get(0), next2f, status, err);
            trackedPoints.add(next2f);
        }
        // average the distance moved
        // (-) signifies distance moved right and down
        // (+) signifies distance moved left and up
        for (int i = 0; i < trackedPoints.get(0).cols(); i++) {
            for (int j = 0; j < trackedPoints.get(0).rows(); j++) {
                point1 = trackedPoints.get(0).get(j, i);
                point2 = trackedPoints.get(1).get(j, i);
                total[0] += point1[0] - point2[0];
                total[1] += point1[1] - point2[1]; // fixed: the original subtracted point2[0] (an x value) here
            }
        }
        output[0] = total[0] / trackedPoints.get(0).cols();
        output[1] = total[1] / trackedPoints.get(0).rows();
        return output;
    }

    private static Mat img2Mat(BufferedImage in) {
        Mat out;
        byte[] data;
        int r, g, b;
        if (in.getType() == BufferedImage.TYPE_INT_RGB) {
            out = new Mat(imHeight, imWidth, flag);
            data = new byte[imWidth * imHeight * (int) out.elemSize()];
            int[] dataBuff = in.getRGB(0, 0, imWidth, imHeight, null, 0, imWidth);
            for (int i = 0; i < dataBuff.length; i++) {
                data[i * 3] = (byte) ((dataBuff[i] >> 16) & 0xFF);
                data[i * 3 + 1] = (byte) ((dataBuff[i] >> 8) & 0xFF);
                data[i * 3 + 2] = (byte) ((dataBuff[i] >> 0) & 0xFF);
            }
        } else {
            out = new Mat(imHeight, imWidth, flag2);
            data = new byte[imWidth * imHeight * (int) out.elemSize()];
            int[] dataBuff = in.getRGB(0, 0, imWidth, imHeight, null, 0, imWidth);
            for (int i = 0; i < dataBuff.length; i++) {
                r = (byte) ((dataBuff[i] >> 16) & 0xFF);
                g = (byte) ((dataBuff[i] >> 8) & 0xFF);
                b = (byte) ((dataBuff[i] >> 0) & 0xFF);
                data[i] = (byte) ((0.21 * r) + (0.71 * g) + (0.07 * b)); // luminosity
            }
        }
        out.put(0, 0, data);
        return out;
    }

    private static BufferedImage mat2Img(Mat in) {
        BufferedImage out;
        byte[] data = new byte[imWidth * imHeight * (int) in.elemSize()];
        int type;
        in.get(0, 0, data);
        if (in.channels() == 1)
            type = BufferedImage.TYPE_BYTE_GRAY;
        else
            type = BufferedImage.TYPE_3BYTE_BGR;
        out = new BufferedImage(imWidth, imHeight, type);
        out.getRaster().setDataElements(0, 0, imWidth, imHeight, data);
        return out;
    }

    private static BufferedImage rgb2Gray(BufferedImage in) {
        BufferedImage out = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_BYTE_GRAY);
        Mat color = new Mat(imHeight, imWidth, flag);
        Mat gray = new Mat(imHeight, imWidth, flag);
        color = img2Mat(in); // converting BufferedImage to Mat
        Imgproc.cvtColor(color, gray, Imgproc.COLOR_RGB2GRAY); // change from color to grayscale
        out = mat2Img(gray); // converting Mat to BufferedImage
        return out;
    }

    // converts a ByteBuffer to a BufferedImage
    private static BufferedImage bufToImage(ByteBuffer pixelsRGB) {
        int[] pixelInts = new int[imWidth * imHeight];
        int rowStart = 0;
        int bbIdx; // index into the ByteBuffer
        int i = 0; // index into pixels
        int rowLen = imWidth * 3;
        for (int row = 0; row < imHeight; row++) {
            bbIdx = rowStart;
            for (int col = 0; col < imWidth; col++) {
                int pixR = pixelsRGB.get(bbIdx++);
                int pixG = pixelsRGB.get(bbIdx++);
                int pixB = pixelsRGB.get(bbIdx++);
                pixelInts[i++] = 0xFF000000 | ((pixR & 0xFF) << 16) | ((pixG & 0xFF) << 8) | (pixB & 0xFF);
            }
            rowStart += rowLen; // move to the next row
        }
        BufferedImage im = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_INT_RGB);
        im.setRGB(0, 0, imWidth, imHeight, pixelInts, 0, imWidth);
        return im;
    }
}
This answer is a bit late, but it may help for future reference.
To learn about object detection you can look here and at his code here (I learned from it to do my project), and then I made my project like this, based on his object detection. Or you can look at a simple background subtraction here.
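For completeness, since the question targets OpenCV 2.4.6 and the code already imports it, a minimal background-subtraction sketch with BackgroundSubtractorMOG; the learning rate and kernel size are assumptions to tune, and currMat is the grayscale frame from the code above:
BackgroundSubtractorMOG mog = new BackgroundSubtractorMOG();
Mat fgMask = new Mat();
// feed every frame; the model keeps adapting, so a separate training phase becomes optional
mog.apply(currMat, fgMask, 0.01);                                 // learningRate 0.01 is an assumption
Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
Imgproc.morphologyEx(fgMask, fgMask, Imgproc.MORPH_OPEN, kernel); // clean up speckle around the blob
Moments m = Imgproc.moments(fgMask, true);                        // centre of mass of the white blob
Point centroid = new Point(m.get_m10() / m.get_m00(), m.get_m01() / m.get_m00());
The centroid can then be compared to the centre of the frame to drive the servos, exactly as described in the question.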
