OpenCV detecting drilled holes - java

I'm working on a project where I have to detect drilled holes on a surface (the top two holes are there for orientation purposes only).
After the holes are detected, the pattern logic will judge their placement and give results. I have created an overlay grid layout and placed it over the Camera2 API preview so the user can align the holes and scan. (The real testing will not be done on a picture of an LCD as shown in the screenshot.)
Currently, I'm cropping the image based on the grid and resizing it to 1920x2560 to have a consistent frame for pattern judgement, which makes a single grid cell roughly 300 px. I am unable to detect the blobs. Can someone suggest what sort of filtering I should use for this, and whether there is a better approach than the grid layout? The placement of the holes relative to the orientation holes matters for the final results (on both the x and y axes).
Here is my code:
Mat srcMat = resizeAndCropMatToGrid(mats[0]);
if (srcMat == null) {
    exception = new Exception("Cropping Failed");
    errorMessage = "Unable to crop image based on grid";
    return null;
}
matProgressTask = srcMat;

// grayscale -> blur -> fixed binary threshold before blob detection
Mat processedMat = new Mat();
Imgproc.cvtColor(srcMat, processedMat, Imgproc.COLOR_BGR2GRAY);
Imgproc.GaussianBlur(processedMat, processedMat, new org.opencv.core.Size(5, 5), 5);
Imgproc.threshold(processedMat, processedMat, 115, 255, Imgproc.THRESH_BINARY);
matProgressTask = processedMat;

// SimpleBlobDetector parameters are loaded from an XML file on external storage
FeatureDetector featureDetector = FeatureDetector.create(FeatureDetector.SIMPLEBLOB);
featureDetector.read(Environment.getExternalStorageDirectory() + "/Android/blob.xml");
MatOfKeyPoint matOfKeyPoint = new MatOfKeyPoint();
featureDetector.detect(processedMat, matOfKeyPoint);
KeyPoint[] keyPointsArray = matOfKeyPoint.toArray();
Log.e("keypoints", "" + Arrays.toString(keyPointsArray));

if (keyPointsArray.length < 1) {
    exception = new Exception("Blobs Missing");
    errorMessage = "Error: Unable to filter blobs";
} else {
    try {
        MatOfKeyPoint matOfKeyPointFilteredBlobs = new MatOfKeyPoint(keyPointsArray);
        Features2d.drawKeypoints(srcMat, matOfKeyPointFilteredBlobs, srcMat, new Scalar(255, 0, 0), Features2d.DRAW_OVER_OUTIMG);
    } catch (Exception e) {
        e.printStackTrace();
        exception = e;
        errorMessage = "Error: Unable to draw Blobs";
        return null;
    }
    matProgressTask = srcMat;
    onProgressUpdate();
    patterData = pinpointBlobsToGetData(keyPointsArray);
    if (patterData == null) {
        exception = new Exception("Unable to establish pattern");
        errorMessage = "Error: Key points array is null";
    }
}
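For context, resizeAndCropMatToGrid just crops to the grid region and resizes; roughly like this (a simplified sketch, not the exact code; computeGridRect is a stand-in for however the grid overlay is mapped to image coordinates):

// Hypothetical sketch only -- the real helper is not shown above.
private Mat resizeAndCropMatToGrid(Mat src) {
    // gridRect is assumed to be the grid overlay region mapped into image coordinates
    Rect gridRect = computeGridRect(src); // placeholder, not a method from the project
    if (gridRect == null || src.empty()) {
        return null;
    }
    Mat cropped = new Mat(src, gridRect);
    Mat resized = new Mat();
    // 1920x2560 gives the consistent frame used for the pattern judgement step
    Imgproc.resize(cropped, resized, new org.opencv.core.Size(1920, 2560));
    return resized;
}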
And here is the blob.xml configuration that I'm using:
<?xml version="1.0"?>
<opencv_storage>
<format>3</format>
<thresholdStep>10.</thresholdStep>
<minThreshold>50.</minThreshold>
<maxThreshold>120.</maxThreshold>
<minRepeatability>2</minRepeatability>
<minDistBetweenBlobs>20.</minDistBetweenBlobs>
<filterByColor>1</filterByColor>
<blobColor>0</blobColor>
<filterByArea>1</filterByArea>
<minArea>2300.</minArea>
<maxArea>4500.</maxArea>
<filterByCircularity>1</filterByCircularity>
<minCircularity>0.2</minCircularity>
<maxCircularity>1.0</maxCircularity>
<filterByInertia>1</filterByInertia>
<minInertiaRatio>0.2</minInertiaRatio>
<maxInertiaRatio>1.0</maxInertiaRatio>
<filterByConvexity>1</filterByConvexity>
<minConvexity>0.2</minConvexity>
<maxConvexity>1.0</maxConvexity>
</opencv_storage>

I am using Python.
For the second image you provided, I successfully detected the holes using this code:
import cv2
import numpy as np

img = cv2.imread("C:\\Users\\Link\\Desktop\\2.jpg")
# cv2.imshow("original", img)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# cv2.imshow("gray", gray)

# a large median blur suppresses surface texture while keeping the holes intact
blur = cv2.medianBlur(gray, 31)
# cv2.imshow("blur", blur)

# THRESH_OTSU picks the threshold automatically, so the fixed 127 is ignored
ret, thresh = cv2.threshold(blur, 127, 255, cv2.THRESH_OTSU)
# cv2.imshow("thresh", thresh)

canny = cv2.Canny(thresh, 75, 200)
# cv2.imshow('canny', canny)

# OpenCV 3.x findContours returns three values
im2, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

contour_list = []
for contour in contours:
    approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)
    area = cv2.contourArea(contour)
    if 5000 < area < 15000:
        contour_list.append(contour)

# Canny edges with RETR_TREE yield an inner and an outer contour per hole,
# so halve the count to get the number of holes
msg = "Total holes: {}".format(len(contour_list) // 2)
cv2.putText(img, msg, (20, 40), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2, cv2.LINE_AA)

cv2.drawContours(img, contour_list, -1, (0, 255, 0), 2)
cv2.imshow('Objects Detected', img)
cv2.imwrite("detected_holes.png", img)
cv2.waitKey(0)
Now, the first image is a bit different. The same code does not detect the right number of holes: the program also detects what is clearly not a hole (the crack in the bottom-left corner), while missing some of the main holes.
Here is an example of what I am talking about:
Not only is the counter wrong in that case, but, the main problem, the hole at the bottom right can't be detected.

So, I have managed to fix it by passing the Mat directly to the FeatureDetector class without any prior processing. Presumably this works because SimpleBlobDetector already thresholds the image internally over a range (from minThreshold to maxThreshold in thresholdStep increments), so the fixed binary threshold I was applying beforehand was destroying the blob shapes before detection.
Mat srcMat = mats[0]; // the Mat is passed straight to the detector, no cropping or filtering
if (srcMat == null) {
    exception = new Exception("Cropping Failed");
    errorMessage = "Unable to crop image based on grid";
    return null;
}
matProgressTask = srcMat;

FeatureDetector featureDetector = FeatureDetector.create(FeatureDetector.SIMPLEBLOB);
featureDetector.read(Environment.getExternalStorageDirectory() + "/Android/blob.xml");
Log.e("LoadingBlob", "blob parameters loaded");
MatOfKeyPoint matOfKeyPoint = new MatOfKeyPoint();
featureDetector.detect(srcMat, matOfKeyPoint);
KeyPoint[] keyPointsArray = matOfKeyPoint.toArray();
Log.e("keypoints", "" + Arrays.toString(keyPointsArray));

if (keyPointsArray.length < 1) {
    exception = new Exception("Blobs Missing");
    errorMessage = "Error: Unable to filter blobs";
} else {
    try {
        MatOfKeyPoint matOfKeyPointFilteredBlobs = new MatOfKeyPoint(keyPointsArray);
        Features2d.drawKeypoints(srcMat, matOfKeyPointFilteredBlobs, srcMat, new Scalar(0, 255, 0), Features2d.DRAW_OVER_OUTIMG);
    } catch (Exception e) {
        e.printStackTrace();
        exception = e;
        errorMessage = "Error: Unable to draw Blobs";
        return null;
    }
    matProgressTask = srcMat;
    onProgressUpdate();
    patterData = pinpointBlobsToGetData(keyPointsArray);
    if (patterData == null) {
        exception = new Exception("Unable to establish pattern");
        errorMessage = "Error: Key points array is null";
    }
}
And my feature detector parameters file is:
<?xml version="1.0"?>
<opencv_storage>
<format>3</format>
<thresholdStep>10.</thresholdStep>
<minThreshold>50.</minThreshold>
<maxThreshold>120.</maxThreshold>
<minRepeatability>2</minRepeatability>
<minDistBetweenBlobs>20.</minDistBetweenBlobs>
<filterByColor>0</filterByColor>
<blobColor>0</blobColor>
<filterByArea>1</filterByArea>
<minArea>3000.</minArea>
<maxArea>10000.</maxArea>
<filterByCircularity>1</filterByCircularity>
<minCircularity>0.3</minCircularity>
<maxCircularity>1.0</maxCircularity>
<filterByInertia>1</filterByInertia>
<minInertiaRatio>0.3</minInertiaRatio>
<maxInertiaRatio>1.0</maxInertiaRatio>
<filterByConvexity>1</filterByConvexity>
<minConvexity>0.3</minConvexity>
<maxConvexity>1.0</maxConvexity>
</opencv_storage>
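For context, pinpointBlobsToGetData locates the two orientation holes and expresses every other hole relative to them; roughly like this (a simplified sketch, not the exact code; all names and the return type are placeholders):

// Hypothetical sketch -- the real pinpointBlobsToGetData is not shown here.
private List<Point> pinpointBlobsToGetData(KeyPoint[] keyPoints) {
    if (keyPoints.length < 3) {
        return null; // need the two orientation holes plus at least one data hole
    }
    List<KeyPoint> sorted = new ArrayList<>(Arrays.asList(keyPoints));
    // the two topmost blobs are assumed to be the orientation holes
    Collections.sort(sorted, new Comparator<KeyPoint>() {
        @Override
        public int compare(KeyPoint a, KeyPoint b) {
            return Double.compare(a.pt.y, b.pt.y);
        }
    });
    Point left = sorted.get(0).pt.x < sorted.get(1).pt.x ? sorted.get(0).pt : sorted.get(1).pt;
    Point right = (left == sorted.get(0).pt) ? sorted.get(1).pt : sorted.get(0).pt;
    // express every remaining hole relative to the left orientation hole,
    // normalised by the span between the two orientation holes
    double span = right.x - left.x;
    List<Point> relative = new ArrayList<>();
    for (int i = 2; i < sorted.size(); i++) {
        Point p = sorted.get(i).pt;
        relative.add(new Point((p.x - left.x) / span, (p.y - left.y) / span));
    }
    return relative;
}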
The result images:

Related

Java OpenCV - Using knnMatch with findHomography shows duplicates

I am new to OpenCV Java and I have an Android app that matches two images using the ORB FeatureDetector and DescriptorExtractor. I use the DescriptorMatcher BRUTEFORCE_HAMMING. The matcher works most of the time, but other times it shows duplicate keypoints. When the image of the scene is too bright or too dark, it shows duplicate keypoints, which is not what I want.
The image that matches accurately:
The image with bad matches:
try {
    bmpObjToRecognize = bmpObjToRecognize.copy(Bitmap.Config.ARGB_8888, true);
    bmpScene = bmpScene.copy(Bitmap.Config.ARGB_8888, true);
    img1 = new Mat();
    img2 = new Mat();
    Utils.bitmapToMat(bmpObjToRecognize, img1);
    Utils.bitmapToMat(bmpScene, img2);
    Imgproc.cvtColor(img1, img1, Imgproc.COLOR_RGBA2GRAY);
    Imgproc.cvtColor(img2, img2, Imgproc.COLOR_RGBA2GRAY);
    Imgproc.equalizeHist(img1, img1);
    Imgproc.equalizeHist(img2, img2);

    detector = FeatureDetector.create(FeatureDetector.ORB);
    descExtractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
    matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);

    keypoints1 = new MatOfKeyPoint();
    keypoints2 = new MatOfKeyPoint();
    descriptors = new Mat();
    dupDescriptors = new Mat();

    detector.detect(img1, keypoints1);
    Log.d("LOG!", "number of query Keypoints= " + keypoints1.size());
    detector.detect(img2, keypoints2);
    Log.d("LOG!", "number of dup Keypoints= " + keypoints2.size());

    // compute descriptors for both keypoint sets
    descExtractor.compute(img1, keypoints1, descriptors);
    descExtractor.compute(img2, keypoints2, dupDescriptors);

    // matching descriptors
    // note: the 4th argument is k, the number of neighbours
    // (DescriptorMatcher.BRUTEFORCE happens to equal 2 here)
    List<MatOfDMatch> knnMatches = new ArrayList<>();
    matcher.knnMatch(descriptors, dupDescriptors, knnMatches, DescriptorMatcher.BRUTEFORCE);
    goodMatches = new ArrayList<>();
    knnMatchesValue = knnMatches.size();
    Log.i("xxx", "xxx match count knnMatches = " + knnMatches.size());

    // Lowe's ratio test to keep only distinctive matches
    for (int i = 0; i < knnMatches.size(); i++) {
        if (knnMatches.get(i).rows() > 1) {
            DMatch[] matches = knnMatches.get(i).toArray();
            if (matches[0].distance < 0.89f * matches[1].distance) {
                goodMatches.add(matches[0]);
            }
        }
    }

    // get keypoint coordinates of good matches to find homography and remove outliers using ransac
    List<Point> pts1 = new ArrayList<>();
    List<Point> pts2 = new ArrayList<>();
    for (int i = 0; i < goodMatches.size(); i++) {
        Point destinationPoint = keypoints2.toList().get(goodMatches.get(i).trainIdx).pt;
        pts1.add(keypoints1.toList().get(goodMatches.get(i).queryIdx).pt);
        pts2.add(destinationPoint);
    }

    // conversion of data types - there is maybe a more beautiful way
    Mat outputMask = new Mat();
    MatOfPoint2f pts1Mat = new MatOfPoint2f();
    pts1Mat.fromList(pts1);
    MatOfPoint2f pts2Mat = new MatOfPoint2f();
    pts2Mat.fromList(pts2);

    // Find homography - here just used to perform match filtering with RANSAC, but could be used to e.g. stitch images
    // the smaller the allowed reprojection error (here 15), the more matches are filtered
    Mat Homog = Calib3d.findHomography(pts1Mat, pts2Mat, Calib3d.RANSAC, 15, outputMask, 2000, 0.995);

    // outputMask contains zeros and ones indicating which matches are filtered
    better_matches = new LinkedList<>();
    for (int i = 0; i < goodMatches.size(); i++) {
        if (outputMask.get(i, 0)[0] != 0.0) {
            better_matches.add(goodMatches.get(i));
        }
    }

    matches_final_mat = new MatOfDMatch();
    matches_final_mat.fromList(better_matches);

    imgOutputMat = new Mat();
    MatOfByte drawnMatches = new MatOfByte();
    Features2d.drawMatches(img1, keypoints1, img2, keypoints2, matches_final_mat,
            imgOutputMat, GREEN, RED, drawnMatches, Features2d.NOT_DRAW_SINGLE_POINTS);
    bmp = Bitmap.createBitmap(imgOutputMat.cols(), imgOutputMat.rows(), Bitmap.Config.ARGB_8888);
    Imgproc.cvtColor(imgOutputMat, imgOutputMat, Imgproc.COLOR_BGR2RGB);
    Utils.matToBitmap(imgOutputMat, bmp);
    List<DMatch> betterMatchesList = matches_final_mat.toList();
    final int matchesFound = betterMatchesList.size();
} catch (Exception e) {
    e.printStackTrace();
}
Is there a part of the code that I am missing?
TL;DR: Use the class BFMatcher and its create method explicitly; then you are able to set the crossCheck flag to true. This will enable the "vice versa check" you want.
To cite the OpenCV documentation of knnMatch and its header:
Finds the k best matches for each descriptor from a query set.
knnMatch(InputArray queryDescriptors, InputArray trainDescriptors, ...)
So this means that it is possible for more than one of the "query descriptors" to match the same descriptor in the "training set". It just gives you the k best, and if there are more query descriptors than training descriptors, you will inevitably get duplicates. Especially when you have almost no features, and therefore almost no descriptors, in the training image/set (due to the lack of any texture, e.g. your dark input), that will be the case.
If you want to get rid of your duplicates, set the crossCheck flag of the BFMatcher to true. Otherwise (i.e. with another matcher) you would need to go through your matches, group them by their respective training descriptors, and remove all but the one with the smallest distance.
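For example (a minimal sketch assuming the OpenCV 3.x Java bindings, where BFMatcher.create(normType, crossCheck) is available, reusing descriptors and dupDescriptors from your code):

// crossCheck = true keeps a match (i, j) only if j's best match is i as well,
// which is exactly the "vice versa check"
BFMatcher bfMatcher = BFMatcher.create(Core.NORM_HAMMING, true);

// with crossCheck enabled, use match() rather than knnMatch() with k > 1:
// cross-checking already leaves at most one candidate per query descriptor
MatOfDMatch crossMatches = new MatOfDMatch();
bfMatcher.match(descriptors, dupDescriptors, crossMatches);

Note that the ratio test and cross-checking are alternatives: the ratio test needs k = 2 nearest neighbours, which a cross-checking matcher does not return.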

OpenCV on Android: net.forward yields "215 Assertion failed"

I am following this tutorial from OpenCV, and it should be straightforward. However, it crashes with an assertion failure on net.forward that I cannot resolve and cannot find anywhere else.
I thought this problem seemed similar and tried to go through the fix/problem-finding there. However, restarting the discussion and the trials showed it is likely not the same issue. I initially used 3.4.3, which somehow did not support the same Mat type. I have now updated to 3.4.7 and can confirm the blob size is okay (generated from the image). I also tried various other prototxt and caffemodel files, but by now I doubt the problem lies there (it works if the files are okay; otherwise the net loading fails). The key code should be this:
// Load a network.
public void onCameraViewStarted(int width, int height) {
    String proto = getPath("deploy.prototxt", this);
    String weights = getPath("MobileNetSSD_deploy.caffemodel", this);
    net = Dnn.readNetFromCaffe(proto, weights);
    Log.i(TAG, "Network loaded successfully");
}

public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    // Get a new frame
    Mat frame = inputFrame.rgba();
    Imgproc.cvtColor(frame, frame, Imgproc.COLOR_RGBA2RGB);

    // Forward image through network.
    Mat blob = Dnn.blobFromImage(frame, 0.007843,
            new Size(300, 300),
            new Scalar(127.5, 127.5, 127.5));
    net.setInput(blob);
    Mat detections = net.forward(); // ***215 ASSERTION FAILED occurs***

    int cols = frame.cols();
    int rows = frame.rows();
    detections = detections.reshape(1, (int) detections.total() / 7);
    for (int i = 0; i < detections.rows(); ++i) {
        double confidence = detections.get(i, 2)[0];
        if (confidence > 0.2) {
            int classId = (int) detections.get(i, 1)[0];
            int left = (int) (detections.get(i, 3)[0] * cols);
            int top = (int) (detections.get(i, 4)[0] * rows);
            int right = (int) (detections.get(i, 5)[0] * cols);
            int bottom = (int) (detections.get(i, 6)[0] * rows);
            // Draw rectangle around detected object.
            Imgproc.rectangle(frame, new Point(left, top), new Point(right, bottom),
                    new Scalar(0, 255, 0));
            String label = classNames[classId] + ": " + confidence;
            int[] baseLine = new int[1];
            Size labelSize = Imgproc.getTextSize(label, Core.FONT_HERSHEY_SIMPLEX, 0.5, 1, baseLine);
            // Draw background for label.
            Imgproc.rectangle(frame, new Point(left, top - labelSize.height),
                    new Point(left + labelSize.width, top + baseLine[0]),
                    new Scalar(255, 255, 255), Core.FILLED);
            // Write class name and confidence.
            Imgproc.putText(frame, label, new Point(left, top),
                    Core.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(0, 0, 0));
        }
    }
    return frame;
}

public void onCameraViewStopped() {}

// Copy a file from assets to internal storage and return its path.
private static String getPath(String file, Context context) {
    AssetManager assetManager = context.getAssets();
    BufferedInputStream inputStream = null;
    try {
        // Read data from assets.
        inputStream = new BufferedInputStream(assetManager.open(file));
        byte[] data = new byte[inputStream.available()];
        inputStream.read(data);
        inputStream.close();
        // Create copy file in storage.
        File outFile = new File(context.getFilesDir(), file);
        FileOutputStream os = new FileOutputStream(outFile);
        os.write(data);
        os.close();
        // Return a path to file which may be read in common way.
        return outFile.getAbsolutePath();
    } catch (IOException ex) {
        Log.i(TAG, "Failed to upload a file");
    }
    return "";
}
The full error message is
cv::Exception: OpenCV(3.4.7) /build/3_4_pack-android/opencv/modules/dnn/src/layers/batch_norm_layer.cpp:39: error: (-215:Assertion failed) blobs.size() >= 2 in function 'cv::dnn::BatchNormLayerImpl::BatchNormLayerImpl(const cv::dnn::experimental_dnn_34_v13::LayerParams&)'
I expect it not to crash. The frame should be okay (image loaded), the net is not empty, and the layers in the net seem fine too (checked, since there are some differences using Caffe in Java). Any help is appreciated!
After some days of research in different directions, I found the problem: the frame format should be BGR, not RGB! That means:
Imgproc.cvtColor(frame, frame, Imgproc.COLOR_RGBA2BGR);
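Alternatively, blobFromImage has an overload with a swapRB flag, so the channel swap can happen during blob creation (a sketch, assuming the frame was converted to RGB as in the original code):

// convert the RGBA preview frame to RGB first, as before
Imgproc.cvtColor(frame, frame, Imgproc.COLOR_RGBA2RGB);
// swapRB = true converts the RGB frame to the BGR order the Caffe model expects
Mat blob = Dnn.blobFromImage(frame, 0.007843,
        new Size(300, 300),
        new Scalar(127.5, 127.5, 127.5),
        /* swapRB = */ true, /* crop = */ false);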

Java char recognition with OpenCV

I am trying to build an application that, using a camera, can recognize some numbers and letters from a board in front of the camera.
At the moment I can detect faces and contours, but I want to use an ROI and Tess4J to recognize this live video.
Do you know any example of something like this?
My idea is that I have to analyze frame by frame, and when I detect a char like '*' I run the full verification of the values.
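Something like this gate is what I have in mind (just a sketch; the whitelist and the conversion to BufferedImage are assumptions, not working code):

// cheap per-frame OCR pass; only run the expensive full verification
// once the trigger character '*' shows up in the frame
boolean shouldVerify(BufferedImage frame) throws TesseractException {
    ITesseract ocr = new Tesseract();
    ocr.setTessVariable("tessedit_char_whitelist", "*0123456789");
    String text = ocr.doOCR(frame);
    return text != null && text.contains("*");
}

Here is my current code: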
public class Demo {
    public static void main(String[] args) throws Exception {
        String classifierName = null;
        if (args.length > 0) {
            classifierName = args[0];
        } else {
            URL url = new URL("https://raw.github.com/Itseez/opencv/2.4.0/data/haarcascades/haarcascade_frontalface_alt.xml");
            File file = Loader.extractResource(url, null, "classifier", ".xml");
            file.deleteOnExit();
            classifierName = file.getAbsolutePath();
        }

        // Preload the opencv_objdetect module to work around a known bug.
        Loader.load(opencv_objdetect.class);

        // We can "cast" Pointer objects by instantiating a new object of the desired class.
        CvHaarClassifierCascade classifier = new CvHaarClassifierCascade(cvLoad(classifierName));
        if (classifier.isNull()) {
            System.err.println("Error loading classifier file \"" + classifierName + "\".");
            System.exit(1);
        }

        // The available FrameGrabber classes include OpenCVFrameGrabber (opencv_videoio),
        // DC1394FrameGrabber, FlyCaptureFrameGrabber, OpenKinectFrameGrabber, OpenKinect2FrameGrabber,
        // RealSenseFrameGrabber, PS3EyeFrameGrabber, VideoInputFrameGrabber, and FFmpegFrameGrabber.
        FrameGrabber grabber = FrameGrabber.createDefault(0);
        grabber.start();

        // CanvasFrame, FrameGrabber, and FrameRecorder use Frame objects to communicate image data.
        // We need a FrameConverter to interface with other APIs (Android, Java 2D, or OpenCV).
        OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage();

        // FAQ about IplImage and Mat objects from OpenCV:
        // - For custom raw processing of data, createBuffer() returns an NIO direct
        //   buffer wrapped around the memory pointed by imageData, and under Android we can
        //   also use that Buffer with Bitmap.copyPixelsFromBuffer() and copyPixelsToBuffer().
        // - To get a BufferedImage from an IplImage, or vice versa, we can chain calls to
        //   Java2DFrameConverter and OpenCVFrameConverter, one after the other.
        // - Java2DFrameConverter also has static copy() methods that we can use to transfer
        //   data more directly between BufferedImage and IplImage or Mat via Frame objects.
        IplImage grabbedImage = converter.convert(grabber.grab());
        int width = grabbedImage.width();
        int height = grabbedImage.height();
        IplImage grayImage = IplImage.create(width, height, IPL_DEPTH_8U, 1);
        IplImage rotatedImage = grabbedImage.clone();

        // Objects allocated with a create*() or clone() factory method are automatically released
        // by the garbage collector, but may still be explicitly released by calling release().
        // You shall NOT call cvReleaseImage(), cvReleaseMemStorage(), etc. on objects allocated this way.
        CvMemStorage storage = CvMemStorage.create();

        // The OpenCVFrameRecorder class simply uses the CvVideoWriter of opencv_videoio,
        // but FFmpegFrameRecorder also exists as a more versatile alternative.
        FrameRecorder recorder = FrameRecorder.createDefault("output.avi", width, height);
        recorder.start();

        // CanvasFrame is a JFrame containing a Canvas component, which is hardware accelerated.
        // It can also switch into full-screen mode when called with a screenNumber.
        // We should also specify the relative monitor/camera response for proper gamma correction.
        CanvasFrame frame = new CanvasFrame("Some Title", CanvasFrame.getDefaultGamma() / grabber.getGamma());

        // Let's create some random 3D rotation...
        CvMat randomR = CvMat.create(3, 3), randomAxis = CvMat.create(3, 1);
        // We can easily and efficiently access the elements of matrices and images
        // through an Indexer object with the set of get() and put() methods.
        DoubleIndexer Ridx = randomR.createIndexer(), axisIdx = randomAxis.createIndexer();
        axisIdx.put(0, (Math.random() - 0.5) / 4, (Math.random() - 0.5) / 4, (Math.random() - 0.5) / 4);
        cvRodrigues2(randomAxis, randomR, null);
        double f = (width + height) / 2.0;
        Ridx.put(0, 2, Ridx.get(0, 2) * f);
        Ridx.put(1, 2, Ridx.get(1, 2) * f);
        Ridx.put(2, 0, Ridx.get(2, 0) / f);
        Ridx.put(2, 1, Ridx.get(2, 1) / f);
        System.out.println(Ridx);

        // We can allocate native arrays using constructors taking an integer as argument.
        CvPoint hatPoints = new CvPoint(3);

        while (frame.isVisible() && (grabbedImage = converter.convert(grabber.grab())) != null) {
            cvClearMemStorage(storage);

            // Let's try to detect some faces! but we need a grayscale image...
            cvCvtColor(grabbedImage, grayImage, CV_BGR2GRAY);
            CvSeq faces = cvHaarDetectObjects(grayImage, classifier, storage,
                    1.1, 3, CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_DO_ROUGH_SEARCH);
            int total = faces.total();
            for (int i = 0; i < total; i++) {
                CvRect r = new CvRect(cvGetSeqElem(faces, i));
                int x = r.x(), y = r.y(), w = r.width(), h = r.height();
                cvRectangle(grabbedImage, cvPoint(x, y), cvPoint(x + w, y + h), CvScalar.RED, 1, CV_AA, 0);
                // To access or pass as argument the elements of a native array, call position() before.
                hatPoints.position(0).x(x - w / 10).y(y - h / 10);
                hatPoints.position(1).x(x + w * 11 / 10).y(y - h / 10);
                hatPoints.position(2).x(x + w / 2).y(y - h / 2);
                cvFillConvexPoly(grabbedImage, hatPoints.position(0), 3, CvScalar.GREEN, CV_AA, 0);
            }

            // Let's find some contours! but first some thresholding...
            cvThreshold(grayImage, grayImage, 64, 255, CV_THRESH_BINARY);

            // To check if an output argument is null we may call either isNull() or equals(null).
            CvSeq contour = new CvSeq(null);
            cvFindContours(grayImage, storage, contour, Loader.sizeof(CvContour.class),
                    CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
            // track whether any contour was found, since the loop below
            // only terminates once contour is null or isNull()
            boolean foundContours = false;
            while (contour != null && !contour.isNull()) {
                if (contour.elem_size() > 0) {
                    foundContours = true;
                    CvSeq points = cvApproxPoly(contour, Loader.sizeof(CvContour.class),
                            storage, CV_POLY_APPROX_DP, cvContourPerimeter(contour) * 0.02, 0);
                    cvDrawContours(grabbedImage, points, CvScalar.BLUE, CvScalar.BLUE, -1, 1, CV_AA);
                }
                contour = contour.h_next();
            }

            // TESS4J - if contours were found, analyze the content
            if (foundContours) {
                ITesseract instance = new Tesseract(); // JNA Interface Mapping
                instance.setTessVariable("tessedit_char_whitelist", "0123456789");
                try {
                    String result = instance.doOCR(IplImageToBufferedImage(grayImage));
                    System.out.println(result);
                } catch (TesseractException e) {
                    System.err.println(e.getMessage());
                }
            }

            cvWarpPerspective(grabbedImage, rotatedImage, randomR);

            Frame rotatedFrame = converter.convert(rotatedImage);
            frame.showImage(rotatedFrame);
            recorder.record(rotatedFrame);
        }
        frame.dispose();
        recorder.stop();
        grabber.stop();
    }

    public static BufferedImage IplImageToBufferedImage(IplImage src) {
        OpenCVFrameConverter.ToIplImage grabberConverter = new OpenCVFrameConverter.ToIplImage();
        Java2DFrameConverter paintConverter = new Java2DFrameConverter();
        Frame frame = grabberConverter.convert(src);
        return paintConverter.getBufferedImage(frame, 1);
    }
}

Why does my program terminate after the first frame of the video?

I'm currently working on a program which takes the video from a webcam as input and then detects movement within this video, drawing lines around objects to show where they've moved to and from.
However, when I run this program, all it does is display one still image from my webcam. I have a pretty good idea why this is happening: the if statement if (!(matFrame.empty())) is being evaluated as false, so the else branch runs, setting keepProcessing to false. This terminates the while loop, leaving nothing but ims.showImage(matFrame); as output.
I can't find why this might be happening though, so I was hoping someone here might be able to help me. I've posted the code below so you can check for problems. I've also tried running it with a video to make sure this wasn't the fault of my webcam, and I found the same problem. Thanks for your time.
public class CaptureVideo {

    public static void main(String[] args) throws InterruptedException {

        // load the Core OpenCV library by name
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // create video capture device object
        VideoCapture cap = new VideoCapture();

        // try to use the hardware device if present
        int CAM_TO_USE = 0;

        // create new image objects
        Mat matFrame = new Mat();
        Mat previousFrame = new Mat();
        Mat diffFrame = new Mat();

        // try to open first capture device (0)
        try {
            cap.open(CAM_TO_USE);
        } catch (Exception e1) {
            System.out.println("No webcam attached");
            // otherwise try opening a video file
            try {
                cap.open("files/video.mp4");
            } catch (Exception e2) {
                System.out.println("No video file found");
            }
        }

        // if a video capture source is now open
        if (cap.isOpened()) {
            // create a new window object
            Imshow ims = new Imshow("From video source ... ");
            boolean keepProcessing = true;
            // add a flag to check whether the first frame has been read
            boolean firstFrame = true;

            while (keepProcessing) {
                // save previous frame before getting the next one, but
                // only do this once the first frame has passed
                if (!firstFrame)
                    previousFrame = matFrame.clone();

                // grab the next frame from video source
                cap.grab();
                // decode and return the grabbed video frame
                cap.retrieve(matFrame);

                // if the frame is valid (not end of video for example)
                if (!(matFrame.empty())) {
                    // if we are on the first frame, only show that and
                    // set the flag to false
                    if (firstFrame) {
                        ims.showImage(matFrame);
                        firstFrame = false;
                    }
                    // now show absolute difference after first frame
                    else {
                        Core.absdiff(matFrame, previousFrame, diffFrame);
                        ims.showImage(diffFrame);
                    }

                    // now convert it to grey and threshold it
                    Mat grey = new Mat();
                    Imgproc.cvtColor(diffFrame, grey, Imgproc.COLOR_BGR2GRAY);
                    Imgproc.adaptiveThreshold(grey, diffFrame, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C,
                            Imgproc.THRESH_BINARY_INV, 7, 10);

                    // now clean it up using some morphological operations
                    Size ksize = new Size(15, 15);
                    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, ksize);
                    Imgproc.morphologyEx(diffFrame, diffFrame, Imgproc.MORPH_CLOSE, kernel);

                    // find all the contours from the binary image using the edge-to-contour
                    // stuff we looked at in lectures
                    List<MatOfPoint> contours = new Vector<MatOfPoint>();
                    Imgproc.findContours(diffFrame, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

                    // draw the contours on the frame in red
                    Imgproc.drawContours(matFrame, contours, -1, new Scalar(0, 0, 255));

                    // find the largest contour by area
                    double maxArea = 0;
                    int maxAreaIndex = 0;
                    for (int i = 0; i < contours.size(); i++) {
                        double area = Imgproc.contourArea(contours.get(i), false);
                        if (area > maxArea) {
                            maxArea = area;
                            maxAreaIndex = i;
                        }
                    }

                    // draw the largest contour in green
                    Imgproc.drawContours(matFrame, contours, maxAreaIndex, new Scalar(0, 255, 0));

                    // create a new window object
                    Imshow ims_diff = new Imshow("Difference");
                    // display images
                    ims_diff.showImage(diffFrame);

                    // display image with a delay of 40ms (i.e. 1000 ms / 25 = 25 fps)
                    Thread.sleep(40);
                } else {
                    keepProcessing = false;
                }
            }
        }
    }
}
You should be seeing an exception on your console or output window:
OpenCV Error: Assertion failed (scn == 3 || scn == 4) in cv::cvtColor, file ..\..\..\..\opencv\modules\imgproc\src\color.cpp, line 3739
Exception in thread "main" CvException [org.opencv.core.CvException: cv::Exception: ..\..\..\..\opencv\modules\imgproc\src\color.cpp:3739: error: (-215) scn == 3 || scn == 4 in function cv::cvtColor
]
at org.opencv.imgproc.Imgproc.cvtColor_1(Native Method)
at org.opencv.imgproc.Imgproc.cvtColor(Imgproc.java:4598)
at CaptureVideo.main(CaptureVideo.java:87)
This references line 87 in my source file, which is:
Imgproc.cvtColor(diffFrame, grey, Imgproc.COLOR_BGR2GRAY);
The problem is that diffFrame hasn't been initialized so it's bombing out. I was able to get it to work locally by adding this block:
// decode and return the grabbed video frame
cap.retrieve(matFrame);
// *** START
if (firstFrame) {
    firstFrame = false;
    continue;
}
// *** End
// if the frame is valid (not end of video for example)
if (!(matFrame.empty()))
The effect of this is that the first frame will not be painted, but subsequent ones will. Also, the code later on opens a new JFrame (Imshow) for every "diff" frame, which will quickly overwhelm your machine, so be ready to kill the process.
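A minimal sketch of the fix for that second point: create the difference window once, before the loop, and reuse it (variable names follow the question's code):

// create both windows once, before the processing loop
Imshow ims = new Imshow("From video source ... ");
Imshow ims_diff = new Imshow("Difference");
boolean keepProcessing = true;
while (keepProcessing) {
    // ... per-frame processing as before ...
    // reuse the same window each iteration instead of constructing a new one
    ims_diff.showImage(diffFrame);
}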

Drawing the shape of the detected object?

I successfully detected the upper body of a person in a picture. But all I can do now is draw a rectangle around the upper body. How can I trace the upper body, i.e. draw a line (that follows the shape of the upper body) around the detected region? I'm working with OpenCV.
Here's some code from the detection system.
if (new File("E:\\OpenCV\\opencv\\data\\haarcascades\\haarcascade_mcs_upperbody.xml\\").isFile()) {
    System.out.println("file there");
}
cascadeClassifier = new CascadeClassifier("E:\\OpenCV\\opencv\\data\\haarcascades\\haarcascade_mcs_upperbody.xml");
inputPic = Highgui.imread(picSrcDir + picName);
MatOfInt intw = new MatOfInt(1);
MatOfDouble dble = new MatOfDouble(1.05);
rect = new MatOfRect();
cascadeClassifier.detectMultiScale(inputPic, rect, intw, dble);
Scalar color = new Scalar(0, 0, 255);
System.out.println("Number Of Hits: " + rect.toArray().length);
Rect[] rectArr = rect.toArray();
System.out.println(rectArr.length);
int i = 0;
for (Rect recta : rectArr) {
    System.out.println(rectArr[i]);
    i++;
    Core.rectangle(inputPic, new Point(recta.x, recta.y),
            new Point(recta.x + recta.width, recta.y + recta.height), color);
}
Highgui.imwrite(picName, inputPic);
After detecting the person's upper-body rect:
1. Remove the rect background, keeping just the person's upper body.
2. Binarize the image.
3. Apply a morphological boundary algorithm to trace the upper body.
Example:
OpenCV provides these algorithms. However, the example above was developed using Marvin. The source code is presented below:
public class TraceShape {

    public TraceShape() {
        // Load plug-in
        MarvinImagePlugin boundary = MarvinPluginLoader.loadImagePlugin("org.marvinproject.image.morphological.boundary");
        // Load image
        MarvinImage image = MarvinImageIO.loadImage("./res/person.jpg");
        // Binarize
        MarvinImage binImage = MarvinColorModelConverter.rgbToBinary(image, 245);
        MarvinImageIO.saveImage(binImage, "./res/person_bin.png");
        // Boundary
        boundary.process(binImage.clone(), binImage);
        MarvinImageIO.saveImage(binImage, "./res/person_boundary.png");
    }

    public static void main(String[] args) {
        new TraceShape();
    }
}
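Since OpenCV provides the same building blocks, a minimal equivalent of the binarize-plus-boundary steps with the OpenCV Java API could look like this (a sketch only: gray is assumed to be the upper-body ROI already converted to a single-channel image, and the 245 threshold simply mirrors the Marvin example):

// gray: the upper-body ROI as a single-channel (grayscale) Mat
Mat bin = new Mat();
// THRESH_BINARY_INV makes the person white on a black background
Imgproc.threshold(gray, bin, 245, 255, Imgproc.THRESH_BINARY_INV);

// morphological gradient = dilation minus erosion, which leaves only the boundary
Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3, 3));
Mat boundaryMat = new Mat();
Imgproc.morphologyEx(bin, boundaryMat, Imgproc.MORPH_GRADIENT, kernel);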
