I am fairly new to OpenCV on Android and I am using the ColorBlobDetector class from the OpenCV samples to detect traffic-light blobs such as red, green, and amber.
I can't seem to understand the use of mColorRadius.
I also cannot figure out where to compare colors to find the appropriate blob I am looking for.
Here's my code.
PS: I even tried entering values for mLowerBound and mUpperBound, but it kept highlighting black blobs.
package edu.csueb.ilab.blindbike.lightdetection;
import android.os.Environment;
import android.util.Log;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;
import java.io.File;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
public class ColorBlobDetector {
// Lower and Upper bounds for range checking in HSV color space
private Scalar mLowerBound = new Scalar(0); // for blue 120,100,100; current 176,255,244; working green 70,20,100; fluorescent green light 57,255,20
private Scalar mUpperBound = new Scalar(0); // for blue 179,255,255; blue cap 28,28,37; current 177,255,252; working green 85,35,125; fluorescent green light 57,255,200; gray signs 76,55,28 and 89,62,33; blue cap 80,109,149
// Minimum contour area in percent for contours filtering
private static double mMinContourArea = 0.01; // tried 0.4
// Color radius for range checking in HSV color space
private Scalar mColorRadius = new Scalar(25,50,50,0); //initial val 25,50,50,0 //214,55,52,0 for the blue cap
private Mat mSpectrum = new Mat();
private List<MatOfPoint> mContours = new ArrayList<MatOfPoint>();
// Cache
Mat mPyrDownMat = new Mat();
Mat mHsvMat = new Mat();
Mat mMask = new Mat();
Mat mDilatedMask = new Mat();
Mat mHierarchy = new Mat();
SimpleDateFormat df = new SimpleDateFormat("yyyy_MM_dd_HH_mm_ss");
public void setColorRadius(Scalar radius) {
mColorRadius = radius;
}
public void setHsvColor(Scalar hsvColor) {
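// mColorRadius is the half-width of the accepted HSV window: the bounds
// computed below become hsvColor +/- mColorRadius per channel, with hue
// clamped to [0, 255] (the _FULL conversions map hue onto the full 8-bit range).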
double minH = (hsvColor.val[0] >= mColorRadius.val[0]) ? hsvColor.val[0]-mColorRadius.val[0] : 0;
double maxH = (hsvColor.val[0]+mColorRadius.val[0] <= 255) ? hsvColor.val[0]+mColorRadius.val[0] : 255;
mLowerBound.val[0] = minH;
mUpperBound.val[0] = maxH;
mLowerBound.val[1] = hsvColor.val[1] - mColorRadius.val[1];
mUpperBound.val[1] = hsvColor.val[1] + mColorRadius.val[1];
mLowerBound.val[2] = hsvColor.val[2] - mColorRadius.val[2];
mUpperBound.val[2] = hsvColor.val[2] + mColorRadius.val[2];
mLowerBound.val[3] = 0;
mUpperBound.val[3] = 255;
Mat spectrumHsv = new Mat(1, (int)(maxH-minH), CvType.CV_8UC3);
for (int j = 0; j < maxH-minH; j++) {
byte[] tmp = {(byte)(minH+j), (byte)255, (byte)255};
spectrumHsv.put(0, j, tmp);
}
Imgproc.cvtColor(spectrumHsv, mSpectrum, Imgproc.COLOR_HSV2BGR_FULL, 4); //COLOR_HSV2RGB_FULL
}
public Mat getSpectrum() {
return mSpectrum;
}
public void setMinContourArea(double area) {
mMinContourArea = area;
}
public void process(Mat rgbaImage) {
Scalar colorGreen=new Scalar(0, 128, 0);
Imgproc.pyrDown(rgbaImage, mPyrDownMat);
Imgproc.pyrDown(mPyrDownMat, mPyrDownMat);
Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_BGR2HSV_FULL);
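// Note: the frame was shrunk twice above (each pyrDown halves width and height),
// which is why matching contours are scaled back up with Core.multiply(..., new Scalar(4,4)) below.
// Also note: the stock color-blob-detection sample converts with COLOR_RGB2HSV_FULL,
// since Android delivers RGBA frames; converting as BGR swaps the red and blue
// hue ranges, which can make the configured bounds match unexpected colors.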
Core.inRange(mHsvMat, mLowerBound, mUpperBound, mMask);
Imgproc.dilate(mMask, mDilatedMask, new Mat());
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(mDilatedMask, contours, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
// Find max contour area
double maxArea = 0;
Iterator<MatOfPoint> each = contours.iterator();
while (each.hasNext()) {
MatOfPoint wrapper = each.next();
double area = Imgproc.contourArea(wrapper);
if (area > maxArea)
maxArea = area;
}
// Filter contours by area and resize to fit the original image size
mContours.clear();
each = contours.iterator();
while (each.hasNext()) {
MatOfPoint contour = each.next(); // current: >=50 && <200; testing in Jan: 9700 || 25200
if (Imgproc.contourArea(contour) >= 49656 && Imgproc.contourArea(contour) < 53177) { // && rather than ||: with ||, every contour passes; alternative: mMinContourArea*maxArea; red 30 300-440, green 510 1600
Core.multiply(contour, new Scalar(4,4), contour); // perfect working: green 880 || 1800
mContours.add(contour);
}
}
File path =Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_PICTURES);
String filename = "christ"+df.format(new Date()).toString()+".png";
File file = new File(path, filename);
filename = file.toString();
Boolean save;
MatOfPoint2f approxCurve=new MatOfPoint2f();
for(int i=0;i<contours.size();i++)
{
MatOfPoint2f countour2f = new MatOfPoint2f(contours.get(i).toArray());
double approxDistance = Imgproc.arcLength(countour2f, true)*0.02;
Imgproc.approxPolyDP(countour2f, approxCurve, approxDistance, true);
// Convert back to Contour
MatOfPoint points=new MatOfPoint(approxCurve.toArray());
//Get Bounding rect of contour
Rect rect=Imgproc.boundingRect(points);
//draw enclosing rectangle
Mat ROI = rgbaImage.submat(rect.y, rect.y + rect.height, rect.x, rect.x + rect.width);
// save= Highgui.imwrite(filename,ROI);
// if (save == true)
// Log.i("Save Status", "SUCCESS writing image to external storage");
// else
// Log.i("Save Status", "Fail writing image to external storage");
Core.rectangle(rgbaImage, new Point(rect.x,rect.y), new Point(rect.x+rect.width,rect.y+rect.height),new Scalar(255,225,0,0),3);
}
}
public List<MatOfPoint> getContours() {
return mContours;
}
}
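For context, here is roughly how the sample is meant to be driven. A minimal usage sketch, where rgbaFrame stands in for the current camera frame and the Scalar values are illustrative guesses rather than calibrated traffic-light colors:
ColorBlobDetector detector = new ColorBlobDetector();
detector.setColorRadius(new Scalar(10, 60, 60, 0)); // half-width of the accepted HSV window
detector.setHsvColor(new Scalar(250, 200, 200, 0)); // target color; bounds become target +/- radius
detector.process(rgbaFrame);                        // rgbaFrame: current camera frame as a Mat
List<MatOfPoint> blobs = detector.getContours();
Note that until setHsvColor() is called, mLowerBound and mUpperBound stay at Scalar(0), so Core.inRange() only passes pixels that are exactly zero; that may be one reason the detector keeps highlighting black blobs.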
I wrote this code that detects rectangles, but I cannot write code that detects the corners.
import java.util.ArrayList;
import java.util.List;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
public class RectDetection {
public static void main(String[] args) {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
Mat rectengle = Imgcodecs.imread("D:\\sepano\\rect.png");
Mat img = rectengle.clone();
Imgproc.cvtColor(rectengle, img, Imgproc.COLOR_BGR2GRAY);
Imgproc.GaussianBlur(img, img, new org.opencv.core.Size(1, 1), 2, 2);
Imgproc.Canny(img,img,3, 3,5,false);
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(img, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
MatOfPoint temp_contour = contours.get(0); //the largest is at the index 0 for starting point
for (int idx = 0; idx < contours.size(); idx++) {
temp_contour = contours.get(idx);
MatOfPoint2f new_mat = new MatOfPoint2f( temp_contour.toArray() );
int contourSize = (int)temp_contour.total();
MatOfPoint2f approxCurve_temp = new MatOfPoint2f();
Imgproc.approxPolyDP(new_mat, approxCurve_temp, contourSize*0.05, true);
if (approxCurve_temp.total()==8) {
MatOfPoint points = new MatOfPoint( approxCurve_temp.toArray() );
Rect rect = Imgproc.boundingRect(points);
Imgproc.rectangle(img, new Point(rect.x, rect.y), new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(170, 0, 150, 0), 5);
}
}
}
}
Here is Python code for corner detection, but I cannot convert it to Java:
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('simple.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray,25,0.01,10)
corners = np.int0(corners)
for i in corners:
    x, y = i.ravel()
    cv2.circle(img, (x, y), 3, 255, -1)
plt.imshow(img),plt.show()
Can anyone help me?
Look at your Java code closely...
In this line:
Imgproc.rectangle(img, new Point(rect.x,rect.y), new Point(rect.x+rect.width,rect.y+rect.height), new Scalar(170,0,150,0), 5);
Point(rect.x,rect.y) corresponds to the top left corner of your rectangle, and Point(rect.x+rect.width,rect.y+rect.height) corresponds to the bottom right corner of your rectangle.
Your rectangle detection code should suffice, and the four corners are as follows:
Point(rect.x,rect.y) //Top Left
Point(rect.x+rect.width,rect.y) //Top Right
Point(rect.x,rect.y+rect.height) //Bottom Left
Point(rect.x+rect.width,rect.y+rect.height) //Bottom Right
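If you still want the goodFeaturesToTrack approach from the Python snippet, a rough Java equivalent is below. This is a sketch only, assuming OpenCV 3.x to match the Imgcodecs and Imgproc.rectangle calls in your code:
Mat src = Imgcodecs.imread("D:\\sepano\\rect.png");
Mat gray = new Mat();
Imgproc.cvtColor(src, gray, Imgproc.COLOR_BGR2GRAY);
MatOfPoint corners = new MatOfPoint();
Imgproc.goodFeaturesToTrack(gray, corners, 25, 0.01, 10); // up to 25 strong corners
for (Point p : corners.toArray()) {
    Imgproc.circle(src, p, 3, new Scalar(255, 0, 0), -1); // mark each corner
}
Imgcodecs.imwrite("corners.png", src);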
I have an image:
I want to crop it so the book is by itself.
I am using OpenCV to attempt to get the contours of the image. Once I draw them, it looks like this. How can I ignore the extra contours to the right of the image? I have already tried rejecting outliers using the standard deviation. Right now the code takes every point inside the rectangle and adds it to an ArrayList for later processing. I keep one overall ArrayList for the points, plus two more so the points can be ordered from least to greatest for the statistical analysis.
This is what it looks like now:
import java.awt.Point;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Rect;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
public class imtest {
public static void main(String args[]) throws IOException{
String filename="C:/image.png";
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
Mat torect=new Mat();
Mat torect1=Imgcodecs.imread(filename,0);
Imgproc.Canny(torect1, torect, 10, 100);
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(torect.clone(), contours, new Mat(), Imgproc.RETR_LIST,Imgproc.CHAIN_APPROX_SIMPLE);
ArrayList<Point> outlie=new ArrayList<Point>();
ArrayList<Integer> ylist=new ArrayList<Integer>();
ArrayList<Integer> xlist=new ArrayList<Integer>();
MatOfPoint2f approxCurve = new MatOfPoint2f();
//For each contour found
for (int i=0; i<contours.size(); i++)
{
//Convert contours(i) from MatOfPoint to MatOfPoint2f
MatOfPoint2f contour2f = new MatOfPoint2f( contours.get(i).toArray() );
//Processing on mMOP2f1 which is in type MatOfPoint2f
double approxDistance = Imgproc.arcLength(contour2f, true)*0.02;
Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
//Convert back to MatOfPoint
MatOfPoint points = new MatOfPoint( approxCurve.toArray() );
// Get bounding rect of contour
Rect rect = Imgproc.boundingRect(points);
int xoffset=rect.x;
int yoffset=rect.y;
for (int y = 0; y < rect.height; y++) {
for (int x = 0; x < rect.width; x++) {
if (yoffset > 1 && xoffset > 1)
{
outlie.add(new Point(xoffset+x,yoffset+y));
ylist.add(yoffset+y);
xlist.add(xoffset+x);
}
}
}
}
}
}
Adjusting the Canny thresholds controlled the number of contours in the resulting image.
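For example, raising both Canny thresholds suppresses weak edges, so fewer spurious contours survive. A minimal sketch; the values are illustrative, not tuned for the book image:
Imgproc.Canny(torect1, torect, 50, 150); // stricter than the original (10, 100); fewer weak edges reach findContours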
I'm working on a program that detects the pupil area of the eye using OpenCV. Below is the code snippet. It is not working; it throws a CvException at runtime. I don't know what to do. How can I make it work? (OpenCV 2.4)
import java.util.ArrayList;
import java.util.List;
import java.lang.Math;
import org.opencv.core.Scalar;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Core;
import org.opencv.core.CvException;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;
public class Detect {
public static void main(String[] args) throws CvException{
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
// Load image
Mat src = Highgui.imread("tt.jpg");
// Invert the source image and convert to grayscale
Mat gray = new Mat();
Imgproc.cvtColor(src, gray, Imgproc.COLOR_BGR2GRAY);
Highgui.imwrite("gray.jpg", gray);
// Convert to binary image by thresholding it
Imgproc.threshold(gray, gray, 30, 255, Imgproc.THRESH_BINARY_INV);
Highgui.imwrite("binary.jpg", gray);
// Find all contours
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(gray.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
// Fill holes in each contour
Imgproc.drawContours(gray, contours, -1, new Scalar(255,255,255), -1);
for (int i = 0; i < contours.size(); i++)
{
double area = Imgproc.contourArea(contours.get(i));
Rect rect = Imgproc.boundingRect(contours.get(i));
int radius = rect.width/2;
System.out.println("Area: "+area);
// If contour is big enough and has round shape
// Then it is the pupil
if (area >= 30 &&
Math.abs(1 - ((double)rect.width / (double)rect.height)) <= 0.2 &&
Math.abs(1 - (area / (Math.PI * Math.pow(radius, 2)))) <= 0.2)
{
Core.circle(src, new Point(rect.x + radius, rect.y + radius), radius, new Scalar(255,0,0), 2);
System.out.println("pupil");
}
}
Highgui.imwrite("processed.jpg", src);
}
}
It shows the following error:
OpenCV Error: Assertion failed (scn == 3 || scn == 4) in cv::cvtColor, file ..\..\..\..\opencv\modules\imgproc\src\color.cpp, line 3739
Exception in thread "main" CvException [org.opencv.core.CvException: cv::Exception: ..\..\..\..\opencv\modules\imgproc\src\color.cpp:3739: error: (-215) scn == 3 || scn == 4 in function cv::cvtColor
]
at org.opencv.imgproc.Imgproc.cvtColor_1(Native Method)
at org.opencv.imgproc.Imgproc.cvtColor(Imgproc.java:4598)
at Detect.main(Detect.java:24)
I think that OpenCV thinks that "tt.jpg" is already single-channel.
According to the documentation:
The function determines the type of an image by the content, not by the file extension.
To ensure the format, you can use a flag:
Mat src = Highgui.imread("tt.jpg"); // OpenCV decides the type based on the content
Mat src = Highgui.imread("tt.jpg", Highgui.IMREAD_GRAYSCALE); // single-channel image will be loaded, even if it is a 3-channel image
Mat src = Highgui.imread("tt.jpg", Highgui.IMREAD_COLOR); // 3-channel image will be loaded, even if it is a single-channel image
If you need only the grayscale image:
Mat src = Highgui.imread("tt.jpg", Highgui.IMREAD_GRAYSCALE);
I am working on a project where we are trying to detect whether the eye is closed or open in a picture. What we have done so far is that we detected the face, then the eyes. Then we applied hough transform, hoping that the iris would be the only circle when the eye is open. The problem is that when the eye is closed, it produces a circle as well:
Here is the code:
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfRect;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.highgui.Highgui;
import org.opencv.objdetect.CascadeClassifier;
import org.opencv.imgproc.Imgproc;
public class FaceDetector {
public static void main(String[] args) {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
System.out.println("\nRunning FaceDetector");
CascadeClassifier faceDetector = new CascadeClassifier("D:\\CS\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");
CascadeClassifier eyeDetector = new CascadeClassifier("D:\\CS\\opencv\\sources\\data\\haarcascades\\haarcascade_eye.xml");
Mat image = Highgui.imread("C:\\Users\\Yousra\\Desktop\\images.jpg");
Mat gray = Highgui.imread("C:\\Users\\Yousra\\Desktop\\eyes\\E7.png");
String faces;
String eyes;
MatOfRect faceDetections = new MatOfRect();
MatOfRect eyeDetections = new MatOfRect();
Mat face;
Mat crop = null;
Mat circles = new Mat();
faceDetector.detectMultiScale(image, faceDetections);
for (int i = 0; i< faceDetections.toArray().length; i++){
faces = "Face"+i+".png";
face = image.submat(faceDetections.toArray()[i]);
crop = face.submat(4, (2*face.width())/3, 0, face.height());
Highgui.imwrite(faces, face);
eyeDetector.detectMultiScale(crop, eyeDetections, 1.1, 2, 0,new Size(30,30), new Size());
if(eyeDetections.toArray().length ==0){
System.out.println(" Not a face" + i);
}else{
System.out.println("Face with " + eyeDetections.toArray().length + "eyes" );
for (int j = 0; j< eyeDetections.toArray().length ; j++){
System.out.println("Eye" );
Mat eye = crop.submat(eyeDetections.toArray()[j]);
eyes = "Eye"+j+".png";
Highgui.imwrite(eyes, eye);
}
}
}
Imgproc.cvtColor(gray, gray, Imgproc.COLOR_BGR2GRAY);
System.out.println("1 Hough :" +circles.size());
float circle[] = new float[3];
for (int i = 0; i < circles.cols(); i++)
{
circles.get(0, i, circle);
org.opencv.core.Point center = new org.opencv.core.Point();
center.x = circle[0];
center.y = circle[1];
Core.circle(gray, center, (int) circle[2], new Scalar(255,255,100,1), 4);
}
Imgproc.Canny( gray, gray, 200, 10, 3,false);
Imgproc.HoughCircles( gray, circles, Imgproc.CV_HOUGH_GRADIENT, 1, 100, 80, 10, 10, 50 );
System.out.println("2 Hough:" +circles.size());
for (int i = 0; i < circles.cols(); i++)
{
circles.get(0, i, circle);
org.opencv.core.Point center = new org.opencv.core.Point();
center.x = circle[0];
center.y = circle[1];
Core.circle(gray, center, (int) circle[2], new Scalar(255,255,100,1), 4);
}
Imgproc.Canny( gray, gray, 200, 10, 3,false);
Imgproc.HoughCircles( gray, circles, Imgproc.CV_HOUGH_GRADIENT, 1, 100, 80, 10, 10, 50 );
System.out.println("3 Hough" +circles.size());
//float circle[] = new float[3];
for (int i = 0; i < circles.cols(); i++)
{
circles.get(0, i, circle);
org.opencv.core.Point center = new org.opencv.core.Point();
center.x = circle[0];
center.y = circle[1];
Core.circle(gray, center, (int) circle[2], new Scalar(255,255,100,1), 4);
}
String hough = "afterhough.png";
Highgui.imwrite(hough, gray);
}
}
How to make it more accurate?
Circular Hough transform is unlikely to work well in the majority of cases i.e. where the eye is partially open or closed. You'd be better off isolating rectangular regions (bounding boxes) around the eyes and computing a measure based on pixel intensities (grey levels). For example the variance of pixels within the region would be a good discriminator between open and closed eyes. Obtaining a bounding box around the eyes can be done quite reliably using relative position from the bounding box detected around the face using OpenCV Haar cascades. Figure 3 in this paper gives some idea of the location process.
http://personal.ee.surrey.ac.uk/Personal/J.Collomosse/pubs/Malleson-IJCV-2012.pdf
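A minimal sketch of that variance measure, assuming eyeRegion is a grayscale Mat holding the eye bounding box found by the cascade (MatOfDouble lives in org.opencv.core), with a placeholder cut-off that would need calibrating on labelled samples:
MatOfDouble mean = new MatOfDouble();
MatOfDouble stddev = new MatOfDouble();
Core.meanStdDev(eyeRegion, mean, stddev);
double variance = stddev.get(0, 0)[0] * stddev.get(0, 0)[0];
// Open eyes (dark iris against bright sclera) tend to give a higher variance
// than closed eyes (fairly uniform skin).
boolean open = variance > 250.0; // placeholder threshold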
You can check the circles.cols() value: if it is 2, the eyes are open, and if it is 0, the eyes are closed. You can also detect a blink when the value of circles.cols() changes from 2 to 0. The Hough transform will not detect a circle when the eyes are closed.
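In code, that check could look like the following sketch, using the circles Mat filled by Imgproc.HoughCircles above:
int found = circles.cols(); // one column per detected circle
if (found == 2) {
    System.out.println("Eyes open");
} else if (found == 0) {
    System.out.println("Eyes closed");
}
// A change from 2 to 0 between consecutive frames indicates a blink.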
I have a project where I am trying to track a person as they move throughout a room. I am using an Arduino, some servo motors and an Xbox Kinect as my camera.
My vision is to allow the project some training time where it can scan the room and build a database of images of the empty room. Then, when a person enters the room, the program can compute a simple difference image that yields a white blob for the person. From this white blob I can calculate the person's centre of mass and compare it to the centre of the image frame, in order to tell the Arduino how far and in which direction to move the servo motors. I am using Eclipse, writing in Java, and using OpenCV 2.4.6.
I am stuck on getting a clear white blob. I have already written my methods to calculate the distance from the blob's centre of mass to the centre of the frame, but without a clearly defined blob this is useless. I have been trying to get my program to work by taking a snapshot of the background of my room, converting the image to binary, then subtracting it from a binary image of my room with me in it. This has not worked. Is my plan of training the system and then comparing against these trained images valid, or should I go about detecting an object a different way? (See the moments sketch below for the centre-of-mass step.)
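For the centre-of-mass step, image moments are the usual tool. A minimal sketch, assuming mask is the binary difference image with the person in white, and using the imWidth/imHeight frame dimensions from the code below (Moments lives in org.opencv.imgproc):
Moments m = Imgproc.moments(mask, true); // true: treat the mask as binary
if (m.get_m00() > 0) { // m00 is the blob area; zero means nothing detected
    double cx = m.get_m10() / m.get_m00();
    double cy = m.get_m01() / m.get_m00();
    double xError = cx - imWidth / 2.0;  // horizontal offset to steer the servos by
    double yError = cy - imHeight / 2.0; // vertical offset
}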
I have tried implementing opticalflow() but it seems erratic and not extremely accurate.
Any information on the topic would be extremely helpful. I thank you in advance for reading my question.
-Trent
Edit: I have attached my code. The area in question is the training() and findDiff() methods.
package testingV1;
//OpenCv + OpenNI + Java Libraries
import java.awt.FlowLayout;
import java.util.ArrayList;
import java.util.List;
import java.awt.image.BufferedImage;
import java.awt.image.DataBuffer;
import java.awt.image.DataBufferByte;
import java.io.*;
import java.nio.ByteBuffer;
import javax.imageio.ImageIO;
import javax.swing.*;
import org.opencv.core.*;
import org.opencv.imgproc.*;
import org.opencv.objdetect.CascadeClassifier;
import org.opencv.video.BackgroundSubtractorMOG;
import org.opencv.video.Video;
import org.opencv.highgui.*;
import org.opencv.*;
import org.OpenNI.*;
public class TestV1 {
static int imWidth = 640, imHeight = 480;
static ImageGenerator imageGen;
static Context context;
static int flag = CvType.CV_8UC3;
static int flag2 = CvType.CV_8UC1;
static Mat background;
public static void main(String[] args) throws GeneralException{
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
//We create a new "context" of the Kinect
context = new Context();
JFrame canvas = new JFrame("Optical Flow");
//need to create and add license to our "context"
License license = new License("PrimeSense", "0KOIk2JeIBYClPWVnMoRKn5cdY4=");
context.addLicense(license);
//defining the data we are taking from the kinect
MapOutputMode mapMode = null; //initialize it to null
mapMode = new MapOutputMode(imWidth, imHeight, 30); //create a 640x480 30fps feed definition
imageGen = ImageGenerator.create(context); //Rgb camera
imageGen.setMapOutputMode(mapMode); //change our feed to 640x480 30 fps
imageGen.setPixelFormat(PixelFormat.RGB24);///Pixel format, RGB 8-bit 3 channel
context.setGlobalMirror(true); //Mirrors our feed to make it more intuitive
BufferedImage rgbImage = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_INT_RGB);
BufferedImage prevImg = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_BYTE_GRAY);
BufferedImage currImg = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_BYTE_GRAY);
BufferedImage diffImg = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_BYTE_GRAY);
BufferedImage paintedImg = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_INT_RGB);
BufferedImage facesImg = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_INT_RGB);
Mat paintedMat = new Mat(imHeight, imWidth, flag);
Mat facesMat = new Mat(imHeight, imWidth, flag);
Mat currMat = new Mat(imHeight, imWidth, flag2);
Mat prevMat = new Mat(imHeight, imWidth, flag2);
Mat diffMat = new Mat(imHeight, imWidth, flag2);
Mat paintedMatg = new Mat(imHeight, imWidth, flag2);
ByteBuffer imageBB;
//First Frame
canvas.getContentPane().setLayout(new FlowLayout());
Icon video = new ImageIcon(rgbImage);
JLabel panel = new JLabel(video);
//Icon video2 = new ImageIcon(paintedImg);
//JLabel panel2 = new JLabel(video2);
//Icon video3 = new ImageIcon(facesImg);
//JLabel panel3 = new JLabel(video3);
Icon video4 = new ImageIcon(diffImg);
JLabel panel4 = new JLabel(video4);
canvas.getContentPane().add(panel);
//canvas.getContentPane().add(panel2);
//canvas.getContentPane().add(panel3);
canvas.getContentPane().add(panel4);
canvas.pack();
canvas.setVisible(true);
canvas.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
CascadeClassifier faceDetectorAlg = new CascadeClassifier("C:/Users/Trent/Desktop/Capstone"
+ "/ComputerVisionCode/November16/testingV1/src/testingV1/haarcascade_frontalface_alt.xml");
boolean firstTime = true;
imageGen.startGenerating();
while(true){
context.waitOneUpdateAll(imageGen);
imageBB = imageGen.getImageMap().createByteBuffer(); //get KinectData
rgbImage = bufToImage(imageBB); //take data from kinect and put in BufferedImage
prevMat = currMat;
currMat = img2Mat(rgb2Gray(rgbImage));
if(firstTime){
training(rgbImage);
firstTime = false;
}
else{
diffMat = findDiff(currMat);
diffImg = mat2Img(diffMat);
}
//optical flow - inaccurate
//paintedMatg = opticalFlow(img2Mat(prevImg), img2Mat(currImg), 300, 0.01, 10);
//Imgproc.cvtColor(paintedMatg, paintedMat, Imgproc.COLOR_GRAY2RGB); //change from gray to color
//paintedImg = mat2Img(paintedMat);
//face detection - extremely resource intensive
//facesMat = faceDetector(img2Mat(rgbImage), faceDetectorAlg);
//facesImg = mat2Img(facesMat);
panel.setIcon(new ImageIcon(rgbImage));
//panel2.setIcon(new ImageIcon(paintedImg));
//panel3.setIcon(new ImageIcon(facesImg));
panel4.setIcon(new ImageIcon(diffImg));
canvas.repaint();
canvas.revalidate();
}
}
//establishes a background for better diff images
private static void training(BufferedImage in){
background = new Mat(imHeight, imWidth, flag2);
background = img2Mat(rgb2Gray(in));
System.out.println("Training Complete");
}
private static Mat findDiff(Mat in){
Mat output = new Mat(imHeight, imWidth, flag2);
Core.absdiff(background, in, output);
Imgproc.threshold(output, output, 20, 255, Imgproc.THRESH_BINARY);
return output;
}
//Face Detection
private static Mat faceDetector(Mat in, CascadeClassifier Alg){
Mat output = in;
MatOfRect faceDetections = new MatOfRect();
if(Alg.empty()){
System.out.println("didnt load");
return output;
}
Alg.detectMultiScale(in, faceDetections);
for(Rect rect : faceDetections.toArray()){
Core.rectangle(output, new Point(rect.x, rect.y),
new Point(rect.x + rect.width, rect.y + rect.height), new Scalar(0, 255, 0), 2);
}
return output;
}
//Returns an image with vectors painted to show movement.
private static Mat opticalFlow(Mat curr, Mat prev, int maxDetectionCount, double qualityLevel, double minDistance){
List<MatOfPoint2f> trackedPoints = new ArrayList<MatOfPoint2f>();
MatOfPoint initial = new MatOfPoint();
MatOfFloat err = new MatOfFloat();
MatOfByte status = new MatOfByte();
MatOfPoint2f initial2f = new MatOfPoint2f();
MatOfPoint2f next2f = new MatOfPoint2f();
double[] temp;
Point p1 = new Point();
Point p2 = new Point();
Mat output = new Mat(imHeight, imWidth, flag);
Scalar red = new Scalar(255, 0, 0);
//Finds Tracking points
if(trackedPoints.size() < 1){
Imgproc.goodFeaturesToTrack(curr, initial, maxDetectionCount, qualityLevel, minDistance);
initial.convertTo(initial2f, CvType.CV_32FC2);
trackedPoints.add(initial2f);
}
//catches first time frame
if(prev.empty())
curr.copyTo(prev);
//find points in current image
if(trackedPoints.get(0).total() > 0){
Video.calcOpticalFlowPyrLK(prev, curr, trackedPoints.get(0), next2f, status, err);
trackedPoints.add(next2f);
}
output = curr;
//draw red lines
for(int i = 0; i < trackedPoints.get(0).cols(); i++){
for(int j = 0; j < trackedPoints.get(0).rows(); j++){
temp = trackedPoints.get(0).get(j, i);
p1.set(temp);
temp = trackedPoints.get(1).get(j, i);
p2.set(temp);
Core.line(output, p1, p2, red);
}
}
return output;
}
//Returns a vector to indicate how the magnitude of movement.
private static double[] opticalFlowAnalysis(Mat curr, Mat prev, int maxDetectionCount, double qualityLevel, double minDistance){
List<MatOfPoint2f> trackedPoints = new ArrayList<MatOfPoint2f>();
MatOfPoint initial = new MatOfPoint();
MatOfFloat err = new MatOfFloat();
MatOfByte status = new MatOfByte();
MatOfPoint2f initial2f = new MatOfPoint2f();
MatOfPoint2f next2f = new MatOfPoint2f();
double[] total = new double[2];
total[0] = 0;
total[1] = 0;
double[] point1;
double[] point2;
double[] output = new double[2];
//Finds Tracking points
if(trackedPoints.size() < 1){
Imgproc.goodFeaturesToTrack(curr, initial, maxDetectionCount, qualityLevel, minDistance);
initial.convertTo(initial2f, CvType.CV_32FC2);
trackedPoints.add(initial2f);
}
//catches first time frame
if(prev.empty())
curr.copyTo(prev);
//find points in current image
if(trackedPoints.get(0).total() > 0){
Video.calcOpticalFlowPyrLK(prev, curr, trackedPoints.get(0), next2f, status, err);
trackedPoints.add(next2f);
}
//average the distance moved
// (-) signifies distance moved right and down
// (+) signifies distance moved left and up
for(int i = 0; i < trackedPoints.get(0).cols(); i++){
for(int j = 0; j < trackedPoints.get(0).rows(); j++){
point1 = trackedPoints.get(0).get(j, i);
point2 = trackedPoints.get(1).get(j, i);
total[0] += point1[0] - point2[0];
total[1] += point1[1] - point2[1];
}
}
output[0] = total[0] / trackedPoints.get(0).cols();
output[1] = total[1] / trackedPoints.get(0).rows();
return output;
}
private static Mat img2Mat(BufferedImage in){
Mat out;
byte[] data;
int r, g, b;
if(in.getType() == BufferedImage.TYPE_INT_RGB){
out = new Mat(imHeight, imWidth, flag);
data = new byte[imWidth * imHeight * (int)out.elemSize()];
int[] dataBuff = in.getRGB(0, 0, imWidth, imHeight, null, 0, imWidth);
for(int i = 0; i < dataBuff.length; i++){
data[i*3] = (byte) ((dataBuff[i] >> 16) & 0xFF);
data[i*3 + 1] = (byte) ((dataBuff[i] >> 8) & 0xFF);
data[i*3 + 2] = (byte) ((dataBuff[i] >> 0) & 0xFF);
}
}
else{
out = new Mat(imHeight, imWidth, flag2);
data = new byte[imWidth * imHeight * (int)out.elemSize()];
int[] dataBuff = in.getRGB(0, 0, imWidth, imHeight, null, 0, imWidth);
for(int i = 0; i < dataBuff.length; i++){
r = (byte) ((dataBuff[i] >> 16) & 0xFF);
g = (byte) ((dataBuff[i] >> 8) & 0xFF);
b = (byte) ((dataBuff[i] >> 0) & 0xFF);
data[i] = (byte)((0.21 * r) + (0.71 * g) + (0.07 * b)); //luminosity
}
}
out.put(0, 0, data);
return out;
}
private static BufferedImage mat2Img(Mat in){
BufferedImage out;
byte[] data = new byte[imWidth * imHeight * (int)in.elemSize()];
int type;
in.get(0, 0, data);
if(in.channels() == 1)
type = BufferedImage.TYPE_BYTE_GRAY;
else
type = BufferedImage.TYPE_3BYTE_BGR;
out = new BufferedImage(imWidth, imHeight, type);
out.getRaster().setDataElements(0, 0, imWidth, imHeight, data);
return out;
}
private static BufferedImage rgb2Gray(BufferedImage in){
BufferedImage out = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_BYTE_GRAY);
Mat color = new Mat(imHeight, imWidth, flag);
Mat gray = new Mat(imHeight, imWidth, flag);
color = img2Mat(in); //converting bufferedImage to Mat
Imgproc.cvtColor(color, gray, Imgproc.COLOR_RGB2GRAY); //change from color to grayscale
out = mat2Img(gray); //converting Mat to bufferedImage
return out;
}
//Converts bytebuffer to buffered image
private static BufferedImage bufToImage(ByteBuffer pixelsRGB){
int[] pixelInts = new int[imWidth * imHeight];
int rowStart = 0;
int bbIdx; //index to ByteBuffer
int i = 0; //index to pixels
int rowLen = imWidth * 3;
for (int row = 0; row < imHeight; row++){
bbIdx = rowStart;
for(int col = 0; col < imWidth; col++){
int pixR = pixelsRGB.get(bbIdx++);
int pixG = pixelsRGB.get(bbIdx++);
int pixB = pixelsRGB.get(bbIdx++);
pixelInts[i++] = 0xFF000000 | ((pixR & 0xFF) << 16) | ((pixG & 0xFF) << 8) | (pixB & 0xFF);
}
rowStart += rowLen; //Move to next row
}
BufferedImage im = new BufferedImage(imWidth, imHeight, BufferedImage.TYPE_INT_RGB);
im.setRGB(0, 0, imWidth, imHeight, pixelInts, 0, imWidth);
return im;
}
}
This answer is a bit late, but it may help for future reference.
To learn about object detection you can look here, and at his code here (I learned from it for my project). I then built my project like this, based on his object detection. Or you can look at a simple background subtraction here.
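As a concrete starting point: OpenCV 2.4, which the question uses (and whose BackgroundSubtractorMOG the posted code already imports), ships a mixture-of-Gaussians subtractor that learns the background on the fly. A minimal sketch, with frame standing in for the current camera frame:
BackgroundSubtractorMOG mog = new BackgroundSubtractorMOG();
Mat fgMask = new Mat();
// Call once per frame; the model keeps updating itself, so no separate
// training snapshot of the empty room is required:
mog.apply(frame, fgMask);
// Morphological open to remove speckle before taking the centre of mass:
Imgproc.erode(fgMask, fgMask, new Mat());
Imgproc.dilate(fgMask, fgMask, new Mat());
Unlike a single static snapshot, the mixture model tolerates gradual lighting changes, which is a common reason a plain absdiff against one background image fails.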