OpenCV Java: one line about detection needs explaining - java

Sorry for my question. I'm starting to learn OpenCV in Java, and one example I found has a line I can't understand:
detections = detections.reshape(1, (int)detections.total() / 7);
From full code :
public class DeepNeuralNetworkProcessor {
private Net net;
private final String[] classNames = {"background",
"aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair",
"cow", "diningtable", "dog", "horse",
"motorbike", "person", "pottedplant",
"sheep", "sofa", "train", "tvmonitor"};
public DeepNeuralNetworkProcessor() {
this.net = Dnn.readNetFromCaffe(Files_Path.DDN_PROTO, Files_Path.DDN_MODEL);
}
public List<DnnObject> getObjectsInFrame(Mat frame, boolean isGrayFrame) {
int inWidth = 320;
int inHeight = 240;
double inScaleFactor = 0.007843;
double thresholdDnn = 0.2;//Minimum value to detect the object
double meanVal = 127.5;
Mat blob = null;
Mat detections = null;
List<DnnObject> objectList = new ArrayList<>();
int cols = frame.cols();
int rows = frame.rows();
try {
if (isGrayFrame)
Imgproc.cvtColor(frame, frame, Imgproc.COLOR_GRAY2RGB);
blob = Dnn.blobFromImage(frame, inScaleFactor,
new Size(inWidth, inHeight),
new Scalar(meanVal, meanVal, meanVal),
false, false);
net.setInput(blob);
detections = net.forward();
detections = detections.reshape(1, (int) detections.total() / 7);
//all detected objects
for (int i = 0; i < detections.rows(); ++i) {
double confidence = detections.get(i, 2)[0];
if (confidence < thresholdDnn)
continue;
int classId = (int) detections.get(i, 1)[0];
//...
}
} catch (Exception ex) {
ex.printStackTrace();
}
return objectList;
}
}
Can anyone explain what this line does, or better, give a clear explanation of the detections Mat?
And what does net.forward() do?
Please point me to some references for Java OpenCV or Deeplearning4j.
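For reference while reading the code above: net.forward() runs one forward pass of the loaded Caffe network and returns its output blob. For SSD-style detectors such as MobileNet-SSD (which this model appears to be, given the 21 VOC class names) that blob has shape [1, 1, N, 7], i.e. N detections of 7 numbers each, so reshape(1, (int) detections.total() / 7) just flattens it into an N x 7 single-channel Mat with one detection per row. A minimal sketch of how such a row is usually read, assuming the standard SSD column layout [imageId, classId, confidence, left, top, right, bottom] with box coordinates normalized to 0..1:
for (int i = 0; i < detections.rows(); ++i) {
    double confidence = detections.get(i, 2)[0];            // column 2: detection score
    if (confidence < thresholdDnn)
        continue;                                           // skip weak detections
    int classId = (int) detections.get(i, 1)[0];            // column 1: index into classNames
    int left   = (int) (detections.get(i, 3)[0] * cols);    // columns 3..6: box corners,
    int top    = (int) (detections.get(i, 4)[0] * rows);    // scaled from 0..1 back to pixels
    int right  = (int) (detections.get(i, 5)[0] * cols);
    int bottom = (int) (detections.get(i, 6)[0] * rows);
    // e.g. Imgproc.rectangle(frame, new Point(left, top), new Point(right, bottom), new Scalar(0, 255, 0), 2);
}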

Related

How to use YOLO v3 with Java to detect objects (cows, to be specific) in pictures

I am working on some examples I found online to understand more about using YOLO in Java. I got this code, which I edited a little, and it can detect objects in videos, but now I want to do the same with pictures and I am struggling with it. I would appreciate it if anyone could show me how to edit it, or has any advice or a method to solve it.
The code:
class yolo {
private static List<String> getOutputNames(Net net) {
List<String> names = new ArrayList<>();
List<Integer> outLayers = net.getUnconnectedOutLayers().toList();
List<String> layersNames = net.getLayerNames();
outLayers.forEach((item) -> names.add(layersNames.get(item - 1))); // map the 1-based output layer indices of the loaded YOLO model to their layer names
return names;
}
public static void main(String[] args) throws InterruptedException {
System.load("C:\\Users\\LENOVO\\Desktop\\Java1\\Yolo\\opencv\\build\\java\\x64\\opencv_java400.dll");
System.out.println("Library Loaded");
System.load("C:\\Users\\LENOVO\\Desktop\\Java1\\Yolo\\opencv\\build\\java\\x64\\opencv_java400.dll");
String modelWeights = "C:\\Users\\LENOVO\\Desktop\\Java1\\Yolo\\yolov3.weights";
String modelConfiguration = "C:\\Users\\LENOVO\\Desktop\\Java1\\Yolo\\yolov3.cfg.txt";
String filePath = "C:\\Users\\LENOVO\\Desktop\\cows.mp4";
VideoCapture cap = new VideoCapture(filePath);
Mat frame = new Mat();
Mat dst = new Mat ();
//cap.read(frame);
JFrame jframe = new JFrame("Video");
JLabel vidpanel = new JLabel();
jframe.setContentPane(vidpanel);
jframe.setSize(600, 600);
jframe.setVisible(true);
Net net = Dnn.readNetFromDarknet(modelConfiguration, modelWeights);
//Thread.sleep(5000);
//Mat image = Imgcodecs.imread("D:\\yolo-object-detection\\yolo-object-detection\\images\\soccer.jpg");
Size sz = new Size(288,288);
List<Mat> result = new ArrayList<>();
List<String> outBlobNames = getOutputNames(net);
while (true) {
if (cap.read(frame)) {
Mat blob = Dnn.blobFromImage(frame, 0.00392, sz, new Scalar(0), true, false);
net.setInput(blob);
net.forward(result, outBlobNames);
// outBlobNames.forEach(System.out::println);
// result.forEach(System.out::println);
float confThreshold = 0.6f;
List<Integer> clsIds = new ArrayList<>();
List<Float> confs = new ArrayList<>();
List<Rect> rects = new ArrayList<>();
for (int i = 0; i < result.size(); ++i)
{
Mat level = result.get(i);
for (int j = 0; j < level.rows(); ++j)
{
Mat row = level.row(j);
Mat scores = row.colRange(5, level.cols());
Core.MinMaxLocResult mm = Core.minMaxLoc(scores);
float confidence = (float)mm.maxVal;
Point classIdPoint = mm.maxLoc;
if (confidence > confThreshold)
{
int centerX = (int)(row.get(0,0)[0] * frame.cols());
int centerY = (int)(row.get(0,1)[0] * frame.rows());
int width = (int)(row.get(0,2)[0] * frame.cols());
int height = (int)(row.get(0,3)[0] * frame.rows());
int left = centerX - width / 2;
int top = centerY - height / 2;
clsIds.add((int)classIdPoint.x);
confs.add((float)confidence);
rects.add(new Rect(left, top, width, height));
}
}
}
float nmsThresh = 0.5f;
MatOfFloat confidences = new MatOfFloat(Converters.vector_float_to_Mat(confs));
Rect[] boxesArray = rects.toArray(new Rect[0]);
MatOfRect boxes = new MatOfRect(boxesArray);
MatOfInt indices = new MatOfInt();
Dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThresh, indices);
int [] ind = indices.toArray();
int j=0;
for (int i = 0; i < ind.length; ++i)
{
int idx = ind[i];
Rect box = boxesArray[idx];
Imgproc.rectangle(frame, box.tl(), box.br(), new Scalar(0,0,255), 2);
//i=j;
System.out.println(idx);
}
// Imgcodecs.imwrite("D://out.png", image);
//System.out.println("Image Loaded");
ImageIcon image = new ImageIcon(Mat2bufferedImage(frame));
vidpanel.setIcon(image);
vidpanel.repaint();
// System.out.println(j);
//System.out.println("Done");
}
}
}
// }
private static BufferedImage Mat2bufferedImage(Mat image) {
MatOfByte bytemat = new MatOfByte();
Imgcodecs.imencode(".jpg", image, bytemat);
byte[] bytes = bytemat.toArray();
InputStream in = new ByteArrayInputStream(bytes);
BufferedImage img = null;
try {
img = ImageIO.read(in);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return img;
}
}
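Not a definitive answer, but a minimal sketch of how the same pipeline could run on a single picture instead of a video: read the image once with Imgcodecs.imread and do one blob/forward/post-processing pass instead of the while loop. The paths below are hypothetical; net, sz and outBlobNames are the same objects as in the code above.
Mat frame = Imgcodecs.imread("C:\\Users\\LENOVO\\Desktop\\cow.jpg");   // hypothetical input path
if (!frame.empty()) {
    Mat blob = Dnn.blobFromImage(frame, 0.00392, sz, new Scalar(0), true, false);
    net.setInput(blob);
    List<Mat> result = new ArrayList<>();
    net.forward(result, outBlobNames);
    // ... same post-processing as in the loop above: collect class ids, confidences and boxes,
    // run Dnn.NMSBoxes, then Imgproc.rectangle on the surviving indices ...
    Imgcodecs.imwrite("C:\\Users\\LENOVO\\Desktop\\out.png", frame);    // hypothetical output path
}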

Centroid of Buffer

I am trying to build inner buffers. My task is to shrink a polygon's buffer so that the new buffer is at most 5% of the original polygon's area, but never 0!
I was trying something like this:
public Point getInnerBufferCentroid(MapContent mapContent) throws Exception {
JLabel label = new JLabel("Wählen Sie einen Layer aus:");
List<Layer> layerList = mapContent.layers();
String[] layerNames = new String[layerList.size()];
for (int i = 0; i < layerList.size(); i++) {
layerNames[i] = layerList.get(i).getFeatureSource().getName().toString();
}
String selectedLayer = (String) JOptionPane.showInputDialog(null, label,
"Layerauswahl", JOptionPane.QUESTION_MESSAGE, null, layerNames, layerNames[0]);
FeatureLayer selectedFeatureLayer = null;
for (Layer layer : layerList) {
if (layer.getFeatureSource().getName().toString().equals(selectedLayer)) {
selectedFeatureLayer = (FeatureLayer) layer;
break;
}
}
SimpleFeatureSource featureSource = (SimpleFeatureSource)
selectedFeatureLayer.getFeatureSource();
SimpleFeatureCollection featureCollection = featureSource.getFeatures();
SimpleFeatureIterator featureIterator = featureCollection.features();
SimpleFeature feature = featureIterator.next();
Geometry geometry = (Geometry) feature.getDefaultGeometry();
double originalArea = geometry.getArea();
double bufferArea = originalArea;
Geometry buffer = geometry.buffer(0);
while (bufferArea >= originalArea * 0.05 && bufferArea > 0) {
buffer = buffer.buffer(-1);
bufferArea = buffer.getArea();
}
Point centroid;
if (buffer instanceof Polygon) {
centroid = ((Polygon) buffer).getInteriorPoint();
} else {
MultiPolygon multiPolygon = (MultiPolygon) buffer;
int numGeometries = multiPolygon.getNumGeometries();
Polygon largestPolygon = null;
double largestArea = 0;
for (int i = 0; i < numGeometries; i++) {
Polygon polygon = (Polygon) multiPolygon.getGeometryN(i);
double area = polygon.getArea();
if (area > largestArea) {
largestArea = area;
largestPolygon = polygon;
}
}
centroid = largestPolygon.getCentroid();
}
return centroid;
}
but getInteriorPoint() and getCentroid() don't seem to exist. Does anyone know another strategy?
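For what it's worth, in JTS (the geometry library GeoTools uses: org.locationtech.jts.geom in recent versions, com.vividsolutions.jts.geom in older ones) both getCentroid() and getInteriorPoint() are declared on Geometry itself and return a Point, so no cast to Polygon is needed; if the compiler cannot find them, the Geometry/Point imports may be coming from a different package. A minimal sketch of the shrinking loop under that assumption, keeping the last non-empty buffer so the result is never empty:
import org.locationtech.jts.geom.Geometry;   // or com.vividsolutions.jts.geom.* on older GeoTools
import org.locationtech.jts.geom.Point;

public Point getInnerBufferPoint(Geometry geometry) {
    double targetArea = geometry.getArea() * 0.05;
    Geometry buffer = geometry;
    Geometry lastNonEmpty = geometry;
    while (!buffer.isEmpty() && buffer.getArea() >= targetArea) {
        lastNonEmpty = buffer;
        buffer = buffer.buffer(-1.0);          // shrink step; the unit depends on the layer's CRS
    }
    Geometry result = buffer.isEmpty() ? lastNonEmpty : buffer;   // never return an empty geometry
    return result.getInteriorPoint();          // lies inside the geometry; getCentroid() can fall outside a concave polygon
}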

Load MTL Files in LWJGL

So I am making a "game" with LWJGL, and I started loading 3D models (using Wavefront .obj files). I have successfully loaded the model, but instead of using textures I wanted to try out .mtl files to specify the materials. I "sort of" made it work, but it does not seem to be working completely. Here is my code, and a picture of the tree model I tried to render:
[Tree Model image]
Now here is my code:
private static OBJMesh mesh;
public static Mesh load3DModel(String objFileName)
{
mesh = new OBJMesh();
BufferedReader reader = null;
try
{
reader = new BufferedReader(new FileReader(new File(objFileName)));
}
catch (FileNotFoundException e)
{
System.err.println("Could not locate OBJ File at " + objFileName);
e.printStackTrace();
}
String mtlFileName = null;
String line = null;
String currentFaceMat = null;
try
{
while ((line = reader.readLine()) != null)
{
String[] lineParts = line.split(" ");
switch (line.substring(0, 2))
{
case "v ":
Vertex v = new Vertex(lineParts[1], lineParts[2], lineParts[3]);
mesh.addVertex(v);
break;
case "vn":
Normal n = new Normal(lineParts[1], lineParts[2], lineParts[3]);
mesh.addNormal(n);
break;
case "mt":
mtlFileName = FileHelper.getDirectory(objFileName) + lineParts[1];
break;
case "us":
currentFaceMat = lineParts[1];
break;
case "f ":
Face face = createFace(currentFaceMat, lineParts);
mesh.addFace(face);
break;
}
}
reader = new BufferedReader(new FileReader(mtlFileName));
Material mat = null;
while ((line = reader.readLine()) != null)
{
String[] lineParts = line.split(" ");
if (line.length() > 1)
{
switch (line.substring(0, 2))
{
case "ne":
mat = new Material(lineParts[1]);
mesh.addMaterial(lineParts[1], mat);
break;
case "Ka":
mat.setKa(createVector(lineParts));
break;
case "Kd":
mat.setKd(createVector(lineParts));
break;
case "Ks":
mat.setKs(createVector(lineParts));
break;
case "Ns":
mat.setNs(Float.parseFloat(lineParts[1]));
break;
case "d ":
mat.setD(Float.parseFloat(lineParts[1]));
break;
case "il":
mat.setIllum(Integer.parseInt(lineParts[1]));
break;
}
}
}
reader.close();
}
catch (IOException e)
{
e.printStackTrace();
}
mesh.normalArray = new float[mesh.vertices.size() * 3];
for (Face face : mesh.faces)
{
decodeNormals(face.indices1);
decodeNormals(face.indices2);
decodeNormals(face.indices3);
}
mesh.vertexArray = new float[mesh.vertices.size() * 3];
mesh.indexArray = new int[mesh.indices.size() * 3];
mesh.colorArray = new float[mesh.faces.size() * 3];
int vertexPointer = 0;
for (Vertex vertex : mesh.vertices)
{
mesh.vertexArray[vertexPointer++] = vertex.x;
mesh.vertexArray[vertexPointer++] = vertex.y;
mesh.vertexArray[vertexPointer++] = vertex.z;
}
for (int i = 0; i < mesh.indices.size(); i++)
{
mesh.indexArray[i] = mesh.indices.get(i);
}
int colorPointer = 0;
for (Face face : mesh.faces)
{
mesh.colorArray[colorPointer++] = mesh.materials.get(face.material).Kd.x;
mesh.colorArray[colorPointer++] = mesh.materials.get(face.material).Kd.y;
mesh.colorArray[colorPointer++] = mesh.materials.get(face.material).Kd.z;
}
return MeshLoader.genVertexModel(mesh.vertexArray, mesh.indexArray, mesh.colorArray);
}
private static Face createFace(String materialName, String[] lineData)
{
String[] indices1 = General.replaceEmptySlashes(lineData[1]).split("/");
String[] indices2 = General.replaceEmptySlashes(lineData[2]).split("/");
String[] indices3 = General.replaceEmptySlashes(lineData[3]).split("/");
return new Face(materialName, indices1, indices2, indices3);
}
private static Vector3f createVector(String[] lineData)
{
float x = Float.parseFloat(lineData[1]);
float y = Float.parseFloat(lineData[2]);
float z = Float.parseFloat(lineData[3]);
return new Vector3f(x, y, z);
}
private static void decodeNormals(Vector3f vertex)
{
int vertexPointer = (int) vertex.x - 1;
mesh.indices.add(vertexPointer);
Normal normal = mesh.normals.get((int) vertex.z - 1);
mesh.normalArray[vertexPointer * 3] = normal.x;
mesh.normalArray[vertexPointer * 3 + 1] = normal.y;
mesh.normalArray[vertexPointer * 3 + 2] = normal.z;
}
The OBJMesh class:
public List<Vertex> vertices = new ArrayList<Vertex>();
public List<Normal> normals = new ArrayList<Normal>();
public List<Integer> indices = new ArrayList<Integer>();
public List<Face> faces = new ArrayList<Face>();
public Map<String, Material> materials = new HashMap<String, Material>();
public float[] vertexArray;
public float[] normalArray;
public float[] colorArray;
public int[] indexArray;
public void addVertex(Vertex vertex)
{
vertices.add(vertex);
}
public void addNormal(Normal normal)
{
normals.add(normal);
}
public void addMaterial(String name, Material material)
{
materials.put(name, material);
}
public void addFace(Face face)
{
faces.add(face);
}
The Face class:
public Vector3f indices1;
public Vector3f indices2;
public Vector3f indices3;
public String material;
public Face(String material, String[] v1, String[] v2, String[] v3)
{
this.material = material;
this.indices1 = new Vector3f(Float.parseFloat(v1[0]), Float.parseFloat(v1[1]), Float.parseFloat(v1[2]));
this.indices2 = new Vector3f(Float.parseFloat(v2[0]), Float.parseFloat(v2[1]), Float.parseFloat(v2[2]));
this.indices3 = new Vector3f(Float.parseFloat(v3[0]), Float.parseFloat(v3[1]), Float.parseFloat(v3[2]));
}
The Material class just contains RGB values.
If you can find something, let me know; I have been searching for weeks (no joke!). Thank you.
OpenGL expects all vertex attributes to be, well, per-vertex. The way you are at the moment populating your colorArray suggests you are only doing this per-face. Change this and it should give the correct results.
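As a rough sketch of the per-vertex version (assuming, as in decodeNormals above, that face.indices1/2/3 carry 1-based vertex indices in their x component), colorArray would be sized by the vertex count and each face's diffuse color written into the slots of the three vertices it references:
mesh.colorArray = new float[mesh.vertices.size() * 3];
for (Face face : mesh.faces)
{
    Vector3f kd = mesh.materials.get(face.material).Kd;
    for (Vector3f indices : new Vector3f[] { face.indices1, face.indices2, face.indices3 })
    {
        int vertexPointer = (int) indices.x - 1;            // 1-based OBJ index -> 0-based array slot
        mesh.colorArray[vertexPointer * 3]     = kd.x;
        mesh.colorArray[vertexPointer * 3 + 1] = kd.y;
        mesh.colorArray[vertexPointer * 3 + 2] = kd.z;
    }
}
Vertices shared by faces with different materials will keep whichever color was written last; duplicating such vertices is the usual way around that, but it is beyond this sketch.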

WARNING Possible use of "Transverse_Mercator" projection outside its valid area

I am trying to combine a TIFF image and a shapefile and want to show them together. For this I am using GeoTiff, and I am stuck because my TIFF file is not being displayed. The shapefile shows properly, but the TIFF image, which has only 1 band and a grayscale index, is not shown for some reason. I am getting the following warning message:
2016-08-04T12:43:06.456+0530 WARNING Possible use of "Transverse_Mercator" projection outside its valid area.
Latitude 180°00.0'S is out of range (±90°).
How can I remove this message?
My code is below:
private void displayLayers() throws Exception {
AbstractGridFormat format = GridFormatFinder.findFormat(this.getBlueMarble());
this.setGridCoverageReader(format.getReader(this.getBlueMarble()));
Style rgbStyle = this.createRGBStyle();
// connect to the shapefile
FileDataStore dataStore = FileDataStoreFinder.getDataStore(this.getBorderShape());
SimpleFeatureSource shapefileSource = dataStore.getFeatureSource();
Style shpStyle = SLD.createPolygonStyle(Color.BLUE, null, 0.0f);
MapContent map = new MapContent();
map.getViewport().setCoordinateReferenceSystem(
DefaultGeographicCRS.WGS84);
map.setTitle("Illegal Mining");
Layer rasterLayer = new GridReaderLayer(this.getGridCoverageReader(), rgbStyle);
map.addLayer(rasterLayer);
Layer shpLayer = new FeatureLayer(shapefileSource, shpStyle);
map.addLayer(shpLayer);
System.out.println("Trying to show on map...");
JMapPane mapPane = new JMapPane();
mapPane.setMapContent(map);
mapPane.setDisplayArea(shapefileSource.getBounds());
//mapPane.setDisplayArea(this.getGridCoverageReader().getOriginalEnvelope());
this.add(mapPane, BorderLayout.CENTER);
}
private Style createRGBStyle() {
GridCoverage2DReader reader = this.getGridCoverageReader();
StyleFactory sf = this.getStyleFactory();
GridCoverage2D cov = null;
try {
cov = reader.read(null);
} catch (IOException giveUp) {
throw new RuntimeException(giveUp);
}
// We need at least three bands to create an RGB style
int numBands = cov.getNumSampleDimensions();
System.out.println("numBands:"+numBands);
if (numBands < 3) {
System.out.println("Bands are less than 3");
//return null;
}
// Get the names of the bands
String[] sampleDimensionNames = new String[numBands];
for (int i = 0; i < numBands; i++) {
GridSampleDimension dim = cov.getSampleDimension(i);
sampleDimensionNames[i] = dim.getDescription().toString();
}
final int RED = 0, GREEN = 1, BLUE = 2;
int[] channelNum = { -1, -1, -1 };
Boolean greyflag=false;
// We examine the band names looking for "red...", "green...",
// "blue...".
// Note that the channel numbers we record are indexed from 1, not 0.
for (int i = 0; i < numBands; i++) {
String name = sampleDimensionNames[i].toLowerCase();
System.out.println("name :"+name);
if (name != null) {
if (name.matches("red.*")) {
channelNum[RED] = i + 1;
} else if (name.matches("green.*")) {
channelNum[GREEN] = i + 1;
} else if (name.matches("blue.*")) {
channelNum[BLUE] = i + 1;
}else if(name.matches("gray.*")){
System.out.println("What to do here");
channelNum[RED] = 1;
channelNum[GREEN] = 2;
channelNum[BLUE] = 3;
greyflag=true;
}
}
}
// If we didn't find named bands "red...", "green...", "blue..."
// we fall back to using the first three bands in order
if(greyflag==false){
if (channelNum[RED] < 0 || channelNum[GREEN] < 0
|| channelNum[BLUE] < 0) {
channelNum[RED] = 1;
channelNum[GREEN] = 2;
channelNum[BLUE] = 3;
}
}
// Now we create a RasterSymbolizer using the selected channels
SelectedChannelType[] sct = new SelectedChannelType[cov
.getNumSampleDimensions()];
ContrastEnhancement ce = sf.contrastEnhancement(this.ff.literal(1.0),
ContrastMethod.NORMALIZE);
for (int i = 0; i < numBands; i++) {
sct[i] = sf.createSelectedChannelType(
String.valueOf(channelNum[i]), ce);
System.out.println(String.valueOf(channelNum[i]));
}
RasterSymbolizer sym = sf.getDefaultRasterSymbolizer();
ChannelSelection sel =sf.channelSelection(sct[RED]);
if(numBands>1){
sel = sf.channelSelection(sct[RED], sct[GREEN],
sct[BLUE]);
}
sym.setChannelSelection(sel);
return SLD.wrapSymbolizers(sym);
}
I just pass the two files, as in the code below:
public MapImagePanel() {
this.setLayout(new BorderLayout(0, 0));
this.setBackground(Color.BLUE);
this.setPreferredSize(new Dimension(720, 360));
this.setBlueMarble(new File("E:/tifffilename.TIFF"));
this.setBorderShape(new File("E:/shapefilename.shp"));
try {
this.displayLayers();
} catch (Exception e) {
e.printStackTrace();
}
}
This is how I use this class in the main class:
//see output in main method
JFrame frame = new JFrame();
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
MapImagePanel panel = new MapImagePanel();
panel.setPreferredSize(new Dimension(1024,768));
panel.setVisible(true);
frame.getContentPane().add(panel);
frame.pack();
frame.setVisible(true);
frame.show();
TL;DR: add the following line to your program start-up:
System.setProperty("org.geotools.referencing.forceXY", "true");
From the GeoTools FAQ: as computer programmers, the library authors expected coordinates to be expressed as longitude/latitude pairs, so they could reuse existing graphics code by treating them as simple (x, y) pairs. But coordinate reference systems can define the axes as either (x, y) or (y, x), and when the two orders get mixed up a longitude value is read as a latitude (hence 180° showing up as a latitude), which is why this warning appears.
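A minimal sketch of where the property would go, using the main-method code from the question (it has to run before any GeoTools referencing code is touched):
public static void main(String[] args) {
    // Force (longitude, latitude) = (x, y) axis order before GeoTools reads any CRS,
    // so longitude values are not interpreted as latitudes.
    System.setProperty("org.geotools.referencing.forceXY", "true");

    JFrame frame = new JFrame();
    frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
    MapImagePanel panel = new MapImagePanel();
    panel.setPreferredSize(new Dimension(1024, 768));
    frame.getContentPane().add(panel);
    frame.pack();
    frame.setVisible(true);
}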

How to know the image or picture location while parsing an MS Word doc in Java using Apache POI

HWPFDocument wordDoc = new HWPFDocument(new FileInputStream(fileName));
List<Picture> picturesList = wordDoc.getPicturesTable().getAllPictures();
The above statements give the list of all pictures inside a document. I want to know after which text / at which position in the doc each image is located.
You're getting at the pictures the wrong way, which is why you're not finding any positions!
What you need to do is process each CharacterRun of the document in turn. Pass each run to the PicturesTable and check whether it has a picture in it. If it does, fetch the picture back from the table, and you know where in the document it belongs because you have the run it comes from.
At the simplest, it'd be something like:
PicturesSource pictures = new PicturesSource(document);
PicturesTable pictureTable = document.getPicturesTable();
Range r = document.getRange();
for(int i=0; i<r.numParagraphs(); i++) {
Paragraph p = r.getParagraph(i);
for(int j=0; j<p.numCharacterRuns(); j++) {
CharacterRun cr = p.getCharacterRun(j);
if (pictureTable.hasPicture(cr)) {
Picture picture = pictures.getFor(cr);
// Do something useful with the picture
}
}
}
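A small, hypothetical extension of the loop above to answer the original question about position: the paragraph index i and character-run index j are known at the point where the picture is found, so they can simply be reported alongside it (suggestFullFileName() is an existing HWPF Picture method, used here only as a readable label):
if (pictureTable.hasPicture(cr)) {
    Picture picture = pictures.getFor(cr);
    // the picture's "location" is the run (and paragraph) it was found in
    System.out.println("Picture " + picture.suggestFullFileName()
            + " is anchored in paragraph " + i + ", character run " + j);
}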
You can find a good example of doing this in the Apache Tika parser for Microsoft Word .doc files, which is powered by Apache POI.
You should also add the PicturesSource class:
public class PicturesSource {
private PicturesTable picturesTable;
private Set<Picture> output = new HashSet<Picture>();
private Map<Integer, Picture> lookup;
private List<Picture> nonU1based;
private List<Picture> all;
private int pn = 0;
public PicturesSource(HWPFDocument doc) {
picturesTable = doc.getPicturesTable();
all = picturesTable.getAllPictures();
lookup = new HashMap<Integer, Picture>();
for (Picture p : all) {
lookup.put(p.getStartOffset(), p);
}
nonU1based = new ArrayList<Picture>();
nonU1based.addAll(all);
Range r = doc.getRange();
for (int i = 0; i < r.numCharacterRuns(); i++) {
CharacterRun cr = r.getCharacterRun(i);
if (picturesTable.hasPicture(cr)) {
Picture p = getFor(cr);
int at = nonU1based.indexOf(p);
nonU1based.set(at, null);
}
}
}
private boolean hasPicture(CharacterRun cr) {
return picturesTable.hasPicture(cr);
}
private void recordOutput(Picture picture) {
output.add(picture);
}
private boolean hasOutput(Picture picture) {
return output.contains(picture);
}
private int pictureNumber(Picture picture) {
return all.indexOf(picture) + 1;
}
public Picture getFor(CharacterRun cr) {
return lookup.get(cr.getPicOffset());
}
private Picture nextUnclaimed() {
Picture p = null;
while (pn < nonU1based.size()) {
p = nonU1based.get(pn);
pn++;
if (p != null) return p;
}
return null;
}
}
