I want to move a sliding window (a Rect) by half the window size at each step, but I only ever get the first row:
My code:
int widthImg = 600;
int heightImg = 500;
int wWin = 100; // window width
int hWin = 100; // window height
int xWin = 0;
int yWin = 0;
int winSize = ((widthImg/wWin)*2) * ((heightImg/hWin)*2); // slide by half a window (50)
for (int i = 0; i < winSize; i++) {
    Mat ROIMat = new Mat();
    if (i < winSize) {
        xWin = xWin + wWin/2;
        if (xWin == widthImg) {
            xWin = 0;
            yWin = yWin + hWin/2;
        }
    }
    ROIMat = croppMat(Highgui.imread(fileImageName), new Rect(xWin, yWin, wWin, hWin));
    Highgui.imwrite(pathROI + "\\" + i + ".jpg", ROIMat); // save ROI image
}
ERROR:
OpenCV Error: Assertion failed (0 <= _colRange.start && _colRange.start <= _colRange.end && _colRange.end <= m.cols) in cv::Mat::Mat, file ......\opencv\modules\core\src\matrix.cpp, line 292
Exception in thread "AWT-EventQueue-0" CvException [org.opencv.core.CvException: cv::Exception: ........\opencv\modules\core\src\matrix.cpp:292: error: (-215) 0 <= _colRange.start && _colRange.start <= _colRange.end && _colRange.end <= m.cols in function cv::Mat::Mat]
Where am I going wrong?
If I understand your question correctly, you need to fix your for loop.
Take a look at this code and check whether it gives the expected result. The code is in C++, but it's very close to Java, and I added the equivalent Java calls as comments (note that I didn't test them).
#include <opencv2/opencv.hpp>
#include <string>
using namespace cv;

int main()
{
    // Load image once, outside the loop
    Mat3b img = imread(fileImageName);
    // JAVA: Mat img = Highgui.imread(fileImageName);

    int widthImg = img.cols;   // JAVA: img.cols();
    int heightImg = img.rows;  // JAVA: img.rows();
    int wWin = 100;            // window width
    int hWin = 100;            // window height

    int counter = 0;
    for (int yWin = 0; yWin <= heightImg - hWin; yWin += hWin/2)
    {
        for (int xWin = 0; xWin <= widthImg - wWin; xWin += wWin/2)
        {
            Mat ROIMat(img(Rect(xWin, yWin, wWin, hWin)));
            // JAVA: Mat ROIMat = new Mat();
            // JAVA: ROIMat = croppMat(img, new Rect(xWin, yWin, wWin, hWin));

            imwrite(pathROI + std::to_string(counter) + ".jpg", ROIMat);
            // JAVA: Highgui.imwrite(pathROI + "\\" + counter + ".jpg", ROIMat); //save ROI image

            ++counter;
        }
    }
    return 0;
}
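For completeness, here is the same loop translated back to Java as a sketch (untested; it assumes the croppMat helper and the fileImageName and pathROI variables from your code, and loads the image once instead of once per window):

Mat img = Highgui.imread(fileImageName); // load once, not per window
int widthImg = img.cols();
int heightImg = img.rows();
int wWin = 100; // window width
int hWin = 100; // window height
int counter = 0;
for (int yWin = 0; yWin <= heightImg - hWin; yWin += hWin / 2) {
    for (int xWin = 0; xWin <= widthImg - wWin; xWin += wWin / 2) {
        Mat ROIMat = croppMat(img, new Rect(xWin, yWin, wWin, hWin));
        Highgui.imwrite(pathROI + "\\" + counter + ".jpg", ROIMat);
        counter++;
    }
}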
I'm creating an app that gets an image from the camera (using the CameraKit library), processes the image, and does an OCR read using the Google Vision API, and I get this error:
FATAL EXCEPTION: main
Process: com.., PID: 1938
java.lang.OutOfMemoryError: Failed to allocate a 63701004 byte allocation with 16777216 free bytes and 60MB until OOM
    at dalvik.system.VMRuntime.newNonMovableArray(Native Method)
    at android.graphics.Bitmap.nativeCreate(Native Method)
    at android.graphics.Bitmap.createBitmap(Bitmap.java:905)
    at android.graphics.Bitmap.createBitmap(Bitmap.java:882)
    at android.graphics.Bitmap.createBitmap(Bitmap.java:849)
    at com.****.****.Reader.ReaderResultActivity.createContrast(ReaderResultActivity.java:123)
    at com.*****.****.Reader.ReaderResultActivity.onCreate(ReaderResultActivity.java:47)
    at android.app.Activity.performCreate(Activity.java:6672)
    at android.app.Instrumentation.callActivityOnCreate(Instrumentation.java:1140)
    at android.app.ActivityThread.performLaunchActivity(ActivityThread.java:2612)
    at android.app.ActivityThread.handleLaunchActivity(ActivityThread.java:2724)
    at android.app.ActivityThread.-wrap12(ActivityThread.java)
    at android.app.ActivityThread$H.handleMessage(ActivityThread.java:1473)
    at android.os.Handler.dispatchMessage(Handler.java:102)
    at android.os.Looper.loop(Looper.java:154)
    at android.app.ActivityThread.main(ActivityThread.java:6123)
    at java.lang.reflect.Method.invoke(Native Method)
    at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:867)
    at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:757)
ReaderResultActivity Code:
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_reader_result);

    ImageView img1 = (ImageView) findViewById(R.id.imageView2);
    ImageView img2 = (ImageView) findViewById(R.id.imageView3);
    ImageView img3 = (ImageView) findViewById(R.id.imageView4);
    TextView scanResults = (TextView) findViewById(R.id.textView);

    //Get bitmap from a static class.
    Bitmap bitmap = Reader.img;
    Bitmap grayScale = toGrayscale(bitmap);
    Bitmap blackWhiteImage = createContrast(grayScale, 50);
    Bitmap invertColor = invertColor(blackWhiteImage);

    //Show process steps
    img1.setImageBitmap(grayScale);
    img2.setImageBitmap(blackWhiteImage);
    img3.setImageBitmap(invertColor);

    TextRecognizer detector = new TextRecognizer.Builder(getApplicationContext()).build();
    try {
        if (detector.isOperational()) {
            Frame frame = new Frame.Builder().setBitmap(invertColor).build();
            SparseArray<TextBlock> textBlocks = detector.detect(frame);
            String blocks = "";
            String lines = "";
            String words = "";
            for (int index = 0; index < textBlocks.size(); index++) {
                //extract scanned text blocks here
                TextBlock tBlock = textBlocks.valueAt(index);
                blocks = blocks + tBlock.getValue() + "\n" + "\n";
                for (Text line : tBlock.getComponents()) {
                    //extract scanned text lines here
                    lines = lines + line.getValue() + "\n";
                    for (Text element : line.getComponents()) {
                        //extract scanned text words here
                        words = words + element.getValue() + ", ";
                    }
                }
            }
            if (textBlocks.size() == 0) {
                scanResults.setText("Scan Failed: Found nothing to scan");
            } else {
                lines = lines.replaceAll("o", "0");
                lines = lines.replaceAll("A", "1");
                scanResults.setText(lines + "\n");
            }
        } else {
            scanResults.setText("Could not set up the detector!");
        }
    } catch (Exception e) {
        Toast.makeText(this, "Failed to load Image", Toast.LENGTH_SHORT)
                .show();
        Log.e("312", e.toString());
    }
}

private Bitmap processImage(Bitmap bitmap) {
    Bitmap grayScale = toGrayscale(bitmap);
    Bitmap blackWhiteImage = createContrast(grayScale, 50);
    Bitmap invertColor = invertColor(blackWhiteImage);
    return invertColor;
}

public Bitmap toGrayscale(Bitmap bmpOriginal) {
    int width, height;
    height = bmpOriginal.getHeight();
    width = bmpOriginal.getWidth();

    Bitmap bmpGrayscale = Bitmap.createBitmap(width, height, bmpOriginal.getConfig());
    Canvas c = new Canvas(bmpGrayscale);
    Paint paint = new Paint();
    ColorMatrix cm = new ColorMatrix();
    cm.setSaturation(0);
    ColorMatrixColorFilter f = new ColorMatrixColorFilter(cm);
    paint.setColorFilter(f);
    c.drawBitmap(bmpOriginal, 0, 0, paint);
    return bmpGrayscale;
}
public static Bitmap createContrast(Bitmap src, double value) {
    // image size
    int width = src.getWidth();
    int height = src.getHeight();
    // create output bitmap
    Bitmap bmOut = Bitmap.createBitmap(width, height, src.getConfig());
    // color information
    int A, R, G, B;
    int pixel;
    // get contrast value
    double contrast = Math.pow((100 + value) / 100, 2);

    // scan through all pixels
    for (int x = 0; x < width; ++x) {
        for (int y = 0; y < height; ++y) {
            // get pixel color
            pixel = src.getPixel(x, y);
            A = Color.alpha(pixel);
            // apply contrast filter to every channel R, G, B
            R = Color.red(pixel);
            R = (int) (((((R / 255.0) - 0.5) * contrast) + 0.5) * 255.0);
            if (R < 0) { R = 0; }
            else if (R > 255) { R = 255; }

            G = Color.green(pixel);
            G = (int) (((((G / 255.0) - 0.5) * contrast) + 0.5) * 255.0);
            if (G < 0) { G = 0; }
            else if (G > 255) { G = 255; }

            B = Color.blue(pixel);
            B = (int) (((((B / 255.0) - 0.5) * contrast) + 0.5) * 255.0);
            if (B < 0) { B = 0; }
            else if (B > 255) { B = 255; }

            // set new pixel color on output bitmap
            bmOut.setPixel(x, y, Color.argb(A, R, G, B));
        }
    }
    return bmOut;
}
Bitmap invertColor(Bitmap src) {
    Bitmap copy = src.copy(src.getConfig(), true);
    for (int x = 0; x < copy.getWidth(); ++x) {
        for (int y = 0; y < copy.getHeight(); ++y) {
            int color = copy.getPixel(x, y);
            int r = Color.red(color);
            int g = Color.green(color);
            int b = Color.blue(color);
            int avg = (r + g + b) / 3;
            int newColor = Color.argb(255, 255 - avg, 255 - avg, 255 - avg);
            copy.setPixel(x, y, newColor);
        }
    }
    return copy;
}
I already tried adding this to the Manifest:
android:largeHeap="true"
But the application still stops running at:
ReaderResultActivity.createContrast(ReaderResultActivity.java:123)
The same line that appears in the error without the largeHeap attribute.
I just don't know what to do, but I think it has something to do with all those Bitmap.createBitmap calls in every processing function.
But without those steps, the OCR read fails with an error saying the bitmap has the wrong format.
You are loading three bitmaps into different ImageViews without scaling them to the size at which they are shown in your UI.
An Android device's camera captures pictures at a much higher resolution than the screen density of the device.
Given that you are working with limited memory, ideally you only want to load a lower-resolution version into memory. The lower-resolution version should match the size of the UI component that displays it. An image with a higher resolution provides no visible benefit, but still takes up precious memory and incurs additional performance overhead due to on-the-fly scaling.
You can optimize it by following developer documentation suggestions - https://developer.android.com/topic/performance/graphics/load-bitmap.html
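As a sketch of what the documentation suggests (untested; the file-path source and the reqWidth/reqHeight target sizes are assumptions; since your bitmap currently comes from a static field, this belongs wherever the image is first decoded):

public static Bitmap decodeSampledBitmap(String path, int reqWidth, int reqHeight) {
    // First pass: read only the image bounds, without allocating pixel memory.
    BitmapFactory.Options options = new BitmapFactory.Options();
    options.inJustDecodeBounds = true;
    BitmapFactory.decodeFile(path, options);

    // Pick the largest power-of-two subsampling factor that keeps both
    // dimensions at or above the requested size.
    int inSampleSize = 1;
    while ((options.outHeight / (inSampleSize * 2)) >= reqHeight
            && (options.outWidth / (inSampleSize * 2)) >= reqWidth) {
        inSampleSize *= 2;
    }

    // Second pass: decode the downsampled bitmap for real.
    options.inSampleSize = inSampleSize;
    options.inJustDecodeBounds = false;
    return BitmapFactory.decodeFile(path, options);
}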
I'm writing a fairly simple app that will, in real time, tell the user how many pixels in an image are above a certain color value.
That is, it takes preview images from the camera and analyses them as the user moves the camera around.
Right now, I have this code, which technically works:
mRgba = inputFrame.rgba();
Rect sample = new Rect();
Mat sampleRegionRgba;
numPixs = 0;
boundary.add(100); boundary.add(100); boundary.add(100);
int cols = mRgba.cols();
int rows = mRgba.rows();
double yLow = (double) mOpenCvCameraView.getHeight() * 0.2401961;
double yHigh = (double) mOpenCvCameraView.getHeight() * 0.7696078;
double xScale = (double) cols / (double) mOpenCvCameraView.getWidth();
double yScale = (double) rows / (yHigh - yLow);
int tmpX;
int tmpY;
for (int x = 0; x < cols - 6; x++) {
    for (int y = (int) yLow; y < yHigh - 6; y++) {
        tmpX = (int) ((double) x * xScale);
        tmpY = (int) ((double) y * yScale);
        sample.x = tmpX + 3;
        sample.y = tmpY + 3;
        sample.width = 2;
        sample.height = 2;
        sampleRegionRgba = mRgba.submat(sample);
        Mat sampleRegionHsv = new Mat();
        Imgproc.cvtColor(sampleRegionRgba, sampleRegionHsv, Imgproc.COLOR_RGB2HSV_FULL);
        mBlobColorHsv = Core.sumElems(sampleRegionHsv);
        int pointCount = sample.width * sample.height;
        for (int i = 0; i < mBlobColorHsv.val.length; i++) {
            mBlobColorHsv.val[i] /= pointCount;
        }
        mBlobColorRgba = convertScalarToRgba(mBlobColorHsv);
        // System.out.println(mBlobColorRgba.toString());
        if (mBlobColorRgba.val[0] > boundary.get(0)
                && mBlobColorRgba.val[1] > boundary.get(1)
                && mBlobColorRgba.val[2] > boundary.get(2)) {
            numPixs += 1;
        }
        // System.out.println(sampleRegionRgba.toString());
    }
}
System.out.println("number of pixels above boundary: " + Integer.toString(numPixs));
massflow = m * (Math.pow(numPixs, .25)) + b;
runOnUiThread(new Runnable() {
    @Override
    public void run() {
        massflow_text.setText("Massflow: " + Double.valueOf(massflow));
    }
});
While this code works, it takes about 6 seconds to process each image.
I'd like a much more reasonable frame rate. I know this can be done with numpy (I've done it with np.where()). Is it possible with Java/OpenCV/Android Studio?
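For what it's worth, the numpy-style vectorized approach does have an OpenCV-for-Java analogue: threshold the whole frame once with Core.inRange and count the matches with Core.countNonZero, instead of building a submat per pixel. A minimal sketch (it assumes an RGBA frame and the (100, 100, 100) boundary from the code above, and it skips the per-block HSV averaging your version does):

// One threshold + one count per frame, no per-pixel submats.
Mat mask = new Mat();
Core.inRange(mRgba,
        new Scalar(100, 100, 100, 0),    // lower bound per channel
        new Scalar(255, 255, 255, 255),  // upper bound per channel
        mask);                           // 8-bit single-channel mask
int numPixs = Core.countNonZero(mask);   // pixels with every channel in range
mask.release();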
I am trying to extract the user silhouette and put it above my images. I was able to make a mask and cut the user out of the RGB image, but the contour is messy.
The question is how I can make the mask more precise (to fit the real user). I've tried ERODE-DILATE filters, but they don't do much. Maybe I need some feather filter like in Photoshop; I don't know.
Here is my code.
import SimpleOpenNI.*;

SimpleOpenNI context;
PImage mask;

void setup()
{
  size(640*2, 480);
  context = new SimpleOpenNI(this);
  if (context.isInit() == false)
  {
    exit();
    return;
  }
  context.enableDepth();
  context.enableRGB();
  context.enableUser();
  context.alternativeViewPointDepthToImage();
}
void draw()
{
  frame.setTitle(int(frameRate) + " fps");
  context.update();
  int[] userMap = context.userMap();
  background(0, 0, 0);
  mask = loadImage("black640.jpg"); //just a black image
  int xSize = context.depthWidth();
  int ySize = context.depthHeight();
  mask.loadPixels();
  for (int y = 0; y < ySize; y++) {
    for (int x = 0; x < xSize; x++) {
      int index = x + y*xSize;
      if (userMap[index] > 0) {
        mask.pixels[index] = color(255, 255, 255);
      }
    }
  }
  mask.updatePixels();
  image(mask, 0, 0);
  mask.filter(DILATE);
  mask.filter(DILATE);
  PImage rgb = context.rgbImage();
  rgb.mask(mask);
  image(rgb, context.depthWidth() + 10, 0);
}
It's good you're aligning the RGB and depth streams.
There are a few things that could be improved in terms of efficiency:
No need to reload a black image every single frame (in the draw() loop) since you're modifying all the pixels anyway:
mask = loadImage("black640.jpg"); //just a black image
Also, since you don't need the x,y coordinates as you loop through the user data, you can use a single for loop, which should be a bit faster:
for (int i = 0; i < numPixels; i++) {
  mask.pixels[i] = userMap[i] > 0 ? color(255) : color(0);
}
instead of:
for (int y = 0; y < ySize; y++) {
  for (int x = 0; x < xSize; x++) {
    int index = x + y*xSize;
    if (userMap[index] > 0) {
      mask.pixels[index] = color(255, 255, 255);
    }
  }
}
Another hacky thing you could do is retrieve the userImage() from SimpleOpenNI instead of the userMap(), and apply a THRESHOLD filter to it, which in theory should give you the same result as above.
For example:
int[] userMap = context.userMap();
background(0, 0, 0);
mask = loadImage("black640.jpg"); //just a black image
int xSize = context.depthWidth();
int ySize = context.depthHeight();
mask.loadPixels();
for (int y = 0; y < ySize; y++) {
  for (int x = 0; x < xSize; x++) {
    int index = x + y*xSize;
    if (userMap[index] > 0) {
      mask.pixels[index] = color(255, 255, 255);
    }
  }
}
could be:
mask = context.userImage();
mask.filter(THRESHOLD);
In terms of filtering, if you want to shrink the silhouette you should ERODE, and blurring should give you a bit of that Photoshop-like feathering.
Note that some filter() calls take arguments (like BLUR), but others, like the ERODE/DILATE morphological filters, don't; you can still roll your own loops to deal with that.
I also recommend having some sort of easy-to-tweak interface (it can be a fancy slider or a simple keyboard shortcut) when playing with filters.
Here's a rough attempt at the refactored sketch with the above comments:
import SimpleOpenNI.*;

SimpleOpenNI context;
PImage mask;
int numPixels = 640*480;
int dilateAmt = 1;
int erodeAmt = 1;
int blurAmt = 0;

void setup()
{
  size(640*2, 480);
  context = new SimpleOpenNI(this);
  if (context.isInit() == false)
  {
    exit();
    return;
  }
  context.enableDepth();
  context.enableRGB();
  context.enableUser();
  context.alternativeViewPointDepthToImage();
  mask = createImage(640, 480, RGB);
}

void draw()
{
  frame.setTitle(int(frameRate) + " fps");
  context.update();
  int[] userMap = context.userMap();
  background(0, 0, 0);
  //you don't need to keep reloading the image every single frame since you're updating all the pixels below anyway
  // mask = loadImage("black640.jpg"); //just a black image
  // mask.loadPixels();
  // int xSize = context.depthWidth();
  // int ySize = context.depthHeight();
  // for (int y = 0; y < ySize; y++) {
  //   for (int x = 0; x < xSize; x++) {
  //     int index = x + y*xSize;
  //     if (userMap[index] > 0) {
  //       mask.pixels[index] = color(255, 255, 255);
  //     }
  //   }
  // }
  //a single loop is usually faster than a nested loop and you don't need the x,y coordinates anyway
  for (int i = 0; i < numPixels; i++) {
    mask.pixels[i] = userMap[i] > 0 ? color(255) : color(0);
  }
  //erode
  for (int i = 0; i < erodeAmt; i++) mask.filter(ERODE);
  //dilate
  for (int i = 0; i < dilateAmt; i++) mask.filter(DILATE);
  //blur
  mask.filter(BLUR, blurAmt);
  mask.updatePixels();
  //preview the mask after you process it
  image(mask, 0, 0);
  PImage rgb = context.rgbImage();
  rgb.mask(mask);
  image(rgb, context.depthWidth() + 10, 0);
  //print filter values for debugging purposes
  fill(255);
  text("erodeAmt: " + erodeAmt + "\tdilateAmt: " + dilateAmt + "\tblurAmt: " + blurAmt, 15, 15);
}

void keyPressed(){
  if (key == 'e') erodeAmt--;
  if (key == 'E') erodeAmt++;
  if (key == 'd') dilateAmt--;
  if (key == 'D') dilateAmt++;
  if (key == 'b') blurAmt--;
  if (key == 'B') blurAmt++;
  //constrain values
  if (erodeAmt < 0) erodeAmt = 0;
  if (dilateAmt < 0) dilateAmt = 0;
  if (blurAmt < 0) blurAmt = 0;
}
Unfortunately I can't test with an actual sensor right now, so please use the concepts explained, but bear in mind the full sketch code isn't tested.
The above sketch (if it runs) should let you use keys to control the filter parameters (e/E to decrease/increase erosion, d/D for dilation, b/B for blur). Hopefully you'll get satisfactory results.
When working with SimpleOpenNI in general, I advise recording an .oni file (check out the RecorderPlay example for that) of a person for the most common use case. This will save you time in the long run when testing and will let you work remotely with the sensor detached. One thing to bear in mind: the depth resolution is reduced to half on recordings (but a usingRecording boolean flag should keep things safe).
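For example, a sketch of switching between the live sensor and a recording (openFileRecording() is the call used in the RecorderPlay example; the file name here is hypothetical, so verify against your SimpleOpenNI version):

boolean usingRecording = true;

void setup() {
  size(640*2, 480);
  context = new SimpleOpenNI(this);
  if (usingRecording) {
    context.openFileRecording("user-test.oni"); // hypothetical .oni file
  }
  context.enableDepth();
  context.enableRGB();
  context.enableUser();
}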
The last and probably most important point is about the quality of the end result. Your resulting image can't be much better if the source image isn't easy to work with to begin with. The depth data from the original Kinect sensor isn't great. The Asus sensors feel a wee bit more stable, but the difference is negligible in most cases. If you are going to stick with one of these sensors, make sure you have a clear background and decent lighting, without too much direct warm light (sunlight, incandescent light bulbs, etc.), since it may interfere with the sensor.
If you want a more accurate user cut and the above filtering doesn't get the results you're after, consider switching to a better sensor like the Kinect v2. The depth quality is much better and the sensor is less susceptible to direct warm light. This may mean you need to use Windows (I see there's a KinectPV2 wrapper available) or openFrameworks (a C++ collection of libraries similar to Processing) with ofxKinectV2.
I've tried the built-in erode-dilate-blur in Processing, but they are very inefficient: every time I increment blurAmount in img.filter(BLUR, blurAmount), my FPS decreases by 5 frames.
So I decided to try OpenCV. It is much better in comparison, and the result is satisfactory.
import SimpleOpenNI.*;
import processing.video.*;
import gab.opencv.*;

SimpleOpenNI context;
OpenCV opencv;
PImage mask;
int numPixels = 640*480;
int dilateAmt = 1;
int erodeAmt = 1;
int blurAmt = 1;
Movie mov;

void setup() {
  opencv = new OpenCV(this, 640, 480);
  size(640*2, 480);
  context = new SimpleOpenNI(this);
  if (context.isInit() == false) {
    exit();
    return;
  }
  context.enableDepth();
  context.enableRGB();
  context.enableUser();
  context.alternativeViewPointDepthToImage();
  mask = createImage(640, 480, RGB);
  mov = new Movie(this, "wild.mp4");
  mov.play();
  mov.speed(5);
  mov.volume(0);
}

void movieEvent(Movie m) {
  m.read();
}

void draw() {
  frame.setTitle(int(frameRate) + " fps");
  context.update();
  int[] userMap = context.userMap();
  background(0, 0, 0);
  mask.loadPixels();
  for (int i = 0; i < numPixels; i++) {
    mask.pixels[i] = userMap[i] > 0 ? color(255) : color(0);
  }
  mask.updatePixels();
  opencv.loadImage(mask);
  opencv.gray();
  for (int i = 0; i < erodeAmt; i++) {
    opencv.erode();
  }
  for (int i = 0; i < dilateAmt; i++) {
    opencv.dilate();
  }
  if (blurAmt > 0) { //blur with 0 amount causes an error
    opencv.blur(blurAmt);
  }
  mask = opencv.getSnapshot();
  image(mask, 0, 0);
  PImage rgb = context.rgbImage();
  rgb.mask(mask);
  image(mov, context.depthWidth() + 10, 0);
  image(rgb, context.depthWidth() + 10, 0);
  fill(255);
  text("erodeAmt: " + erodeAmt + "\tdilateAmt: " + dilateAmt + "\tblurAmt: " + blurAmt, 15, 15);
}

void keyPressed() {
  if (key == 'e') erodeAmt--;
  if (key == 'E') erodeAmt++;
  if (key == 'd') dilateAmt--;
  if (key == 'D') dilateAmt++;
  if (key == 'b') blurAmt--;
  if (key == 'B') blurAmt++;
  //constrain values
  if (erodeAmt < 0) erodeAmt = 0;
  if (dilateAmt < 0) dilateAmt = 0;
  if (blurAmt < 0) blurAmt = 0;
}
I have created a Mat of training images (150 images of size 144x33), so my Mat is 4752 wide and 150 high. Another Mat with labels is 1 wide and 150 high. Now when I try svm.train() with these two Mats, I get the following error:
OpenCV Error: Bad argument (response #2 is not integral) in cvPreprocessCategoricalResponses, file ..\..\..\..\opencv\modules\ml\src\inner_functions.cpp, line 715
Exception in thread "main" CvException [org.opencv.core.CvException: cv::Exception: ..\..\..\..\opencv\modules\ml\src\inner_functions.cpp:715: error: (-5) response #2 is not integral in function cvPreprocessCategoricalResponses]
Here is a piece of my code; can somebody tell me what could be wrong?
Mat trainingImages = new Mat(0, imageWidth * imageHeight, CvType.CV_32FC1);
Mat labels = new Mat(amountOfPlates + amountOfNoPlates, 1, CvType.CV_32FC1);
List<Integer> trainingLabels = new ArrayList<>();

for (int i = 0; i < amountOfPlates; i++) {
    int index = i + 1;
    String file = pathPlates + index + ".jpg";
    Mat img = Highgui.imread(file, 0);
    img.convertTo(img, CvType.CV_32FC1);
    img = img.reshape(1, 1);
    trainingImages.push_back(img);
    trainingLabels.add(1);
}

for (int i = 0; i < amountOfNoPlates; i++) {
    int index = i + 1;
    String file = pathNoPlates + index + ".jpg";
    Mat img = Highgui.imread(file, 0);
    img.convertTo(img, CvType.CV_32FC1);
    img = img.reshape(1, 1);
    trainingImages.push_back(img);
    trainingLabels.add(0);
}

Integer[] array = trainingLabels.toArray(new Integer[trainingLabels.size()]);
int[] trainLabels = new int[array.length];
for (int i = 0; i < array.length; i++) {
    trainLabels[i] = array[i];
}
for (int i = 0; i < trainingLabels.size(); i++) {
    labels.put(i, 1, trainLabels[i]);
}

CvSVMParams params = new CvSVMParams();
params.set_svm_type(CvSVM.C_SVC);
params.set_kernel_type(CvSVM.LINEAR);
params.set_degree(0);
params.set_gamma(1);
params.set_coef0(0);
params.set_C(1);
params.set_nu(0);
params.set_p(0);
TermCriteria tc = new TermCriteria(opencv_core.CV_TERMCRIT_ITER, 1000, 0.01);
params.set_term_crit(tc);

Size data = trainingImages.size();
Size label = labels.size();

CvSVM svmClassifier = new CvSVM();
svmClassifier.train(trainingImages, labels, new Mat(), new Mat(), params);
svmClassifier.save("test.xml");
Size data shows: width = 4752, height = 150
Size labels shows: width = 1, height = 150
What am I doing wrong?
Mat labels was defined as CV_32FC1, but you fill it with integers from int[] trainLabels.
You should use floating-point trainLabels, or labels of type CV_32SC1 instead.
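A minimal sketch of that fix, reusing the variables from the question (note also that labels has a single column, so the element writes should target column 0, not column 1):

// 32-bit signed integer labels, matching the integral responses C_SVC expects.
Mat labels = new Mat(amountOfPlates + amountOfNoPlates, 1, CvType.CV_32SC1);
for (int i = 0; i < trainingLabels.size(); i++) {
    labels.put(i, 0, trainLabels[i]); // column 0: the Mat is only one column wide
}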
How do I convert an Image object to a Bitmap object and vice versa?
I have a method that takes an Image object as input and returns an Image object, but I want to pass in a Bitmap and get a Bitmap back. My code is this:
public Image edgeFilter(Image imageIn) {
    // Image size
    int width = imageIn.getWidth();
    int height = imageIn.getHeight();
    boolean[][] mask = null;
    Paint grayMatrix[] = new Paint[256];

    // Init gray matrix
    for (int i = 0; i <= 255; i++) {
        Paint p = new Paint();
        p.setColor(Color.rgb(i, i, i));
        grayMatrix[i] = p;
    }

    int[][] luminance = new int[width][height];
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            if (mask != null && !mask[x][y]) {
                continue;
            }
            luminance[x][y] = (int) luminance(imageIn.getRComponent(x, y), imageIn.getGComponent(x, y), imageIn.getBComponent(x, y));
        }
    }

    int grayX, grayY;
    int magnitude;
    for (int y = 1; y < height - 1; y++) {
        for (int x = 1; x < width - 1; x++) {
            if (mask != null && !mask[x][y]) {
                continue;
            }
            grayX = -luminance[x-1][y-1] + luminance[x-1][y-1+2] - 2 * luminance[x-1+1][y-1] + 2 * luminance[x-1+1][y-1+2] - luminance[x-1+2][y-1] + luminance[x-1+2][y-1+2];
            grayY = luminance[x-1][y-1] + 2 * luminance[x-1][y-1+1] + luminance[x-1][y-1+2] - luminance[x-1+2][y-1] - 2 * luminance[x-1+2][y-1+1] - luminance[x-1+2][y-1+2];
            // Magnitudes sum
            magnitude = 255 - Image.SAFECOLOR(Math.abs(grayX) + Math.abs(grayY));
            Paint grayscaleColor = grayMatrix[magnitude];
            // Apply the color into a new image
            imageIn.setPixelColor(x, y, grayscaleColor.getColor());
        }
    }
    return imageIn;
}
If you want to convert an Image object to a Bitmap and the format has been selected as JPEG, you can accomplish this with the following code (if it is not a JPEG, additional conversions will be needed):
...
if (image.getFormat() == ImageFormat.JPEG)
{
    ByteBuffer buffer = image.getPlanes()[0].getBuffer();
    byte[] jpegByteData = new byte[buffer.remaining()];
    buffer.get(jpegByteData); // copy the JPEG bytes out of the plane buffer
    Bitmap bitmapImage = BitmapFactory.decodeByteArray(jpegByteData, 0, jpegByteData.length, null);
}
...
This link gives more info on saving images in PNG format.
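For illustration, saving a Bitmap as PNG boils down to a single compress() call (a sketch only; it assumes it runs inside a Context such as an Activity, and the output file name is hypothetical):

File outFile = new File(getFilesDir(), "result.png");
try (FileOutputStream out = new FileOutputStream(outFile)) {
    // PNG is lossless, so the quality argument (100) is ignored.
    bitmapImage.compress(Bitmap.CompressFormat.PNG, 100, out);
} catch (IOException e) {
    Log.e("SaveBitmap", "Failed to save PNG", e);
}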
It is difficult to see what you are attempting to do; are you trying to alter this code so it also works with bitmaps?
Here is an answer from someone doing similar work with bitmap images; it should give you an idea of what other people do.
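If the goal is simply to run the same filter with a Bitmap in and a Bitmap out, here is an untested sketch of the question's Sobel logic rewritten against android.graphics only (clamp() is a stand-in for the custom Image.SAFECOLOR helper, and the simple channel average is a stand-in for the luminance() helper):

public Bitmap edgeFilterBitmap(Bitmap in) {
    int width = in.getWidth();
    int height = in.getHeight();
    Bitmap out = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);

    // Precompute per-pixel luminance.
    int[][] lum = new int[width][height];
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            int p = in.getPixel(x, y);
            lum[x][y] = (Color.red(p) + Color.green(p) + Color.blue(p)) / 3;
        }
    }

    // Sobel gradients, skipping the one-pixel border as the original does.
    for (int y = 1; y < height - 1; y++) {
        for (int x = 1; x < width - 1; x++) {
            int gx = -lum[x-1][y-1] + lum[x-1][y+1] - 2*lum[x][y-1] + 2*lum[x][y+1] - lum[x+1][y-1] + lum[x+1][y+1];
            int gy = lum[x-1][y-1] + 2*lum[x-1][y] + lum[x-1][y+1] - lum[x+1][y-1] - 2*lum[x+1][y] - lum[x+1][y+1];
            int magnitude = 255 - clamp(Math.abs(gx) + Math.abs(gy));
            out.setPixel(x, y, Color.rgb(magnitude, magnitude, magnitude));
        }
    }
    return out;
}

private static int clamp(int v) {
    return v < 0 ? 0 : (v > 255 ? 255 : v);
}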