Currently I'm building an app to do real-time image processing and then display. The first step is to try displaying the original preview using Camera2 API and ANativeWindow API. I pass the y, u, v channels through JNI separately and do YUV2RGB conversion following the Wikipedia article, but got wrong color output running on Google Pixel - 7.1.0 - API 25 - 1080x1920 on Genymotion:
Implementation of ImageReader.OnImageAvailableListener :
private ImageReader.OnImageAvailableListener mOnImageAvailableListener = new ImageReader.OnImageAvailableListener() {
#Override
public void onImageAvailable(ImageReader reader) {
// get the newest frame
Image image = reader.acquireNextImage();
if (image == null) {
return;
}
Image.Plane Y_plane = image.getPlanes()[0];
int Y_rowStride = Y_plane.getRowStride();
Image.Plane U_plane = image.getPlanes()[1];
int U_rowStride = U_plane.getRowStride();
Image.Plane V_plane = image.getPlanes()[2];
int V_rowStride = V_plane.getRowStride();
JNIUtils.RGBADisplay(image.getWidth(), image.getHeight(), Y_rowStride, Y_plane.getBuffer(), U_rowStride, U_plane.getBuffer(), V_rowStride, V_plane.getBuffer(), surface);
image.close();
}
};
JNI:
public static native void RGBADisplay(int srcWidth, int srcHeight, int Y_rowStride, ByteBuffer Y_Buffer, int U_rowStride, ByteBuffer U_Buffer, int V_rowStride, ByteBuffer V_Buffer, Surface surface);
C++:
const uint8_t NUM_128 = 128;
const uint8_t NUM_255 = 255;

// Clamp an int to the displayable [0, 255] range.
static inline uint8_t clampToByte(int v) {
    return (uint8_t) (v < 0 ? 0 : (v > 255 ? 255 : v));
}

// Converts one YUV_420_888 frame (three direct ByteBuffers) to RGBA_8888
// and posts it to the given Surface through ANativeWindow.
//
// NOTE(review): this assumes tightly packed chroma planes
// (pixelStride == 1, planar I420-style layout). Devices reporting
// pixelStride == 2 deliver interleaved semi-planar chroma and need
// different indexing -- check image.getPlanes()[1].getPixelStride()
// on the Java side.
JNIEXPORT void JNICALL Java_tau_camera2demo_JNIUtils_RGBADisplay(
        JNIEnv *env,
        jobject obj,
        jint srcWidth,
        jint srcHeight,
        jint Y_rowStride,
        jobject Y_Buffer,
        jint U_rowStride,
        jobject U_Buffer,
        jint V_rowStride,
        jobject V_Buffer,
        jobject surface) {
    uint8_t *srcYPtr = reinterpret_cast<uint8_t *>(env->GetDirectBufferAddress(Y_Buffer));
    uint8_t *srcUPtr = reinterpret_cast<uint8_t *>(env->GetDirectBufferAddress(U_Buffer));
    uint8_t *srcVPtr = reinterpret_cast<uint8_t *>(env->GetDirectBufferAddress(V_Buffer));
    ANativeWindow *window = ANativeWindow_fromSurface(env, surface);
    ANativeWindow_acquire(window);
    ANativeWindow_Buffer buffer;
    // set output size and format
    // only 3 formats are available:
    // WINDOW_FORMAT_RGBA_8888(DEFAULT), WINDOW_FORMAT_RGBX_8888, WINDOW_FORMAT_RGB_565
    // FIX: request the source geometry; with (0, 0) the buffer takes the
    // window's own size and the pixel loop below writes with the wrong
    // dimensions.
    ANativeWindow_setBuffersGeometry(window, srcWidth, srcHeight, WINDOW_FORMAT_RGBA_8888);
    if (int32_t err = ANativeWindow_lock(window, &buffer, NULL)) {
        LOGE("ANativeWindow_lock failed with error code: %d\n", err);
        ANativeWindow_release(window);
        // FIX: bail out -- the original fell through and dereferenced an
        // unlocked buffer.
        return;
    }
    // convert YUV_420_888 to RGBA_8888 and display
    uint8_t *outRow = reinterpret_cast<uint8_t *>(buffer.bits);
    for (int32_t y = 0; y < srcHeight; y++) {
        const uint8_t *Y_rowPtr = srcYPtr + y * Y_rowStride;
        const uint8_t *U_rowPtr = srcUPtr + (y >> 1) * U_rowStride;
        const uint8_t *V_rowPtr = srcVPtr + (y >> 1) * V_rowStride;
        uint8_t *outPtr = outRow;
        for (int32_t x = 0; x < srcWidth; x++) {
            // FIX: do the math in signed int. The original stored
            // "U - 128" in a uint8_t, so negative chroma wrapped around
            // and the shift-based formula produced the wrong colors the
            // question describes.
            int Y = Y_rowPtr[x];
            int U = U_rowPtr[x >> 1] - NUM_128;
            int V = V_rowPtr[x >> 1] - NUM_128;
            // Integer approximation of the ITU-R YCbCr -> RGB transform
            // (Wikipedia article "YUV"): Y-Y, U-Cb, V-Cr.
            int R = Y + V + (V >> 2) + (V >> 3) + (V >> 5);
            int G = Y - ((U >> 2) + (U >> 4) + (U >> 5)) - ((V >> 1) + (V >> 3) + (V >> 4) + (V >> 5));
            int B = Y + U + (U >> 1) + (U >> 2) + (U >> 6);
            // FIX: clamp to [0, 255]; the original truncated and wrapped.
            *(outPtr++) = clampToByte(R);
            *(outPtr++) = clampToByte(G);
            *(outPtr++) = clampToByte(B);
            *(outPtr++) = NUM_255; // opaque alpha for RGBA_8888
        }
        // FIX: honor the locked buffer's row stride (in pixels); output
        // rows may be padded wider than srcWidth.
        outRow += buffer.stride * 4;
    }
    ANativeWindow_unlockAndPost(window);
    ANativeWindow_release(window);
}
The whole demo could be found here on Github: https://github.com/Fung-yuantao/android-camera2demo
UPDATE:
Added the following code after the line calling JNIUtils.RGBADisplay:
Log.d(TAG, "Y plane pixel stride: " + Y_plane.getPixelStride());
Log.d(TAG, "U plane pixel stride: " + U_plane.getPixelStride());
Log.d(TAG, "V plane pixel stride: " + V_plane.getPixelStride());
In Logcat:
09-07 06:40:02.576 5376-5392/tau.camera2demo D/Camera2Demo: Y plane pixel stride: 1
09-07 06:40:02.576 5376-5392/tau.camera2demo D/Camera2Demo: U plane pixel stride: 1
09-07 06:40:02.576 5376-5392/tau.camera2demo D/Camera2Demo: V plane pixel stride: 1
The image format should be planar according to the answer from alijandro.
The image output format for YUV_420_888 might be planar(I420, YV12) or semiplanar(NV12, NV21) format, from the documentation here.
So how to know it's planar or semi-planar format?
I guess you can find by image.getPlanes()[1].getPixelStride(). If it's 2, the image format is semi-planar format and has the following bit pattern.
YYYYYYYY UVUVUVUV ...
In my test environment, the output image format from ImageReader is semi-planar.
For semi-planar, we only need to handle the first two planes.
Change your code like following.
// alijandro's semi-planar (NV12-style) variant: size the buffer to the
// source frame and read interleaved U/V pairs from the second plane.
ANativeWindow_setBuffersGeometry(window, srcWidth, srcHeight, WINDOW_FORMAT_RGBA_8888);
if (int32_t err = ANativeWindow_lock(window, &buffer, NULL)) {
LOGE("ANativeWindow_lock failed with error code: %d\n", err);
ANativeWindow_release(window);
}
//convert YUV_420_888 to RGBA_888 and display
uint8_t * outPtr = reinterpret_cast<uint8_t *>(buffer.bits);
for (size_t y = 0; y < srcHeight; y++)
{
uint8_t * Y_rowPtr = srcYPtr + y * Y_rowStride;
// For semi-planar data the second plane holds interleaved U/V; its row
// stride is taken to equal the Y stride here -- presumably true on the
// tested device, verify with getRowStride() elsewhere.
uint8_t * UV_rowPtr = srcUPtr + (y >> 1) * Y_rowStride;
// uint8_t * V_rowPtr = srcVPtr + (y >> 1) * Y_rowStride / 4;
for (size_t x = 0; x < srcWidth; x++)
{
uint8_t Y = Y_rowPtr[x];
// Round x down to the even index of the U/V pair shared by this 2x2 block.
size_t uIndex = x & 0xfffffffe;
uint8_t U = UV_rowPtr[uIndex];
uint8_t V = UV_rowPtr[uIndex + 1];
// Floating-point BT.601 conversion with explicit clamping below.
double R = ((Y-16) * 1.164 + (V-128) * 1.596);
double G = ((Y-16) * 1.164 - (U-128) * 0.392 - (V-128) * 0.813);
double B = ((Y-16) * 1.164 + (U-128) * 2.017);
*(outPtr++) = (uint8_t) (R > 255 ? 255 : (R < 0 ? 0 : R));
*(outPtr++) = (uint8_t) (G > 255 ? 255 : (G < 0 ? 0 : G));
*(outPtr++) = (uint8_t) (B > 255 ? 255 : (B < 0 ? 0 : B));
*(outPtr++) = NUM_255; // gamma for RGBA_8888
}
}
For your planar output format, try use the YUV2RGB transform method below.
// Planar (I420-style) variant: U and V live in separate quarter-size planes.
for (size_t y = 0; y < srcHeight; y++)
{
uint8_t * Y_rowPtr = srcYPtr + y * Y_rowStride;
// NOTE(review): chroma row offsets are derived from the luma stride
// halved; using U_rowStride/V_rowStride directly would be more robust on
// devices that pad their chroma planes -- verify.
uint8_t * U_rowPtr = srcUPtr + (y >> 1) * Y_rowStride / 2;
uint8_t * V_rowPtr = srcVPtr + (y >> 1) * Y_rowStride / 2;
for (size_t x = 0; x < srcWidth; x++)
{
uint8_t Y = Y_rowPtr[x];
// One chroma sample covers a 2x2 block of luma pixels.
uint8_t U = U_rowPtr[(x >> 1)];
uint8_t V = V_rowPtr[(x >> 1)];
double R = ((Y-16) * 1.164 + (V-128) * 1.596);
double G = ((Y-16) * 1.164 - (U-128) * 0.392 - (V-128) * 0.813);
double B = ((Y-16) * 1.164 + (U-128) * 2.017);
*(outPtr++) = (uint8_t) (R > 255 ? 255 : (R < 0 ? 0 : R));
*(outPtr++) = (uint8_t) (G > 255 ? 255 : (G < 0 ? 0 : G));
*(outPtr++) = (uint8_t) (B > 255 ? 255 : (B < 0 ? 0 : B));
*(outPtr++) = NUM_255; // gamma for RGBA_8888
}
}
Related
I have a class(an AsyncTask) which does image processing and generates yuv bytes continously, at around ~200ms interval.
Now I send these yuv bytes to another method where the they are recorded using FFmpeg frame recorder:
// Records one YUV frame with an FFmpegFrameRecorder.
// NOTE(review): relies on outer fields (audioRecord, recorder, images,
// imagesIndex, timestamps, yuvimage, recording, startTime, RECORD_LENGTH)
// whose setup is not visible here; comments below hedge accordingly.
public void recordYuvData() {
// getNV21() presumably returns the latest processed frame in NV21 layout
// -- confirm against the producer.
byte[] yuv = getNV21();
System.out.println(yuv.length + " returned yuv bytes ");
// Until audio recording is running, only (re)arm the timestamp base.
if (audioRecord == null || audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
startTime = System.currentTimeMillis();
return;
}
// Buffered mode (RECORD_LENGTH > 0): cycle through a ring of
// pre-allocated frames, stamping each slot.
if (RECORD_LENGTH > 0) {
int i = imagesIndex++ % images.length;
yuvimage = images[i];
timestamps[i] = 1000 * (System.currentTimeMillis() - startTime);
}
/* get video data */
if (yuvimage != null && recording) {
// Copy the raw bytes into the Frame's backing buffer.
((ByteBuffer) yuvimage.image[0].position(0)).put(yuv);
// Unbuffered mode: push the frame to the recorder immediately.
if (RECORD_LENGTH <= 0) {
try {
long t = 1000 * (System.currentTimeMillis() - startTime);
// Keep recorder timestamps monotonically increasing.
if (t > recorder.getTimestamp()) {
recorder.setTimestamp(t);
}
recorder.record(yuvimage);
} catch (FFmpegFrameRecorder.Exception e) {
e.printStackTrace();
}
}
}
}
This method; recordYuvData() is initiated on button click.
If I initiate it only once , then only the initial image gets recorded, rest are not.
If I initiate this each time after the end of the image processing it records but leads to 'weird' fps count of the video; and finally this leads to application crash after sometime.
For above what I feel is, at the end of image processing a new instance of recordYuvData() is created without ending the previous one, accumulating many instances of recordYuvData(). [correct me if I am wrong]
So, how do I update 'ONLY' yuv bytes in the method without running it again?
Thanks....!
Edit:
On Click:
record.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
recordYuvdata();
startRecording();
getNV21()
// Converts a Bitmap's ARGB pixels to an NV21 (YUV420SP) byte array.
// NOTE(review): the dimensions are hard-coded to 1024x640, so the bitmap
// is assumed to be at least that large -- confirm against the caller.
byte[] getNV21(Bitmap bitmap) {
int inputWidth = 1024;
int inputHeight = 640;
int[] argb = new int[inputWidth * inputHeight];
bitmap.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
System.out.println(argb.length + "#getpixels ");
// NV21 needs width*height luma bytes plus width*height/2 chroma bytes.
byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
return yuv;
}
// Converts a packed ARGB_8888 pixel array into NV21 (YUV420SP): a
// full-resolution Y plane followed by interleaved V/U samples shared by
// each 2x2 pixel block.
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;
    System.out.println(yuv420sp.length + " #encoding " + frameSize);
    int lumaPos = 0;
    // Chroma bytes start right after the luma plane.
    int chromaPos = frameSize;
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            final int flatIndex = row * width + col;
            final int pixel = argb[flatIndex];
            // Alpha is ignored, exactly as in the reference implementation.
            final int red = (pixel >> 16) & 0xff;
            final int green = (pixel >> 8) & 0xff;
            final int blue = pixel & 0xff;
            // Well-known integer RGB -> YUV (BT.601) approximation.
            final int lum = ((66 * red + 129 * green + 25 * blue + 128) >> 8) + 16;
            final int cb = ((-38 * red - 74 * green + 112 * blue + 128) >> 8) + 128;
            final int cr = ((112 * red - 94 * green - 18 * blue + 128) >> 8) + 128;
            yuv420sp[lumaPos++] = (byte) Math.max(0, Math.min(255, lum));
            // NV21 subsamples chroma 2x2: one V then one U, written only on
            // even rows and even absolute pixel indexes (kept index-based to
            // match the original "index % 2 == 0" check, which differs from
            // "col % 2 == 0" when width is odd).
            if (row % 2 == 0 && flatIndex % 2 == 0) {
                yuv420sp[chromaPos++] = (byte) Math.max(0, Math.min(255, cr));
                yuv420sp[chromaPos++] = (byte) Math.max(0, Math.min(255, cb));
            }
        }
    }
}
I am trying to convert JPEG byte[] data from Camera.PictureCallback to NV21 byte[] format but it didn't work.
I try to do this:
// Converts the given Bitmap's ARGB pixels to an NV21 byte array and
// recycles the Bitmap afterwards (the caller must not reuse it).
byte [] getNV21(int inputWidth, int inputHeight, Bitmap scaled) {
int [] argb = new int[inputWidth * inputHeight];
scaled.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
// 12 bits per pixel: full-size Y plane + quarter-size V and U samples.
byte [] yuv = new byte[inputWidth*inputHeight*3/2];
encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
scaled.recycle();
return yuv;
}
// Packs ARGB pixels into NV21 (YUV420SP): a full-resolution Y plane
// followed by interleaved V/U bytes subsampled 2x2.
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
final int frameSize = width * height;
int yIndex = 0;
// Chroma starts right after the luma plane.
int uvIndex = frameSize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
// well known RGB to YUV algorithm
Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
// NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2
// meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
// pixel AND every other scanline.
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0) {
yuv420sp[uvIndex++] = (byte)((V<0) ? 0 : ((V > 255) ? 255 : V));
yuv420sp[uvIndex++] = (byte)((U<0) ? 0 : ((U > 255) ? 255 : U));
}
index ++;
}
}
}
I tried to convert this Java code:
// http://www.stanford.edu/class/ee368/Android/index.html
// Source: http://www.stanford.edu/class/ee368/Android/HelloViewfinder/Project.zip
private void decodeYUV420RGB(int[] rgb, byte[] yuv420sp, int width, int height) {
Convert YUV to RGB
final int frameSize = width * height;
for (int j = 0, yp = 0; j < height; j++) {
int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
for (int i = 0; i < width; i++, yp++) {
int y = (0xff & ((int) yuv420sp[yp])) - 16;
if (y < 0) y = 0;
if ((i & 1) == 0) {
v = (0xff & yuv420sp[uvp++]) - 128;
u = (0xff & yuv420sp[uvp++]) - 128;
}
int y1192 = 1192 * y;
int r = (y1192 + 1634 * v);
int g = (y1192 - 833 * v - 400 * u);
int b = (y1192 + 2066 * u);
if (r < 0) r = 0; else if (r > 262143) r = 262143;
if (g < 0) g = 0; else if (g > 262143) g = 262143;
if (b < 0) b = 0; else if (b > 262143) b = 262143;
rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
}
}
}
which is called this way:
//byte[] mYUVData; int[] mRGBData;
decodeYUV420RGB(mRGBData, mYUVData, mImageWidth, mImageHeight);
to this C code:
#include <string.h>
#include <jni.h>
jint*
Java_com_camera_DrawOnTop_decodeYUV420RGB565(JNIEnv* env, jobject thiz, jintArray rgb, jbyteArray yuv420sp, jint width, jint height)
{
/* NOTE(review): despite the "RGB565" name this packs 8-bit channels into
 * 32-bit jints, exactly like the Java original -- confirm which format
 * the caller actually expects. */
jbyte* yuv420spc = (*env)->GetByteArrayElements(env, yuv420sp, NULL);
jint* rgbc = (*env)->GetIntArrayElements(env, rgb, NULL);
int frameSize = width * height;
int j;
int i;
int yp;
for (j = 0, yp = 0; j < height; j++) {
/* uvp points at this scanline's V/U pairs; v and u carry over so odd
 * columns reuse the previous chroma sample. */
int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
for (i = 0; i < width; i++, yp++) {
int y = (0xff & ((int) yuv420spc[yp])) - 16;
if (y < 0) y = 0;
if ((i & 1) == 0) {
v = (0xff & yuv420spc[uvp++]) - 128;
u = (0xff & yuv420spc[uvp++]) - 128;
}
/* Fixed-point YCbCr -> RGB scaled by 1192 (~1.164 * 1024). */
int y1192 = 1192 * y;
int r = (y1192 + 1634 * v);
int g = (y1192 - 833 * v - 400 * u);
int b = (y1192 + 2066 * u);
if (r < 0) r = 0; else if (r > 262143) r = 262143;
if (g < 0) g = 0; else if (g > 262143) g = 262143;
if (b < 0) b = 0; else if (b > 262143) b = 262143;
rgbc[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
}
}
/* Mode 0 copies the converted pixels back into the Java `rgb` array and
 * frees the native copies. */
(*env)->ReleaseByteArrayElements(env, yuv420sp, yuv420spc, 0 );
(*env)->ReleaseIntArrayElements(env, rgb, rgbc, 0 );
/* NOTE(review): rgbc was just released above, so this returns a dangling
 * pointer -- callers must read the results from the Java `rgb` array and
 * ignore this return value. The crash described in the question comes
 * from passing a 3-element array as `rgb`, far smaller than the required
 * width*height. */
return rgbc;
}
and call it via JNI:
//int[] mRGBData; int [] tmpData = {1,2,3};
mRGBData = decodeYUV420RGB565(tmpData, mYUVData, mImageWidth, mImageHeight);
But the program breaks running after the above call.
I don't know how to do call by reference with JNI, so I used tmpData
only to have data, and return the real data to mRGBData via the equals sign.
What's wrong with my C code so it breaks at running time?
And what have I to change so that it works with reference like
the original code (without equals sign)?
You should provide to your JNI function an RGB array of correct size (that is, w*h), and an YUV array of correct size and structure (w*h*3/2, with w*h luma bytes (Y), followed by (w/2)*(h/2) pairs of chroma (U and V) bytes. The call will crash if you provide rgb array of size 3, as in your snippet.
Also note that you are building an rgb565 array. Its elements are probably expected to be of type short (16 bit) and not int (32 bit).
GetByteArrayElements() creates an array on the C side, which you fill, then release, then return to Java, which gets a pointer to released memory. If you want to actually get the data over to the Java side, you need to create a new Java array object and return that. Or, create the new object on the Java side and pass it to the C code to modify. I find the latter is usually easier. I usually create a DirectByteBuffer on the Java side and pass it into the native function, let the native function call GetDirectBufferAddress and write into that.
For an example, see my PD ojrandlib JNI code: https://github.com/lcrocker/ojrandlib/tree/master/source/java/com/onejoker/randlib
I changed code to this:
Java:
native void decodeYUV420RGB565(ByteBuffer rgb, byte[] yuv420sp, int width, int height);
//There's no allocateDirect for IntBuffer so I have to use ByteBuffer
//although mRGBData is an int array
//allocateDirect(mRGBData.length * 4) because mRGBData is an int array
ByteBuffer mTempData = ByteBuffer.allocateDirect(mRGBData.length*4);
decodeYUV420RGB565(mTempData, mYUVData, mImageWidth, mImageHeight);
mRGBData = mTempData.asIntBuffer().array();
mTempData.clear();
C:
void Java_com_camera_DrawOnTop_decodeYUV420RGB565(JNIEnv* env, jobject thiz, jintArray rgb, jbyteArray yuv420sp, jint width, jint height)
{
jbyte* yuv420spc = (*env)->GetByteArrayElements(env, yuv420sp, NULL);
jint* rgbc = (*env)->GetDirectBufferAddress(env, thiz);
//...conversion code as above...
(*env)->ReleaseByteArrayElements(env, yuv420sp, yuv420spc, 0 );
(*env)->ReleaseIntArrayElements(env, rgb, rgbc, 0 );
But I have the same problem: Code crashes when I call decodeYUV420RGB565(mTempData, mYUVData, mImageWidth, mImageHeight). What have I to change in the code?
I'm converting a single pixel of NV21 to RGB, it is what I did:
// Converts the single NV21 pixel at (x, y) to a packed ARGB int.
// FIX (per the accepted answer): the chroma plane was indexed as if it
// were planar with a width/2 row stride and with U before V. In NV21 the
// chroma plane holds interleaved V,U pairs on rows of full width, shared
// by each 2x2 block, so for pixel (x, y): V sits at
// (y/2)*width + (x & ~1) + total and U is the byte right after it.
public int getYUVvalue(byte[] yuv,int width,int height,int x,int y){
    int total = width * height;
    int Y = (0xff & yuv[y * width + x]) - 16;
    if (Y < 0) Y = 0; // FIX: clamp like decodeYUV420SP does
    int V = (0xff & yuv[(y / 2) * width + (x & ~1) + total]) - 128;
    int U = (0xff & yuv[(y / 2) * width + (x & ~1) + total + 1]) - 128;
    return this.convertYUVtoRGB(Y, U, V);
}
// Integer BT.601-style YUV -> packed 0xAARRGGBB conversion. Works in a
// fixed-point scale of ~1024 (1192 ~= 1.164 * 1024); the intermediate
// channel values are clamped to the 18-bit range [0, 262143] before being
// shifted into their final 8-bit positions.
private static int convertYUVtoRGB(int y, int u, int v) {
    final int luma = 1192 * y;
    int red = luma + 1634 * v;
    int green = luma - 833 * v - 400 * u;
    int blue = luma + 2066 * u;
    red = Math.min(262143, Math.max(0, red));
    green = Math.min(262143, Math.max(0, green));
    blue = Math.min(262143, Math.max(0, blue));
    return 0xff000000 | ((red << 6) & 0xff0000) | ((green >> 2) & 0xff00) | ((blue >> 10) & 0xff);
}
but the result is just black and white, looks like the U and V value are not correct, can someone help me with this issue?
thank you!
update: I confirm my Y value is right, so I think the problem is at the U and V value, which I have no clue where the problem is.
update#2: My code is very similar to decodeYUV420sp(), which is a widely used function to convert the whole image, it works perfectly, mine is just to convert a single pixel, still trying to figure out the problem by comparing it with my code. Here is the decodeyuv420sp function:
// Reference NV21 -> packed-int decoder (widely circulated). The chroma
// plane after the luma holds interleaved V,U pairs, each shared by a 2x2
// block of pixels.
static public void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
final int frameSize = width * height;
for (int j = 0, yp = 0; j < height; j++) {
// uvp points at this scanline's V/U pairs; u and v persist across the
// inner loop so odd columns reuse the previous sample.
int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
for (int i = 0; i < width; i++, yp++) {
int y = (0xff & ((int) yuv420sp[yp])) - 16;
if (y < 0) y = 0;
// Refresh chroma on even columns only (V comes before U in NV21).
if ((i & 1) == 0) {
v = (0xff & yuv420sp[uvp++]) - 128;
u = (0xff & yuv420sp[uvp++]) - 128;
}
// Fixed-point conversion scaled by 1192 (~1.164 * 1024).
int y1192 = 1192 * y;
int r = (y1192 + 1634 * v);
int g = (y1192 - 833 * v - 400 * u);
int b = (y1192 + 2066 * u);
// Clamp the 18-bit intermediates before repacking to 8 bits each.
if (r < 0) r = 0; else if (r > 262143) r = 262143;
if (g < 0) g = 0; else if (g > 262143) g = 262143;
if (b < 0) b = 0; else if (b > 262143) b = 262143;
rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
}
}
}
I still can't see where my mistake is.
update#3: problem solved, thanks for your help everyone!
The logic for the array indexes of U & V in the getYUVvalue function does not match the logic in decodeYUV420SP. (I count 3 differences.) Without a sample byte array in this format to test with, I can't be sure, but I think it should be:
int U=(0xff&yuv[(y/2)*width+(x&~1)+total+1])-128;
int V=(0xff&yuv[(y/2)*width+(x&~1)+total])-128;
It's missing the if (y < 0) y = 0; check also. Maybe that matters.
In our application, we need to use capture frame from camera # 33 fps and pass it to compression before sending it to server,
My compression module can take YUV Image, to compress the image, my camera configuration as follows,
Width = 500 px,
height = 300 px ,
Image format : YV12
On the preview callback
camera.setPreviewCallback(new PreviewCallback() {
public void onPreviewFrame(byte[] data, Camera arg1) {
}
}
The data size is coming out to be 230400, but I suppose it would be around
500*300 (Y) + 500*300/4 (U) + 500*300/4 (V), i.e. 225000 —
that is, 5400 bytes more; does that mean I can ignore the remaining bytes?
Also i need to create YUVImage object , but stride info is not coming, so how we can create YUVImage from above data.
Sincere Thanks for reading and i really appreciate if anyone can help me out on this.
Can't help with the data size question, but to get YUV from the Camera preview you have 2 choices. If running Android 2.2 or later you can use the android.graphics.YuvImage class and just pass its constructor your byte array from PreviewCallback.
If you need to support pre 2.2 then you need to do something like:
/**
 * Decodes a YUV 420 semi-planar (YCbCr_422_SP as described by Android)
 * frame to a buffer of packed pixels which can be used to create a bitmap.
 * Use this for OS < FROYO, which lacks a native YUV decoder.
 *
 * Note: pixels are packed as 0xff000000 + (B << 16) + (G << 8) + R,
 * i.e. A-B-G-R channel order -- kept exactly as in the original.
 *
 * FIXES relative to the original:
 *  - the fg size check compared against sz although a 4:2:0 frame needs
 *    sz * 3 / 2 bytes (the error message already said so);
 *  - bytes are widened with "& 0xff": the original's "if (Y < 0) Y += 255"
 *    and "Cb += 127 / Cr += 127" were off by one for the byte values -1
 *    and -128 respectively;
 *  - "#param"-style markdown artifacts restored to "@param".
 *
 * @param rgb the outgoing array of packed pixels (at least width*height ints)
 * @param fg the incoming frame bytes (at least width*height*3/2 bytes)
 * @param width of source frame
 * @param height of source frame
 * @throws NullPointerException if either array is null
 * @throws IllegalArgumentException if either array is too small
 */
private static void decodeYUV_impl(int[] rgb, byte[] fg, int width, int height) throws NullPointerException, IllegalArgumentException
{
    int sz = width * height;
    if (rgb == null)
        throw new NullPointerException("buffer out is null");
    if (rgb.length < sz)
        throw new IllegalArgumentException("buffer out size " + rgb.length
                + " < minimum " + sz);
    if (fg == null)
        throw new NullPointerException("buffer 'fg' is null");
    // FIX: a 4:2:0 frame needs sz * 3 / 2 bytes, not sz.
    if (fg.length < sz * 3 / 2)
        throw new IllegalArgumentException("buffer fg size " + fg.length
                + " < minimum " + sz * 3 / 2);
    int i, j;
    int Y, Cr = 0, Cb = 0;
    for (j = 0; j < height; j++) {
        int pixPtr = j * width;
        final int jDiv2 = j >> 1;
        for (i = 0; i < width; i++) {
            // FIX: widen to unsigned with & 0xff instead of the off-by-one
            // "+= 255" / "+= 127" corrections.
            Y = fg[pixPtr] & 0xff;
            // One chroma pair per 2x2 block; refresh on even columns.
            if ((i & 0x1) != 1) {
                final int cOff = sz + jDiv2 * width + (i >> 1) * 2;
                Cb = (fg[cOff] & 0xff) - 128;
                Cr = (fg[cOff + 1] & 0xff) - 128;
            }
            // Shift-based integer approximation of YCbCr -> RGB, with
            // per-channel clamping to [0, 255].
            int R = Y + Cr + (Cr >> 2) + (Cr >> 3) + (Cr >> 5);
            if (R < 0)
                R = 0;
            else if (R > 255)
                R = 255;
            int G = Y - (Cb >> 2) + (Cb >> 4) + (Cb >> 5) - (Cr >> 1)
                    + (Cr >> 3) + (Cr >> 4) + (Cr >> 5);
            if (G < 0)
                G = 0;
            else if (G > 255)
                G = 255;
            int B = Y + Cb + (Cb >> 1) + (Cb >> 2) + (Cb >> 6);
            if (B < 0)
                B = 0;
            else if (B > 255)
                B = 255;
            rgb[pixPtr++] = (0xff000000 + (B << 16) + (G << 8) + R);
        }
    }
}