I am currently writing an android application that should rotate an image towards a set location based on the users current location. I can tell that the set location is setting correctly, and the current location seems to be updating, but instead of rotating the image, the app just zooms in on the image and then does nothing. Any help would really be appreciated!
/**
 * Computes the initial great-circle bearing from point 1 to point 2.
 *
 * @param lat1 start latitude in degrees
 * @param lon1 start longitude in degrees
 * @param lat2 destination latitude in degrees
 * @param lon2 destination longitude in degrees
 * @return bearing in degrees, clockwise from north, in [0, 360)
 */
public double bearing(double lat1, double lon1, double lat2, double lon2) {
    double phi1 = Math.toRadians(lat1);
    double phi2 = Math.toRadians(lat2);
    double deltaLambda = Math.toRadians(lon2 - lon1);
    // Forward-azimuth formula:
    //   atan2(sin Δλ · cos φ2, cos φ1 · sin φ2 − sin φ1 · cos φ2 · cos Δλ)
    double numerator = Math.sin(deltaLambda) * Math.cos(phi2);
    double denominator = Math.cos(phi1) * Math.sin(phi2)
            - Math.sin(phi1) * Math.cos(phi2) * Math.cos(deltaLambda);
    double degrees = Math.toDegrees(Math.atan2(numerator, denominator));
    // Normalise from (-180, 180] to [0, 360).
    return (degrees + 360) % 360;
}
/**
 * Rotates a drawable resource by the given angle and shows the result in the
 * ImageView.
 *
 * @param imageView target view that will display the rotated bitmap
 * @param drawable  resource id of the image to rotate
 * @param rotate    rotation in degrees, clockwise
 */
private void rotateImageView(ImageView imageView, int drawable, float rotate) {
    // Decode the drawable into a bitmap.
    Bitmap bitmapOrg = BitmapFactory.decodeResource(getResources(), drawable);
    int width = bitmapOrg.getWidth(), height = bitmapOrg.getHeight();
    // Normalise the angle to [0, 360).
    rotate = rotate % 360;
    Matrix matrix = new Matrix();
    // BUG FIX: rotate around the bitmap centre, not around its bottom-right
    // corner (width, height). Pivoting on the corner made the result appear
    // enlarged/offset rather than rotated in place.
    matrix.postRotate(rotate, width / 2f, height / 2f);
    Bitmap rotatedBitmap = Bitmap.createBitmap(bitmapOrg, 0, 0, width, height, matrix, true);
    imageView.setImageDrawable(new BitmapDrawable(getResources(), rotatedBitmap));
    // CENTER displays the bitmap unscaled; other scale types may appear to
    // "zoom" the rotated (larger bounding box) bitmap.
    imageView.setScaleType(ImageView.ScaleType.CENTER);
}
/**
 * Recomputes the heading toward baseLocation from the freshest GPS fix and
 * rotates the on-screen arrow accordingly.
 *
 * NOTE(review): 'azimuth' (from bearing()) and 'bearTo'
 * (Location.bearingTo) both measure the bearing to the same target, so
 * 'direction' mostly reduces to the declination correction; also
 * 'baseAzimuth' is assigned but never used — confirm the intended formula.
 */
public void onLocationChange() {
// If we don't have a Location, we break out
if (currentLocation == null) return;
double azimuth = bearing(currentLocation.getLatitude(), currentLocation.getLongitude(), baseLocation.getLatitude(), baseLocation.getLongitude());
double baseAzimuth = azimuth;
// Geomagnetic model for the current position; supplies the declination below.
GeomagneticField geoField = new GeomagneticField(Double
.valueOf(currentLocation.getLatitude()).floatValue(), Double
.valueOf(currentLocation.getLongitude()).floatValue(),
Double.valueOf(currentLocation.getAltitude()).floatValue(),
System.currentTimeMillis()
);
azimuth -= geoField.getDeclination(); // converts magnetic north into true north
// Store the bearingTo in the bearTo variable
float bearTo = currentLocation.bearingTo(baseLocation);
// If the bearTo is smaller than 0, add 360 to get the rotation clockwise.
if (bearTo < 0) {
bearTo = bearTo + 360;
}
//This is where we choose to point it
double direction = bearTo - azimuth;
// If the direction is smaller than 0, add 360 to get the rotation clockwise.
if (direction < 0) {
direction = direction + 360;
}
float fDir = (float) direction;
// Rotate the arrow image to the computed heading.
rotateImageView((ImageView) findViewById(R.id.arrow), R.drawable.ic_launcher, fDir);
}
// Self-rescheduling UI task: refreshes the arrow heading twice a second.
private Runnable updateTimeChange = new Runnable() {
public void run() {
// Recompute and apply the arrow rotation from the latest fix.
onLocationChange();
// Re-post this same Runnable to run again in 500 ms.
customHandler.postDelayed(this, 500);
}
};
If your target API level is 11 or higher, then you can try the view.setRotation() method:
image.setRotation(angle); // this is for your imageview where image is the imageview object
From the developers page. getRotation()
Sets the degrees that the view is rotated around the pivot point. Increasing values result in clockwise rotation.
If you want to setRotation in xml the xml tag is :
android:rotation
Related
I can't solve this problem and I need help. I am making planets rotate around a sun, and I need one planet to rotate anticlockwise — that is where my problem begins. I have tried everything I am familiar with. I'm a beginner and this is simple code, but I can't solve it. Please help. Thanks in advance.
I tried to get anticlockwise rotation just by putting a minus sign on the angle, and that did not work. I also tried working with the radius and angle, and that didn't work either. I am failing somewhere in the code, but I don't know where.
...
#Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
screenDimensions = getDisplayDimensions();
Bitmap bitmap = Bitmap.createBitmap(1200, 2000, Bitmap.Config.ARGB_8888);
bitmap.eraseColor(Color.parseColor("#FFFFFF"));
canvas = new Canvas();
canvas.setBitmap(bitmap);
paint = new Paint();
paint1 = new Paint();
paint2 = new Paint();
paint3 = new Paint();
paint4 = new Paint();
paint.setColor(Color.YELLOW);
paint1.setColor(Color.RED);
paint2.setColor(Color.BLUE);
paint3.setColor(Color.BLACK);
paint4.setColor(Color.MAGENTA);
imageView = findViewById(R.id.imageView3);
imageView.setImageBitmap(bitmap);
timer = new Timer();
timer.schedule(new TimerTask() {
#Override
public void run() {
runOnUiThread(
new Runnable() {
#Override
public void run() {
animationFrame();
}
}
);
}
}, 2000, 80);
}
/**
 * Returns the display size in pixels as {width, height}.
 */
private int[] getDisplayDimensions() {
    DisplayMetrics metrics = new DisplayMetrics();
    getWindowManager().getDefaultDisplay().getMetrics(metrics);
    return new int[]{metrics.widthPixels, metrics.heightPixels};
}
/**
 * Renders one animation frame: clears the backing bitmap, draws the sun at
 * (500, 750), then rotates the canvas around the sun before drawing each
 * planet.
 *
 * NOTE(review): the canvas matrix is never reset, so the rotate() calls
 * accumulate across frames — that accumulation is what animates the
 * planets. A negative angle reduces the net clockwise step of the bodies
 * drawn after it (anticlockwise motion requires the negative angles to
 * dominate). Confirm this is the intended mechanism.
 */
private void animationFrame() {
    canvas.drawColor(Color.parseColor("#EBEBEB"));
    paint.setColor(Color.YELLOW);
    // The sun.
    canvas.drawCircle(500f, 750f, 130f, paint);
    // BUG FIX: use the logical short-circuit operator || instead of the
    // bitwise |. NOTE(review): the guard likely intends && — both paints are
    // dereferenced inside the branch.
    if (paint1 != null || paint3 != null) {
        paint1.setColor(Color.RED);
        canvas.drawCircle(400f, 560f, 51f, paint1);
        canvas.rotate(10f, 500f, 750f);
        paint3.setColor(Color.BLACK);
        canvas.rotate(30f, 500f, 750f);
        canvas.drawCircle(350f, 1050f, 45, paint3);
    }
    if (paint2 != null || paint4 != null) {
        paint2.setColor(Color.MAGENTA);
        canvas.rotate(-10f, 500f, 750f);
        canvas.drawCircle(720f, 950f, 48, paint2);
        paint4.setColor(Color.GRAY);
        canvas.rotate(-25f, 500f, 750f);
        canvas.drawCircle(290f, 330f, 42, paint4);
    }
    // The ImageView shares the bitmap, so invalidating repaints it.
    imageView.invalidate();
}
}
...
I expected one of the planets to rotate anticlockwise.
EDIT
This code works perfectly for me:
In your case, the rotationPoint is the center point of sun and the bodyCenter is the center point of a planet.
Final points cx and cy are the center point of a planet after rotation around the sun.
/** Simple mutable 2-D point/vector holder. */
class Vector {
    float x;
    float y;

    /** Creates a vector with the given horizontal and vertical components. */
    Vector(float xComponent, float yComponent) {
        x = xComponent;
        y = yComponent;
    }
}
/**
 * Rotates bodyCenter around rotationPoint by the given angle and returns
 * the new centre position.
 *
 * @param isClockwise   true for clockwise, false for counter-clockwise
 * @param rotation      angle in degrees
 * @param bodyCenter    point to rotate (e.g. a planet's centre)
 * @param rotationPoint pivot of the rotation (e.g. the sun's centre)
 * @return the rotated point
 */
public Vector getRotatedPoint(boolean isClockwise, float rotation, Vector bodyCenter, Vector rotationPoint){
    float angleRad = rotation * MathUtils.degreesToRadians;
    float s = MathUtils.sin(angleRad);
    float c = MathUtils.cos(angleRad);
    // Offset of the body from the pivot.
    float dx = bodyCenter.x - rotationPoint.x;
    float dy = bodyCenter.y - rotationPoint.y;
    float cx;
    float cy;
    if (isClockwise) {
        // Clockwise rotation (screen coordinates, y grows downwards).
        cx = rotationPoint.x + dx * c + dy * s;
        cy = rotationPoint.y - dx * s + dy * c;
    } else {
        // Counter-clockwise rotation.
        cx = rotationPoint.x + dx * c - dy * s;
        cy = rotationPoint.y + dx * s + dy * c;
    }
    return new Vector(cx, cy);
}
To use it, first call the method and then draw the circle at the returned position:
Vector newCenter = getRotatedPoint(true, 10f, planetCenter, sunCenter)
canvas.drawCircle(newCenter.x, newCenter.y, 48, paint2);
I'm trying to draw some text on a bitmap at a fixed position (the bottom-left corner), regardless of the bitmap's size.
The code below works, but the text is drawn in the center of the bitmap:
/**
 * Draws {@code gText} in the bottom-left corner of a copy of {@code bitmap}
 * and returns the copy; the input bitmap is not modified.
 *
 * @param gContext context used to resolve display metrics
 * @param bitmap   source image
 * @param gText    text to stamp onto the image
 * @return a mutable copy of the bitmap with the text drawn on it
 */
public Bitmap drawTextToBitmap(Context gContext,
                               Bitmap bitmap,
                               String gText) {
    Resources resources = gContext.getResources();
    float scale = resources.getDisplayMetrics().density;
    android.graphics.Bitmap.Config bitmapConfig =
            bitmap.getConfig();
    // Immutable sources (e.g. decoded resources) can report a null config.
    if (bitmapConfig == null) {
        bitmapConfig = android.graphics.Bitmap.Config.ARGB_8888;
    }
    // Copy so the canvas can write into a mutable bitmap.
    bitmap = bitmap.copy(bitmapConfig, true);
    Canvas canvas = new Canvas(bitmap);
    Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG);
    paint.setColor(getResources().getColor(R.color.fujiColor));
    paint.setTypeface(Typeface.createFromAsset(getAssets(), "fonts/DS-DIGI.TTF"));
    paint.setTextSize((int) (14 * scale));
    paint.setShadowLayer(1f, 0f, 1f, getResources().getColor(R.color.fujiShadowColor));
    // BUG FIX: anchor the text at the bottom-left corner instead of centring
    // it. drawText() treats (x, y) as the start of the text baseline, so a
    // small density-scaled margin from the left and bottom edges gives the
    // same placement regardless of bitmap size.
    int x = (int) (8 * scale);                        // left margin
    int y = bitmap.getHeight() - (int) (12 * scale);  // baseline above bottom edge
    canvas.drawText(gText, x, y, paint);
    return bitmap;
}
What I need is something similar to this :
Thank you.
As mentioned in the official docs, the text is drawn taking the (x,y) values as origin. Change the x,y values. Something along the following lines should work.
int horizontalSpacing = 24;
int verticalSpacing = 36;
int x = horizontalSpacing;//(bitmap.getWidth() - bounds.width()) / 2;
int y = bitmap.getHeight()-verticalSpacing;//(bitmap.getHeight() + bounds.height()) / 2;
I'm trying to create a 360 video sphere (like the ones for cardboard) on Android. I have done this with a photo by rendering a sphere in OpenGL ES1.0 and than attaching a texture to it. Afterwards I can use the sensor values to rotate the sphere.
However, I can't figure out how to change the picture to a video. I've tried frame by frame rendering using texSubImage2D() but it's SUPER SLOW. My video is probably going to be about 4k density as I need a good quality even when only small portion of it is shown.
I've read some theoretical stuff about how this should be done (i.e. Frame Buffers, External Texture, Synchronization, etc.) but I couldn't find any example for these things, so some code would be EXTREMELY appreciated...
Here is how I render the Sphere, draw it and attach a texture to it (i.e. my Sphere class)...
import rapid.decoder.BitmapDecoder;
/**
 * A textured sphere built from triangle strips (icosahedron-based
 * subdivision), rendered with OpenGL ES 1.0 fixed-function calls.
 *
 * NOTE(review): relies on the project-local {@code Maths} helper for
 * {@code power} and the {@code radXX} angle constants, and on the
 * {@code BitmapDecoder} library for texture loading.
 */
public class Sphere {
    /** Buffer holding the vertices. */
    private final List<FloatBuffer> mVertexBuffer = new ArrayList<FloatBuffer>();
    /** The vertices for the sphere. */
    private final List<float[]> mVertices = new ArrayList<float[]>();
    /** Buffer holding the texture coordinates. */
    private final List<FloatBuffer> mTextureBuffer = new ArrayList<FloatBuffer>();
    /** Mapping texture coordinates for the vertices. */
    private final List<float[]> mTexture = new ArrayList<float[]>();
    /** The texture pointer. */
    private final int[] mTextures = new int[1];
    /** Total number of strips for the given depth. */
    private final int mTotalNumStrips;

    /**
     * Builds the sphere geometry.
     *
     * @param depth  subdivision depth; higher values give a smoother sphere
     * @param radius sphere radius in world units
     */
    public Sphere(final int depth, final float radius) {
        // Calculate basic values for the sphere.
        this.mTotalNumStrips = Maths.power(2, depth - 1) * 5; // last 5 is related to properties of an icosahedron
        final int numVerticesPerStrip = Maths.power(2, depth) * 3;
        final double altitudeStepAngle = Maths.rad120 / Maths.power(2, depth);
        final double azimuthStepAngle = Maths.rad360 / this.mTotalNumStrips;
        double x, y, z, h, altitude, azimuth;
        Log.e("mTotalNumStrips", "" + mTotalNumStrips);
        Log.e("numVerticesPerStrip", "" + numVerticesPerStrip);
        for (int stripNum = 0; stripNum < this.mTotalNumStrips; stripNum++) {
            // Setup arrays to hold the points for this strip.
            final float[] vertices = new float[numVerticesPerStrip * 3]; // x,y,z
            final float[] texturePoints = new float[numVerticesPerStrip * 2]; // 2d texture
            int vertexPos = 0;
            int texturePos = 0;
            // Calculate position of the first vertex in this strip.
            altitude = Maths.rad90;
            azimuth = stripNum * azimuthStepAngle;
            // Draw the rest of this strip.
            for (int vertexNum = 0; vertexNum < numVerticesPerStrip; vertexNum += 2) {
                // First point - Vertex.
                y = radius * Math.sin(altitude);
                h = radius * Math.cos(altitude);
                z = h * Math.sin(azimuth);
                x = h * Math.cos(azimuth);
                vertices[vertexPos++] = (float) x;
                vertices[vertexPos++] = (float) y;
                vertices[vertexPos++] = (float) z;
                // First point - Texture.
                texturePoints[texturePos++] = (float) (1 + azimuth / Maths.rad360);
                texturePoints[texturePos++] = (float) (1 - (altitude + Maths.rad90) / Maths.rad180);
                // Second point - Vertex.
                altitude -= altitudeStepAngle;
                azimuth -= azimuthStepAngle / 2.0;
                y = radius * Math.sin(altitude);
                h = radius * Math.cos(altitude);
                z = h * Math.sin(azimuth);
                x = h * Math.cos(azimuth);
                vertices[vertexPos++] = (float) x;
                vertices[vertexPos++] = (float) y;
                vertices[vertexPos++] = (float) z;
                // Second point - Texture.
                texturePoints[texturePos++] = (float) (1 + azimuth / Maths.rad360);
                texturePoints[texturePos++] = (float) (1 - (altitude + Maths.rad90) / Maths.rad180);
                azimuth += azimuthStepAngle;
            }
            this.mVertices.add(vertices);
            this.mTexture.add(texturePoints);
            // BUG FIX: Float.SIZE is the float size in *bits* (32), so the
            // original allocated 8x more direct memory than needed per buffer.
            // Allocate in bytes instead.
            ByteBuffer byteBuffer = ByteBuffer.allocateDirect(numVerticesPerStrip * 3 * (Float.SIZE / 8));
            byteBuffer.order(ByteOrder.nativeOrder());
            FloatBuffer fb = byteBuffer.asFloatBuffer();
            fb.put(this.mVertices.get(stripNum));
            fb.position(0);
            this.mVertexBuffer.add(fb);
            // Setup texture buffer (2 coordinates per vertex).
            byteBuffer = ByteBuffer.allocateDirect(numVerticesPerStrip * 2 * (Float.SIZE / 8));
            byteBuffer.order(ByteOrder.nativeOrder());
            fb = byteBuffer.asFloatBuffer();
            fb.put(this.mTexture.get(stripNum));
            fb.position(0);
            this.mTextureBuffer.add(fb);
        }
    }

    /**
     * Decodes the given drawable resource, uploads it as a 2D texture and
     * stores the texture id in {@link #mTextures}.
     *
     * @param gl      GL context
     * @param context context used to resolve the resource
     * @param texture drawable resource id of the texture image
     */
    public void loadGLTexture(final GL10 gl, final Context context, final int texture) {
        Bitmap bitmap = BitmapDecoder.from(context.getResources(), texture)
                .scale(4048, 2024)
                .decode();
        // Generate one texture pointer, and bind it to the texture array.
        gl.glGenTextures(1, this.mTextures, 0);
        gl.glBindTexture(GL10.GL_TEXTURE_2D, this.mTextures[0]);
        // Create nearest filtered texture.
        gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MIN_FILTER, GL10.GL_NEAREST);
        gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MAG_FILTER, GL10.GL_LINEAR);
        // Use Android GLUtils to specify a two-dimensional texture image from our bitmap.
        GLUtils.texImage2D(GL10.GL_TEXTURE_2D, 0, bitmap, 0);
        // Tidy up: the pixel data now lives in GL, the bitmap is no longer needed.
        bitmap.recycle();
    }

    /**
     * Draws the sphere, strip by strip, using the previously loaded texture.
     *
     * @param gl Graphics handle.
     */
    public void draw(final GL10 gl) {
        // Bind the previously generated texture.
        gl.glBindTexture(GL10.GL_TEXTURE_2D, this.mTextures[0]);
        // Point to our buffers.
        gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
        gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
        // Set the face rotation, clockwise in this case.
        gl.glFrontFace(GL10.GL_CW);
        // Point to our vertex buffer.
        for (int i = 0; i < this.mTotalNumStrips; i++) {
            gl.glVertexPointer(3, GL10.GL_FLOAT, 0, this.mVertexBuffer.get(i));
            gl.glTexCoordPointer(2, GL10.GL_FLOAT, 0, this.mTextureBuffer.get(i));
            // Draw the vertices as triangle strip.
            gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, this.mVertices.get(i).length / 3);
        }
        // Disable the client state before leaving.
        gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
        gl.glDisableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
    }
}
And this is my renderer...
#Override
public void onDrawFrame(final GL10 gl) {
zvector = new float[] {0,0,1,0};
resultvector = new float[] {0,0,1,0};
gl.glMatrixMode(GL10.GL_MODELVIEW);
gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
gl.glLoadIdentity();
float radiansX = (float) Math.toRadians(gyro_angle[1]);
float radiansY = (float) Math.toRadians(-gyro_angle[0]);
float radiansZ = (float) Math.toRadians(-gyro_angle[2]);
// Finds the Sin and Cosin for the half angle.
float sinX =(float) Math.sin(radiansX * 0.5);
float cosX =(float) Math.cos(radiansX * 0.5);
float sinY =(float) Math.sin(radiansY * 0.5);
float cosY =(float) Math.cos(radiansY * 0.5);
float sinZ =(float) Math.sin(radiansZ * 0.5);
float cosZ =(float) Math.cos(radiansZ * 0.5);
// Formula to construct a new Quaternion based on direction and angle.
quatX[0] = cosX;
quatX[1] = 1 * sinX;
quatX[2] = 0 * sinX;
quatX[3] = 0 * sinX;
quatY[0] = cosY;
quatY[1] = 0 * sinY;
quatY[2] = 1 * sinY;
quatY[3] = 0 * sinY;
quatZ[0] = cosZ;
quatZ[1] = 0 * sinZ;
quatZ[2] = 0 * sinZ;
quatZ[3] = 1 * sinZ;
quat1 = multiplyQuat(quatX, quatY);
quat2 = multiplyQuat(quat1, quatZ);
mMatrix = getMatrixfromQuat(quat1);
gl.glLoadMatrixf(mMatrix, 0);
this.mSphere.draw(gl);
}
#Override
public void onSurfaceChanged(final GL10 gl, final int width, final int height) {
final float aspectRatio = (float) width / (float) (height == 0 ? 1 : height);
gl.glViewport(0, 0, width, height);
gl.glMatrixMode(GL10.GL_PROJECTION);
gl.glLoadIdentity();
GLU.gluPerspective(gl, 45.0f, aspectRatio, 0.1f, 100.0f);
gl.glMatrixMode(GL10.GL_MODELVIEW);
gl.glLoadIdentity();
}
#Override
public void onSurfaceCreated(final GL10 gl, final EGLConfig config) {
this.mSphere.loadGLTexture(gl, this.mContext, R.drawable.pic360);
gl.glEnable(GL10.GL_TEXTURE_2D);
gl.glShadeModel(GL10.GL_SMOOTH);
gl.glClearColor(0.0f, 0.0f, 0.0f, 0.5f);
gl.glClearDepthf(1.0f);
gl.glEnable(GL10.GL_DEPTH_TEST);
gl.glDepthFunc(GL10.GL_LEQUAL);
gl.glHint(GL10.GL_PERSPECTIVE_CORRECTION_HINT, GL10.GL_NICEST);
}
/**
 * Constructor: creates the sphere geometry and registers for gyroscope,
 * accelerometer and magnetic-field updates at the fastest delivery rate.
 *
 * NOTE(review): the listeners registered here are never unregistered in
 * the code shown — confirm the owner unregisters them (battery drain).
 */
public GlRenderer(final Context context) {
this.mContext = context;
// Subdivision depth 5, radius 2 world units.
this.mSphere = new Sphere(5, 2);
sensorManager = (SensorManager) this.mContext.getSystemService(this.mContext.SENSOR_SERVICE);
sensorGyroscope = sensorManager.getDefaultSensor(Sensor.TYPE_GYROSCOPE);
sensorAccelerometer = sensorManager.getDefaultSensor(Sensor.TYPE_ACCELEROMETER);
sensorMagneticField = sensorManager.getDefaultSensor(Sensor.TYPE_MAGNETIC_FIELD);
valuesAccelerometer = new float[3];
valuesMagneticField = new float[3];
matrixR = new float[9];
matrixI = new float[9];
matrixValues = new float[3];
sensorManager.registerListener(this, sensorGyroscope, SensorManager.SENSOR_DELAY_FASTEST);
sensorManager.registerListener(this, sensorAccelerometer, SensorManager.SENSOR_DELAY_FASTEST);
sensorManager.registerListener(this, sensorMagneticField, SensorManager.SENSOR_DELAY_FASTEST);
}
//HERE GOES SOME CURRENTLY IRRELEVANT STUFF ABOUT THE SENSORS AND QUATERNIONS
I had this type of video-texturing problem too. At first I used ffmpeg for video decoding, but the performance was very poor (just like yours — extracting frame by frame). To improve performance I used the default Android MediaPlayer. You can use a SurfaceTexture to create an OpenGL surface (sphere, cylinder, cube, etc.) and then set that surface on the media player:
Surface surface = new Surface(mSurface);//mSurface is your surface texture
mMediaPlayer.setSurface(surface);
mMediaPlayer.setScreenOnWhilePlaying(true);
This is just the technique. I did this for a commercial closed-source project, so I can't share the code. I hope to publish free code on GitHub soon.
Problem: I have limited the maximum and minimum zoom, everything is OK, but:
If I try to zoom less than the minimum, then to zoom+, it is necessary to do the same amount of motion in the opposite direction. The same is with zoom+. And on the technical side, it looks like this:
values[Matrix.MSCALE_X] changes, despite the fact that before this is:
m.reset();
m.setScale(sx1, sx1);
where sx1 is a FIXED value. Here is my code:
/**
 * Scale-gesture callback that rescales the image matrix while clamping the
 * zoom between a min (fit-to-screen) and a max level.
 *
 * NOTE(review): newScale keeps accumulating detector.getScaleFactor() even
 * while the displayed scale is clamped, and the min-zoom check is nested
 * inside the max-zoom branch — this mismatch is why extra finger travel is
 * needed to reverse direction after over-zooming.
 */
public boolean onScale(ScaleGestureDetector detector) {
try{
saved_gest_scale = newScale;
// Accumulate the gesture's incremental scale factor.
newScale *= detector.getScaleFactor();
scaleFactor = newScale / oldScale;
target_img_width *= 1/scaleFactor;
if (target_img_width > width)//limit max zoom
{
//fit on screen:
m = new Matrix();
m.reset();
float w_coef = img_width / width;
float sx1 = 1/w_coef; // scale that exactly fits the image width on screen
m.setScale(sx1, sx1);
///========
//apply new size
float sx2 = target_img_width/width;
sx2 = 1/sx2;
m.setScale(sx2, sx2); // NOTE(review): overwrites the sx1 scale set just above
float[] values = new float[9];
m.getValues(values);
//center image:
float globalX = values[Matrix.MTRANS_X];
float globalY = values[Matrix.MTRANS_Y];
float wid = values[Matrix.MSCALE_X]*img_width;
float heig = values[Matrix.MSCALE_Y]*img_height;
m.postTranslate(width/2 - wid/2, height/2-heig/2);
if (wid <= width)//limit min-zoom
{
// Clamp back to the fit-to-screen scale.
newScale = sx1;
m = new Matrix();
m.reset();
m.setScale(sx1, sx1);
wid = sx1*img_width;
heig = sx1*img_height;
debug.setText(wid + "<width" + "mx=" + values[Matrix.MSCALE_X] );
m.postTranslate(width/2 - wid/2, height/2-heig/2);
}
iw.setImageMatrix(m);
}
oldScale = newScale;
}catch (Exception xx)
{
debug.setText("detector "+ xx.toString());
}
return true;
}
Please help.
Use one parameter scale wich is changed in the onScale method. Also use two constants minScale and maxScale. In the onScale method check if new scale fits in the acceptable interval, and if it does - redraw the image with new scale.
Right now your code is a mess. The min-zoom limit check is located inside the max-zoom limit check, and the scaling logic is spread all around. Thus it is very unclear when a new scale becomes current and why it behaves incorrectly.
How to rotate the camera when using class VideoCapture on OpenCV? (Sample Face Detection on Android).
I'm rotating the canvas with:
if (getResources().getConfiguration().orientation == Configuration.ORIENTATION_PORTRAIT)
{
Matrix matrix = new Matrix();
matrix.preTranslate(
(canvas.getWidth() - bmp.getWidth()) / 2,
(canvas.getHeight() - bmp.getHeight()) / 2);
matrix.postRotate(270f, (canvas.getWidth()) / 2,
(canvas.getHeight()) / 2);
canvas.drawBitmap(bmp, matrix, null);
}
but image from Camera doesn't rotate: Face Detect dont work.
The camera receives the stream from the following:
protected Bitmap processFrame(VideoCapture capture) {
capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
capture.retrieve(mGray,
Highgui.CV_CAP_ANDROID_GREY_FRAME);
UPDATE
I did the following:
#Override
protected Bitmap processFrame(VideoCapture capture) {
if (getResources().getConfiguration().orientation == Configuration.ORIENTATION_PORTRAIT) {
Core.flip(mRgba.t(), mRgba, 0);
}
else {
}
capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
capture.retrieve(mDetect_thread.mGray,
Highgui.CV_CAP_ANDROID_GREY_FRAME);
But it doesn't work. When I run the program in portrait orientation (on an Android device), the program doesn't start. When I run the program in landscape orientation it works, but when I rotate the device the image on the display doesn't rotate.
Your question is mostly a duplicate of this, except that you are looking for the Android version. It is quite similar but here it is, 90º rotation can be obtained by transposing and then flipping the image:
// rotate 90º counter-clockwise
Core.flip(mRgba.t(), mRgba, 0); //mRgba.t() is the transpose
// rotate 90º clockwise
Core.flip(mRgba.t(), mRgba, 1);
For other rotations you can use warpAffine:
Point center = new Point(x,y);
double angle = 90;
double scale = 1.0;
Mat mapMatrix = Imgproc.getRotationMatrix2D(center, angle, scale);
Imgproc.warpAffine(srcMat, dstMat, mapMatrix, Imgproc.INTER_LINEAR);
EDIT - more complete examples follow:
/**
 * Resizes the source image so the rotated result fits inside the original
 * frame, rotates it 90º counter-clockwise via transpose + flip, and copies
 * it into the top-left ROI of mRgba.
 * mRgba is the source image, mIntermediateMat should have the same type.
 */
private void rotationTutorial(){
// Aspect ratio used to shrink the width so the rotated image fits.
double ratio = mRgba.height() / (double) mRgba.width();
int rotatedHeight = mRgba.height();
int rotatedWidth = (int) Math.round(mRgba.height() * ratio);
Imgproc.resize(mRgba, mIntermediateMat, new Size(rotatedHeight, rotatedWidth));
// Transpose + flip around the x-axis == 90º counter-clockwise rotation.
Core.flip(mIntermediateMat.t(), mIntermediateMat, 0);
// Copy the rotated image into the matching region of the source frame.
Mat ROI = mRgba.submat(0, mIntermediateMat.rows(), 0, mIntermediateMat.cols());
mIntermediateMat.copyTo(ROI);
}
/**
 * Rotates the image 90º with warpAffine about the image centre; the result
 * is cropped to fit the destination Mat and copied back into mRgba's ROI.
 */
private void rotationAffineTutorial(){
// assuming the source image's width and height are even values:
int centerX = Math.round(mRgba.width()/2);
int centerY = Math.round(mRgba.height()/2);
// NOTE(review): the centre is built as (centerY, centerX), which looks
// swapped relative to Point(x, y) — confirm this is intentional.
Point center = new Point(centerY,centerX);
double angle = 90;
double scale = 1.0;
double ratio = mRgba.height() / (double) mRgba.width();
int rotatedHeight = (int) Math.round(mRgba.height());
int rotatedWidth = (int) Math.round(mRgba.height() * ratio);
// 2x3 affine matrix encoding the rotation about 'center'.
Mat mapMatrix = Imgproc.getRotationMatrix2D(center, angle, scale);
Size rotatedSize = new Size(rotatedWidth, rotatedHeight);
mIntermediateMat = new Mat(rotatedSize, mRgba.type());
Imgproc.warpAffine(mRgba, mIntermediateMat, mapMatrix, mIntermediateMat.size(), Imgproc.INTER_LINEAR);
// Copy the rotated image back into the top-left region of the source frame.
Mat ROI = mRgba.submat(0, mIntermediateMat.rows(), 0, mIntermediateMat.cols());
mIntermediateMat.copyTo(ROI);
}
Note:
These examples might be orientation-specific, I made them for landscape orientation.
You should not call the code from these examples for every video frame. Some of the code should only run once.
If you only need to do 90, 180, or 270 degrees rotation (Which seems to be your case) you better use Core.flip() which is faster.
Here below a method that does it for you:
/**
 * Rotates src by a multiple of 90 degrees using transpose + flip, which is
 * faster than warpAffine for these angles.
 *
 * @param src   source image
 * @param angle rotation in degrees: ±90, ±180, ±270 (0 returns a copy)
 * @return the rotated image; for angle 0 or any unsupported angle an
 *         unmodified copy of the source is returned
 */
public static Mat rotate(Mat src, double angle)
{
    Mat dst = new Mat();
    if(angle == 180 || angle == -180) {
        // Flip around both axes == 180º rotation.
        Core.flip(src, dst, -1);
    } else if(angle == 90 || angle == -270) {
        // Transpose then flip horizontally == 90º clockwise.
        Core.flip(src.t(), dst, 1);
    } else if(angle == 270 || angle == -90) {
        // Transpose then flip vertically == 90º counter-clockwise.
        Core.flip(src.t(), dst, 0);
    } else {
        // BUG FIX: the original returned an *empty* Mat for angle 0 and any
        // other unsupported angle; return an unmodified copy instead.
        src.copyTo(dst);
    }
    return dst;
}