I want to scale an image using OpenGL. Can anyone provide code showing how to do this?
PS: I am using JGL as an OpenGL library for Java.
I will be brief on this, as you can find tutorials for pretty much every part of the solution.
You need to load your image from the disk, and create a texture.
You need to create a framebuffer object (FBO) with the desired target dimensions (in your case, double width, double height). Make the FBO active for rendering.
Render a fullscreen quad with your texture applied.
Read the result using glReadPixels().
And that's it ... the texture-mapping hardware will take care of the rescaling for you. But it will likely be slower than doing it on the CPU, especially for something as simple as 2x scaling.
EDIT: as requested by the OP, here is some source code.
So ... for loading an image in Java, you would go for this:
BufferedImage img = null; // initialize, so the variable is definitely assigned
try {
    img = ImageIO.read(new File("myLargeImage.jpg"));
} catch (IOException e) { /* ... */ }
int w = img.getWidth(), h = img.getHeight();
For JGL, one might want to convert an image to an array:
byte[][][] imageData = new byte[w][h][3]; // alloc buffer for RGB data
for(int y = 0; y < h; ++ y) {
    for(int x = 0; x < w; ++ x) {
        int ARGB = img.getRGB(x, y); // packed as 0xAARRGGBB
        imageData[x][y][0] = (byte)((ARGB >> 16) & 0xff); // red
        imageData[x][y][1] = (byte)((ARGB >> 8) & 0xff);  // green
        imageData[x][y][2] = (byte)(ARGB & 0xff);         // blue
    }
}
Note that there could be an alpha channel as well (for transparency) and that this will likely be quite slow. One could also use:
int[] rgbaData = img.getRGB(0, 0, w, h, new int[w * h], 0, w);
But that doesn't return the data in the format expected by JGL. Tough luck.
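If you do want to go this route anyway, the packed values still need to be rearranged into the byte layout used above; a minimal sketch (note the 0xAARRGGBB packing):
// rgbaData holds the pixels packed as 0xAARRGGBB, row by row
byte[][][] imageData = new byte[w][h][3];
for(int y = 0; y < h; ++ y) {
    for(int x = 0; x < w; ++ x) {
        int ARGB = rgbaData[y * w + x];
        imageData[x][y][0] = (byte)((ARGB >> 16) & 0xff); // red
        imageData[x][y][1] = (byte)((ARGB >> 8) & 0xff);  // green
        imageData[x][y][2] = (byte)(ARGB & 0xff);         // blue
    }
}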
Then you need to fill a texture:
int[] texId = {0};
gl.glGenTextures(1, texId); // generate texture id for your texture (can skip this line)
gl.glEnable(GL.GL_TEXTURE_2D);
gl.glBindTexture(GL.GL_TEXTURE_2D, texId[0]); // bind the texture
gl.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1); // set alignment of data in memory (a good thing to do before glTexImage)
gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP);
gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP); // set clamp (GL_CLAMP_TO_EDGE would be better)
gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR);
gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR); // set linear filtering (so you can scale your image)
gl.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB, w, h, 0, GL.GL_RGB, GL.GL_UNSIGNED_BYTE, imageData); // upload image data to the texture
Once you have a texture, you can draw stuff. Let's resample your image:
int newW = ..., newH = ...; // fill in your values
gl.glViewport(0, 0, newW, newH); // set viewport
gl.glMatrixMode(GL.GL_MODELVIEW);
gl.glLoadIdentity();
gl.glMatrixMode(GL.GL_PROJECTION);
gl.glLoadIdentity();
gl.glColor3f(1.0f, 1.0f, 1.0f); // set "white" color
gl.glDisable(GL.GL_CULL_FACE); // disable backface culling
gl.glDisable(GL.GL_LIGHTING); // disable lighting
gl.glDisable(GL.GL_DEPTH_TEST); // disable depth test
// setup OpenGL so that it renders texture colors without modification (some of that is not required by default)
gl.glBegin(GL.GL_QUADS);
gl.glTexCoord2f(0.0f, 1.0f);
gl.glVertex2f(-1.0f, -1.0f);
gl.glTexCoord2f(1.0f, 1.0f);
gl.glVertex2f(+1.0f, -1.0f);
gl.glTexCoord2f(1.0f, 0.0f);
gl.glVertex2f(+1.0f, +1.0f);
gl.glTexCoord2f(0.0f, 0.0f);
gl.glVertex2f(-1.0f, +1.0f);
gl.glEnd();
// draw a fullscreen quad with your texture on it (scaling is performed here)
Now that the scaled image is rendered, all that remains is to read it back.
byte[][][] newImageData = new byte[newW][newH][3];
gl.glPixelStorei(GL.GL_PACK_ALIGNMENT, 1); // set alignment of data in memory (this time pack alignment; a good thing to do before glReadPixels)
gl.glReadPixels(0, 0, newW, newH, GL.GL_RGB, GL.GL_UNSIGNED_BYTE, newImageData);
And then the data can be converted to a BufferedImage, the reverse of how the input image img was converted to imageData.
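For example (a sketch; error handling is omitted, and note that glReadPixels returns rows bottom-up while BufferedImage rows run top-down):
BufferedImage out = new BufferedImage(newW, newH, BufferedImage.TYPE_INT_RGB);
for(int y = 0; y < newH; ++ y) {
    for(int x = 0; x < newW; ++ x) {
        int r = newImageData[x][y][0] & 0xff;
        int g = newImageData[x][y][1] & 0xff;
        int b = newImageData[x][y][2] & 0xff;
        out.setRGB(x, newH - 1 - y, (r << 16) | (g << 8) | b); // flip vertically
    }
}
ImageIO.write(out, "png", new File("myScaledImage.png"));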
Note that I used the variable gl, an instance of the GL class. Put the code into some JGL example and it should work.
One word of caution: JGL doesn't seem to support framebuffer objects, so the output image size is limited by your OpenGL window size (attempting to create larger images will result in black borders). This can be solved with multipass rendering: render your image tile by tile and assemble the full image in memory, as sketched below.
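A rough sketch of the tiling idea, assuming the window is winW x winH; drawFullscreenQuad() is a hypothetical helper wrapping the glBegin(GL.GL_QUADS) block above:
byte[][][] fullImage = new byte[newW][newH][3];
for(int ty = 0; ty < newH; ty += winH) {
    for(int tx = 0; tx < newW; tx += winW) {
        int tw = Math.min(winW, newW - tx), th = Math.min(winH, newH - ty);
        gl.glViewport(0, 0, tw, th);
        gl.glMatrixMode(GL.GL_PROJECTION);
        gl.glLoadIdentity();
        // map this tile's sub-rectangle of the [-1, 1] quad onto the viewport
        double l = -1.0 + 2.0 * tx / newW, r = l + 2.0 * tw / newW;
        double b = -1.0 + 2.0 * ty / newH, t = b + 2.0 * th / newH;
        gl.glOrtho(l, r, b, t, -1.0, 1.0);
        drawFullscreenQuad(gl); // the textured quad from above
        byte[][][] tile = new byte[tw][th][3];
        gl.glReadPixels(0, 0, tw, th, GL.GL_RGB, GL.GL_UNSIGNED_BYTE, tile);
        for(int x = 0; x < tw; ++ x) // copy the tile into the full image
            for(int y = 0; y < th; ++ y)
                fullImage[tx + x][ty + y] = tile[x][y];
    }
}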
Related
I want to display text within an OpenGL Android application. I have tried the code below, but nothing is displayed. I have made some other attempts as well, but none of them displayed anything either. Could anyone help me with some advice?
String aText = "Prueba";
float aFontSize = 100;
int[] textureId = new int[1];
Paint textPaint = new Paint();
textPaint.setTextSize(aFontSize);
textPaint.setFakeBoldText(false);
textPaint.setAntiAlias(true);
textPaint.setARGB(255, 255, 255, 255);
// If hinting is available on the platform you are developing for, you should enable it (uncomment the line below).
//textPaint.setHinting(Paint.HINTING_ON);
textPaint.setSubpixelText(true);
textPaint.setXfermode(new PorterDuffXfermode(PorterDuff.Mode.SCREEN));
float realTextWidth = textPaint.measureText(aText);
// Creates a new mutable bitmap just large enough to hold the measured text
int bitmapWidth = (int)(realTextWidth + 2.0f);
int bitmapHeight = (int)aFontSize + 2;
Bitmap textBitmap = Bitmap.createBitmap(bitmapWidth, bitmapHeight, Bitmap.Config.ARGB_8888);
textBitmap.eraseColor(Color.argb(0, 255, 255, 255));
// Creates a new canvas that will draw into a bitmap instead of rendering into the screen
Canvas bitmapCanvas = new Canvas(textBitmap);
// Set start drawing position to [1, base_line_position]
// The base_line_position may vary from one font to another, but it is usually about 75% of the font size (height).
bitmapCanvas.drawText(aText, 1, 1.0f + aFontSize * 0.75f, textPaint);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, textureId[0]);
// Assigns the OpenGL texture with the Bitmap
GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_RGBA, textBitmap, 0);
// Free memory resources associated with this texture
textBitmap.recycle();
// After the image has been subloaded to texture, regenerate mipmaps
GLES20.glGenerateMipmap(GLES20.GL_TEXTURE_2D);
I'm writing an application that needs to work with 16-bit "5-5-5" RGB colors (that is, 5 bits for each color and one bit of padding). In order to handle these images, I am using the BufferedImage class provided by AWT. The BufferedImage class specifically allows for the usage of non-RGB color spaces by taking either a ColorModel object or a predefined image type constant - one of which is the 5-5-5 pixel format that I need.
My problem is this: the BufferedImage "setRGB()" method states in its description that color values provided are "assumed to be in the default RGB color model, TYPE_INT_ARGB, and default sRGB color space" (per the BufferedImage documentation page). No other method seems to accept values designed for different color spaces, either.
Is there a way to use my non-standard color space directly with BufferedImage, or would I have to rely on the class's internal color conversion mechanisms to handle all of my colors? (Or am I just misreading/misunderstanding something about how the class works?)
BufferedImage.TYPE_USHORT_555_RGB still uses a completely standard RGB color space (in fact, it uses sRGB), so I don't think a different color space is what you are looking for.
If you want to perform painting or other operations in Java, just use the normal methods like setRGB()/getRGB() and createGraphics()/Graphics2D. Everything will be properly converted to and from the packed USHORT_555_RGB format for you.
For example:
BufferedImage image = new BufferedImage(w, h, BufferedImage.TYPE_USHORT_555_RGB);
// Do some custom painting
Graphics2D g = image.createGraphics();
g.drawImage(otherImage, 0, 0, null); // image type here does not matter
g.setColor(Color.ORANGE); // Color in sRGB, but does not matter
g.fillOval(0, 0, w, h);
g.dispose();
image.setRGB(0, h/2, w, 1, new int[w]); // Silly way to create a horizontal black line at the center of the image... Don't do this, use fillRect(0, h/2, w, 1)! ;-)
// image will still be USHORT_555_RGB *internally*
However, if you have pixel data in the USHORT_555_RGB format (i.e. from an external library/API/service), it may be faster and more accurate to set these values directly on the raster/data buffer. The same applies if you need to pass the pixel values back to that library/API/service.
For example, using the Raster:
BufferedImage image = new BufferedImage(w, h, BufferedImage.TYPE_USHORT_555_RGB);
// Some fictional API. It's assumed that apiPixels.length == w * h
short[] apiPixels = api.getPixelsUSHORT_555_RGB(w, h);
WritableRaster raster = image.getRaster();
// Set short values to image
raster.setDataElements(0, 0, w, h, apiPixels);
// Get short values from image
short[] pixels = (short[]) raster.getDataElements(0, 0, w, h, null); // TYPE_USHORT_555_RGB -> always short[]
api.setPixels(pixels, w, h); // Another fictional API
Or, alternatively, use the DataBuffer:
BufferedImage image = new BufferedImage(w, h, BufferedImage.TYPE_USHORT_555_RGB);
// Some fictional API. It's assumed that apiPixels.length == w * h
short[] apiPixels = api.getPixelsUSHORT_555_RGB(w, h);
DataBufferUShort buffer = (DataBufferUShort) image.getRaster().getDataBuffer(); // TYPE_USHORT_555_RGB -> always DataBufferUShort
// Set short values to image
System.arraycopy(apiPixels, 0, buffer.getData(), 0, apiPixels.length);
// Get short values from image
api.setPixels(buffer.getData(), w, h);
In most cases it does not matter which method you use, but the first approach (using Raster only) may keep the image managed, which will make images display faster on screen from your Java process.
PS: If a different color space is really what you need (i.e. the pixel array from the external library/API/service uses a different color space, and you need to view the pixels in that color space), you can create a BufferedImage in USHORT_555_RGB style with a custom color space like this:
// Either use one of the built-in color spaces, or load one from disk
ColorSpace colorSpace = ColorSpace.getInstance(ColorSpace.CS_LINEAR_RGB);
ColorSpace colorSpaceToo = new ICC_ColorSpace(ICC_Profile.getInstance(Files.newInputStream(new File("/path/to/custom_rgb_profile.icc").toPath())));
// Create a color model using your color space, TYPE_USHORT and 5/5/5 mask, no transparency
ColorModel colorModel = new DirectColorModel(colorSpace, 15, 0x7C00, 0x03E0, 0x001F, 0, false, DataBuffer.TYPE_USHORT);
// And finally, create an image from the color model and a compatible raster
BufferedImage imageToo = new BufferedImage(colorModel, colorModel.createCompatibleWritableRaster(w, h), colorModel.isAlphaPremultiplied(), null);
Just remember that because the Java2D graphics operations and setRGB()/getRGB() still use sRGB, all operations on your image will now be converted back and forth between your color space and sRGB. Performance will not be as good.
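To see that conversion in action, here is a tiny round-trip check (the color value is arbitrary):
// Write an sRGB color into the custom-color-space image and read it back;
// the value passes through the custom space (plus 5-bit quantization), so
// the output may differ slightly from the input.
imageToo.setRGB(0, 0, 0xFF8040);
int back = imageToo.getRGB(0, 0) & 0xFFFFFF;
System.out.printf("in=%06X out=%06X%n", 0xFF8040, back);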
I have a Java OpenGL (JOGL) app, and I'm trying to create a texture-mapped quad that covers the entire screen. I draw some pixels to a buffer, and then I want to read those pixels into a texture and redraw them on screen (with a fragment shader applied). My code for mapping the texture to the viewport is:
gl.glMatrixMode(GL.GL_PROJECTION);
gl.glPushMatrix();
gl.glLoadIdentity();
gl.glOrtho( 0, width, height, 0, -1, 1 );
gl.glMatrixMode(GL.GL_MODELVIEW);
gl.glPushMatrix();
gl.glLoadIdentity();
IntBuffer ib = IntBuffer.allocate(1);
gl.glEnable(GL.GL_TEXTURE_2D);
gl.glGenTextures(1, ib);
gl.glPixelStorei(GL.GL_PACK_ALIGNMENT, 1);
//buff contains pixels read from glReadPixels
gl.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, width, height, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, buff);
gl.glBindTexture(GL.GL_TEXTURE_2D, ib.get(0));
gl.glBegin(GL.GL_QUADS);
gl.glTexCoord2f(0,1);
gl.glVertex2f(0,0);
gl.glTexCoord2f(0,0);
gl.glVertex2f(0,height);
gl.glTexCoord2f(1,0);
gl.glVertex2f(width,height);
gl.glTexCoord2f(1,1);
gl.glVertex2f(width,0);
gl.glEnd();
gl.glBindTexture(GL.GL_TEXTURE_2D, 0);
gl.glPopMatrix();
gl.glPopMatrix();
The end result is a quad that is not covering the whole viewport (it's partially on) and that does not contain the pixels from the buffer. What am I doing incorrectly here?
thanks,
Jeff
First, you should only create the texture in your initialization code. You should not be calling glTexImage2D every frame. Only call glTexImage2D again if the size of the texture changes; glTexSubImage2D can be used to upload data to the texture. Think of glTexImage2D as "new", while glTexSubImage2D as a memory copy.
Do this once, after initializing OpenGL.
IntBuffer ib = IntBuffer.allocate(1); //Store this in your object
gl.glGenTextures(1, ib);
gl.glPixelStorei(GL.GL_PACK_ALIGNMENT, 1);
//buff contains pixels read from glReadPixels
gl.glBindTexture(GL.GL_TEXTURE_2D, ib.get(0));
gl.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, width, height, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, null); // null: allocate storage, no initial data
gl.glBindTexture(GL.GL_TEXTURE_2D, 0);
Then, each frame, do this:
gl.glMatrixMode(GL.GL_PROJECTION);
gl.glPushMatrix();
gl.glLoadIdentity();
gl.glMatrixMode(GL.GL_MODELVIEW);
gl.glPushMatrix();
gl.glLoadIdentity();
gl.glBindTexture(GL.GL_TEXTURE_2D, ib.get(0)); //Retrieved from your object
gl.glEnable(GL.GL_TEXTURE_2D);
gl.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, width, height, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, buff); // note: glTexSubImage2D takes no border argument
gl.glBegin(GL.GL_QUADS);
gl.glTexCoord2f(0,1);
gl.glVertex2f(-1, -1);
gl.glTexCoord2f(0, 0);
gl.glVertex2f(-1, 1);
gl.glTexCoord2f(1, 0);
gl.glVertex2f(1, 1);
gl.glTexCoord2f(1, 1);
gl.glVertex2f(1, -1);
gl.glEnd();
gl.glMatrixMode(GL.GL_MODELVIEW);
gl.glPopMatrix();
gl.glMatrixMode(GL.GL_PROJECTION);
gl.glPopMatrix();
gl.glMatrixMode(GL.GL_MODELVIEW);
By using identity for projection and modelview, we are able to supply vertex coordinates directly in clip space. The [-1, 1] range in clip space maps to [0, width] and [0, height] in window space. So we don't have to know or care about how big the window is; as long as the glViewport was set up correctly, this should work.
It may not be the problem, but it won't be helping: You are popping the modelview matrix twice for a single push. You are not popping the projection matrix at all.
I would recommend setting the projection matrix once at startup, without doing any pushes or pops. You don't really need to push and pop the modelview matrix either. (You could do your texture setup once at startup, too.)
I would start by checking glGetError() with code like the function below. Note that I used the GL2 object because older versions of JOGL had some issues with the GL object, silly things like GL_QUADS not being there.
If you have a shader enabled with the above code, you need to do the texturing by reading the sampler (see the sketch after the error-checking code). If so, please attach the shader code you are using with this rendering code.
private static void checkForGLErrors(GL2 gl) {
    // Note: glGetError() reports one error at a time; call this repeatedly
    // (or loop until GL_NO_ERROR) to drain the whole error queue.
    int errno = gl.glGetError();
    switch (errno) {
        case GL2.GL_INVALID_ENUM:
            System.err.println("OpenGL Error: Invalid ENUM");
            break;
        case GL2.GL_INVALID_VALUE:
            System.err.println("OpenGL Error: Invalid Value");
            break;
        case GL2.GL_INVALID_OPERATION:
            System.err.println("OpenGL Error: Invalid Operation");
            break;
        case GL2.GL_STACK_OVERFLOW:
            System.err.println("OpenGL Error: Stack Overflow");
            break;
        case GL2.GL_STACK_UNDERFLOW:
            System.err.println("OpenGL Error: Stack Underflow");
            break;
        case GL2.GL_OUT_OF_MEMORY:
            System.err.println("OpenGL Error: Out of Memory");
            break;
        default:
            return;
    }
}
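For reference, a minimal compatibility-profile shader pair that just samples the bound texture might look like this (a sketch; the program handle and the compile/link boilerplate are assumed):
String vertexSrc =
    "void main() {\n" +
    "    gl_TexCoord[0] = gl_MultiTexCoord0;\n" + // pass through the fixed-function texcoord
    "    gl_Position = ftransform();\n" +
    "}\n";
String fragmentSrc =
    "uniform sampler2D tex;\n" +
    "void main() {\n" +
    "    gl_FragColor = texture2D(tex, gl_TexCoord[0].st);\n" + // read the sampler
    "}\n";
// After compiling and linking these into 'program', point the sampler at
// texture unit 0, where the texture above is bound:
// gl.glUniform1i(gl.glGetUniformLocation(program, "tex"), 0);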
I would also try to avoid generating the texture every frame if it is something that doesn't change. You can save the textureId and bind it later.
Update: It turns out this is a driver issue with the PowerVR SGX in my Nexus S. The code works fluidly on all other devices I've tested it on.
I'll be making a smaller test case and submitting a bug report... to someone. Don't know who.
Hey guys,
First off, I'm working on a port of AndAR (ARToolkit for Android) to GLES 2.0 using the Java GLES 2.0 bindings. The entirety of my code can be found in my AndARShaders repository if you're curious, but I'll try to sum up the problem in this question.
I'm attempting to implement this paper to generate AR renderings which reflect and refract believably: Virtual Reflections in Augmented Reality Environments. In order to do this, the screen space bounding box of the object to be rendered is determined, then used to generate texture coordinates for planes representing each face of the cubemap. This means rendering a cubemap for each frame for each model. I'm only rendering one model at a time right now. I'm trying to use framebuffer objects to render the cubemap based on the method described in the paper.
Anyway, to the problem.
I have it mostly implemented. As far as I can tell, at least the front face of the cubemap has its vertices and UV coordinates generated correctly. I can render the front face to the system-provided framebuffer for the screen, and it renders just fine without problems for as long as I'd like. The problem comes in with rendering it to a framebuffer object.
When I render my cubemap faces to a framebuffer object linked to a cubemap texture, GL eats ALL of my memory within a few seconds and crashes with GL error 1285 (OUT_OF_MEMORY). If I don't bind the FBO, I can render the cubemap faces to the screen without any memory issues. The cubemap texture size is 128 px square, which should be reasonable for a mobile device. Somehow, GL is leaking memory.
Here's the rough order in which I'm doing things. This is the entry point for rendering this frame. (src/edu/dhbw/andar/ARGLES20Renderer.java ~Line 179)
// BEGIN TO DRAW FRAME. DRAW BACKGROUND CAMERA IMAGE TO QUAD
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
GLES20.glUseProgram(mProgram);
GLES20.glActiveTexture(GLES20.GL_TEXTURE0); // Camera image is stored in Texture0
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, textureName);
//load new preview frame as a texture, if needed
GLES20.glTexSubImage2D(GLES20.GL_TEXTURE_2D, 0, 0, 0, previewFrameWidth, previewFrameHeight, mode, GLES20.GL_UNSIGNED_BYTE, frameData);
//draw camera preview frame:
squareBuffer.position(0);
GLES20.glVertexAttribPointer(maPositionHandle, 3, GLES20.GL_FLOAT, false, GraphicsUtil.TRIANGLE_VERTICES_DATA_STRIDE_BYTES, squareBuffer);
GLES20.glEnableVertexAttribArray(maPositionHandle);
textureBuffer.position(0);
GLES20.glVertexAttribPointer(maTextureHandle, 2, GLES20.GL_FLOAT, false, GraphicsUtil.TRIANGLE_VERTICES_UV_STRIDE_BYTES, textureBuffer);
GLES20.glEnableVertexAttribArray(maTextureHandle);
Matrix.multiplyMM(mMVPMatrix, 0, mProjMatrix, 0, mVMatrix, 0);
GLES20.glUniformMatrix4fv(muMVPMatrixHandle, 1, false, mMVPMatrix, 0);
GLES20.glUniform1i(mSamplerLoc, 0);
//draw camera square
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
GLES20.glDisableVertexAttribArray(maPositionHandle);
GLES20.glDisableVertexAttribArray(maTextureHandle);
DRAW_OBJECTS();
And the code to draw the objects goes somewhat like this:
( src/edu/dhbw/andar/ARGLES20Object.java ~ Line 36 )
( src/edu/dhbw/andar/pub/CustomGLES20Object.java ~Line 55 )
// Use the new program for the object (Refract/reflect shader)
GLES20.glUseProgram( mProgram );
// Transform to where the marker is
Matrix.multiplyMM(mMVPMatrix, 0, glCameraMatrix, 0, glMatrix, 0);
GLES20.glUniformMatrix4fv(muMVPMatrixHandle, 1, false, mMVPMatrix, 0);
// Create a cubemap for this object from vertices
GENERATE_CUBEMAP( box.vertArray() );
// Feed in Verts
box.verts().position(0);
box.normals().position(0);
GLES20.glVertexAttribPointer(maPositionHandle, 3, GLES20.GL_FLOAT, false, VERTEX_NORMAL_DATA_STRIDE, box.verts());
GLES20.glEnableVertexAttribArray(maPositionHandle);
GLES20.glVertexAttribPointer(maNormalHandle, 3, GLES20.GL_FLOAT, false, VERTEX_NORMAL_DATA_STRIDE, box.normals());
GLES20.glEnableVertexAttribArray(maNormalHandle);
// Set Uniforms...
GLES20.glUniform4f(muColor, 0.0f, 1.0f, 0.0f, 1.0f);
...
// Draw the cube faces
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
...
GLES20.glDisableVertexAttribArray(maPositionHandle);
GLES20.glDisableVertexAttribArray(maNormalHandle);
Note the GENERATE_CUBEMAP( Vertices ) call toward the beginning of rendering the object. Here's what it does. The screen space bounding box [ssbb] has been calculated from the vertices.
(src/edu/dhbw/andar/ARGLES20Renderer.java ~Line 280)
// Grab the current viewport and program for restoration later
int[] OldViewport = new int[4], OldProgram = new int[1];
GLES20.glGetIntegerv(GLES20.GL_VIEWPORT, OldViewport, 0);
GLES20.glGetIntegerv(GLES20.GL_CURRENT_PROGRAM, OldProgram, 0);
// Update dynamic cubemap based on screen space bounding box for this frame
mDC.UpdateUVs( DynamicCubemap.CorrectSSBB( ssbb ), widthcorrection, heightcorrection );
// Set up the program used to render to the texture
GLES20.glUseProgram(mProgram);
GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, textureName);
float[] projmatrix = new float[16]; // Projection Matrix
Matrix.orthoM(projmatrix, 0, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f);
Matrix.multiplyMM(mMVPMatrix, 0, projmatrix, 0, mVMatrix, 0);
GLES20.glUniformMatrix4fv(muMVPMatrixHandle, 1, false, mMVPMatrix, 0);
GLES20.glUniform1i(mSamplerLoc, 0); // Use the camera texture (bound in unit zero)
// Render to the front face of the cubemap
// Note: If I don't bind the new Framebuffer, this
// renders the face to the screen very nicely without memory issues
GLES20.glActiveTexture(GLES20.GL_TEXTURE1);
GLES20.glBindTexture(GLES20.GL_TEXTURE_CUBE_MAP, 0); // Ensure we aren't rendering to the same texture we're using
GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, mFrameBuffers[5]);
GLES20.glViewport( 0, 0, edu.dhbw.andar.Config.CUBEMAP_SIZE, edu.dhbw.andar.Config.CUBEMAP_SIZE);
GLES20.glClear( GLES20.GL_COLOR_BUFFER_BIT );
mDC.DrawFace( 5, maPositionHandle, maTextureHandle ); // Draw the front face with glDrawArrays
// Unbind the framebuffer, we no longer need to render to textures.
GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, 0);
// Ensure the newly generated cubemap is bound to the correct texture unit
GLES20.glBindTexture(GLES20.GL_TEXTURE_CUBE_MAP, mCubeMapTexture);
// Bind the old program and viewport
GLES20.glUseProgram( OldProgram[0] );
GLES20.glViewport( OldViewport[0], OldViewport[1], OldViewport[2], OldViewport[3] );
And that's it... Here's how I initialize my FBOs and cubemap textures when the program starts.
(src/edu/dhbw/andar/ARGLES20Renderer.java ~Line 128)
// Generate Cubemap Textures
int[] cubemaptextures = new int[1];
GLES20.glGenTextures(1, cubemaptextures, 0 );
mCubeMapTexture = cubemaptextures[0];
GLES20.glActiveTexture(GLES20.GL_TEXTURE1);
GLES20.glBindTexture(GLES20.GL_TEXTURE_CUBE_MAP, mCubeMapTexture);
for( int i = 0; i < 6; i++ ) {
GLES20.glTexImage2D(GLES20.GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, mode, CUBEMAP_SIZE, CUBEMAP_SIZE, 0, mode, GLES20.GL_UNSIGNED_BYTE, ByteBuffer.wrap(frame));
}
GLES20.glTexParameterf(GLES20.GL_TEXTURE_CUBE_MAP, GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_CLAMP_TO_EDGE);
GLES20.glTexParameterf(GLES20.GL_TEXTURE_CUBE_MAP, GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_CLAMP_TO_EDGE);
GLES20.glTexParameteri(GLES20.GL_TEXTURE_CUBE_MAP, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_NEAREST);
GLES20.glTexParameteri(GLES20.GL_TEXTURE_CUBE_MAP, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_NEAREST);
GLES20.glBindTexture(GLES20.GL_TEXTURE_CUBE_MAP, 0);
// Create a set of FrameBuffers for the cubemap
mFrameBuffers = new int[6];
GLES20.glGenFramebuffers(6, mFrameBuffers, 0);
for( int i = 0; i < 6; i++ ) {
GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, mFrameBuffers[i]);
GLES20.glFramebufferTexture2D( GLES20.GL_FRAMEBUFFER, GLES20.GL_COLOR_ATTACHMENT0,
GLES20.GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, mCubeMapTexture, 0 );
GLES20.glCheckFramebufferStatus( GLES20.GL_FRAMEBUFFER );
}
GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, 0);
Perhaps my ordering is wrong, or my setup is incorrect?
Sorry for the LONG post. I really did everything in my power to make this as short as possible while still giving enough information to solve the problem. I cut out a lot of extra code which is application specific. If you're interested, or you think the problem might be caused elsewhere, I included links to the actual source files so you can take a quick peek.
Thanks for your time! I've wasted FAR too much time on this.
-Griff
Edit: clarified texture size
How can I add functionality to my game that lets players change their hairstyle, look, style of clothes, etc., so that whenever they wear a different item of clothing, their avatar is updated with it?
Should I:
Have my designer create all possible combinations of armor, hairstyles, and faces as sprites (this could be a lot of work).
When the player chooses what they should look like during their introduction to the game, my code would automatically create this sprite, and all possible combinations of headgear/armor with that sprite. Then each time they select some different armor, the sprite for that armor/look combination is loaded.
Is it possible to have a character's sprite divided into components, like face, shirt, jeans, and shoes, and have the pixel dimensions of each of these? Then, whenever the player changes his helmet, for example, we use the pixel dimensions to put the helmet image in place of where the face image would normally be. (I'm using Java to build this game.)
Or is this not possible in 2D, and should I use 3D for this?
Any other method?
Please advise.
One major factor to consider is animation. If a character has armour with shoulder pads, those shoulder pads may need to move with his torso. Likewise, if he's wearing boots, those have to follow the same cycles as his bare feet would.
Essentially, what you need is a sprite sheet that lets your artists see all possible frames of animation for your base character. You then have them create custom hairstyles, boots, armour, etc. based on those sheets. Yes, it's a lot of work, but in most cases the elements will require a minimal amount of redrawing; boots are about the only thing I could see really taking a lot of work to re-create, since they change over multiple frames of animation. Be ruthless with your sprites; try to cut down the required number as much as possible.
After you've amassed a library of elements, you can start cheating. Recycle the same hairstyle and adjust its colour either in Photoshop or directly in the game with sliders in your character creator.
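For the in-game colour sliders, something along these lines could work with Java 2D's RescaleOp (a sketch; hairSprite and the slider values are assumptions, and the sprite is assumed to be TYPE_INT_ARGB):
// Tint a light/grayscale hair sprite by scaling its R, G and B bands;
// the fourth factor leaves the alpha channel untouched.
float r = 0.8f, g = 0.5f, b = 0.2f; // from the character-creator sliders
RescaleOp tint = new RescaleOp(
        new float[] { r, g, b, 1.0f },  // per-band scale factors
        new float[] { 0f, 0f, 0f, 0f }, // per-band offsets
        null);
BufferedImage tinted = tint.filter(hairSprite, null);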
The last step, to ensure good performance in-game, would be to flatten all the different elements' sprite sheets into a single sprite sheet that is then split up and stored in sprite buffers.
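Flattening can be as simple as alpha-compositing the chosen layers over the base sheet with Java 2D (a sketch; the sheet names and dimensions are assumptions):
// Paint the selected layer sheets over the base sheet, back to front; all
// sheets are assumed to share the same frame layout and dimensions.
BufferedImage flat = new BufferedImage(sheetW, sheetH, BufferedImage.TYPE_INT_ARGB);
Graphics2D g2 = flat.createGraphics();
g2.drawImage(baseSheet, 0, 0, null);   // bare character first
g2.drawImage(bootsSheet, 0, 0, null);  // then each equipment layer
g2.drawImage(armourSheet, 0, 0, null);
g2.drawImage(hairSheet, 0, 0, null);
g2.dispose();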
3D will not be necessary for this, but the painter's algorithm that is common in the 3D world might, IMHO, save you some work:
The painter's algorithm works by drawing the most distant objects first, then overdrawing with objects closer to the camera. In your case, it boils down to allocating the buffer for your sprite, drawing the base sprite onto it, finding the next dependent sprite part (i.e. armour or whatnot), drawing that, finding the next dependent sprite part (i.e. a special sign that's on the armour), and so on. When there are no more dependent parts, you paint the fully generated sprite onto the display the user sees.
The combined parts should have an alpha channel (RGBA instead of RGB) so that you only combine pixels whose alpha value is set to a value of your choice. If you cannot do that for whatever reason, just stick with one RGB combination that you will treat as transparent.
Using 3D might make combining the parts easier for you, and you'd not even have to use an offscreen buffer or write the pixel-combining code. The flip side is that you need to learn a little 3D if you don't know it already. :-)
Edit to answer comment:
The combination part would work somewhat like this (in C++, Java will be pretty similar - please note that I did not run the code below through a compiler):
//
// @param dependant_textures is a vector of textures where
//   texture n+1 depends on texture n.
// @param combined_tex is the output of all textures combined
void Sprite::combineTextures (vector<Texture> const& dependant_textures,
                              Texture& combined_tex) {
    vector<Texture>::const_iterator iter = dependant_textures.begin();
    combined_tex = *iter;
    if (dependant_textures.size() > 1)
        for (iter++; iter != dependant_textures.end(); iter++) {
            Texture const& current_tex = *iter;
            // Go through each pixel, painting:
            for (unsigned int pixel_index = 0;
                 pixel_index < current_tex.numPixels(); pixel_index++) {
                // Assuming that Texture has a method to export the raw pixel
                // data as an array of chars - copy only pixels whose alpha
                // value is set (zero alpha marks a transparent pixel):
                int const BYTESPERPIXEL = 4; // RGBA
                if (current_tex.getRawData()[pixel_index * BYTESPERPIXEL + 3])
                    for (int copied_bytes = 0; copied_bytes < 3; copied_bytes++)
                    {
                        int index = pixel_index * BYTESPERPIXEL + copied_bytes;
                        combined_tex.getRawData()[index] =
                            current_tex.getRawData()[index];
                    }
            }
        }
}
To answer your question for a 3D solution, you would simply draw rectangles with their respective textures (that would have an alpha channel) over each other. You would set the system up to display in an orthogonal mode (for OpenGL: gluOrtho2D()).
I'd go with the procedural generation solution (#2), as long as the number of sprites to be generated isn't so large that the generation takes too long. Maybe do the generation when each item is acquired, to lower the load; a sketch of that follows.
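A minimal sketch of on-demand generation with a java.util.Map cache (composeSprite() stands in for whatever compositing routine you use; the key format is arbitrary):
Map<String, BufferedImage> spriteCache = new HashMap<>();

// Return the composite for this equipment combination, generating it the
// first time the combination is seen (e.g. when the item is acquired).
BufferedImage spriteFor(String hair, String armour, String boots) {
    String key = hair + "/" + armour + "/" + boots;
    return spriteCache.computeIfAbsent(key, k -> composeSprite(hair, armour, boots));
}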
Since I was asked in the comments to supply a 3D way as well, here is an excerpt of some code I wrote quite some time ago. It's OpenGL and C++.
Each sprite would be asked to draw itself. Using the Adapter pattern, I would combine sprites - i.e., there would be sprites holding two or more sub-sprites at a (0,0) relative position, and one sprite with a real position holding all those "sub-"sprites.
void Sprite::display (void) const
{
glBindTexture(GL_TEXTURE_2D, tex_id_);
Display::drawTranspRect(model_->getPosition().x + draw_dimensions_[0] / 2.0f,
model_->getPosition().y + draw_dimensions_[1] / 2.0f,
draw_dimensions_[0] / 2.0f, draw_dimensions_[1] / 2.0f);
}
void Display::drawTranspRect (float x, float y, float x_len, float y_len)
{
glPushMatrix();
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glColor4f(1.0, 1.0, 1.0, 1.0);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f); glVertex3f(x - x_len, y - y_len, Z);
glTexCoord2f(1.0f, 0.0f); glVertex3f(x + x_len, y - y_len, Z);
glTexCoord2f(1.0f, 1.0f); glVertex3f(x + x_len, y + y_len, Z);
glTexCoord2f(0.0f, 1.0f); glVertex3f(x - x_len, y + y_len, Z);
glEnd();
glDisable(GL_BLEND);
glPopMatrix();
}
tex_id_ is an integral value that identifies the texture to OpenGL. The relevant parts of the texture manager are below. The texture manager actually emulates an alpha channel by checking whether the color read is pure white (an RGB value of (ff,ff,ff)) - the loadImage code operates on 24-bits-per-pixel BMP files:
TextureManager::texture_id
TextureManager::createNewTexture (Texture const& tex) {
texture_id id;
glGenTextures(1, &id);
glBindTexture(GL_TEXTURE_2D, id);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, 4, tex.width_, tex.height_, 0,
GL_BGRA_EXT, GL_UNSIGNED_BYTE, tex.texture_);
return id;
}
void TextureManager::loadImage (FILE* f, Texture& dest) const {
fseek(f, 18, SEEK_SET);
signed int compression_method;
unsigned int const HEADER_SIZE = 54;
fread(&dest.width_, sizeof(unsigned int), 1, f);
fread(&dest.height_, sizeof(unsigned int), 1, f);
fseek(f, 28, SEEK_SET);
fread(&dest.bpp_, sizeof (unsigned short), 1, f);
fseek(f, 30, SEEK_SET);
fread(&compression_method, sizeof(unsigned int), 1, f);
// We store 4 channels per pixel, because we will manually set an alpha
// channel for the color white (the input BMP has 3 channels).
dest.size_ = dest.width_ * dest.height_ * 4;
dest.texture_ = new unsigned char[dest.size_];
unsigned char* buffer = new unsigned char[3 * dest.size_ / 4]; // 24 bpp input
// Slurp in whole file and replace all white colors with green
// values and an alpha value of 0:
fseek(f, HEADER_SIZE, SEEK_SET);
fread (buffer, sizeof(unsigned char), 3 * dest.size_ / 4, f);
for (unsigned int count = 0; count < dest.width_ * dest.height_; count++) {
dest.texture_[0+count*4] = buffer[0+count*3];
dest.texture_[1+count*4] = buffer[1+count*3];
dest.texture_[2+count*4] = buffer[2+count*3];
dest.texture_[3+count*4] = 0xff;
if (dest.texture_[0+count*4] == 0xff &&
dest.texture_[1+count*4] == 0xff &&
dest.texture_[2+count*4] == 0xff) {
dest.texture_[0+count*4] = 0x00;
dest.texture_[1+count*4] = 0xff;
dest.texture_[2+count*4] = 0x00;
dest.texture_[3+count*4] = 0x00;
dest.uses_alpha_ = true;
}
}
delete[] buffer;
}
This was actually a small jump'n'run game that I developed occasionally in my spare time. It used gluOrtho2D() mode as well, by the way. If you leave some means to contact you, I will send you the source if you want.
Older 2D games such as Diablo and Ultima Online used a sprite-compositing technique to do this. You could search for art from those kinds of older 2D isometric games to see how they did it.