I started learning OpenGL. I can load a .obj model and draw it with an element buffer, but I'm stuck trying to draw two different models at a time. Each model I want to draw is wrapped in an Entity class.
Most tutorials I can find about this only show how to load and draw a single model. None of them explains (at least in a way I can find or understand) how to handle multiple models.
Here is all my code:
public static void main(String[] args) throws LWJGLException, IOException
{
PixelFormat pixelFormat = new PixelFormat();
ContextAttribs contextAtrributes = new ContextAttribs(3, 2);
contextAtrributes.withForwardCompatible(true);
contextAtrributes.withProfileCore(true);
Display.setDisplayMode(new DisplayMode(WIDTH, HEIGHT));
Display.setTitle("Textured quad!");
Display.create(pixelFormat, contextAtrributes);
Mouse.create();
Mouse.setGrabbed(true);
Keyboard.create();
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
entity = new Entity("planeTex.obj");
entity2 = new Entity("modelTex2.obj");
Shaders.load();
Textures.load();
Camera.create(new Vector3f(0, 1, -0.75f), new Vector3f(-50, 0, 20), HEIGHT, WIDTH);
while (!Display.isCloseRequested())
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
entity.draw();
entity2.draw();
Display.update();
Display.sync(60);
}
}
public class Entity
{
private int vao, vbo, ebo;
private int elementSize;
public Entity(String name)
{
vao = glGenVertexArrays();
glBindVertexArray(vao);
vbo = glGenBuffers();
// load vertex data into buffer
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, buffer, GL_STATIC_DRAW);
ebo = glGenBuffers();
// load index data into elementBuffer
// set elementSize to the element count
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, elementBuffer, GL_STATIC_DRAW);
}
public void draw()
{
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
glDrawElements(GL_TRIANGLES, elementSize, GL_UNSIGNED_INT, 0);
}
}
public class Shaders
{
public static int vertexShader, fragmentShader;
public static int shaderProgram;
public static int uniTrans;
public static void load()
{
vertexShader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertexShader, loadFile("vertex.shader"));
glCompileShader(vertexShader);
fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragmentShader, loadFile("fragment.shader"));
glCompileShader(fragmentShader);
shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glBindFragDataLocation(shaderProgram, 0, "outColor");
glLinkProgram(shaderProgram);
glUseProgram(shaderProgram);
// Specify the layout of the vertex data
int posAttrib = glGetAttribLocation(shaderProgram, "position");
glEnableVertexAttribArray(posAttrib);
glVertexAttribPointer(posAttrib, 3, GL_FLOAT, false, (Float.SIZE / 8) * 8, 0);
int colAttrib = glGetAttribLocation(shaderProgram, "color");
glEnableVertexAttribArray(colAttrib);
glVertexAttribPointer(colAttrib, 3, GL_FLOAT, false, (Float.SIZE / 8) * 8, (Float.SIZE / 8) * 3);
int texAttrib = glGetAttribLocation(shaderProgram, "texcoord");
glEnableVertexAttribArray(texAttrib);
glVertexAttribPointer(texAttrib, 2, GL_FLOAT, false, (Float.SIZE / 8) * 8, (Float.SIZE / 8) * 6);
uniTrans = glGetUniformLocation(Shaders.shaderProgram, "model");
}
}
The result is that only the Entity object created last is drawn, no matter the draw order.
Well, I fixed it by placing this chunk
// Specify the layout of the vertex data
int posAttrib = glGetAttribLocation(Shaders.shaderProgram, "position");
glEnableVertexAttribArray(posAttrib);
glVertexAttribPointer(posAttrib, 3, GL_FLOAT, false, (Float.SIZE / 8) * 8, 0);
int colAttrib = glGetAttribLocation(Shaders.shaderProgram, "color");
glEnableVertexAttribArray(colAttrib);
glVertexAttribPointer(colAttrib, 3, GL_FLOAT, false, (Float.SIZE / 8) * 8, (Float.SIZE / 8) * 3);
int texAttrib = glGetAttribLocation(Shaders.shaderProgram, "texcoord");
glEnableVertexAttribArray(texAttrib);
glVertexAttribPointer(texAttrib, 2, GL_FLOAT, false, (Float.SIZE / 8) * 8, (Float.SIZE / 8) * 6);
in the Entity class's draw() method, just before the glDrawElements call. Placing the chunk anywhere else would either crash the program with
"Cannot use offsets when Array Buffer Object is disabled"
or simply draw nothing at all. I got the feeling from NicolBolas that I should put it in the Entity constructor, but as I said, placing it anywhere other than the draw loop didn't work.
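For reference, here is a minimal sketch of what I understand the intended structure to be. Attribute pointer state is stored per VAO, so it belongs in the Entity constructor while that entity's VAO and VBO are bound. This assumes the shader program is created before the entities (in my main() above it is currently the other way around, so glGetAttribLocation cannot work yet); the buffer and elementBuffer loading is elided as before.
public Entity(String name)
{
    vao = glGenVertexArrays();
    glBindVertexArray(vao);

    vbo = glGenBuffers();
    // load vertex data into buffer
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, buffer, GL_STATIC_DRAW);

    ebo = glGenBuffers();
    // load index data into elementBuffer, set elementSize
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, elementBuffer, GL_STATIC_DRAW);

    // The attribute layout below is recorded in *this* VAO and refers to the
    // currently bound GL_ARRAY_BUFFER (this entity's vbo).
    int stride = (Float.SIZE / 8) * 8;
    int posAttrib = glGetAttribLocation(Shaders.shaderProgram, "position");
    glEnableVertexAttribArray(posAttrib);
    glVertexAttribPointer(posAttrib, 3, GL_FLOAT, false, stride, 0);
    int colAttrib = glGetAttribLocation(Shaders.shaderProgram, "color");
    glEnableVertexAttribArray(colAttrib);
    glVertexAttribPointer(colAttrib, 3, GL_FLOAT, false, stride, (Float.SIZE / 8) * 3);
    int texAttrib = glGetAttribLocation(Shaders.shaderProgram, "texcoord");
    glEnableVertexAttribArray(texAttrib);
    glVertexAttribPointer(texAttrib, 2, GL_FLOAT, false, stride, (Float.SIZE / 8) * 6);

    glBindVertexArray(0);
}
With that, draw() would only need to bind the VAO and call glDrawElements.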
I've seen a couple of questions about the same topic but still couldn't figure out what was wrong with my code.
I managed to get a window running where a keypress changes the background color via glClearColor().
But now, when trying to draw a quad onto the screen, the screen stays black.
A little summary of how I think it should work:
How the mesh is created:
public Mesh(Vertex[] verts, int[] indices) {
this.verts = verts;
this.indices = indices;
}
// Different class
public Mesh mesh = new Mesh(new Vertex[] {
new Vertex(new Vector3f(-0.5f, 0.5f, 0.0f)),
new Vertex(new Vector3f(-0.5f, -0.5f, 0.0f)),
new Vertex(new Vector3f( 0.5f, -0.5f, 0.0f)),
new Vertex(new Vector3f( 0.5f, 0.5f, 0.0f))
}, new int[] {
0, 1, 2,
0, 3, 2
});
For initializing the game, this is called:
public void create() {
vao = GL30.glGenVertexArrays();
GL30.glBindVertexArray(vao);
FloatBuffer positionBuffer = MemoryUtil.memAllocFloat(verts.length * 3);
float[] positionData = new float[verts.length * 3];
for (int i = 0; i < verts.length; i++) {
positionData[i * 3] = verts[i].getPosition().getX();
positionData[i * 3 + 1] = verts[i].getPosition().getY();
positionData[i * 3 + 2] = verts[i].getPosition().getZ();
}
positionBuffer.put(positionData).flip();
pbo = GL15.glGenBuffers();
GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, pbo);
GL15.glBufferData(GL15.GL_ARRAY_BUFFER, positionBuffer, GL15.GL_STATIC_DRAW);
GL20.glVertexAttribPointer(0, 3, GL11.GL_FLOAT, false, 0, 0);
GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, 0);
IntBuffer indexBuffer = MemoryUtil.memAllocInt(indices.length * 3);
indexBuffer.put(indices).flip();
ibo = GL15.glGenBuffers();
GL15.glBindBuffer(GL15.GL_ELEMENT_ARRAY_BUFFER, ibo);
GL15.glBufferData(GL15.GL_ELEMENT_ARRAY_BUFFER, indexBuffer, GL15.GL_STATIC_DRAW);
GL15.glBindBuffer(GL15.GL_ELEMENT_ARRAY_BUFFER, 0);
}
And lastly, the mesh should be rendered with this:
public void renderMesh(Mesh mesh) {
clear();
GL30.glBindVertexArray(mesh.getVAO());
GL30.glEnableVertexAttribArray(0);
GL15.glBindBuffer(GL15.GL_ELEMENT_ARRAY_BUFFER, mesh.getIBO());
GL11.glDrawElements(GL11.GL_TRIANGLES, mesh.getIndices().length, GL11.GL_FLOAT, 0);
GL15.glBindBuffer(GL15.GL_ELEMENT_ARRAY_BUFFER, 0);
GL30.glDisableVertexAttribArray(0);
GL30.glBindVertexArray(0);
}
I've checked that the methods are actually invoked by using System.out.println.
The game's input still works. It's just that the screen is black and not showing any quad.
Why is it not drawing anything onto the screen?
The type argument passed to glDrawElements has to correspond to the type of the index buffer, not the vertex buffer.
In your case it has to be GL_UNSIGNED_INT rather than GL_FLOAT. See glDrawElements.
Change
GL11.glDrawElements(GL11.GL_TRIANGLES, mesh.getIndices().length, GL11.GL_FLOAT, 0);
to
GL11.glDrawElements(GL11.GL_TRIANGLES, mesh.getIndices().length, GL11.GL_UNSIGNED_INT, 0);
Note that GL_FLOAT is not an accepted value for glDrawElements and will cause a GL_INVALID_ENUM error.
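For instance (a hypothetical variant, not the code above): if the indices had been uploaded from a ShortBuffer, the matching type would be GL_UNSIGNED_SHORT:
// Hypothetical variant with 16-bit indices; the type passed to glDrawElements
// must then match the element buffer's contents.
ShortBuffer indexBuffer = MemoryUtil.memAllocShort(indices.length);
indexBuffer.put(shortIndices).flip();   // shortIndices: a short[] version of the indices
GL15.glBufferData(GL15.GL_ELEMENT_ARRAY_BUFFER, indexBuffer, GL15.GL_STATIC_DRAW);
GL11.glDrawElements(GL11.GL_TRIANGLES, indices.length, GL11.GL_UNSIGNED_SHORT, 0);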
I want to store three float values and two byte values in an interleaved OpenGL VBO.
Unfortunately, the rendered data is obviously not correct.
When I rendered the same data with two separate VBOs, everything worked fine, so I don't think there is a problem with my shaders.
/*
* 20^3 blocks per chunk, 6 faces per block, 3 vertices per face
* max. every second face can be visible
*/
private static final int MAX_FLOAT_AMOUNT = 20 * 20 * 20 * 6 * 3 / 2;
/*
* 20^3 blocks per chunk, 6 faces per block, 2 bytes per face
* max. every second face can be visible
*/
private static final int MAX_BYTE_AMOUNT = 20 * 20 * 20 * 6 * 2 / 2;
private int dataVboIndex;
protected int vaoId;
protected int indicesCount;
protected boolean isInitialized = false;
public static ByteBuffer dataFloatBuffer = BufferUtils.createByteBuffer(4 * MAX_FLOAT_AMOUNT + MAX_BYTE_AMOUNT);
DefaultChunkVao(int indiciesVboId) {
init();
}
DefaultChunkVao(boolean initialize) {
if(initialize) init();
}
private void init() {
isInitialized = true;
// bind vao
vaoId = glGenVertexArrays();
glBindVertexArray(vaoId);
//create vbo
dataVboIndex = glGenBuffers();
glBindBuffer(GL_ARRAY_BUFFER, dataVboIndex);
dataFloatBuffer.clear();
dataFloatBuffer.position(4 * MAX_FLOAT_AMOUNT + MAX_BYTE_AMOUNT);
dataFloatBuffer.flip();
// allocate memory
glBufferData(GL_ARRAY_BUFFER, dataFloatBuffer, GL_STREAM_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, false, 3 * 4 + 2, 0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 1, GL_UNSIGNED_BYTE, false, 3 * 4 + 2, 3 * 4);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 1, GL_UNSIGNED_BYTE, false, 3 * 4 + 2, 3 * 4 + 1);
// unbind vao
glBindVertexArray(0);
}
public void updateData(float[] data) {
if(!isInitialized) init();
dataFloatBuffer.clear();
for(int counter = 0; counter < data.length; counter+=0) {
dataFloatBuffer.putFloat(data[counter++]);
dataFloatBuffer.putFloat(data[counter++]);
dataFloatBuffer.putFloat(data[counter++]);
dataFloatBuffer.put((byte) data[counter++]);
dataFloatBuffer.put((byte) data[counter++]);
}
dataFloatBuffer.flip();
glBindBuffer(GL_ARRAY_BUFFER, dataVboIndex);
glBufferSubData(GL_ARRAY_BUFFER, 0, dataFloatBuffer);
glBindBuffer(GL_ARRAY_BUFFER, 0);
this.indicesCount = data.length / 5;
}
The MAX_FLOAT_AMOUNT and MAX_BYTE_AMOUNT constants contain the number of floats and bytes per VBO, respectively. Am I right in assuming that I have to multiply the number of floats by 4 when determining the capacity of the ByteBuffer, since every float is 4 bytes?
And what did I do wrong so that my byte values are always 0 in the shader?
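The size arithmetic I am relying on (assuming a float is 4 bytes and each byte value is 1 byte):
// per vertex: 3 floats * 4 bytes + 2 * 1 byte = 14 bytes -> the stride 3 * 4 + 2 in glVertexAttribPointer
// whole VBO : MAX_FLOAT_AMOUNT floats * 4 bytes + MAX_BYTE_AMOUNT bytes
//             -> createByteBuffer(4 * MAX_FLOAT_AMOUNT + MAX_BYTE_AMOUNT)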
EDIT: I was able to reproduce the issue with a simpler example. Here I want to store the position and two bytes in the VBO. In the fragment shader I check whether the byte1 value is passed correctly: if so, the shader renders the shape green, otherwise blue. Unfortunately, my shape is rendered blue, so I assume that the byte1 value is not passed correctly.
vertexShader
#version 400 core
in vec3 position;
in int byte1;
in int byte2;
flat out int p_byte1;
flat out int p_byte2;
void main(void) {
gl_Position = vec4(position, 1);
p_byte1 = byte1;
p_byte2 = byte2;
}
fragmentShader:
#version 400 core
flat in int p_byte1;
flat in int p_byte2;
out vec3 out_color;
void main(void) {
if(p_byte1 == 45) {
out_color = vec3(0, 1, 0);
} else out_color = vec3(0, 0, 1);
}
creating the VAO:
vaoId = glGenVertexArrays();
glBindVertexArray(vaoId);
final int vbo = glGenBuffers();
glBindBuffer(GL_ARRAY_BUFFER, vbo);
float[] data = {0, 0, 0, 20f, 20f, 1, 1, 1, 20f, 20f, 1, 0, 1, 20f, 20f, 0, 1, 1, 20f, 20f};
ByteBuffer dataBuffer = BufferUtils.createByteBuffer(4 * (3 * 4) + 1 * (2 * 4));
for(int counter = 0; counter < data.length; counter+=0) {
dataBuffer.putFloat(data[counter++]);
dataBuffer.putFloat(data[counter++]);
dataBuffer.putFloat(data[counter++]);
dataBuffer.put((byte) data[counter++]);
dataBuffer.put((byte) data[counter++]);
}
dataBuffer.flip();
glBufferData(GL_ARRAY_BUFFER, dataBuffer, GL_STREAM_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, false, 3 * 4 + 2, 0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 1, GL_UNSIGNED_BYTE, false, 3 * 4 + 2, 3 * 4);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 1, GL_UNSIGNED_BYTE, false, 3 * 4 + 2, 3 * 4 + 1);
glBindVertexArray(0);
I was able to find the solution to my question. It is necessary to use glVertexAttribIPointer instead of glVertexAttribPointer for integer data types.
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, false, 3 * 4 + 2, 0);
glEnableVertexAttribArray(1);
glVertexAttribIPointer(1, 1, GL_UNSIGNED_BYTE, 3 * 4 + 2, 3 * 4);
glEnableVertexAttribArray(2);
glVertexAttribIPointer(2, 1, GL_UNSIGNED_BYTE, 3 * 4 + 2, 3 * 4 + 1);
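One more thing I am assuming on my side (not part of the fix above): because the attribute indices 0/1/2 are hard-coded, the shader inputs have to actually end up at those locations. Binding them explicitly before linking would look roughly like this; programId is a placeholder for whatever my shader loader links, so treat this as a sketch:
// Pin the attribute names to the indices used by the glVertexAttrib*Pointer calls,
// before the program is linked.
glBindAttribLocation(programId, 0, "position");
glBindAttribLocation(programId, 1, "byte1");
glBindAttribLocation(programId, 2, "byte2");
glLinkProgram(programId);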
I started watching some tutorials on creating a 2D top-down game using LWJGL, and I read that VBOs should be fast. But when rendering 48*48 tiles per frame I only get about 100 FPS, which is pretty slow, because I will add a lot more to the game than just some static, unchanging tiles.
What can I do to make this faster? Keep in mind that I just started learning LWJGL and OpenGL, so I probably don't know many things.
Anyway, here are the relevant parts of my code (I removed some parts that weren't meaningful and replaced them with descriptions):
The main loop
double targetFPS = 240.0;
double targetUPS = 60.0;
long initialTime = System.nanoTime();
final double timeU = 1000000000 / targetUPS;
final double timeF = 1000000000 / targetFPS;
double deltaU = 0, deltaF = 0;
int frames = 0, updates = 0;
long timer = System.currentTimeMillis();
while (!window.shouldClose()) {
long currentTime = System.nanoTime();
deltaU += (currentTime - initialTime) / timeU;
deltaF += (currentTime - initialTime) / timeF;
initialTime = currentTime;
if (deltaU >= 1) {
// --- [ update ] ---
// input handling for basic movement, closing the game and toggling vsync using a method from the input handler class
world.correctCamera(camera, window);
window.update();
updates++;
deltaU--;
}
if (deltaF >= 1) {
// --- [ render ] ---
glClear(GL_COLOR_BUFFER_BIT);
world.render(tileRenderer, shader, camera, window);
window.swapBuffers();
frames++;
deltaF--;
}
// print the FPS and UPS every second
}
The input handler methods used:
I have this in my constructor:
this.keys = new boolean[GLFW_KEY_LAST];
for(int i = 0; i < GLFW_KEY_LAST; i++)
keys[i] = false;
And here are the methods:
public boolean isKeyDown(int key) {
return glfwGetKey(window, key) == 1;
}
public boolean isKeyPressed(int key) {
return (isKeyDown(key) && !keys[key]);
}
public void update() {
for(int i = 32; i < GLFW_KEY_LAST; i++)
keys[i] = isKeyDown(i);
}
This is the render method from the World class:
public void render(TileRenderer renderer, Shader shader, Camera camera, Window window) {
int posX = ((int) camera.getPosition().x + (window.getWidth() / 2)) / (scale * 2);
int posY = ((int) camera.getPosition().y - (window.getHeight() / 2)) / (scale * 2);
for (int i = 0; i < view; i++) {
for (int j = 0; j < view; j++) {
Tile t = getTile(i - posX, j + posY);
if (t != null)
renderer.renderTile(t, i - posX, -j - posY, shader, world, camera);
}
}
}
This is the renderTile() method from TileRenderer:
public void renderTile(Tile tile, int x, int y, Shader shader, Matrix4f world, Camera camera) {
shader.bind();
if (tileTextures.containsKey(tile.getTexture()))
tileTextures.get(tile.getTexture()).bind(0);
Matrix4f tilePosition = new Matrix4f().translate(new Vector3f(x * 2, y * 2, 0));
Matrix4f target = new Matrix4f();
camera.getProjection().mul(world, target);
target.mul(tilePosition);
shader.setUniform("sampler", 0);
shader.setUniform("projection", target);
model.render();
}
This is the constructor and render method from Model class:
public Model(float[] vertices, float[] texture_coords, int[] indices) {
draw_count = indices.length;
v_id = glGenBuffers();
glBindBuffer(GL_ARRAY_BUFFER, v_id);
glBufferData(GL_ARRAY_BUFFER, createBuffer(vertices), GL_STATIC_DRAW);
t_id = glGenBuffers();
glBindBuffer(GL_ARRAY_BUFFER, t_id);
glBufferData(GL_ARRAY_BUFFER, createBuffer(texture_coords), GL_STATIC_DRAW);
i_id = glGenBuffers();
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, i_id);
IntBuffer buffer = BufferUtils.createIntBuffer(indices.length);
buffer.put(indices);
buffer.flip();
glBufferData(GL_ELEMENT_ARRAY_BUFFER, buffer, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
public void render() {
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, v_id);
glVertexAttribPointer(0, 3, GL_FLOAT, false, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, t_id);
glVertexAttribPointer(1, 2, GL_FLOAT, false, 0, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, i_id);
glDrawElements(GL_TRIANGLES, draw_count, GL_UNSIGNED_INT, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
}
I store the vertices, texture coords and indices in the tile renderer:
float[] vertices = new float[]{
-1f, 1f, 0, //top left 0
1f, 1f, 0, //top right 1
1f, -1f, 0, //bottom right 2
-1f, -1f, 0, //bottom left 3
};
float[] texture = new float[]{
0, 0,
1, 0,
1, 1,
0, 1,
};
int[] indices = new int[]{
0, 1, 2,
2, 3, 0
};
I don't know what else to put here, but the full source code, resources and shader files are available on GitHub here.
With your current system, what I would recommend doing is grouping your tiles based on texture. Create something like this:
Map<Texture, List<Tile>> tiles = new HashMap<Texture, List<Tile>>()
Then when you go to render your map of tiles, you only need to set the texture once per group of tiles rather than once per tile. This saves PCIe bandwidth spent pushing textures/texture IDs to the GPU. You would achieve that like this (pseudo code):
for (Texture tex : tile.keySet())
{
BIND TEXTURE
for (Tile tile : tiles.get(tex))
{
SET UNIFORMS
RENDER
}
}
Something else I see along these lines is that you are pushing the projection matrix to each tile individually. When you are running a shader program, the value of a given uniform stays the same until you change it or until the program ends. Set the projection matrix uniform once.
It also appears that you are recalculating that matrix in every renderTile(...) call. Given the value does not change, calculate it once before the render pass, then pass it into renderTile(...) rather than passing in camera and world.
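A rough sketch of what that restructured loop could look like (pseudo-Java: tilesByTexture and TileInstance are hypothetical names, the rest reuses methods from the question's code):
shader.bind();
shader.setUniform("sampler", 0);

Matrix4f target = new Matrix4f();
camera.getProjection().mul(world, target);            // computed once per frame, not per tile

for (Texture tex : tilesByTexture.keySet()) {
    tex.bind(0);                                      // texture bound once per group
    for (TileInstance ti : tilesByTexture.get(tex)) {
        Matrix4f tilePosition = new Matrix4f().translate(new Vector3f(ti.x * 2, ti.y * 2, 0));
        Matrix4f mvp = new Matrix4f();
        target.mul(tilePosition, mvp);                // only the per-tile part changes
        shader.setUniform("projection", mvp);
        model.render();
    }
}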
I've got a class in development which I hope to eventually use to draw a terrain map. The code currently looks like this:
IntBuffer ib = BufferUtils.createIntBuffer(3);
int vHandle = ib.get(0);
int cHandle = ib.get(1);
int iHandle = ib.get(2);
FloatBuffer vBuffer = BufferUtils.createFloatBuffer(19);
FloatBuffer cBuffer = BufferUtils.createFloatBuffer(18);
ShortBuffer iBuffer = BufferUtils.createShortBuffer(6);
TerrainFlat(float x,float y,float z, float[] input /* here is where I want to put my draw distance stuff*/){
this.Map = input;
this.x=x;
this.y=y;
this.z=z;
}
@Override
public void initilize(){
float[] vertexData = {50, 20, 100, 50, -20, 100, 10, -20, 100, -10, -20, 100, -50, -20, 100, -50, 20, 100};
float[] colorData = {1, 0, 0, 0, 1, 0, 0, 0, 1, 0 , 0 , 1 , 0 , 1 , 0 , 1 , 0 , 0 };
short[] indexData = {0,1,2,3,4,5};
vBuffer.put(vertexData);
vBuffer.flip();
cBuffer.put(colorData);
cBuffer.flip();
iBuffer.put(indexData);
iBuffer.flip();
}
@Override
public void draw(){
glClear(GL_COLOR_BUFFER_BIT);
glGenBuffersARB(ib);
vHandle = ib.get(0);
cHandle = ib.get(1);
iHandle = ib.get(2);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vHandle);
glBufferDataARB(GL_ARRAY_BUFFER_ARB, vBuffer, GL_STATIC_DRAW_ARB);
glVertexPointer(3, GL_FLOAT, /* stride */3 << 2, 0L);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, cHandle);
glBufferDataARB(GL_ARRAY_BUFFER_ARB, cBuffer, GL_STATIC_DRAW_ARB);
glColorPointer(3, GL_FLOAT, /* stride */3 << 2, 0L);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, iHandle);
glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, iBuffer, GL_STATIC_DRAW_ARB);
glDrawElements(GL_TRIANGLES, /* elements */6, GL_UNSIGNED_SHORT, 0L);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
}
@Override
public void destroy(){
ib.put(0, vHandle);
ib.put(1, cHandle);
ib.put(2, iHandle);
glDeleteBuffersARB(ib);
}
Now I'm wondering if I can bind my data in the initilize function instead of the draw function, in order to move the declarations:
FloatBuffer vBuffer = BufferUtils.createFloatBuffer(19);
FloatBuffer cBuffer = BufferUtils.createFloatBuffer(18);
ShortBuffer iBuffer = BufferUtils.createShortBuffer(6);
into the initilize function, or at the very least after the terrain functions so I can control their lengths via a draw distance function.
UPDATE:
I changed my code based on the answer given (thank you, btw) to this:
@Override
public void initilize(){
float[] vertexData = {50, 20, 100, 50, -20, 100, 10, -20, 100, -10, -20, 100, -50, -20, 100, -50, 20, 100};
float[] colorData = {1, 0, 0, 0, 1, 0, 0, 0, 1, 0 , 0 , 1 , 0 , 1 , 0 , 1 , 0 , 0 };
short[] indexData = {0,1,2,3,4,5};
vBuffer.put(vertexData);
vBuffer.flip();
cBuffer.put(colorData);
cBuffer.flip();
iBuffer.put(indexData);
iBuffer.flip();
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vHandle);
glBufferDataARB(GL_ARRAY_BUFFER_ARB, vBuffer, GL_STATIC_DRAW_ARB);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, cHandle);
glBufferDataARB(GL_ARRAY_BUFFER_ARB, cBuffer, GL_STATIC_DRAW_ARB);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, iHandle);
glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, iBuffer, GL_STATIC_DRAW_ARB);
}
@Override
public void setUp(float posX, float posY, float posZ){
}
@Override
public void draw(){
glClear(GL_COLOR_BUFFER_BIT);
glGenBuffersARB(ib);
vHandle = ib.get(0);
cHandle = ib.get(1);
iHandle = ib.get(2);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vHandle);
glVertexPointer(3, GL_FLOAT, /* stride */3 << 2, 0L);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, cHandle);
glColorPointer(3, GL_FLOAT, /* stride */3 << 2, 0L);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, iHandle);
glDrawElements(GL_TRIANGLES, /* elements */6, GL_UNSIGNED_SHORT, 0L);
glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
}
This executes but fails to draw anything; I do not have GL_CULL_FACE enabled either.
You need to bind the buffers before drawing (unless you are always drawing the same buffers), but you can move the glBufferData calls to your initialize function.
Something like this should work:
public void initilize() {
...
// set data for vHandle
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vHandle);
glBufferDataARB(GL_ARRAY_BUFFER_ARB, vBuffer, GL_STATIC_DRAW_ARB);
glVertexPointer(3, GL_FLOAT, /* stride */3 << 2, 0L);
// ... setup other handles
...
}
public void draw() {
...
glBindBufferARB(GL_ARRAY_BUFFER_ARB, vHandle);
// ... bind other buffers
glDrawElements(GL_TRIANGLES, /* elements */6, GL_UNSIGNED_SHORT, 0L);
...
}
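One more thing worth checking (my assumption based on the updated code, not something the update mentions): draw() still calls glGenBuffersARB every frame, which generates fresh, empty buffer names and overwrites the handles that initilize() uploaded data into, so the buffers actually bound at draw time contain nothing. Generating the names once, before uploading, would look roughly like this:
public void initilize() {
    // generate the buffer names once, then upload into them
    glGenBuffersARB(ib);
    vHandle = ib.get(0);
    cHandle = ib.get(1);
    iHandle = ib.get(2);
    // ... fill vBuffer/cBuffer/iBuffer and make the glBufferDataARB calls as above ...
}

public void draw() {
    glClear(GL_COLOR_BUFFER_BIT);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_COLOR_ARRAY);
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, vHandle);   // no glGenBuffersARB here
    glVertexPointer(3, GL_FLOAT, 3 << 2, 0L);
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, cHandle);
    glColorPointer(3, GL_FLOAT, 3 << 2, 0L);
    glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, iHandle);
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, 0L);
    glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
    glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
}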
I cannot seem to get vertex attribute arrays working properly for per-vertex data.
Here's the SSCCE:
private static void createDisplay(int w, int h) {
try {
Display.create();
Display.setDisplayMode(new DisplayMode(w, h));
}
catch (LWJGLException e) {
e.printStackTrace();
}
float size = 1;
float aspect = (float) Display.getWidth() / Display.getHeight();
GL11.glMatrixMode(GL11.GL_PROJECTION);
GL11.glLoadIdentity();
GL11.glOrtho(-size * aspect, size * aspect, -size, size, -1, 1);
GL11.glMatrixMode(GL11.GL_MODELVIEW);
GL11.glLoadIdentity();
}
public static void main(String[] args) {
createDisplay(1200, 800);
GL11.glViewport(0, 0, Display.getWidth(), Display.getHeight());
ShaderManager.createShader("2Dv", new File("src/Shaders/2D.vert"), SHADER_VERT);
ShaderManager.createShader("2Df", new File("src/Shaders/2D.frag"), SHADER_FRAG);
ShaderManager.createProgram("2D", "2Dv", "2Df");
// Shader compiles and links correctly.
ShaderManager.useProgram("2D");
// Calls glUseProgram(programID);
float[] vertexData = new float[] {-0.5f, -0.5f, 0.5f, -0.5f, 0.5f, 0.5f, -0.5f, 0.5f};
int vao = GL30.glGenVertexArrays();
if (vao == 0)
System.exit(-1);
GL30.glBindVertexArray(vao);
int vertexBuffer = GL15.glGenBuffers();
GL15.glBindBuffer(GL15.GL_ARRAY_BUFFER, vertexBuffer);
GL15.glBufferData(GL15.GL_ARRAY_BUFFER, BufferUtil.asDirectFloatBuffer(vertexData), GL15.GL_DYNAMIC_DRAW);
// GL11.glEnableClientState(GL11.GL_VERTEX_ARRAY);
// GL11.glVertexPointer(2, GL11.GL_FLOAT, 2 * 4, 0);
int loc = ShaderManager.currentProgram.getAttribute("vertex");
if (loc == -1)
Debug.log(Debug.INSTANCE_MANAGEMENT, "Attribute [", "", "] not found in Shader [",
ShaderManager.currentProgram.toString(), "].");
else {
GL20.glVertexAttribPointer(loc, 2, GLCONST.TYPE_FLOAT, false, 2 * 4, 0);
GL20.glEnableVertexAttribArray(loc);
}
GL30.glBindVertexArray(0);
GL11.glColor3f(1, 0, 0);
GL11.glClearColor(0.5f, 0.5f, 0.8f, 1);
int indexBuffer = GL15.glGenBuffers();
GL15.glBindBuffer(GL15.GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
GL15.glBufferData(GL15.GL_ELEMENT_ARRAY_BUFFER, BufferUtil.asDirectFloatBuffer(new float[] {0, 1, 2, 3}),
GL15.GL_DYNAMIC_DRAW);
while (!Display.isCloseRequested()) {
GL11.glClear(GL11.GL_COLOR_BUFFER_BIT);
GL30.glBindVertexArray(vao);
GL15.glBindBuffer(GL15.GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
GL11.glDrawElements(GL11.GL_QUADS, 4, GL11.GL_UNSIGNED_INT, 0);
GL30.glBindVertexArray(0);
Display.update();
int error = GL11.glGetError();
if (error != GL11.GL_NO_ERROR)
System.out.println(GLU.gluErrorString(error));
}
}
The problem lies in the usage of vertex attribute arrays. The old code I used was:
GL11.glEnableClientState(GL11.GL_VERTEX_ARRAY);
GL11.glVertexPointer(2, GL11.GL_FLOAT, 2 * 4, 0);
The new version is:
int loc = ShaderManager.currentProgram.getAttribute("vertex");//Call to glGetAttribLocation();
if (loc == -1){
System.exit(-1);
}
GL20.glVertexAttribPointer(loc, 2, GL11.GL_FLOAT, false, 2 * 4, 0);
GL20.glEnableVertexAttribArray(loc);
The original code is the two commented lines. When run, it correctly sent vertex data to gl_Vertex and rendered a square of size 1.
The new code should send vertex data to the vertex attribute, but it receives nothing.
When the original code is uncommented and both old and new code are used, both gl_Vertex and the vertex attribute receive vertex data.
What is going wrong here?
So I figured out the problem after a while. The problem is due to an AMD driver bug when using an OpenGL 3.0+ core profile.
The "vertex" attribute array was assigned a location of 1.
The bug occurs when attribute array 0 is unused: nothing is rendered if array 0 is not enabled.
To fix this problem I simply assigned "vertex" explicitly to location 0.
layout(location = 0) in vec4 vertex;
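The same effect can also be achieved from the Java side, without touching the shader, by binding the location before the program is linked (a sketch; programID stands for whatever ShaderManager links):
// Bind the "vertex" attribute to location 0 before glLinkProgram
GL20.glBindAttribLocation(programID, 0, "vertex");
GL20.glLinkProgram(programID);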