I have a shader that I pass each vertex position into. It draws a neat square onto which I render my tileset, and it uses the tile index to step through the texture and pick the right sub-region. Everything is 2D. In my renderer I draw each tile by iterating over a region (basically a sub-section of the entire map in memory), but my problem is that I pass a uniform through that dictates the final position of the map.

How can I avoid having to pass the size of the current tile through the shader in order for it to render correctly in a grid? I use one shader for the whole map being rendered. Would setting this value once when the shader is first set up, and letting the shader keep it, be efficient? Would it stay set for the other tiles/draw calls? (There is a sketch of what I mean after the code below.)
Shader.v
#version 400 core
in vec2 position;
in vec2 texcoord;
out vec2 pass_texcoord;
uniform mat4 matrix_transform;
uniform mat4 matrix_projection;
uniform mat4 matrix_camera;
uniform int index;
uniform ivec2 stride;
uniform ivec2 tiles;
uniform ivec2 map_position;
uniform sampler2D u_texture;
void main(void) {
    // Size of one tile relative to the whole tileset texture.
    ivec2 texsize = textureSize(u_texture, 0);
    float xratio = float(stride.x)/texsize.x;
    float yratio = float(stride.y)/texsize.y;
    // Convert the flat tile index into a column/row inside the tileset.
    float yoffset = floor(index/tiles.x);
    pass_texcoord = vec2(texcoord.s*xratio + mod(index, tiles.x)*xratio, texcoord.t*yratio + yoffset*yratio);
    gl_Position = matrix_projection * matrix_camera * matrix_transform * vec4(position.x+map_position.x, position.y-map_position.y, 1.0, 1.0);
}
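For example (hypothetical numbers), with a 256x256 tileset, stride = (32, 32) and tiles = (8, 8), both ratios come out to 0.125; index 10 gives mod(10, 8) = 2 and floor(10 / 8) = 1, so the quad's texcoords start at (0.25, 0.125) in the atlas, i.e. column 2, row 1.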
Renderer
public static void renderMap(Map map, Region2f region, SpriteElement element, Matrix4f projection, Camera camera) {
    final Model model = element.getModel();
    final ShaderModel shader = element.getShaderModel();
    GL30.glBindVertexArray(model.getVao());
    GL20.glEnableVertexAttribArray(0);
    GL20.glEnableVertexAttribArray(1);
    GL20.glUseProgram(shader.getShader().getProgram());
    shader.bindTexture(element.getTexture());
    shader.updateView(projection, element.getMatrix(), camera.getMatrix());
    shader.updateTileset(element.getSprite());
    for (int layer = 0; layer < map.getLayers(); layer++) {
        for (int y = (int) region.getPoint1().y; y < region.getPoint2().y; y++) {
            for (int x = (int) region.getPoint1().x; x < region.getPoint2().x; x++) {
                final int current = map.getData()[layer][x][y];
                if (current == -1)
                    continue;
                // One uniform update and one draw call per visible tile.
                shader.updateInts(current, y, x);
                GL11.glDrawElements(GL11.GL_TRIANGLES, model.getVertexCount(), GL11.GL_UNSIGNED_INT, 0);
            }
        }
    }
    GL20.glUseProgram(0);
    GL20.glDisableVertexAttribArray(0);
    GL20.glDisableVertexAttribArray(1);
    GL30.glBindVertexArray(0);
}
SpriteShader.class, which extends ShaderModel (ShaderModel just contains my VAO and VBO).
public class SpriteShader extends ShaderModel {

    public SpriteShader(Shader shader) {
        super(shader);
    }

    private int u_texture;
    private int index, stride, tiles;
    private int matrix_projection, matrix_transform, matrix_camera;

    @Override
    public void init() {
        shader.bindAttribute(0, "position");
        shader.bindAttribute(1, "texcoord");
        u_texture = shader.getUniform("u_texture");
        index = shader.getUniform("index");
        stride = shader.getUniform("stride");
        tiles = shader.getUniform("tiles");
        matrix_projection = shader.getUniform("matrix_projection");
        matrix_transform = shader.getUniform("matrix_transform");
        matrix_camera = shader.getUniform("matrix_camera");
    }

    @Override
    public void updateView(Matrix4f... matrices) {
        shader.bindUniformm(matrix_projection, matrices[0]);
        shader.bindUniformm(matrix_transform, matrices[1]);
        shader.bindUniformm(matrix_camera, matrices[2]);
    }

    @Override
    public void updateInts(int... ints) {
        shader.bindUniformi(index, ints[0]);
    }

    @Override
    public void bindTexture(Texture texture) {
        GL13.glActiveTexture(GL13.GL_TEXTURE0);
        GL11.glBindTexture(GL11.GL_TEXTURE_2D, texture.getId());
        shader.bindUniformi(u_texture, 0);
    }

    @Override
    public void updateTileset(Sprite sprite) {
        shader.bindUniformi(stride, sprite.getStrideX(), sprite.getStrideY());
        shader.bindUniformi(tiles, sprite.getTilesX(), sprite.getTilesY());
    }
}
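To make the last part of the question concrete, here is a rough sketch (not my actual code; the "tile_size" uniform name is just an example and does not exist in the shader above) of what I mean by setting a value once at initialisation and never touching it again inside the render loop:

// Hypothetical one-time setup, called once after the shader program is linked.
// "tile_size" is only an example uniform name; it is not in the shader above.
public static void setTileSizeOnce(ShaderModel shader, int tileWidth, int tileHeight) {
    int program = shader.getShader().getProgram();
    GL20.glUseProgram(program);
    int location = GL20.glGetUniformLocation(program, "tile_size");
    GL20.glUniform2i(location, tileWidth, tileHeight); // e.g. 32x32 pixel tiles
    GL20.glUseProgram(0);
}

My question is whether a value bound like this is guaranteed to stay set for every subsequent tile/draw call that uses the same program, or whether I still have to rebind it each frame, and whether doing it this way is actually efficient.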

