How to affect the depth-buffer with texture-based masking?

Please consider the following image:

The dot pattern is based on a 16x16 repeated GL_ALPHA texture (with smooth edges), each mapped surface being colored differently on the fly.

This technique is very handy and I use it massively in my 3d text experiments, but there is still one open question for me at the moment: how can I achieve proper depth buffering without having to manually z-sort things?

As you can see in the provided image, the left and right parts are both problematic:

  • the left part is not using depth-buffering at all, hence the 2 rectangles are totally interleaved…
  • the right part has depth-buffering turned on, but then the blue rectangle is not translucent anymore…

An element to be taken into account (which certainly complicates things) is the need to keep the dots’ edges totally smooth.

I’m including some minimal source code to reproduce the example:

import java.awt.*;
import java.awt.event.*;
import java.nio.*;
import javax.media.opengl.*;
import javax.media.opengl.glu.GLU;
import com.sun.opengl.util.*;

public class AlphaDepth implements GLEventListener
{
  Frame frame;
  Animator animator;

  float rotation = 45f;

  public static void main(String[] args)
  {
    new AlphaDepth();
  }

  public AlphaDepth()
  {
    Rectangle ur = GraphicsEnvironment.getLocalGraphicsEnvironment().getMaximumWindowBounds();

    frame = new Frame();
    frame.setBounds(ur.x, ur.y, ur.width, ur.height);

    GLCanvas canvas = new GLCanvas();
    frame.add(canvas);
    frame.setVisible(true);

    canvas.addGLEventListener(this);

    animator = new Animator(canvas);
    animator.start();

    frame.addWindowListener(new WindowAdapter()
    {
      public void windowClosing(WindowEvent e)
      {
        exit();
      }
    });
  }

  void exit()
  {
    new Thread(new Runnable()
    {
      public void run()
      {
        animator.stop();
        frame.dispose();
        System.exit(0);
      }
    }).start();
  }

  public void init(GLAutoDrawable drawable)
  {
    GL gl = drawable.getGL();
    gl.setSwapInterval(1);

    // ---

    gl.glDisable(GL.GL_LIGHTING);
    gl.glShadeModel(GL.GL_FLAT);
    gl.glHint(GL.GL_PERSPECTIVE_CORRECTION_HINT, GL.GL_NICEST);

    gl.glClearColor(1f, 1f, 1f, 1f);

    // ---

    textureInit(gl);
  }

  public void reshape(GLAutoDrawable drawable, int x, int y, int w, int h)
  {
    GL gl = drawable.getGL();

    gl.glMatrixMode(GL.GL_PROJECTION);
    gl.glLoadIdentity();
    (new GLU()).gluPerspective(45f, w / (float) h, 1f, 1000f);

    gl.glMatrixMode(GL.GL_MODELVIEW);
    gl.glLoadIdentity();
  }

  public void display(GLAutoDrawable drawable)
  {
    GL gl = drawable.getGL();
    gl.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT);

    // ---

    gl.glLoadIdentity();
    gl.glTranslatef(0, 0f, -400f);
    gl.glRotatef(30f, 1f, 0f, 0f);
    gl.glRotatef(rotation, 0f, 1f, 0f);

    rotation += 0.5f;

    // ---

    gl.glEnable(GL.GL_DEPTH_TEST);
    gl.glDepthMask(true);

    gl.glEnable(GL.GL_BLEND);
    gl.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA);

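    // draw a blue quad, then a red quad rotated 90 degrees around Y so the two intersect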
    gl.glColor4f(0f, 0f, 1f, 1f);
    textureDraw(gl, 0, -100, -100, 200, 200);

    gl.glRotatef(90f, 0f, 1f, 0f);
    gl.glColor4f(1f, 0f, 0f, 1f);
    textureDraw(gl, 0, -100, -100, 200, 200);
  }

  public void displayChanged(GLAutoDrawable drawable, boolean modeChanged, boolean deviceChanged)
  {}

  // ---

  int[] textureId;
  int textureW = 16, textureH = 16;

  int[] textureData = {//
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,//
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,//
      0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xcf, 0xff, 0xff, 0xcf, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00,//
      0x00, 0x00, 0x00, 0x00, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x00, 0x00, 0x00, 0x00,//
      0x00, 0x00, 0x00, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x00, 0x00, 0x00,//
      0x00, 0x00, 0x5f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f, 0x00, 0x00,//
      0x00, 0x00, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x00, 0x00,//
      0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,//
      0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,//
      0x00, 0x00, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x00, 0x00,//
      0x00, 0x00, 0x5f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f, 0x00, 0x00,//
      0x00, 0x00, 0x00, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x00, 0x00, 0x00,//
      0x00, 0x00, 0x00, 0x00, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x00, 0x00, 0x00, 0x00,//
      0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xcf, 0xff, 0xff, 0xcf, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00,//
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,//
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00//
  };

  void textureInit(GL gl)
  {
    ByteBuffer buffer = ByteBuffer.allocate(textureW * textureH);
    for (int i = 0; i < textureW * textureH; i++)
    {
      buffer.put((byte) textureData[i]);
    }
    buffer.rewind();

    textureId = new int[1];
    gl.glGenTextures(1, textureId, 0);

    gl.glBindTexture(GL.GL_TEXTURE_2D, textureId[0]);
    (new GLU()).gluBuild2DMipmaps(GL.GL_TEXTURE_2D, GL.GL_ALPHA, textureW, textureH, GL.GL_ALPHA, GL.GL_UNSIGNED_BYTE, buffer);

    gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT);
    gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_REPEAT);
    gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR);
    gl.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR);
  }

  void textureDraw(GL gl, int id, float x1, float y1, float w, float h)
  {
    gl.glBindTexture(GL.GL_TEXTURE_2D, textureId[0]);
    gl.glEnable(GL.GL_TEXTURE_2D);

    float x2 = x1 + w;
    float y2 = y1 + h;

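    // texture coordinates chosen so the 16x16 dot texture repeats every 8 units across the quad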
    float nx = w / 8f;
    float ny = h / 8f;

    gl.glBegin(GL.GL_QUADS);
    gl.glTexCoord2f(nx, 0f);
    gl.glVertex3f(x2, y2, 0f);
    gl.glTexCoord2f(nx, ny);
    gl.glVertex3f(x2, y1, 0f);
    gl.glTexCoord2f(0f, ny);
    gl.glVertex3f(x1, y1, 0f);
    gl.glTexCoord2f(0f, 0f);
    gl.glVertex3f(x1, y2, 0f);
    gl.glEnd();
  }
}

Any clues? Directions?

Thanks, Ariel
http://ariel.chronotext.org

[quote]This technique is very handy and I use it massively in my 3d text experiments, but there is still one open question for me at the moment: how can I achieve proper depth buffering without having to manually z-sort things?
[/quote]
Short answer: Depth peeling.

Realistic answer: You can’t. Depth peeling provides a way of doing this without manually sorting, but it’s expensive, slow and will eat all your fillrate. IMHO it’s not practical for games or anything realtime yet.

Remember that the depth test is purely a binary thing, whereas for soft blended edges you need a range of values. You can’t get the nice antialiased edges and use the depth buffer at the same time. You’ve got two options:

  • Use alpha test rather than alpha blend. You get automatic sorting, but you can’t do transparency so you’ll get hard edges (see the sketch just after this list).
  • Manually sort (and possibly split) faces and disable the depth buffer.
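
For the first option, here’s a minimal sketch (untested, using the same old JOGL API as the code above, with an arbitrary 0.5 alpha cutoff) of what the state setup in display() could look like instead of the blending calls:

    gl.glEnable(GL.GL_DEPTH_TEST);
    gl.glDepthMask(true);

    gl.glDisable(GL.GL_BLEND);            // no blending, so draw order no longer matters
    gl.glEnable(GL.GL_ALPHA_TEST);
    gl.glAlphaFunc(GL.GL_GREATER, 0.5f);  // keep only texels above the cutoff; edges go hard

    gl.glColor4f(0f, 0f, 1f, 1f);
    textureDraw(gl, 0, -100, -100, 200, 200);

    gl.glRotatef(90f, 0f, 1f, 0f);
    gl.glColor4f(1f, 0f, 0f, 1f);
    textureDraw(gl, 0, -100, -100, 200, 200);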

I think you could do this in realtime by manually writing gl_FragDepth in a GLSL fragment shader, but it’s just a guess and I don’t know how widely this feature is supported.
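
For what it’s worth, the fragment shader itself might look something like this (again just a guess, untested; the "mask" sampler name is made up, and it would still need the usual glCreateShader / glShaderSource / glCompileShader setup around it):

    String fragmentSource =
        "uniform sampler2D mask;\n" +
        "void main() {\n" +
        "    float a = texture2D(mask, gl_TexCoord[0].st).a * gl_Color.a;\n" +
        "    gl_FragColor = vec4(gl_Color.rgb, a);\n" +
        "    // the depth written here is still a single value per pixel, not a blend\n" +
        "    gl_FragDepth = gl_FragCoord.z;\n" +
        "}\n";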

That doesn’t solve anything though, the depth test is still a boolean pass/fail, so you still can’t get blended edges to work.

I also forgot the 3rd option - switch to a blending mode which is order-independent, like additive (probably not suitable) or alpha-to-coverage (probably a good fit if you’re really interested, but it does require a multisampled framebuffer).
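
For reference, a rough sketch of the alpha-to-coverage route with the same old JOGL API (untested; the 4-sample count is an arbitrary choice, and it only helps if the driver actually hands you a multisampled framebuffer):

    // request a multisampled framebuffer when creating the canvas
    GLCapabilities caps = new GLCapabilities();
    caps.setSampleBuffers(true);
    caps.setNumSamples(4);
    GLCanvas canvas = new GLCanvas(caps);

    // in display(), instead of glEnable(GL.GL_BLEND) / glBlendFunc(...):
    gl.glEnable(GL.GL_MULTISAMPLE);
    gl.glEnable(GL.GL_SAMPLE_ALPHA_TO_COVERAGE);  // texel alpha becomes per-sample coverage

Whether GL_MULTISAMPLE needs to be enabled explicitly, and how many samples you can get, will vary by card and driver.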

Thank you guys for the feedback…

The direction of alpha-to-coverage sounds good! I wonder if it works on “standard setups” (i.e. average-to-good card + not the very latest drivers…) and whether it requires dealing with shaders, etc…

I will start to investigate and eventually post some findings to this thread…

A+

Humus has a good alpha-to-coverage demo.

@Orangy Tang:

Just curious, but how is this different from the advice you gave in this thread about rendering the leaves of thousands of trees: http://www.java-gaming.org/forums/index.php?topic=3107.0

It seems like the same problem: textures using transparency (the space between the leaves, or the space between the dots above) with no levels except 0 or 1. But you don’t want jagged leaves either, and in that thread they seem to suggest it is possible to render them while still looking good.

I have no idea how this all works, just trying to learn from those in the know :)

It’s not the same problem though, as arielm specifically said he wants to keep the blended edges. If the blended edges weren’t required, I’d just use the alpha test (like for trees).

Personally I think the alpha-to-coverage method is a very heavyweight solution for a minor quality gain, and it’s not one I’d use myself.

On an nv7900, alpha-to-coverage is a standard, built-in feature. You cannot tell the difference between alpha-to-coverage and alpha testing on those cards…

DP