Screenspace to Worldspace Coordinates [SOLVED]

SOLVED EDIT:
Embarrassingly, I solved the problem soon after creating this post. What was originally:


// BUGGY version kept for the post's narrative: the row/column arguments to
// setElement are swapped, so the offsets land in the bottom ROW (2,0)/(2,1)
// instead of the last COLUMN (0,2)/(1,2) of the homogeneous matrix.
public Matrix translate(Vector v)
{
	Matrix t = new Matrix().loadIdentity();
	t.setElement(2, 0, v.getX());
	t.setElement(2, 1, v.getY());
	mul(t);
	return this;
}

Should have been:


// Appends a translation to this matrix. In a 3x3 homogeneous transform the
// x/y offsets belong in the last COLUMN — elements (0,2) and (1,2).
public Matrix translate(Vector v)
{
	Matrix translation = new Matrix().loadIdentity();
	translation.setElement(0, 2, v.getX());
	translation.setElement(1, 2, v.getY());
	return mul(translation);
}

I had the row/column addresses mixed up :P. Anyway, thanks to those who read the post even though I answered it shortly after making it!

ORIGINAL POST:
So I’m making a tool to edit worlds for my game and I’ve run into the problem of converting screenspace coordinates into worldspace coordinates. I need this so I can guide the cell-selection cursor with my mouse no matter how zoomed in or panned around the view is. Upon some reading I decided to use a view matrix to compute all the transformations going on with the view, then apply its inverse to the mouse coordinates. Unfortunately this doesn’t seem to be working, and I’ve no idea why.

The more I pan in one direction the more the cursor moves in the other. The more zoomed in the view is the closer the cursor gets to the origin. Just to be clear, I’m not using the view matrix to apply transformations on the renderings, I’m just using it to compute cursor coordinates.

Cursor code:


//reset the view matrix to the identity before rebuilding it each frame
view = new Matrix();
//translate to the center of the display so zooming appears to happen from the center
view.translate(GLDisplay.center());
//scale x & y by the current zoom factor
view.scale(new Vector(zoom, zoom));
//translate by the camera position (the view's pan offset)
view.translate(camera.get());
//replace the view matrix with its inverse (maps screenspace back to worldspace)
view.invert();
//apply the inverted view matrix to the mouse's screenspace coordinates,
//yielding the cursor position in worldspace
cursor = view.transform(new Vector(Mouse.getX(), Mouse.getY()));
//round the cursor coordinates down to whole numbers (cell indices)
cursor.floor();

Matrix code:


//Multiply this matrix by matrix "a"
public Matrix mul(Matrix a)
{
	for(int r = 0; r < 3; r++)
	{
		for(int c = 0; c < 3; c++)
		{
			setElement
			(r, c,
				m[r][0] * a.getElement(0, c) +
				m[r][1] * a.getElement(1, c) +
				m[r][2] * a.getElement(2, c)
			);
		}
	}
	return this;
}

//Creates a translation matrix from a vector and applies it to this matrix
public Matrix translate(Vector v)
{
	Matrix t = new Matrix().loadIdentity();
	t.setElement(2, 0, v.getX());
	t.setElement(2, 1, v.getY());
	mul(t);
	return this;
}

//Creates a scaling matrix from a vector and applies it to this matrix
public Matrix scale(Vector v)
{
	Matrix s = new Matrix().loadIdentity();
	s.setElement(0, 0, v.getX());
	s.setElement(1, 1, v.getY());
	mul(s);
	return this;
}

//Returns the determinate of a minor matrix (used in inverting a matrix)
public float minorDeterminant(float[][] minor)
{
	return (minor[0][0] * minor[1][1]) - (minor[1][0] * minor[0][1]);
}

//Inverts this matrix
public Matrix invert()
{
	//matrix of minor determinates used to create the cofactor matrix
	Matrix minors = new Matrix();
	//generates matrix of minor determinants (minors)
	{
		//minor matrix to be recycled in making the matrix of minor determinates
		float[][] minor = new float[2][2];
		minor[0][0] = m[1][1];    minor[0][1] = m[1][2];
		minor[1][0] = m[2][1];    minor[1][1] = m[2][2];
		minors.setElement(0, 0, minorDeterminant(minor));
		
		minor[0][0] = m[1][0];    minor[0][1] = m[1][2];
		minor[1][0] = m[2][0];    minor[1][1] = m[2][2];
		minors.setElement(0, 1, minorDeterminant(minor));
		
		minor[0][0] = m[1][0];    minor[0][1] = m[1][1];
		minor[1][0] = m[2][0];    minor[1][1] = m[2][1];
		minors.setElement(0, 2, minorDeterminant(minor));
		
		minor[0][0] = m[0][1];    minor[0][1] = m[0][2];
		minor[1][0] = m[2][1];    minor[1][1] = m[2][2];
		minors.setElement(1, 0, minorDeterminant(minor));
		
		minor[0][0] = m[0][0];    minor[0][1] = m[0][2];
		minor[1][0] = m[2][0];    minor[1][1] = m[2][2];
		minors.setElement(1, 1, minorDeterminant(minor));
		
		minor[0][0] = m[0][0];    minor[0][1] = m[0][1];
		minor[1][0] = m[2][0];    minor[1][1] = m[2][1];
		minors.setElement(1, 2, minorDeterminant(minor));
		
		minor[0][0] = m[0][1];    minor[0][1] = m[0][2];
		minor[1][0] = m[1][1];    minor[1][1] = m[1][2];
		minors.setElement(2, 0, minorDeterminant(minor));
		
		minor[0][0] = m[0][0];    minor[0][1] = m[0][2];
		minor[1][0] = m[1][0];    minor[1][1] = m[1][2];
		minors.setElement(2, 1, minorDeterminant(minor));
		
		minor[0][0] = m[0][0];    minor[0][1] = m[0][1];
		minor[1][0] = m[1][0];    minor[1][1] = m[1][1];
		minors.setElement(2, 2, minorDeterminant(minor));
	}
	
	//Cofactor matrix to be transposed to create the adjugate matrix
	Matrix cofactor = new Matrix();
	//applies "checkerboard" signing of cofactor elements
	{
		cofactor.setElement(0, 0, minors.getElement(0, 0));
		cofactor.setElement(0, 1, -minors.getElement(0, 1));
		cofactor.setElement(0, 2, minors.getElement(0, 2));
		
		cofactor.setElement(1, 0, -minors.getElement(1, 0));
		cofactor.setElement(1, 1, minors.getElement(1, 1));
		cofactor.setElement(1, 2, -minors.getElement(1, 2));
		
		cofactor.setElement(2, 0, minors.getElement(2, 0));
		cofactor.setElement(2, 1, -minors.getElement(2, 1));
		cofactor.setElement(2, 2, minors.getElement(2, 2));
	}

	//Adjugate matrix to be divided by the determinant to invert this matrix
	Matrix adjugate = new Matrix();
	//loads cofactor elements into adjugate and transposes them
	{
		adjugate.setElement(0, 0, cofactor.getElement(0, 0));
		adjugate.setElement(0, 1, cofactor.getElement(1, 0));
		adjugate.setElement(0, 2, cofactor.getElement(2, 0));

		adjugate.setElement(1, 0, cofactor.getElement(0, 1));
		adjugate.setElement(1, 1, cofactor.getElement(1, 1));
		adjugate.setElement(1, 2, cofactor.getElement(2, 1));

		adjugate.setElement(2, 0, cofactor.getElement(0, 2));
		adjugate.setElement(2, 1, cofactor.getElement(1, 2));
		adjugate.setElement(2, 2, cofactor.getElement(2, 2));
	}
	
	//finds the determinate of this matrix
	float determinant =
	(
		(m[0][0] * m[1][1] * m[2][2]) +
		(m[0][1] * m[1][2] * m[2][0]) +
		(m[0][2] * m[1][0] * m[2][1]) +
		(m[0][1] * m[1][0] * m[2][2]) -
		(m[0][0] * m[1][2] * m[2][1]) -
		(m[0][2] * m[1][1] * m[2][0])
	);
	//sets each element of this matrix to the corresponding element of the adjugate divided by the determinant
	for(int r = 0; r < 3; r++)
	{
		for(int c = 0; c < 3; c++)
		{
			m[r][c] = adjugate.getElement(r, c) / determinant;
		}
	}
	
	return this;
}

//applies this matrix to a given vector
public Vector transform(Vector v)
{
	Vector vt = v.get();
	v.setX(m[0][0] * vt.getX() + m[0][1] * vt.getY() + m[0][2]);
	v.setY(m[1][0] * vt.getX() + m[1][1] * vt.getY() + m[1][2]);
	return v;
}

I’ve combed this code over for half the day, and spent the other half learning about matrices and writing it in the first place. I know my code isn’t perfect, but I don’t have the know-how to see what I’ve done wrong. If someone can spot the error in my ways or simply point me to a better way to go about this, I’d be really appreciative. I know it’s quite a bit of code, but I’ll take any help I can get. Thanks for taking time to read this and share your input, it means a lot.