OpenCV HoughLines only detects one line in image - Java

I am following the OpenCV docs/tutorial to detect lines in an image. However, I only get one of the four similar lines in the image.
Here is the result
And here is my code:
Mat im = Imgcodecs.imread("C:/Users/valer/eclipse-workspace/thesis-application/StartHere/resource/4 lines.JPG");
Mat gray = new Mat(im.rows(), im.cols(), CvType.CV_8SC1);
Imgproc.cvtColor(im, gray, Imgproc.COLOR_RGB2GRAY);
Imgproc.Canny(gray, gray, 50, 150);
Mat lines = new Mat();
Imgproc.HoughLines(gray, lines, 1, Math.PI / 180, 200);
for (int i = 0; i < lines.cols(); i++) {
    double data[] = lines.get(0, i);
    double rho = data[0];
    double theta = data[1];
    double cosTheta = Math.cos(theta);
    double sinTheta = Math.sin(theta);
    double x0 = cosTheta * rho;
    double y0 = sinTheta * rho;
    Point pt1 = new Point(x0 + 10000 * (-sinTheta), y0 + 10000 * cosTheta);
    Point pt2 = new Point(x0 - 10000 * (-sinTheta), y0 - 10000 * cosTheta);
    Imgproc.line(im, pt1, pt2, new Scalar(0, 0, 200), 3);
}
Imgcodecs.imwrite("C:/Users/valer/eclipse-workspace/thesis-application/StartHere/resource/process/line_output.jpg", im);
I have tried playing around with the threshold parameter, but I kept getting the same (and sometimes worse) results.
Would anyone please point out where I am going wrong?

In the resulting lines matrix, lines are stored by row, not by column.
So lines.rows() gives you the line count, and you can iterate with lines.get(i, 0) to fetch each line, as in the sketch below.
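A minimal sketch of the corrected loop (everything inside the loop stays exactly as in the question; only the indexing changes):

for (int i = 0; i < lines.rows(); i++) {
    double[] data = lines.get(i, 0);
    double rho = data[0];
    double theta = data[1];
    // ...compute pt1/pt2 and draw exactly as before...
}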

Related

In Android, how do I count the number of lines in an image using OpenCV

This is what I tried. I got the output as a grayscale image in img2 of an ImageView object.
The problem is that lines.cols() counts everything as a line.
I want the count to be exactly the number of larger lines as shown in the first pic (I mean the lines that separate the parking lot, each of which a car can occupy). My output image. Can anyone guide me on how to get the exact count of parking lines? I have used OpenCV version 2.4 and have been working on this for the past 2 days.
public String getCount() {
    Bitmap bitmap = BitmapFactory.decodeResource(getApplicationContext().getResources(), R.drawable.park);
    mat = new Mat();
    edges = new Mat();
    Mat mRgba = new Mat(612, 816, CvType.CV_8UC1);
    Mat lines = new Mat();
    Utils.bitmapToMat(bitmap, mat);
    Imgproc.Canny(mat, edges, 50, 90);
    int threshold = 50;
    int minLineSize = 20;
    int lineGap = 20;
    Imgproc.HoughLinesP(edges, lines, 1, Math.PI / 180, threshold, minLineSize, lineGap);
    int count = lines.cols();
    int coun = lines.rows();
    System.out.println("count = " + count);
    System.out.println("coun = " + coun);
    String cou = String.valueOf(count);
    for (int x = 0; x < lines.cols(); x++) {
        double[] vec = lines.get(0, x);
        double x1 = vec[0],
               y1 = vec[1],
               x2 = vec[2],
               y2 = vec[3];
        Point start = new Point(x1, y1);
        Point end = new Point(x2, y2);
        Core.line(mRgba, start, end, new Scalar(255, 0, 0), 3);
    }
    Bitmap bmp = Bitmap.createBitmap(mRgba.cols(), mRgba.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(mRgba, bmp);
    bitmap = bmp;
    Drawable d = new BitmapDrawable(Resources.getSystem(), bitmap);
    img2.setImageDrawable(d);
    return cou;
}
You should adapt one of the many answers regarding counting objects with OpenCV; the kind of detection varies for every distinct case, so you should build your own model for parking lines. Check some of these approaches/detectors: Haar cascade classifier, latent SVM, or Bag of Words.
You could also modify one of the answers that works for something else, like the answer below for coins; just search for the shape of parking lines instead of coins:
http://answers.opencv.org/question/36111/how-to-count-number-of-coins-in-android-opencv/
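Before building a full detector, a simpler first step worth trying (a rough sketch of my own, not from the linked answer) is to filter the HoughLinesP output by segment length so that only the long separator lines are counted; the minLen threshold is a made-up value you would tune per image, and the column-wise layout matches the OpenCV 2.4 Java bindings used in the question:

double minLen = 100; // hypothetical threshold, tune per image
int longLines = 0;
for (int x = 0; x < lines.cols(); x++) {
    double[] vec = lines.get(0, x);
    double dx = vec[2] - vec[0];
    double dy = vec[3] - vec[1];
    if (Math.sqrt(dx * dx + dy * dy) >= minLen) {
        longLines++; // count only the long separator lines
    }
}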

Issue with OpenCV warp perspective and Euclidean distance

My program uses a C# port of OpenCV to find the largest rectangle in the scene and warps the perspective. It almost works, but has one very annoying bug that I just can't figure out.
Using it on a square works perfectly, but it will only work correctly on rectangles in one orientation.
For example, if I give it an image of a note card, I get:
But if I rotate that same note card 90 degrees, I get this:
I know the code for finding rectangles is correct, so the problem has to be in the warp perspective process, which looks like this:
public Mat GetPerspective(List<Point> corners, Mat sub)
{
    //Pretty sure these four lines are the problem
    double top = Math.Sqrt(Math.Pow(corners[0].x - corners[1].x, 2) + Math.Pow(corners[0].y - corners[1].y, 2));
    double right = Math.Sqrt(Math.Pow(corners[1].x - corners[2].x, 2) + Math.Pow(corners[1].y - corners[2].y, 2));
    double bottom = Math.Sqrt(Math.Pow(corners[2].x - corners[3].x, 2) + Math.Pow(corners[2].y - corners[3].y, 2));
    double left = Math.Sqrt(Math.Pow(corners[3].x - corners[1].x, 2) + Math.Pow(corners[3].y - corners[1].y, 2));
    Mat quad = Mat.zeros(new Size(Math.Max(top, bottom), Math.Max(left, right)), CvType.CV_8UC3);
    List<Point> result_points = new List<Point>();
    result_points.Add(new Point(0, 0));
    result_points.Add(new Point(quad.cols(), 0));
    result_points.Add(new Point(quad.cols(), quad.rows()));
    result_points.Add(new Point(0, quad.rows()));
    Mat cornerPts = Converters.vector_Point2f_to_Mat(corners);
    Mat resultPts = Converters.vector_Point2f_to_Mat(result_points);
    Mat transformation = Imgproc.getPerspectiveTransform(cornerPts, resultPts);
    Imgproc.warpPerspective(sub, quad, transformation, quad.size());
    return quad;
}
Try this:
long[] tl = corners[0];
long[] tr = corners[1];
long[] br = corners[2];
long[] bl = corners[3];
float widthA = (float) Math.sqrt((Math.pow((br[0] - bl[0]), 2)) + Math.pow((br[1] - bl[1]), 2));
float widthB = (float) Math.sqrt((Math.pow((tr[0] - tl[0]), 2)) + Math.pow((tr[1] - tl[1]), 2));
int maxWidth = (int) Math.max((widthA), (widthB));
float heightA = (float) Math.sqrt((Math.pow((tr[0] - br[0]), 2)) + Math.pow((tr[1] - br[1]), 2));
float heightB = (float) Math.sqrt((Math.pow((tl[0] - bl[0]), 2)) + Math.pow((tl[1] - bl[1]), 2));
int maxHeight = (int) Math.max(heightA, heightB);
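From there, the destination rectangle and the warp would be built from the computed size. This continuation is my own sketch (names like srcPts/dstPts are assumptions), not part of the original answer:

Mat quad = new Mat();
MatOfPoint2f srcPts = new MatOfPoint2f(
        new Point(tl[0], tl[1]), new Point(tr[0], tr[1]),
        new Point(br[0], br[1]), new Point(bl[0], bl[1]));
MatOfPoint2f dstPts = new MatOfPoint2f(
        new Point(0, 0), new Point(maxWidth - 1, 0),
        new Point(maxWidth - 1, maxHeight - 1), new Point(0, maxHeight - 1));
Mat transform = Imgproc.getPerspectiveTransform(srcPts, dstPts);
Imgproc.warpPerspective(sub, quad, transform, new Size(maxWidth, maxHeight));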
You just mistyped the left point in your calculation; the left edge should be measured from corners[3] to corners[0], not corners[1]:
public Mat GetPerspective(List<Point> corners, Mat sub)
{
    //Pretty sure these four lines are the problem
    double top = System.Math.Sqrt(System.Math.Pow(corners[0].x - corners[1].x, 2) + System.Math.Pow(corners[0].y - corners[1].y, 2));
    double right = System.Math.Sqrt(System.Math.Pow(corners[1].x - corners[2].x, 2) + System.Math.Pow(corners[1].y - corners[2].y, 2));
    double bottom = System.Math.Sqrt(System.Math.Pow(corners[2].x - corners[3].x, 2) + System.Math.Pow(corners[2].y - corners[3].y, 2));
    double left = System.Math.Sqrt(System.Math.Pow(corners[3].x - corners[0].x, 2) + System.Math.Pow(corners[3].y - corners[0].y, 2));
    Mat quad = Mat.zeros(new Size(System.Math.Max(top, bottom), System.Math.Max(left, right)), CvType.CV_8UC3);
    List<Point> result_points = new List<Point>();
    result_points.Add(new Point(0, 0));
    result_points.Add(new Point(quad.cols(), 0));
    result_points.Add(new Point(quad.cols(), quad.rows()));
    result_points.Add(new Point(0, quad.rows()));
    Mat cornerPts = OpenCVForUnity.UtilsModule.Converters.vector_Point2f_to_Mat(corners);
    Mat resultPts = OpenCVForUnity.UtilsModule.Converters.vector_Point2f_to_Mat(result_points);
    Mat transformation = Imgproc.getPerspectiveTransform(cornerPts, resultPts);
    Imgproc.warpPerspective(sub, quad, transformation, quad.size());
    return quad;
}
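The only functional change from the question's version is the fourth distance: left is now measured between corners[3] and corners[0] instead of corners[3] and corners[1], which is why the rotated note card warped incorrectly before.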

Hough circle detection accuracy very low

I am trying to detect a circular shape from an image which appears to have very good definition. I do realize that part of the circle is missing but from what I've read about the Hough transform it doesn't seem like that should cause the problem I'm experiencing.
Input:
Output:
Code:
// Read the image
Mat src = Highgui.imread("input.png");
// Convert it to gray
Mat src_gray = new Mat();
Imgproc.cvtColor(src, src_gray, Imgproc.COLOR_BGR2GRAY);
// Reduce the noise so we avoid false circle detection
//Imgproc.GaussianBlur(src_gray, src_gray, new Size(9, 9), 2, 2);
Mat circles = new Mat();
// Apply the Hough Transform to find the circles
Imgproc.HoughCircles(src_gray, circles, Imgproc.CV_HOUGH_GRADIENT, 1, 1, 160, 25, 0, 0);
// Draw the circles detected
for (int i = 0; i < circles.cols(); i++) {
    double[] vCircle = circles.get(0, i);
    Point center = new Point(vCircle[0], vCircle[1]);
    int radius = (int) Math.round(vCircle[2]);
    // circle center
    Core.circle(src, center, 3, new Scalar(0, 255, 0), -1, 8, 0);
    // circle outline
    Core.circle(src, center, radius, new Scalar(0, 0, 255), 3, 8, 0);
}
// Save the visualized detection.
String filename = "output.png";
System.out.println(String.format("Writing %s", filename));
Highgui.imwrite(filename, src);
I have Gaussian blur commented out because (counter-intuitively) it was greatly increasing the number of equally inaccurate circles found.
Is there anything wrong with my input image that would cause Hough not to work as well as I expect? Are my parameters way off?
EDIT: The first answer brought up a good point about the min/max radius hint for Hough. I resisted adding those parameters because the example image in this post is just one of thousands of images, all with varying radii from ~20 to almost infinity.
I've adapted my RANSAC algorithm from this answer: Detect semi-circle in opencv
Idea:
1. randomly choose 3 points from your binary edge image
2. create a circle from those 3 points
3. test how "good" this circle is
4. if it is better than the best circle found so far in this image, remember it
5. loop steps 1-4 until some number of iterations is reached, then accept the best circle found
6. remove that accepted circle from the image
7. repeat steps 1-6 until you have found all circles
Problems:
- at the moment you must know how many circles you want to find in the image
- tested only on that one image
C++ code below.
Result:
Code:
inline void getCircle(cv::Point2f& p1, cv::Point2f& p2, cv::Point2f& p3, cv::Point2f& center, float& radius)
{
    float x1 = p1.x;
    float x2 = p2.x;
    float x3 = p3.x;
    float y1 = p1.y;
    float y2 = p2.y;
    float y3 = p3.y;
    // PLEASE CHECK FOR TYPOS IN THE FORMULA :)
    center.x = (x1*x1 + y1*y1)*(y2-y3) + (x2*x2 + y2*y2)*(y3-y1) + (x3*x3 + y3*y3)*(y1-y2);
    center.x /= (2*(x1*(y2-y3) - y1*(x2-x3) + x2*y3 - x3*y2));
    center.y = (x1*x1 + y1*y1)*(x3-x2) + (x2*x2 + y2*y2)*(x1-x3) + (x3*x3 + y3*y3)*(x2-x1);
    center.y /= (2*(x1*(y2-y3) - y1*(x2-x3) + x2*y3 - x3*y2));
    radius = sqrt((center.x-x1)*(center.x-x1) + (center.y-y1)*(center.y-y1));
}
std::vector<cv::Point2f> getPointPositions(cv::Mat binaryImage)
{
    std::vector<cv::Point2f> pointPositions;
    for (unsigned int y = 0; y < binaryImage.rows; ++y)
    {
        //unsigned char* rowPtr = binaryImage.ptr<unsigned char>(y);
        for (unsigned int x = 0; x < binaryImage.cols; ++x)
        {
            //if (rowPtr[x] > 0) pointPositions.push_back(cv::Point2i(x, y));
            if (binaryImage.at<unsigned char>(y, x) > 0) pointPositions.push_back(cv::Point2f(x, y));
        }
    }
    return pointPositions;
}
float verifyCircle(cv::Mat dt, cv::Point2f center, float radius, std::vector<cv::Point2f>& inlierSet)
{
    unsigned int counter = 0;
    unsigned int inlier = 0;
    float minInlierDist = 2.0f;
    float maxInlierDistMax = 100.0f;
    float maxInlierDist = radius / 25.0f;
    if (maxInlierDist < minInlierDist) maxInlierDist = minInlierDist;
    if (maxInlierDist > maxInlierDistMax) maxInlierDist = maxInlierDistMax;
    // choose samples along the circle and count inlier percentage
    for (float t = 0; t < 2 * 3.14159265359f; t += 0.05f)
    {
        counter++;
        float cX = radius * cos(t) + center.x;
        float cY = radius * sin(t) + center.y;
        if (cX >= 0 && cX < dt.cols && cY >= 0 && cY < dt.rows
                && dt.at<float>(cY, cX) < maxInlierDist)
        {
            inlier++;
            inlierSet.push_back(cv::Point2f(cX, cY));
        }
    }
    return (float)inlier / float(counter);
}
float evaluateCircle(cv::Mat dt, cv::Point2f center, float radius)
{
    float completeDistance = 0.0f;
    int counter = 0;
    float maxDist = 1.0f; // TODO: this might depend on the size of the circle!
    float minStep = 0.001f;
    // choose samples along the circle and count inlier percentage
    // HERE IS THE TRICK: no minimum/maximum circle radius is used; the number of generated points along the circle depends on the radius.
    // If this is too slow for you (e.g. too many points created for each circle), increase the step parameter, but only by a factor so that it still depends on the radius.
    // The step parameter depends on the circle size, otherwise small circles would create more inliers on the circle.
    float step = 2 * 3.14159265359f / (6.0f * radius);
    if (step < minStep) step = minStep; // TODO: find a good value here.
    //for (float t = 0; t < 2*3.14159265359f; t += 0.05f) // this version, which doesn't depend on the radius, is much worse!
    for (float t = 0; t < 2 * 3.14159265359f; t += step)
    {
        float cX = radius * cos(t) + center.x;
        float cY = radius * sin(t) + center.y;
        if (cX >= 0 && cX < dt.cols && cY >= 0 && cY < dt.rows
                && dt.at<float>(cY, cX) <= maxDist)
        {
            completeDistance += dt.at<float>(cY, cX);
            counter++;
        }
    }
    return counter;
}
int main()
{
    //RANSAC
    cv::Mat color = cv::imread("HoughCirclesAccuracy.png");
    // convert to grayscale
    cv::Mat gray;
    cv::cvtColor(color, gray, CV_RGB2GRAY);
    // get binary image
    cv::Mat mask = gray > 0;
    unsigned int numberOfCirclesToDetect = 2; // TODO: if unknown, you'll have to find some nice criteria to stop finding more (semi-)circles
    for (unsigned int j = 0; j < numberOfCirclesToDetect; ++j)
    {
        std::vector<cv::Point2f> edgePositions;
        edgePositions = getPointPositions(mask);
        std::cout << "number of edge positions: " << edgePositions.size() << std::endl;
        // create distance transform to efficiently evaluate distance to nearest edge
        cv::Mat dt;
        cv::distanceTransform(255 - mask, dt, CV_DIST_L1, 3);
        unsigned int nIterations = 0;
        cv::Point2f bestCircleCenter;
        float bestCircleRadius;
        //float bestCVal = FLT_MAX;
        float bestCVal = -1;
        //float minCircleRadius = 20.0f; // TODO: if you have some knowledge about your image you might be able to adjust the minimum circle radius parameter.
        float minCircleRadius = 0.0f;
        //TODO: implement some more intelligent ransac without fixed number of iterations
        for (unsigned int i = 0; i < 2000; ++i)
        {
            //RANSAC: randomly choose 3 points and create a circle:
            //TODO: choose randomly but more intelligently,
            //so that it is more likely to choose three points of a circle.
            //For example if there are many small circles, it is unlikely to randomly choose 3 points of the same circle.
            unsigned int idx1 = rand() % edgePositions.size();
            unsigned int idx2 = rand() % edgePositions.size();
            unsigned int idx3 = rand() % edgePositions.size();
            // we need 3 different samples:
            if (idx1 == idx2) continue;
            if (idx1 == idx3) continue;
            if (idx3 == idx2) continue;
            // create circle from 3 points:
            cv::Point2f center;
            float radius;
            getCircle(edgePositions[idx1], edgePositions[idx2], edgePositions[idx3], center, radius);
            if (radius < minCircleRadius) continue;
            //verify or falsify the circle by inlier counting:
            //float cPerc = verifyCircle(dt, center, radius, inlierSet);
            float cVal = evaluateCircle(dt, center, radius);
            if (cVal > bestCVal)
            {
                bestCVal = cVal;
                bestCircleRadius = radius;
                bestCircleCenter = center;
            }
            ++nIterations;
        }
        std::cout << "current best circle: " << bestCircleCenter << " with radius: " << bestCircleRadius << " and nInlier " << bestCVal << std::endl;
        cv::circle(color, bestCircleCenter, bestCircleRadius, cv::Scalar(0, 0, 255));
        //TODO: hold and save the detected circle.
        //TODO: instead of overwriting the mask with a drawn circle it might be better to hold and ignore detected circles and not count new circles which are too close to an old one.
        // in this current version the chosen radius to overwrite the mask is fixed and might remove parts of other circles too!
        // update mask: remove the detected circle!
        cv::circle(mask, bestCircleCenter, bestCircleRadius, 0, 10); // here the radius is fixed, which isn't so nice.
    }
    cv::namedWindow("edges"); cv::imshow("edges", mask);
    cv::namedWindow("color"); cv::imshow("color", color);
    cv::imwrite("detectedCircles.png", color);
    cv::waitKey(-1);
    return 0;
}
If you set the minRadius and maxRadius parameters properly, it will give you good results.
For your image, I tried the following parameters:
method - CV_HOUGH_GRADIENT
minDist - 100
dp - 1
param1 - 80
param2 - 10
minRadius - 250
maxRadius - 300
I got the following output
Note: I tried this in C++.
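For reference, the same parameter set in the Java bindings used in the question would look something like this sketch (the CV_HOUGH_GRADIENT constant lives in Imgproc in the 2.4-era Java API):

Imgproc.HoughCircles(src_gray, circles, Imgproc.CV_HOUGH_GRADIENT,
        1,      // dp
        100,    // minDist
        80,     // param1: higher Canny threshold
        10,     // param2: accumulator threshold
        250,    // minRadius
        300);   // maxRadius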

Java OpenCV + Tesseract OCR "code" recognition

I'm trying to automate a process where someone manually converts a code to a digital one.
Then I started reading about OCR. So I installed Tesseract OCR and tried it on some images. It doesn't even detect anything close to the code.
After reading some questions on Stack Overflow, I figured that the images need some preprocessing, like deskewing the image so the code is horizontal, which can be done with OpenCV for example.
Now my questions are:
What kind of preprocessing or other methods should be used in a case like the above image?
Secondly, can I rely on the output? Will it always work in cases like the above image?
I hope someone can help me!
I have decided to capture the whole card instead of only the code. By capturing the whole card it is possible to transform it to a plain perspective, and then I can easily get the "code" region.
I also learned a lot of things, especially regarding speed. This function is slow on high-resolution images; it can take up to 10 seconds at a size of 3264 x 1836.
What I did to speed things up is resize the input matrix by a factor of 1/4, which makes it 4^2 times faster at a minimal loss of precision. The next step is scaling the quadrangle we found back to the normal size, so that we can transform the quadrangle to a plain perspective using the original source; a sketch of that idea follows below.
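A minimal sketch of that resize-then-rescale idea, assuming a find() method like the one shown below (the 1/4 factor and the point scaling are the essential parts):

Mat small = new Mat();
Imgproc.resize(src, small, new Size(src.width() / 4, src.height() / 4));
MatOfPoint contour = find(small); // detect the quadrangle on the small image (fast)
if (contour != null) {
    // scale the found quadrangle back up to the original resolution
    Point[] pts = contour.toArray();
    for (Point p : pts) {
        p.x *= 4;
        p.y *= 4;
    }
    contour = new MatOfPoint(pts);
}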
The code I created for detecting the largest area is heavily based on code I found on Stack Overflow. Unfortunately those snippets didn't work as expected for me, so I combined several and modified them a lot.
This is what I got:
private static double angle(Point p1, Point p2, Point p0) {
    double dx1 = p1.x - p0.x;
    double dy1 = p1.y - p0.y;
    double dx2 = p2.x - p0.x;
    double dy2 = p2.y - p0.y;
    return (dx1 * dx2 + dy1 * dy2) / Math.sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}

private static MatOfPoint find(Mat src) throws Exception {
    Mat blurred = src.clone();
    Imgproc.medianBlur(src, blurred, 9);
    Mat gray0 = new Mat(blurred.size(), CvType.CV_8U), gray = new Mat();
    List<MatOfPoint> contours = new ArrayList<>();
    List<Mat> blurredChannel = new ArrayList<>();
    blurredChannel.add(blurred);
    List<Mat> gray0Channel = new ArrayList<>();
    gray0Channel.add(gray0);
    MatOfPoint2f approxCurve;
    double maxArea = 0;
    int maxId = -1;
    for (int c = 0; c < 3; c++) {
        int ch[] = {c, 0};
        Core.mixChannels(blurredChannel, gray0Channel, new MatOfInt(ch));
        int thresholdLevel = 1;
        for (int t = 0; t < thresholdLevel; t++) {
            if (t == 0) {
                Imgproc.Canny(gray0, gray, 10, 20, 3, true); // true ?
                Imgproc.dilate(gray, gray, new Mat(), new Point(-1, -1), 1); // 1 ?
            } else {
                Imgproc.adaptiveThreshold(gray0, gray, thresholdLevel, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY, (src.width() + src.height()) / 200, t);
            }
            Imgproc.findContours(gray, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
            for (MatOfPoint contour : contours) {
                MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
                double area = Imgproc.contourArea(contour);
                approxCurve = new MatOfPoint2f();
                Imgproc.approxPolyDP(temp, approxCurve, Imgproc.arcLength(temp, true) * 0.02, true);
                if (approxCurve.total() == 4 && area >= maxArea) {
                    double maxCosine = 0;
                    List<Point> curves = approxCurve.toList();
                    for (int j = 2; j < 5; j++) {
                        double cosine = Math.abs(angle(curves.get(j % 4), curves.get(j - 2), curves.get(j - 1)));
                        maxCosine = Math.max(maxCosine, cosine);
                    }
                    if (maxCosine < 0.3) {
                        maxArea = area;
                        maxId = contours.indexOf(contour);
                        //contours.set(maxId, getHull(contour));
                    }
                }
            }
        }
    }
    if (maxId >= 0) {
        return contours.get(maxId);
        //Imgproc.drawContours(src, contours, maxId, new Scalar(255, 0, 0, .8), 8);
    }
    return null;
}
You can call it like so:
MatOfPoint contour = find(src);
See this answer for quadrangle detection from a contour and transforming it to a plain perspective:
Java OpenCV deskewing a contour

Quaternions and drawing with glMultMatrix (OpenGL)

I have a problem again. For a couple of days I have been trying to write a camera in Java without gimbal lock. To solve this I am trying to use quaternions and glMultMatrix from OpenGL. I also use the library LWJGL, especially the classes Matrix4f, Vector4f and Quaternion.
Here is the code which calculates the Quaternions:
int DX = Mouse.getDX(); // delta mouse movement
int DY = Mouse.getDY();
Vector4f axisY = new Vector4f();
axisY.set(0, 1, 0, DY);
Vector4f axisX = new Vector4f();
axisX.set(1, 0, 0, DX);
Quaternion q1 = new Quaternion();
q1.setFromAxisAngle(axisX);
Quaternion q2 = new Quaternion();
q2.setFromAxisAngle(axisY);
Quaternion.mul(q1, q2, q1);
Quaternion.mul(camera, q1, camera);
And with this I convert the Quaternion into a matrix:
public Matrix4f quatToMatrix(Quaternion q) {
    double sqw = q.w * q.w;
    double sqx = q.x * q.x;
    double sqy = q.y * q.y;
    double sqz = q.z * q.z;
    Matrix4f m = new Matrix4f();
    // invs (inverse square length) is only required if quaternion is not already normalised
    double invs = 1 / (sqx + sqy + sqz + sqw);
    m.m00 = (float) ((sqx - sqy - sqz + sqw) * invs); // since sqw + sqx + sqy + sqz = 1/invs*invs
    m.m11 = (float) ((-sqx + sqy - sqz + sqw) * invs);
    m.m22 = (float) ((-sqx - sqy + sqz + sqw) * invs);
    double tmp1 = q.x * q.y;
    double tmp2 = q.z * q.w;
    m.m10 = (float) (2.0 * (tmp1 + tmp2) * invs);
    m.m01 = (float) (2.0 * (tmp1 - tmp2) * invs);
    tmp1 = q.x * q.z;
    tmp2 = q.y * q.w;
    m.m20 = (float) (2.0 * (tmp1 - tmp2) * invs);
    m.m02 = (float) (2.0 * (tmp1 + tmp2) * invs);
    tmp1 = q.y * q.z;
    tmp2 = q.x * q.w;
    m.m21 = (float) (2.0 * (tmp1 + tmp2) * invs);
    m.m12 = (float) (2.0 * (tmp1 - tmp2) * invs);
    return m;
}
A converted Quaternion looks for example like this:
-0.5191307 0.027321965 -0.85425806 0.0
0.048408303 -0.9969446 -0.061303165 0.0
-0.8533229 -0.07317754 0.51622194 0.0
0.0 0.0 0.0 1.0
After this I draw the scene with this code:
java.nio.FloatBuffer fb = BufferUtils.createFloatBuffer(32);
quatToMatrix(camera).store(fb);
GL11.glMultMatrix(fb);
drawPlayer();
My problem now is that the camera either doesn't move, or doesn't move enough, because I only see my player model and nothing else (there is also another cube in the scene which I draw after the player model).
I don't know what exactly is wrong. Is it the drawing, the rotation, or the conversion?
Please help me.
EDIT:
This is my OpenGL initialisation:
GL11.glMatrixMode(GL11.GL_PROJECTION);
GL11.glLoadIdentity();
GLU.gluPerspective(45.0f, ((float) setting.displayW() / (float) setting.displayH()), 0.1f,10000.0f);
GL11.glMatrixMode(GL11.GL_MODELVIEW);
GL11.glLoadIdentity();
GL11.glShadeModel(GL11.GL_SMOOTH);
GL11.glEnable(GL11.GL_DEPTH_TEST);
GL11.glDepthFunc(GL11.GL_LEQUAL);
GL11.glHint(GL11.GL_PERSPECTIVE_CORRECTION_HINT, GL11.GL_NICEST);
Any idea what is wrong?
You've got some errors in your mouse-movement-to-quaternion function (where do you make a quaternion of the X movement?). Besides that, we'd also need to see the rest of your drawing setup code (projection matrix, modelview initialization).
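A hedged sketch of the usual fix, assuming LWJGL 2's Quaternion/Vector4f where the w component passed to setFromAxisAngle is the rotation angle in radians: the horizontal movement DX should drive a rotation around the Y axis, and the vertical movement DY a rotation around the X axis, scaled by a sensitivity factor:

int DX = Mouse.getDX();
int DY = Mouse.getDY();
float sensitivity = 0.005f; // made-up value, tune to taste

// DX (horizontal movement) -> yaw around the Y axis
Quaternion yaw = new Quaternion();
yaw.setFromAxisAngle(new Vector4f(0, 1, 0, DX * sensitivity));

// DY (vertical movement) -> pitch around the X axis
Quaternion pitch = new Quaternion();
pitch.setFromAxisAngle(new Vector4f(1, 0, 0, DY * sensitivity));

Quaternion.mul(yaw, pitch, yaw);
Quaternion.mul(camera, yaw, camera); // the multiplication order decides local vs. global rotation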
