I'm trying to fade an image in and out using JOGL. I can render the image fine; however, it is always rendered at full intensity. Below is the code:
private void render(GLAutoDrawable glad) {
    GL2 gl = glad.getGL().getGL2();
    // Prepares the rendering: things such as disabling depth, clearing the buffer bit, etc.
    setUpGL(gl);
    BufferedImage background = main.getCurrentState().getImage();
    // Render the background, adapted from here: http://bit.ly/QjeusP
    renderImage(gl, background, background.getWidth(), background.getHeight());
    gl.glEnable(GL2.GL_BLEND);
    gl.glBlendFunc(GL2.GL_SRC_ALPHA, GL2.GL_ONE_MINUS_SRC_ALPHA);
    gl.glColor4f(0, 0, 0, alpha);
    gl.glBegin(GL2.GL_QUADS);
    gl.glVertex2f(0, background.getHeight());
    gl.glVertex2f(background.getWidth(), background.getHeight());
    gl.glVertex2f(background.getWidth(), 0);
    gl.glVertex2f(0, 0);
    gl.glEnd();
    gl.glDisable(GL2.GL_BLEND);
}
The variable alpha is a float that is incremented by 0.1 on each render; once it reaches 1.0, it is decremented by 0.1 until it reaches 0, then incremented again, and so on.
EDIT 1: Here is the code that updates the alpha value:
private void update() {
    if (isTransparent) {
        alpha += 0.1f;
        if (alpha >= 1.0f) {
            alpha = 1.0f;
            isTransparent = false;
        }
    } else {
        alpha -= 0.1f;
        if (alpha <= 0.0f) {
            alpha = 0.0f;
            isTransparent = true;
        }
    }
}
It may be a depth-testing issue. I don't know what renderImage does, but if it just renders a screen-aligned textured quad at z = 0, then the quad you render over it will not be rendered when depth testing is enabled, as it is at the same depth as the textured quad. So try glDisable(GL_DEPTH_TEST) while rendering your half-black quad (and glEnable it afterwards).
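A minimal sketch of that fix, assuming setUpGL() leaves depth testing enabled:

gl.glDisable(GL2.GL_DEPTH_TEST); // let the fade quad pass regardless of depth
gl.glEnable(GL2.GL_BLEND);
gl.glBlendFunc(GL2.GL_SRC_ALPHA, GL2.GL_ONE_MINUS_SRC_ALPHA);
gl.glColor4f(0, 0, 0, alpha);
// ... draw the quad as before ...
gl.glDisable(GL2.GL_BLEND);
gl.glEnable(GL2.GL_DEPTH_TEST); // restore depth testing for the rest of the frame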
EDIT: Stupid thought, but could it be that you're changing the alpha so fast that in the end you only see a constant gray shade?
I'm playing around with some basic OpenGL stuff and I'm trying to set up a simple square with lighting enabled, but the lighting is not correct, so there is something wrong with my normals, I guess.
Or is my understanding of normals totally wrong?
Here's my rendering code (by the way, I'm using LWJGL):
public class Renderer {
    DisplayMode displayMode;
    int i;
    int width;
    int height;
    private boolean drawAxes = false;
    private float rotation = 40.0f;
    private float zoom = -20f;

    // ----------- Variables added for Lighting Test -----------//
    private FloatBuffer matSpecular;
    private FloatBuffer lightPosition;
    private FloatBuffer whiteLight;
    private FloatBuffer lModelAmbient;

    public Renderer(int width, int height) {
        this.width = width;
        this.height = height;
    }
    public static Renderer start() throws LWJGLException {
        Renderer r = new Renderer(800, 600);
        r.initContext();
        r.run();
        return r;
    }

    private void initContext() throws LWJGLException {
        Display.setFullscreen(false);
        DisplayMode d[] = Display.getAvailableDisplayModes();
        for (int i = 0; i < d.length; i++) {
            if (d[i].getWidth() == width && d[i].getHeight() == height && d[i].getBitsPerPixel() == 32) {
                displayMode = d[i];
                break;
            }
        }
        Display.setDisplayMode(displayMode);
        Display.create();
    }
    private void run() {
        initGL();
        while (!Display.isCloseRequested()) {
            preRender();
            render();
            Display.update();
            Display.sync(60);
        }
        Display.destroy();
    }

    private void initGL() {
        GL11.glClearColor(0.0f, 0.0f, 0.0f, 0.0f); // Black Background
        GL11.glClearDepth(1.0); // Depth Buffer Setup
        GL11.glEnable(GL11.GL_DEPTH_TEST); // Enables Depth Testing
        GL11.glDepthFunc(GL11.GL_LEQUAL); // The Type Of Depth Testing To Do
        GL11.glMatrixMode(GL11.GL_PROJECTION); // Select The Projection Matrix
        GL11.glLoadIdentity(); // Reset The Projection Matrix
        // Calculate The Aspect Ratio Of The Window
        GLU.gluPerspective(45.0f, (float) displayMode.getWidth() / (float) displayMode.getHeight(), 0.1f, 100.0f);
        GL11.glMatrixMode(GL11.GL_MODELVIEW); // Select The Modelview Matrix
        // Really Nice Perspective Calculations
        GL11.glHint(GL11.GL_PERSPECTIVE_CORRECTION_HINT, GL11.GL_NICEST);
        GL11.glPolygonMode(GL11.GL_FRONT_AND_BACK, GL11.GL_FILL);
        initLightArrays();
        glShadeModel(GL_SMOOTH);
        glMaterial(GL_FRONT, GL_SPECULAR, matSpecular);      // sets specular material color
        glMaterialf(GL_FRONT, GL_SHININESS, 100.0f);         // sets shininess
        glLight(GL_LIGHT0, GL_POSITION, lightPosition);      // sets light position
        glLight(GL_LIGHT0, GL_SPECULAR, whiteLight);         // sets specular light to white
        glLight(GL_LIGHT0, GL_DIFFUSE, whiteLight);          // sets diffuse light to white
        glLightModel(GL_LIGHT_MODEL_AMBIENT, lModelAmbient); // global ambient light
        glEnable(GL_LIGHTING);                               // enables lighting
        glEnable(GL_LIGHT0);                                 // enables light0
        glEnable(GL_COLOR_MATERIAL);                         // enables OpenGL to use glColor3f to define material color
        glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE);   // tell OpenGL glColor3f affects the ambient and diffuse properties of the material
    }
    private void preRender() {
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();
        GL11.glTranslatef(0f, 0f, zoom);
        GL11.glRotatef(-60f, 1f, 0f, 0f);
        GL11.glRotatef(rotation, 0f, 0f, 1f);
    }
    private void render() {
        FloatBuffer cBuffer = BufferUtils.createFloatBuffer(6 * 3);
        float[] cArray = { 1f, 1f, 1f,
                           1f, 1f, 1f,
                           1f, 1f, 1f,
                           1f, 1f, 1f,
                           1f, 1f, 1f,
                           1f, 1f, 1f };
        cBuffer.put(cArray);
        cBuffer.flip();
        FloatBuffer vBuffer = BufferUtils.createFloatBuffer(6 * 3);
        float[] vArray = {  1f,  1f, 0f,
                           -1f, -1f, 0,
                            1f, -1f, 0,
                            1f,  1f, 0f,
                           -1f,  1f, 0,
                           -1f, -1f, 0 };
        vBuffer.put(vArray);
        vBuffer.flip();
        FloatBuffer nBuffer = BufferUtils.createFloatBuffer(6 * 3);
        float[] nArray = { 0f, 0f, 1f,
                           0f, 0f, 1f,
                           0f, 0f, 1f,
                           0f, 0f, 1f,
                           0f, 0f, 1f,
                           0f, 0f, 1f };
        nBuffer.put(nArray);
        nBuffer.flip();
        glEnableClientState(GL_VERTEX_ARRAY);
        glEnableClientState(GL_COLOR_ARRAY);
        glEnableClientState(GL_NORMAL_ARRAY);
        glColorPointer(3, 0, cBuffer);
        glVertexPointer(3, 0, vBuffer);
        glNormalPointer(3, nBuffer);
        glDrawArrays(GL_TRIANGLES, 0, 6);
        glDisableClientState(GL_COLOR_ARRAY);
        glDisableClientState(GL_VERTEX_ARRAY);
        glDisableClientState(GL_NORMAL_ARRAY);
        if (drawAxes) {
            drawAxes(6);
        }
        glTranslatef(0.0f, 0.0f, 3);
        glColor3f(0.1f, 0.4f, 0.9f);
    }
    public static void main(String[] args) throws LWJGLException {
        System.setProperty("org.lwjgl.opengl.Display.allowSoftwareOpenGL", "true");
        Renderer.start();
    }
}
You are setting your normal pointer wrong:
glColorPointer(3, 0, cBuffer);
glVertexPointer(3, 0, vBuffer);
glNormalPointer(3, nBuffer);
The fixed-function GL always expects normals to be 3-dimensional vectors, hence the size parameter (which tells the GL how many components each vector has) is not present in glNormalPointer. The 3 you are setting here is the stride parameter, which specifies the byte offset between consecutive array elements. A stride of 3 does not make any sense: it tells the GL that the second normal begins 3 bytes into the array, which means that when it reads the second normal's x component, it combines the last byte of your first normal's x component with 3 bytes of your first normal's y component, and so on...
Since your array is tightly packed, you can use the shortcut 0 here, like you do with the other pointers.
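With LWJGL's FloatBuffer overload, the corrected call is simply:

glNormalPointer(0, nBuffer); // stride 0 = tightly packed, 3 floats per normal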
However, you must be aware that all of this has been deprecated for almost a decade; modern core versions of OpenGL do not support the fixed-function pipeline at all. If you are learning OpenGL nowadays, I strongly recommend learning modern, shader-based GL instead.
Without seeing more of your code, it's very difficult to see exactly what's going wrong.
However, I do see one thing that could be a problem:
FloatBuffer vBuffer = BufferUtils.createFloatBuffer(6 * 3);
float[] vArray = {  1f,  1f, 0f,
                    1f, -1f, 0,
                   -1f, -1f, 0,
                    1f,  1f, 0f,
                   -1f,  1f, 0,
                   -1f, -1f, 0 };
vBuffer.put(vArray);
vBuffer.flip();
The winding order of your triangles is not the same. The first triangle winds clockwise, whereas the second triangle winds counter-clockwise. You'll need to reorder the vertices to make sure that they wind in the same direction. OpenGL usually prefers things to wind counter-clockwise, so if I were you, I'd flip the first triangle.
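For example, a consistently counter-clockwise version of the array quoted above could look like this (a sketch; the first triangle is reversed, the second is left as it is):

float[] vArray = { -1f, -1f, 0f, // first triangle, reversed to wind counter-clockwise
                    1f, -1f, 0f,
                    1f,  1f, 0f,
                    1f,  1f, 0f, // second triangle already winds counter-clockwise
                   -1f,  1f, 0f,
                   -1f, -1f, 0f };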
If you're still getting the problem after you've done this, then post the rest of your draw code, as what you're showing here doesn't give a lot of information.
I have a problem: balls that move too fast can fly through walls (walls are 4 pixels wide, and the speed of a ball is occasionally more than 400 pixels per second, which is more than 4 pixels per update assuming 60 fps). I researched this on StackOverflow, but the solutions for others are not suitable for me, as they are using rectangles and I am using pixel collision. Here is the method that returns whether the ball intersects with a wall (the method is in the Ball class):
public boolean intersects(Wall w) {
    // size is the diameter of the ball
    BufferedImage im1 = new BufferedImage(size, size, BufferedImage.TYPE_INT_ARGB);
    BufferedImage im2 = new BufferedImage(size, size, BufferedImage.TYPE_INT_ARGB);
    Graphics2D g1 = im1.createGraphics();
    Graphics2D g2 = im2.createGraphics();
    g1.translate(-x + size/2, -y + size/2);
    g2.translate(-x + size/2, -y + size/2);
    render(g1);
    w.render(g2);
    g1.dispose();
    g2.dispose();
    for (int x = 0; x < im1.getWidth(); x++) {
        for (int y = 0; y < im1.getHeight(); y++) {
            Color c1 = new Color(im1.getRGB(x, y), true);
            Color c2 = new Color(im2.getRGB(x, y), true);
            if (c1.getAlpha() != 0 && c2.getAlpha() != 0) {
                return true;
            }
        }
    }
    return false;
}
Here is how the ball is drawn:
public void render(Graphics2D g) {
    color = new Color(Color.HSBtoRGB(hue, 0.5f, 0.5f));
    g.setColor(color);
    g.fillOval((int) (x - size/2), (int) (y - size/2), size, size);
}
The wall is simply defined as 2 points, and here is how the wall is drawn:
public void render(Graphics2D g2) {
    g2.setColor(new Color(r, g, b));
    g2.setStroke(new BasicStroke(width)); // width = 4
    g2.draw(new Line2D.Float(p1.x, p1.y, p2.x, p2.y));
}
I have a couple of quick ideas that you might want to attempt.
Why not check the ball's path endpoints against the dimensions of the wall? (Some simple algebra, and I think it may be what you are already trying to do.) If you need help with the algebra involved, I'll include a link below. Basically, record the starting point of the ball (prior to moving), and on update check the old location against the estimated new location and run a function to see if a collision occurs. This will become tricky, however, if you want realistic physics.
https://gamedev.stackexchange.com/questions/26004/how-to-detect-2d-line-on-line-collision
In the above link, just assume each wall is one line, and the ball's previous coordinates and expected next coordinates form the other line. This method works very well if the ball only moves in straight lines between updates.
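Here is a minimal sketch of that test; the method and its parameter names are illustrative, not from your code. Pass the ball's previous and next centers as one segment and the wall's two points (w.p1, w.p2) as the other:

// Returns true if segment (x1,y1)-(x2,y2) intersects segment (x3,y3)-(x4,y4).
public static boolean segmentsIntersect(float x1, float y1, float x2, float y2,
                                        float x3, float y3, float x4, float y4) {
    float d = (x2 - x1) * (y4 - y3) - (y2 - y1) * (x4 - x3);
    if (d == 0) return false; // parallel segments: no single crossing point
    float t = ((x3 - x1) * (y4 - y3) - (y3 - y1) * (x4 - x3)) / d;
    float u = ((x3 - x1) * (y2 - y1) - (y3 - y1) * (x2 - x1)) / d;
    return t >= 0 && t <= 1 && u >= 0 && u <= 1; // crossing lies within both segments
}

To account for the ball's size, you could additionally offset the wall line by the ball's radius before testing.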
Your only alternative might be to have two simultaneous models running: a visual space showing the ball and walls, and a virtual setup checking the physics behind what you are showing.
What I want is for the bitmap font to change size accordingly when the screen size changes. What I mean is that on my computer the font appears rather large, but on my phone it is a little font that is harder to read. I could change the size, but I want it to look similar on all screens, instead of being large on one screen and smaller on another. Here is my code so you can see what I have to work with:
public void render() {
    // score system
    scoreFont.setColor(1.0f, 1.0f, 1.0f, 1.0f);
    scoreBatch.begin();
    scoreFont.draw(scoreBatch, Long.toString(getScore()), 10, Gdx.graphics.getHeight() - 10);
    scoreFont.setScale(3, 3);
    scoreBatch.end();
}
public void resize(int width, int height) {
    camera.viewportWidth = 450;
    camera.viewportHeight = 250;
    camera.update();
    stage.setViewport(450, 250, true);
    stage.getCamera().translate(-stage.getGutterWidth(), -stage.getGutterHeight(), 0);
}
Think of it this way: you always want to keep the same ratio.
For example:
500 / 3 = 450 / x
where x is the new size of your text. So you have to do some cross-multiplying:
500x = 1350
x = 1350 ÷ 500 = 2.7
So now, to do this programmatically:
public float resizeText(float width, float currentSize, float currentWidth) {
    // currentWidth / currentSize = width / x
    float a = width * currentSize; // 450 * 3 in the example above
    float b = a / currentWidth;
    return b;                      // the new size your text should be
}
Also, you said that it needs to change depending on whether the size is over a certain amount.
So here's a little thing I've devised:
ApplicationType appType = Gdx.app.getType();
if (appType == ApplicationType.Android || appType == ApplicationType.iOS) {
    screenFont.setScale(yourNumber);
} else {
    screenFont.setScale(otherNum); // if it's a desktop
}
Scale by Gdx.graphics.getDensity()
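A minimal sketch of that idea (treating the density factor itself as the scale is an assumption; you may want to multiply it by a base size):

// getDensity() reports the display density relative to a 160 dpi baseline,
// so the font scales up on dense phone screens.
scoreFont.setScale(Gdx.graphics.getDensity());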
I'm playing around with creating a small voxel based project with LWJGL. Part of the project is loading small chunks of landscape around the player as they move. The loading part of this works okay, but I ran into an issue where as I walked along the +X axis, the chunks of landscape moving the same distance along the -X axis would load. This would also happen for the Z axis.
I got curious, so I tried reversing the X and Z axis rendering direction on the chunks, which seemed to fix the issue. However, I also decided to render the axes as lines to verify that everything was now drawing correctly, and generated the following image:
(I can't embed images apparently, so link: http://i.imgur.com/y5hO1Im.png)
In this image, the red, blue and green lines are drawn along the negative axes, whereas the purple, yellow and cyan lines are drawn along the positive axes. What's really weird about this is that the image is showing that the camera is in the +X and +Z range, but internally, the position vector of the camera is in the -X and -Z range. This would make sense as to why the chunks were loading on the opposite axis, as if the camera was rendering on +X but was internally at a position of -X, then the -X chunks would be loaded instead.
So I'm not sure what's going on here anymore. I'm sure there's a small setting or incorrect positive/negative that I'm missing, but I just can't seem to find anything. So I guess my question is, is the camera rendering correctly with the internal position? If so, do I need to just reverse everything that I render? If not, is there something clearly visible in the camera that is messing up the rendering?
Some snippets of relevant code, trying not to overflow the post with code blocks:
Camera.java
public class Camera {
    // Camera position
    private Vector3f position = new Vector3f(x, y, z);
    // Camera view properties
    private float pitch = 1f, yaw = 0.0f, roll = 0.0f;
    // Mouse sensitivity
    private float mouseSensitivity = 0.25f;

    // Used to change the yaw of the camera
    public void yaw(float amount) {
        this.yaw += (amount * this.mouseSensitivity);
    }

    // Used to change the pitch of the camera
    public void pitch(float amount) {
        this.pitch += (amount * this.mouseSensitivity);
    }

    // Used to change the roll of the camera
    public void roll(float amount) {
        this.roll += amount;
    }

    // Moves the camera forward relative to its current rotation (yaw)
    public void walkForward(float distance) {
        position.x -= distance * (float) Math.sin(Math.toRadians(yaw));
        position.z += distance * (float) Math.cos(Math.toRadians(yaw));
    }

    // Moves the camera backward relative to its current rotation (yaw)
    public void walkBackwards(float distance) {
        position.x += distance * (float) Math.sin(Math.toRadians(yaw));
        position.z -= distance * (float) Math.cos(Math.toRadians(yaw));
    }

    // Strafes the camera left relative to its current rotation (yaw)
    public void strafeLeft(float distance) {
        position.x -= distance * (float) Math.sin(Math.toRadians(yaw - 90));
        position.z += distance * (float) Math.cos(Math.toRadians(yaw - 90));
    }

    // Strafes the camera right relative to its current rotation (yaw)
    public void strafeRight(float distance) {
        position.x -= distance * (float) Math.sin(Math.toRadians(yaw + 90));
        position.z += distance * (float) Math.cos(Math.toRadians(yaw + 90));
    }

    // Translates and rotates the matrix so that it looks through the camera
    public void lookThrough() {
        GL11.glRotatef(pitch, 1.0f, 0.0f, 0.0f);
        GL11.glRotatef(yaw, 0.0f, 1.0f, 0.0f);
        GL11.glTranslatef(position.x, position.y, position.z);
    }
}
Main.java render code
private void render() {
    GL11.glClear(GL11.GL_COLOR_BUFFER_BIT | GL11.GL_DEPTH_BUFFER_BIT);
    GL11.glLoadIdentity();
    // Set the view matrix to the player's view
    this.player.lookThrough();
    // Render the visible chunks
    this.chunkManager.render();
    // Draw axis
    GL11.glBegin(GL11.GL_LINES);
    // X Axis
    GL11.glColor3f(1, 0, 0);
    GL11.glVertex3f(-100, 0, 0);
    GL11.glVertex3f(0, 0, 0);
    GL11.glColor3f(1, 1, 0);
    GL11.glVertex3f(0, 0, 0);
    GL11.glVertex3f(100, 0, 0);
    // Y Axis
    GL11.glColor3f(0, 1, 0);
    GL11.glVertex3f(0, -100, 0);
    GL11.glVertex3f(0, 0, 0);
    GL11.glColor3f(0, 1, 1);
    GL11.glVertex3f(0, 0, 0);
    GL11.glVertex3f(0, 100, 0);
    // Z Axis
    GL11.glColor3f(0, 0, 1);
    GL11.glVertex3f(0, 0, -100);
    GL11.glVertex3f(0, 0, 0);
    GL11.glColor3f(1, 0, 1);
    GL11.glVertex3f(0, 0, 0);
    GL11.glVertex3f(0, 0, 100);
    GL11.glEnd();
    // Render the origin
    this.origin.render();
}
chunkManager.render() just iterates through each of the loaded chunks and calls .render() on them, which in turn creates a giant solid cube that is rendered at the origin of the chunk.
More code can be provided if needed.
Replace
GL11.glTranslatef(position.x, position.y, position.z);
with
GL11.glTranslatef(-position.x, -position.y, -position.z);
Think about it: you want to translate the world by the inverse of where the camera is, so that 0,0,0 is where the camera is.
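Applied to the Camera class above, lookThrough() becomes:

// Translates and rotates the matrix so that it looks through the camera
public void lookThrough() {
    GL11.glRotatef(pitch, 1.0f, 0.0f, 0.0f);
    GL11.glRotatef(yaw, 0.0f, 1.0f, 0.0f);
    GL11.glTranslatef(-position.x, -position.y, -position.z); // inverse of the camera position
}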
Using some math, I created the following Java function: it takes a Bitmap and crops out a centered square, from which a circle is cropped out again with a black border around it.
The rest of the square should be transparent.
Additionally, there is a transparent margin on the sides so the preview is not damaged when sending the image via messengers.
The code of my function is as follows:
public static Bitmap edit_image(Bitmap src, boolean makeborder) {
    int width = src.getWidth();
    int height = src.getHeight();
    int A, R, G, B;
    int pixel;
    int middlex = width/2;
    int middley = height/2;
    int seitenlaenge, startx, starty;
    if (width > height) {
        seitenlaenge = height;
        starty = 0;
        startx = middlex - (seitenlaenge/2);
    } else {
        seitenlaenge = width;
        startx = 0;
        starty = middley - (seitenlaenge/2);
    }
    int kreisradius = seitenlaenge/2;
    int mittx = startx + kreisradius;
    int mitty = starty + kreisradius;
    int border = 2;
    int seitenabstand = 55;
    Bitmap bmOut = Bitmap.createBitmap(seitenlaenge + seitenabstand, seitenlaenge + seitenabstand, Bitmap.Config.ARGB_8888);
    bmOut.setHasAlpha(true);
    for (int x = 0; x < width; ++x) {
        for (int y = 0; y < height; ++y) {
            int distzumitte = (int) (Math.pow(mittx - x, 2) + Math.pow(mitty - y, 2)); // (Xm-Xp)^2 + (Ym-Yp)^2 = dist^2
            distzumitte = (int) Math.sqrt(distzumitte);
            pixel = src.getPixel(x, y);
            A = Color.alpha(pixel);
            R = (int) Color.red(pixel);
            G = (int) Color.green(pixel);
            B = (int) Color.blue(pixel);
            int color = Color.argb(A, R, G, B);
            int afterx = x - startx + (seitenabstand/2);
            int aftery = y - starty + (seitenabstand/2);
            if (x < startx || y < starty || afterx >= seitenlaenge + seitenabstand || aftery >= seitenlaenge + seitenabstand) { // seitenrand
                continue;
            } else if (distzumitte > kreisradius) {
                color = 0x00FFFFFF;
            } else if (distzumitte > kreisradius - border && makeborder) { // border
                color = Color.argb(A, 0, 0, 0);
            }
            bmOut.setPixel(afterx, aftery, color);
        }
    }
    return bmOut;
}
This function works fine, but there are some problems occurring that I haven't been able to resolve yet:
The quality of the image is decreased significantly
The border is not really round, but appears to be flat at the edges of the image (on some devices?!)
I'd appreciate any help regarding these problems. I have to admit that I'm not the best at math, and there is probably a better formula to create the border.
Your source code is hard to read, since the variable names are a mix of German and English. Additionally, you don't say which image library you use, so we don't know exactly where the classes Bitmap and Color come from.
Anyway, it is very obvious that you are operating only on a Bitmap. A bitmap means the whole image is stored in RAM pixel by pixel; there is no lossy compression. I don't see anything in your source code that could affect the quality of the image.
It is very likely that the answer lies in the code you don't show us. Additionally, what you describe (both of the problems) sounds like very typical low-quality JPEG compression. I am sure that somewhere after you call your function, you convert or save the image to a JPEG. Try saving to BMP, TIFF, or PNG at that point and watch the error disappear magically. Maybe you can also set the quality level of the JPEG somewhere to avoid it.
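If Bitmap is Android's android.graphics.Bitmap, a lossless save would look roughly like this (the file path is illustrative):

// PNG is lossless; the quality argument is ignored for this format.
FileOutputStream out = new FileOutputStream("result.png");
bmOut.compress(Bitmap.CompressFormat.PNG, 100, out);
out.close();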
To make it easier for others to (maybe) also find a good answer, please allow me to translate your code into English:
public static Bitmap edit_image(Bitmap src, boolean makeborder) {
    int width = src.getWidth();
    int height = src.getHeight();
    int A, R, G, B;
    int pixel;
    int middlex = width/2;
    int middley = height/2;
    int sideLength, startx, starty;
    if (width > height) {
        sideLength = height;
        starty = 0;
        startx = middlex - (sideLength/2);
    } else {
        sideLength = width;
        startx = 0;
        starty = middley - (sideLength/2);
    }
    int circleRadius = sideLength/2;
    int middleX = startx + circleRadius;
    int middleY = starty + circleRadius;
    int border = 2;
    int sideDistance = 55;
    Bitmap bmOut = Bitmap.createBitmap(sideLength + sideDistance, sideLength + sideDistance, Bitmap.Config.ARGB_8888);
    bmOut.setHasAlpha(true);
    for (int x = 0; x < width; ++x) {
        for (int y = 0; y < height; ++y) {
            int distanceToMiddle = (int) (Math.pow(middleX - x, 2) + Math.pow(middleY - y, 2)); // (Xm-Xp)^2 + (Ym-Yp)^2 = dist^2
            distanceToMiddle = (int) Math.sqrt(distanceToMiddle);
            pixel = src.getPixel(x, y);
            A = Color.alpha(pixel);
            R = (int) Color.red(pixel);
            G = (int) Color.green(pixel);
            B = (int) Color.blue(pixel);
            int color = Color.argb(A, R, G, B);
            int afterx = x - startx + (sideDistance/2);
            int aftery = y - starty + (sideDistance/2);
            if (x < startx || y < starty || afterx >= sideLength + sideDistance || aftery >= sideLength + sideDistance) { // margin
                continue;
            } else if (distanceToMiddle > circleRadius) {
                color = 0x00FFFFFF;
            } else if (distanceToMiddle > circleRadius - border && makeborder) { // border
                color = Color.argb(A, 0, 0, 0);
            }
            bmOut.setPixel(afterx, aftery, color);
        }
    }
    return bmOut;
}
I think that you need to check PorterDuffXfermode.
You will find some technical information about image compositing modes HERE.
There is a good example of making a bitmap with rounded edges HERE. You just need to tweak the source code a bit and you're ready to go...
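As a rough sketch of the compositing approach (assuming src is already cropped to a square with side length size):

Bitmap output = Bitmap.createBitmap(size, size, Bitmap.Config.ARGB_8888);
Canvas canvas = new Canvas(output);
Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG);
canvas.drawCircle(size / 2f, size / 2f, size / 2f, paint);        // destination: an antialiased circle mask
paint.setXfermode(new PorterDuffXfermode(PorterDuff.Mode.SRC_IN));
canvas.drawBitmap(src, 0, 0, paint);                              // source kept only where the mask is opaque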
Hope it will help.
Regarding the quality, I can't see anything wrong with your method. Running the code with Java Swing, no quality is lost. The only problem is that the image has aliased edges.
The aliasing problem will tend to disappear as the screen resolution increases and is more noticeable at lower resolutions. This might explain why you see it only on some devices. The same problem applies to your border, but in that case it is more noticeable since the color is solid black.
Your algorithm defines a square area of the original image. To find the square, it starts from the image's center and expands to either the width or the height of the image, whichever is smaller. I refer to this area as the square.
The aliasing is caused by your code that sets the colors (I am using pseudo-code):
if ( outOfSquare() ) {
    continue;                       // case 1: works, but you depend on the new image's default pixel value, i.e. transparent black
} else if ( insideSquare() && !insideCircle() ) {
    color = 0x00FFFFFF;             // case 2: transparent white <- redundant
} else if ( insideBorder() ) {
    color = Color.argb(A, 0, 0, 0); // case 3: black, using the transparency of the original image
} else {                            // inside the inner circle
    // case 4: leave the image color
}
Some notes about the code:
Case 1 depends upon the default pixel value of the new image, i.e. transparent black. It works, but it is better to set it explicitly.
Case 2 is redundant. Handle it in the same way you handle case 1; we are only interested in what happens inside the circle.
Case 3 (when you draw the border) is unclear about what it expects. Using the alpha of the original image has the potential of messing up your new image if the original alpha happens to vary along the circle's edges. So this is clearly wrong and, depending on the image, can potentially be another cause of your problems.
Case 4 is ok.
Now at your circle's periphery the following color transitions take place:
If border is not used: full transparency -> full image color (cases 2 and 4 in the pseudocode)
If border is used: full transparency -> full black -> full image color (cases 2, 3 and 4)
To achieve a better quality at the edges you need to introduce some intermediate states that would make the transitions smoother (the new transitions are shown in italics):
Border is not used: full transparency -> partial transparency with image color -> full image color
Border is used: full transparency -> partial transparency of Black color -> full Black color -> partial transparency of Black color + Image color (i.e. blending) -> Full image color
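As a sketch of such a transition for the no-border case, using the translated variable names from above and assuming a one-pixel fade band at the rim (this replaces the hard transparent/opaque split inside the pixel loop):

// Fade alpha linearly over a one-pixel band at the circle's edge.
float dist = (float) Math.sqrt(Math.pow(middleX - x, 2) + Math.pow(middleY - y, 2));
float coverage = Math.max(0f, Math.min(1f, circleRadius - dist)); // 0 outside, 1 well inside
color = Color.argb((int) (coverage * A), R, G, B);                // partial transparency at the edge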
I hope that helps.