I have a big problem. I am trying to create a neural network and train it with the backpropagation algorithm. I found this tutorial here http://mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/ and tried to recreate it in Java. When I use the training data he uses, I get the same results as him.
Without backpropagation my TotalError is nearly the same as his, and when I run the backpropagation 10,000 times like him, I get nearly the same error. But he uses 2 input neurons, 2 hidden neurons and 2 output neurons, while I'd like to use this neural network for OCR, so I definitely need more neurons. But if I use, for example, 49 input neurons, 49 hidden neurons and 2 output neurons, it takes very long to change the weights enough to get a small error (I believe it takes forever...). I have a learningRate of 0.5. In the constructor of my network I generate the neurons and give them the same training data as in the tutorial, and for testing it with more neurons I gave them random weights, inputs and targets. So can't I use this for many neurons, does it just take very long, or is something wrong with my code? Should I increase the learning rate, the bias or the start weights?
Hopefully you can help me.
package de.Marcel.NeuralNetwork;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Random;
public class Network {
private ArrayList<Neuron> inputUnit, hiddenUnit, outputUnit;
private double[] inHiWeigth, hiOutWeigth;
private double hiddenBias, outputBias;
private double learningRate;
public Network(double learningRate) {
this.inputUnit = new ArrayList<Neuron>();
this.hiddenUnit = new ArrayList<Neuron>();
this.outputUnit = new ArrayList<Neuron>();
this.learningRate = learningRate;
generateNeurons(2,2,2);
calculateTotalNetInputForHiddenUnit();
calculateTotalNetInputForOutputUnit();
}
public double calcuteLateTotalError () {
double e = 0;
for(Neuron n : outputUnit) {
e += 0.5 * Math.pow(Math.max(n.getTarget(), n.getOutput()) - Math.min(n.getTarget(), n.getOutput()), 2.0);
}
return e;
}
private void generateNeurons(int input, int hidden, int output) {
// generate inputNeurons
for (int i = 0; i < input; i++) {
Neuron neuron = new Neuron();
// for testing give each neuron an input
if(i == 0) {
neuron.setInput(0.05d);
} else if(i == 1) {
neuron.setOutput(0.10d);
}
inputUnit.add(neuron);
}
// generate hiddenNeurons
for (int i = 0; i < hidden; i++) {
Neuron neuron = new Neuron();
hiddenUnit.add(neuron);
}
// generate outputNeurons
for (int i = 0; i < output; i++) {
Neuron neuron = new Neuron();
if(i == 0) {
neuron.setTarget(0.01d);
} else if(i == 1) {
neuron.setTarget(0.99d);
}
outputUnit.add(neuron);
}
// generate Bias
hiddenBias = 0.35;
outputBias = 0.6;
// generate connections
double startWeigth = 0.15;
// generate inHiWeigths
inHiWeigth = new double[inputUnit.size() * hiddenUnit.size()];
for (int i = 0; i < inputUnit.size() * hiddenUnit.size(); i += hiddenUnit.size()) {
for (int x = 0; x < hiddenUnit.size(); x++) {
int z = i + x;
inHiWeigth[z] = round(startWeigth, 2, BigDecimal.ROUND_HALF_UP);
startWeigth += 0.05;
}
}
// generate hiOutWeigths
hiOutWeigth = new double[hiddenUnit.size() * outputUnit.size()];
startWeigth += 0.05;
for (int i = 0; i < hiddenUnit.size() * outputUnit.size(); i += outputUnit.size()) {
for (int x = 0; x < outputUnit.size(); x++) {
int z = i + x;
hiOutWeigth[z] = round(startWeigth, 2, BigDecimal.ROUND_HALF_UP);
startWeigth += 0.05;
}
}
}
private double round(double unrounded, int precision, int roundingMode)
{
BigDecimal bd = new BigDecimal(unrounded);
BigDecimal rounded = bd.setScale(precision, roundingMode);
return rounded.doubleValue();
}
private void calculateTotalNetInputForHiddenUnit() {
// calculate totalnetinput for each hidden neuron
for (int s = 0; s < hiddenUnit.size(); s++) {
double net = 0;
int x = (inHiWeigth.length / inputUnit.size());
// calculate toAdd
for (int i = 0; i < x; i++) {
int v = i + s * x;
double weigth = inHiWeigth[v];
double toAdd = weigth * inputUnit.get(i).getInput();
net += toAdd;
}
// add bias
net += hiddenBias * 1;
net = net *-1;
double output = (1.0 / (1.0 + (double)Math.exp(net)));
hiddenUnit.get(s).setOutput(output);
}
}
private void calculateTotalNetInputForOutputUnit() {
// calculate totalnetinput for each hidden neuron
for (int s = 0; s < outputUnit.size(); s++) {
double net = 0;
int x = (hiOutWeigth.length / hiddenUnit.size());
// calculate toAdd
for (int i = 0; i < x; i++) {
int v = i + s * x;
double weigth = hiOutWeigth[v];
double outputOfH = hiddenUnit.get(s).getOutput();
double toAdd = weigth * outputOfH;
net += toAdd;
}
// add bias
net += outputBias * 1;
net = net *-1;
double output = (double) (1.0 / (1.0 + Math.exp(net)));
outputUnit.get(s).setOutput(output);
}
}
private void backPropagate() {
// calculate ouputNeuron weigthChanges
double[] oldWeigthsHiOut = hiOutWeigth;
double[] newWeights = new double[hiOutWeigth.length];
for (int i = 0; i < hiddenUnit.size(); i += 1) {
double together = 0;
double[] newOuts = new double[hiddenUnit.size()];
for (int x = 0; x < outputUnit.size(); x++) {
int z = x * hiddenUnit.size() + i;
double weigth = oldWeigthsHiOut[z];
double target = outputUnit.get(x).getTarget();
double output = outputUnit.get(x).getOutput();
double totalErrorChangeRespectOutput = -(target - output);
double partialDerivativeLogisticFunction = output * (1 - output);
double totalNetInputChangeWithRespect = hiddenUnit.get(x).getOutput();
double puttedAllTogether = totalErrorChangeRespectOutput * partialDerivativeLogisticFunction
* totalNetInputChangeWithRespect;
double weigthChange = weigth - learningRate * puttedAllTogether;
// set new weigth
newWeights[z] = weigthChange;
together += (totalErrorChangeRespectOutput * partialDerivativeLogisticFunction * weigth);
double out = hiddenUnit.get(x).getOutput();
newOuts[x] = out * (1.0 - out);
}
for (int t = 0; t < newOuts.length; t++) {
inHiWeigth[t + i] = (double) (inHiWeigth[t + i] - learningRate * (newOuts[t] * together * inputUnit.get(t).getInput()));
}
hiOutWeigth = newWeights;
}
}
}
And my Neuron Class:
package de.Marcel.NeuralNetwork;
public class Neuron {
private double input, output;
private double target;
public Neuron () {
}
public void setTarget(double target) {
this.target = target;
}
public void setInput (double input) {
this.input = input;
}
public void setOutput(double output) {
this.output = output;
}
public double getInput() {
return input;
}
public double getOutput() {
return output;
}
public double getTarget() {
return target;
}
}
Think about it: you have 10,000 propagations through 49->49->2 neurons. Between the input layer and the hidden layer alone, you have 49 * 49 links to propagate through, so parts of your code are being executed about 24 million times (10,000 * 49 * 49). That is going to take time. You could try 100 propagations and see how long it takes, just to give you an idea.
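For instance, a rough timing sketch (assuming backPropagate() is made public for the test, and that generateNeurons() is called with the larger layer sizes you actually want to measure):
Network network = new Network(0.5);
long start = System.nanoTime();
for (int i = 0; i < 100; i++) {
    network.backPropagate(); // currently private; would need to be public or called from inside Network
}
long elapsedMs = (System.nanoTime() - start) / 1000000;
// 10,000 iterations should take roughly 100 times as long as 100 iterations
System.out.println("100 iterations: " + elapsedMs + " ms, estimated 10,000 iterations: " + (elapsedMs * 100) + " ms");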
There are a few things that can be done to increase performance, like using a plain array instead of an ArrayList, but this is a better topic for the Code Review site. Also, don't expect this to give drastic improvements.
Your backpropagation code has a complexity of O(h*o + h^2) per call, and you call it 10,000 times, where h is the number of hidden neurons and o is the number of output neurons. Here's why.
You have a loop that executes for all of your hidden neurons...
for (int i = 0; i < hiddenUnit.size(); i += 1) {
... containing another loop that executes for all the output neurons...
for (int x = 0; x < outputUnit.size(); x++) {
... and an additional inner loop that executes again for all the hidden neurons...
double[] newOuts = new double[hiddenUnit.size()];
for (int t = 0; t < newOuts.length; t++) {
... and you execute all of that ten thousand times. Add on top of this O(i + h + o) [initial object creation] + O(i*h + o*h) [initial weights] + O(h*i) [calculate net inputs] + O(h*o) [calculate net outputs].
No wonder it's taking forever; your code is littered with nested loops. If you want it to go faster, factor these out - for example, combine object creation and initialization - or reduce the number of neurons. But significantly cutting the number of back propagation calls is the best way to make this run faster.
I can't seem to find what's wrong with my neural net, despite verifying my net based on this example, which suggests my backprop and forward prop are working fine. However, after training on XOR my net returns around 0.5 for the output regardless of the input. In other words, the net seems to be minimizing the error as best it can without seeing any correlation between the input and the output. Since a single iteration of backpropagation seems to be working fine, my instinct suggests the problem lies somewhere in the iterations that follow. However, there isn't any obvious problem that would cause this, which leaves me quite stumped.
I've looked at other threads where similar problems have arisen, but it seems that most of the time their error is either extremely specific to the way they set up their net, or their parameters such as the learning rate or the number of epochs are really off. Is anyone familiar with a case like this?
public class Net
{
int[] sizes;
double LEARNING_RATE;
double[][][] weights;
double[][] bias;
Random rand = new Random(); //53489085
public Net(int[] sizes_, double LEARNING_RATE_)
{
LEARNING_RATE = LEARNING_RATE_;
sizes = sizes_;
int numInputs = sizes[0];
double range = 1.0 / Math.sqrt(numInputs);
bias = new double[sizes.length - 1][];
weights = new double[sizes.length - 1][][];
for(int w_layer = 0; w_layer < weights.length; w_layer++)
{
bias[w_layer] = new double[sizes[w_layer+1]];
weights[w_layer] = new double[sizes[w_layer+1]][sizes[w_layer]];
for(int j = 0; j < weights[w_layer].length; j++)
{
bias[w_layer][j] = 2*range*rand.nextDouble() - range;
for(int i = 0; i < weights[w_layer][0].length; i++)
{
weights[w_layer][j][i] = 2*range*rand.nextDouble() - range;
}
}
}
}
public double[] evaluate(double[] image_vector)
{
return forwardPass(image_vector)[sizes.length-1];
}
public double totalError(double[][] expec, double[][] actual)
{
double sum = 0;
for(int i = 0; i < expec.length; i++)
{
sum += error(expec[i], evaluate(actual[i]));
}
return sum / expec.length;
}
private double error(double[] expec, double[] actual)
{
double sum = 0;
for(int i = 0; i < expec.length; i++)
{
double del = expec[i] - actual[i];
sum += 0.5 * del * del;
}
return sum;
}
public void backpropagate(double[][] image_vector, double[][] outputs)
{
double[][][] deltaWeights = new double[weights.length][][];
double[][] deltaBias = new double[weights.length][];
for(int w = 0; w < weights.length; w++)
{
deltaBias[w] = new double[bias[w].length];
deltaWeights[w] = new double[weights[w].length][];
for(int j = 0; j < weights[w].length; j++)
{
deltaWeights[w][j] = new double[weights[w][j].length];
}
}
for(int batch = 0; batch < image_vector.length; batch++)
{
double[][] neuronVals = forwardPass(image_vector[batch]);
/* OUTPUT DELTAS */
int w_layer = weights.length-1;
double[] deltas = new double[weights[w_layer].length];
for(int j = 0; j < weights[w_layer].length; j++)
{
double actual = neuronVals[w_layer + 1][j];
double expec = outputs[batch][j];
double deltaErr = actual - expec;
double deltaSig = actual * (1 - actual);
double delta = deltaErr * deltaSig;
deltas[j] = delta;
deltaBias[w_layer][j] += delta;
for(int i = 0; i < weights[w_layer][0].length; i++)
{
deltaWeights[w_layer][j][i] += delta * neuronVals[w_layer][i];
}
}
w_layer--;
/* REST OF THE DELTAS */
while(w_layer >= 0)
{
double[] nextDeltas = new double[weights[w_layer].length];
for(int j = 0; j < weights[w_layer].length; j++)
{
double outNeur = neuronVals[w_layer+1][j];
double deltaSig = outNeur * (1 - outNeur);
double sum = 0;
for(int i = 0; i < weights[w_layer+1].length; i++)
{
sum += weights[w_layer+1][i][j] * deltas[i];
}
double delta = sum * deltaSig;
nextDeltas[j] = delta;
deltaBias[w_layer][j] += delta;
for(int i = 0; i < weights[w_layer][0].length; i++)
{
deltaWeights[w_layer][j][i] += delta * neuronVals[w_layer][i];
}
}
deltas = nextDeltas;
w_layer--;
}
}
for(int w_layer = 0; w_layer < weights.length; w_layer++)
{
for(int j = 0; j < weights[w_layer].length; j++)
{
deltaBias[w_layer][j] /= (double) image_vector.length;
bias[w_layer][j] -= LEARNING_RATE * deltaBias[w_layer][j];
for(int i = 0; i < weights[w_layer][j].length; i++)
{
deltaWeights[w_layer][j][i] /= (double) image_vector.length; // average of batches
weights[w_layer][j][i] -= LEARNING_RATE * deltaWeights[w_layer][j][i];
}
}
}
}
public double[][] forwardPass(double[] image_vector)
{
double[][] outputs = new double[sizes.length][];
double[] inputs = image_vector;
for(int w = 0; w < weights.length; w++)
{
outputs[w] = inputs;
double[] output = new double[weights[w].length];
for(int j = 0; j < weights[w].length; j++)
{
output[j] = bias[w][j];
for(int i = 0; i < weights[w][j].length; i++)
{
output[j] += weights[w][j][i] * inputs[i];
}
output[j] = sigmoid(output[j]);
}
inputs = output;
}
outputs[outputs.length-1] = inputs.clone();
return outputs;
}
static public double sigmoid(double val)
{
return 1.0 / (1.0 + Math.exp(-val));
}
}
And my XOR class looks like this. It's very unlikely that the error lies in this part given its simplicity, but I figured it couldn't hurt to post it just in case I have some fundamental misunderstanding of how XOR works. My net is set up to take examples in batches, but as you can see below, for this particular example I send it batches of one, effectively not using batches.
public class SingleLayer {
static int numEpochs = 10000;
static double LEARNING_RATE = 0.001;
static int[] sizes = new int[] {2, 2, 1};
public static void main(String[] args)
{
System.out.println("Initializing randomly generate neural net...");
Net n = new Net(sizes, LEARNING_RATE);
System.out.println("Complete!");
System.out.println("Loading dataset...");
double[][] inputs = new double[4][2];
double[][] outputs = new double[4][1];
inputs[0] = new double[] {1, 1};
outputs[0] = new double[] {0};
inputs[1] = new double[] {1, 0};
outputs[1] = new double[] {1};
inputs[2] = new double[] {0, 1};
outputs[2] = new double[] {1};
inputs[3] = new double[] {0, 0};
outputs[3] = new double[] {0};
System.out.println("Complete!");
System.out.println("STARTING ERROR: " + n.totalError(outputs, inputs));
for(int epoch = 0; epoch < numEpochs; epoch++)
{
double[][] in = new double[1][2];
double[][] out = new double[1][1];
int num = (int)(Math.random()*inputs.length);
in[0] = inputs[num];
out[0] = outputs[num];
n.backpropagate(inputs, outputs);
System.out.println("ERROR: " + n.totalError(out, in));
}
System.out.println("Prediction After Training: " + n.evaluate(inputs[0])[0] + " Expected: " + outputs[0][0]);
System.out.println("Prediction After Training: " + n.evaluate(inputs[1])[0] + " Expected: " + outputs[1][0]);
System.out.println("Prediction After Training: " + n.evaluate(inputs[2])[0] + " Expected: " + outputs[2][0]);
System.out.println("Prediction After Training: " + n.evaluate(inputs[3])[0] + " Expected: " + outputs[3][0]);
}
}
Can anyone provide some insight as to what may be wrong? My parameters are pretty well defined and I've followed all the suggestions for how the weights should be initialized and what the learning rate should be etc. Thanks!
You're only presenting the first 3 inputs to your neural network, because the following line is wrong:
int num = (int)(Math.random() * 3);
change that to
int num = (int)(Math.random() * inputs.length);
to use all 4 possible inputs.
I figured it out. I wasn't running enough epochs. That seems a little silly to me but this visualization revealed to me that the net lingers on answers ~0.5 for a long time before reducing the error to less than 0.00001.
I'm trying to implement a simple neural network.
I know there are a lot of libraries already available out there; that is not the point.
My network has only 3 layers:
one input layer
one hidden layer
one output layer
The output layer has 8 neurons, each representing a different class.
I understand how to implement the feedforward algorithm, but I'm really struggling with the backpropagation one.
Here is what I've come up with so far:
private void backPropagation(List<List<Input>> trainingData)
{
List<Input> trainingSet = new ArrayList<Input>();
for (int row = 0; row < trainingData.size(); row++) {
trainingSet = trainingData.get(row);
//we start by getting the output of the network
List<Double> outputs = feedFoward(trainingSet);
//Im using the Iris dataset, so here the desiredOutput is
//the species where
// 1 : setosa
// 2 : versicolor
// 3 : virginica
double desiredOutput = getDesiredOutputFromTrainingSet(trainingSet);
//We are getting the output neuron that fired the highest result
//like if we have
//Ouput layer :
//Neuron 1 --> 0.001221513
//Neuron 2 --> 0.990516510
//Neuron 3 --> 0.452221000
//so the network predicted that the trainingData correspond to (2) versicolor
double highestOutput = Collections.max(outputs);
//What our neuron should aim for
double target = 0;
List<Double> deltaOutputLayer = new ArrayList<Double>();
List<List<Double>> newWeightsOutputLayer = new ArrayList<List<Double>>();
for (int j = 0; j < outputs.size(); j++) {
double out = outputs.get(j);
//Important to do j + 1 because the species classes start at 1 (1 : setosa, 2: versicolor, 3:virginica)
if(out == highestOutput && (j + 1) == desiredOutput)
target = 0.99; //1
else
target = 0.01; //0
//chain rule
double delta = (out - target) * LogisticFonction.sigmoidPrime(out);
deltaOutputLayer.add(delta);
//get the new weigth value from delta and neta
List<Double> newWeights = new ArrayList<Double>();
for (int weightIndex = 0; weightIndex < _outputLayer.get(j).get_weigths().size(); weightIndex++) {
double gradient = delta * _outputsAfterActivationHiddenLayer.get(weightIndex);
double newWeight = _outputLayer.get(j).get_weigths().get(weightIndex) - (_learningRate * gradient);
newWeights.add(newWeight);
}
newWeightsOutputLayer.add(newWeights);
}
//hidden layer
double totalError = 0;
for (int i = 0; i < _neuronsHiddenLayer.size(); i++) {
for (int j = 0; j < deltaOutputLayer.size(); j++) {
double wi = _outputLayer.get(j).get_weigths().get(i);
double delta = deltaOutputLayer.get(j);
double partialError = wi * delta;
totalError += partialError;
}
double z = _outputsAfterActivationHiddenLayer.get(i);
double errorNeuron = LogisticFonction.sigmoidPrime(z);
List<Double> newWeightsHiddenLayer = new ArrayList<Double>();
for (int k = 0; k < _neuronsHiddenLayer.get(i).get_weigths().size(); k++) {
double in = _neuronsHiddenLayer.get(i).get_inputs().get(k);
double gradient = totalError * errorNeuron * in;
double oldWeigth = _neuronsHiddenLayer.get(i).get_weigths().get(k);
double newWeigth = oldWeigth - (_learningRate * gradient);
_neuronsHiddenLayer.get(i).get_weigths().set(k, newWeigth);
newWeightsHiddenLayer.add(newWeigth);
}
}
//then update the weigth of the output layer with the new values.
for (int i = 0; i < newWeightsOutputLayer.size(); i++) {
List<Double> newWeigths = newWeightsOutputLayer.get(i);
_outputLayer.get(i).set_weigths(newWeigths);
}
}
}
I've tried testing with the Iris dataset: https://en.wikipedia.org/wiki/Iris_flower_data_set
but my results are very inconsistent, leading me to believe there is a bug in my backpropagation algorithm.
If anyone can spot a major flaw, please tell me!
Thanks a lot.
In this part of the code:
if(out == highestOutput && (j + 1) == desiredOutput)
target = 0.99; //1
else
target = 0.01; //0
The target output of the neuron is 0.99 only when the condition (out == highestOutput && (j + 1) == desiredOutput) holds. That means you only expect the neuron's output to be 0.99 when the feedforward pass picks the same neuron as the training example. This is incorrect.
The condition in that part of the code should be only (j + 1) == desiredOutput. Remove the out == highestOutput condition. The target output should be 0.99 for the desiredOutput neuron whether or not the feedforward pass results in that neuron. So this is the corrected code:
if((j + 1) == desiredOutput)
target = 0.99; //1
else
target = 0.01; //0
I need to find the mean, median, mode, and range from an input file.
[the input file has the numbers {60, 75, 53, 49, 92, 71}]
I don't know how to print the result of the range calculation or how to calculate the mode.
It's pretty bad, I'm very new to Java.
It would be great if anyone could help me with it.
import java.io.*;
import java.util.*;
public class grades {
public static double avg(double[] num) {
double total = 0;
int j = 0;
for (; j < num.length; j++) {
total += num[j];
}
return (total / j);
}
public double getRange(double[] numberList) {
double initMin = numberList[0];
double initMax = numberList[0];
for (int i = 1; i <= numberList.length; i++) {
if (numberList[i] < initMin) initMin = numberList[i];
if (numberList[i] > initMax) initMax = numberList[i];
double range = initMax - initMin;
}
return range;
}
public static void main(String[] args) throws IOException {
double[] num = new double[12];
File inFile = new File("data.txt");
Scanner in = new Scanner(inFile);
for (int i = 0; i < num.length && in.hasNext(); i++) {
num[i] = in.nextDouble();
// System.out.println(num[i]);
}
double avg = grades.avg(num);
System.out.println("Arithmetic Mean = " + avg);
System.out.printf("Median = %.2f%n", grades.getMedian(num));
System.out.println("Range = " + range);
}
public static double getMedian(double[] num) {
int pos = (int) num.length / 2;
return num[pos];
}
}
I don't know how to print the result of the range calculation or how to calculate the mode.
You have already written a function to calculate the Range. Here is how you can print the Range.
System.out.println("Range = " + getRange(num));
Here is a quick code snippet to calculate the Mode:
public static double calculateMode(final double[] numberList) {
    /* Count how many times each value occurs */
    Map<Double, Integer> counts = new HashMap<Double, Integer>();
    double mode = numberList[0];
    int max = 0;
    for (int i = 0; i < numberList.length; i++) {
        double value = numberList[i];
        /* Update count for this value */
        int count = counts.containsKey(value) ? counts.get(value) + 1 : 1;
        counts.put(value, count);
        /* Check whether this value is the new mode */
        if (count > max) {
            max = count;
            mode = value;
        }
    }
    /* Return result */
    return mode;
}
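It can then be printed the same way as the range:
System.out.println("Mode = " + calculateMode(num));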
Try sorting the elements of the array first. It will give the following result:
[49, 53, 60, 71, 75, 92]
Suppose you store the sorted values in an array A:
double[] A = num.clone();
Arrays.sort(A);
int arrLength = A.length;
double sum = 0;
for (int i = 0; i < arrLength; i++) {
    sum = sum + A[i];
}
double mean = sum / arrLength;
double median = A[arrLength / 2];
I think you didn't sort the elements before finding the median.
Do the same thing to calculate the range; it will be easier, I feel.
I have Matlab code for the bat algorithm and I wrote a Java version of it.
The bat algorithm is a simple optimization algorithm for finding the minimum of any function.
Here are the Matlab code and my Java version of it.
My Java version can't find the optimum result the way the Matlab version does,
and I can't find my mistake in converting the code from Matlab to Java.
Can anyone help me find my mistake?
import java.util.Random;
public class Bat
{
private int n;
private float A, r;
private float Qmin, Qmax;
private int d;
private int NofGen;
private float fmin;
private int fminIndex;
private float Fnew;
private int loopCounter;
private float Q[], V[][], Sol[][], UL_bound[][], fitness[], S[][], Best[];
private Random myRand;
public Bat(
int NBats,
float loudness,
float pulseRate,
float minFreq,
float maxFreq,
int NofGeneration,
int dimension
)
{
n = NBats;
A = loudness;
r = pulseRate;
Qmin = minFreq;
Qmax = maxFreq;
NofGen = NofGeneration;
d = dimension;
S = new float[n][d];
Best = new float[d];
UL_bound = new float[2][d];
//default bounds
for(int i = 0 ; i < d ; i++)
{
UL_bound[0][i] = -10000;
UL_bound[1][i] = 10000;
}
loopCounter = 0;
myRand = new Random();
Q = new float[n];
for(int i = 0 ; i < n ; i++)
Q[i] = 0;
V = new float[n][d];
for(int i = 0 ; i < n ; i++)
for(int j = 0 ; j < d ; j++)
V[i][j] = 0;
}
public void intial()
{
Sol = new float[n][d];
for(int i = 0 ; i < n ; i++)
for(int j = 0 ; j < d ; j++)
{
float t = myRand.nextFloat();
//(upper -lower)*rand + lower
Sol[i][j] = t * (UL_bound[1][j] - UL_bound[0][j]) + UL_bound[0][j];
}
fitness = new float[n];
for(int i = 0 ; i < n ; i++)
fitness[i] = function(Sol[i]);
//finding fmin
fmin = fitness[0];
fminIndex = 0;
for(int i = 0 ; i < n ; i++)
{
if (fitness[i] < fmin)
{
fmin = fitness[i];
fminIndex = i;
}
}
//setting best
for(int j = 0 ; j < d ; j++)
Best[j] = Sol[fminIndex][j];
}
public void start()
{
while(loopCounter < NofGen)
{
for(int i = 0 ; i < n ; i++)
{
Q[i] = Qmin + (Qmin - Qmax)* myRand.nextFloat();
for(int j = 0 ; j < d ; j++)
V[i][j] = V[i][j] + (Sol[i][j]-Best[j])*Q[i];
for(int j = 0 ; j < d ; j++)
S[i][j] = Sol[i][j] + V[i][j];
Sol[i] = simpleBounds(Sol[i]);
if(myRand.nextFloat() > r)
for(int j = 0 ; j < d ; j++)
S[i][j] = (float) (Best[j] + (.001 * myRand.nextFloat()) );
Fnew = function(S[i]);
if(Fnew <= fitness[i] && myRand.nextFloat() < A)
{
for(int j = 0 ; j < d ; j++)
Sol[i][j] = S[i][j];
fitness[i] = Fnew;
}
if(Fnew <= fmin)
{
fmin = Fnew;
for(int j = 0 ; j < d ; j++)
Best[j] = S[i][j];
}
}
loopCounter++;
}
}
public float[] simpleBounds(float p[])
{
for(int i = 0 ; i < d ; i++)
{
if(p[i] < UL_bound[0][i])
p[i] = UL_bound[0][i];
if(p[i] > UL_bound[1][i])
p[i] = UL_bound[1][i];
}
return p;
}
float function(float p[])
{
// Sphere function with fmin=0 at (0,0,...,0)
float sum = 0;
for(int i = 0 ; i < p.length ; i++)
sum = sum + p[i]*p[i];
return sum;
}
public float printResult()
{
System.out.println("After " + loopCounter + "Repeats :");
for(int i = 0 ; i < d ; i++)
System.out.print(Best[i] + ", ");
System.out.println ( "F(x) = " + fmin);
return fmin;
}
public void set_UL_Bound(int n, float L, float U)
{
if( n < d && n >= 0)
{
UL_bound[0][n] = L;
UL_bound[1][n] = U;
}
}
}
And this is the Matlab version:
function [best,fmin,N_iter]=bat_algorithm(para)
% Display help
help bat_algorithm.m
% Default parameters
if nargin<1, para=[20 1000 0.5 0.5]; end
n=para(1); % Population size, typically 10 to 40
N_gen=para(2); % Number of generations
A=para(3); % Loudness (constant or decreasing)
r=para(4); % Pulse rate (constant or decreasing)
% This frequency range determines the scalings
% You should change these values if necessary
Qmin=0; % Frequency minimum
Qmax=2; % Frequency maximum
% Iteration parameters
N_iter=0; % Total number of function evaluations
% Dimension of the search variables
d=5; % Number of dimensions
% Lower limit/bounds/ a vector
Lb=-3*ones(1,d);
% Upper limit/bounds/ a vector
Ub=6*ones(1,d);
% Initializing arrays
Q=zeros(n,1); % Frequency
v=zeros(n,d); % Velocities
% Initialize the population/solutions
for i=1:n,
Sol(i,:)=Lb+(Ub-Lb).*rand(1,d);
Fitness(i)=Fun(Sol(i,:));
end
% Find the initial best solution
[fmin,I]=min(Fitness);
best=Sol(I,:);
for t=1:N_gen,
% Loop over all bats/solutions
for i=1:n,
Q(i)=Qmin+(Qmin-Qmax)*rand;
v(i,:)=v(i,:)+(Sol(i,:)-best)*Q(i);
S(i,:)=Sol(i,:)+v(i,:);
% Apply simple bounds/limits
Sol(i,:)=simplebounds(Sol(i,:),Lb,Ub);
% Pulse rate
if rand>r
% The factor 0.001 limits the step sizes of random walks
S(i,:)=best+0.001*randn(1,d);
end
% Evaluate new solutions
Fnew=Fun(S(i,:));
% Update if the solution improves, or not too loud
if (Fnew<=Fitness(i)) & (rand<A) ,
Sol(i,:)=S(i,:);
Fitness(i)=Fnew;
end
% Update the current best solution
if Fnew<=fmin,
best=S(i,:);
fmin=Fnew;
end
end
N_iter=N_iter+n;
end
% Output/display
disp(['Number of evaluations: ',num2str(N_iter)]);
disp(['Best =',num2str(best),' fmin=',num2str(fmin)]);
% Application of simple limits/bounds
function s=simplebounds(s,Lb,Ub)
% Apply the lower bound vector
ns_tmp=s;
I=ns_tmp<Lb;
ns_tmp(I)=Lb(I);
% Apply the upper bound vector
J=ns_tmp>Ub;
ns_tmp(J)=Ub(J);
% Update this new move
s=ns_tmp;
function z=Fun(u)
% Sphere function with fmin=0 at (0,0,...,0)
z=sum(u.^2);
%%%%% ============ end ====================================
The difference between the two codes:
In the Matlab code:
S(i,:)=best+0.001*randn(1,d);
randn => a standard normal (Gaussian) distribution.
While in the Java code:
S[i][j] = (float) (Best[j] + (.001 * myRand.nextFloat()) );
java.util.Random.nextFloat() => a uniformly distributed float value between 0.0 and 1.0.
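A minimal fix sketch for the Java version: inside the loop over bats in start(), replace the uniform step with a Gaussian one so it matches Matlab's randn (the names Best, S, d, r, i and myRand are the ones from the question's code):
if (myRand.nextFloat() > r)
    for (int j = 0; j < d; j++)
        // randn equivalent: nextGaussian() draws from a standard normal distribution
        S[i][j] = (float) (Best[j] + 0.001 * myRand.nextGaussian());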
I was looking for a solution in C# and stumbled upon this. It was enough to get the job done. Here is the solution in C#, translated from the Java with variables renamed, plus an additional fitness function for finding the solution of the two equations xy = 6 and x + y = 5. Also included is finding the square root of 0.3:
using System;
namespace BatAlgorithmC
{
class Program
{
static void Main(string[] args)
{
// Mybat x = new Mybat(100, 1000, 0.5, 0.5, 5, Mybat.sphere);
// Mybat x = new Mybat(1000, 1000, 0.5, 0.5, 1, Mybat.squareRoot);
Mybat x = new Mybat(1000, 1000, 0.5, 0.5, 2, Mybat.RootOfXYEquations);
Console.WriteLine("Hit any key to continue.");
Console.ReadLine();
}
}
public class Mybat
{
/**
* #param args the command line arguments
*/
public int _numberOfBats, _generations, Qmin, Qmax, N_iter, _dimension;
public double _volume, _pulseRate, min, max, fnew, fmin;
public double[][] _lowerBound, _upperBound, _velocity, _solution, S;
public double[] _fitness, _tempSolution, _bestSolution, Q;
public Random random;
//public static void main(String[] args) {
// Mybat x = new Mybat(20,1000,0.5,0.5,5, Mybat.sphere);
//}
public static void initJagged(double[][] array, int n, int d)
{
for (int i = 0; i < n; i++) array[i] = new double[d];
}
public Mybat(
int bats,
int generations,
double loud,
double pulse,
int dimension,
Func<double[], int, double> function
)
{
//initialization of variables
_numberOfBats = bats;
_generations = generations;
_volume = loud;
_pulseRate = pulse;
_dimension = dimension;
Random random = new Random();
//plan to change later and added as parameter
min = -15;
max = 15;
fmin = 0;
//decleration for the bounds
_lowerBound = new double[1][];
_upperBound = new double[1][];
Q = new double[_numberOfBats]; // frequency
_velocity = new double[_numberOfBats][]; //velocity
initJagged(_velocity, _numberOfBats, _dimension);
initJagged(_lowerBound, 1, _dimension);
initJagged(_upperBound, 1, _dimension);
//initialize solution array
_solution = new double[_numberOfBats][];
S = new double[_numberOfBats][];
_fitness = new double[_numberOfBats]; // fitness container
_bestSolution = new double[_dimension];
_tempSolution = new double[_dimension]; //temporary holder for a row in array _solution
initJagged(_solution, _numberOfBats, _dimension);
initJagged(S, _numberOfBats, _dimension);
for (int i = 0; i < _numberOfBats; i++)
{
// for minimal coding : added initialize Q[]array with '0' as element
Q[i] = 0;
for (int x = 0; x < _dimension; x++)
{
// for minimal coding : added initialize _velocity[][] array with '0' as element
_velocity[i][x] = 0;
//find random double values from LB to UB
_solution[i][x] = (random.NextDouble()*(max - min)) + min;
_tempSolution[x] = _solution[i][x];
//Console.WriteLine("sol["+i+"]["+x+"] = "+_solution[i][x]); //test line
//Console.WriteLine(rand.nextDouble()); //test line
}
_fitness[i] = function(_tempSolution, _dimension);
//initialize best and the fmin
if (i == 0 || fmin > _fitness[i])
{
fmin = _fitness[i];
for (int x = 0; x < _dimension; x++)
{
_bestSolution[x] = _solution[i][x];
}
}
Console.WriteLine("fitness[" + i + "]" + _fitness[i]); //test
}
Console.WriteLine("fmin = " + fmin); //test
// special note to these variables (below)
// change if required for maximum effectivity
Qmin = 0;
Qmax = 2;
N_iter = 1; //number of function evaluation
// bat proper
for (int loop = 0; loop < N_iter; loop++)
{
// loop over all bats/solutions
for (int nextBat = 0; nextBat < _numberOfBats; nextBat++)
{
Q[nextBat] = Qmin + ((Qmin - Qmax)*random.NextDouble());
// loop for velocity
for (int vel = 0; vel < _dimension; vel++)
{
_velocity[nextBat][vel] = _velocity[nextBat][vel] +
((_solution[nextBat][vel] - _bestSolution[vel])*Q[nextBat]);
}
//new solutions
for (int nextDimension = 0; nextDimension < _dimension; nextDimension++)
{
S[nextBat][nextDimension] = _solution[nextBat][nextDimension] +
_velocity[nextBat][nextDimension];
}
/**
* RESERVED SPOT for the QUESTIONABLE AREA ON THE
* MATLAB CODE (i think it is not needed for the java equivalent)
*/
// pulse rate
if (random.NextDouble() > _pulseRate)
{
for (int nextDimension = 0; nextDimension < _dimension; nextDimension++)
{
S[nextBat][nextDimension] = _bestSolution[nextDimension] + (0.001*random.NextGaussian());
}
}
//putting current row of _solution to a temp array
for (int nextDimension = 0; nextDimension < _dimension; nextDimension++)
{
_tempSolution[nextDimension] = S[nextBat][nextDimension];
}
fnew = function(_tempSolution, _dimension);
// update if solution is improved, and not too loud
if ((fnew <= _fitness[nextBat]) && (random.NextDouble() < _volume))
{
for (int x = 0; x < _dimension; x++)
{
_solution[nextBat][x] = S[nextBat][x];
_fitness[nextBat] = fnew;
}
}
//update current best solution
if (fnew <= fmin)
{
for (int nextDimension = 0; nextDimension < _dimension; nextDimension++)
{
_bestSolution[nextDimension] = S[nextBat][nextDimension];
fmin = fnew;
}
}
}
}
Console.WriteLine(" ");
Console.WriteLine("new fitness");
for (int i = 0; i < _numberOfBats; i++)
{
Console.WriteLine("fitness[" + i + "]" + _fitness[i]);
}
for (int nextDimension = 0; nextDimension < _dimension; nextDimension++)
{
Console.WriteLine("best[" + nextDimension + "]" + _bestSolution[nextDimension]);
}
Console.WriteLine("Fmin = " + fmin);
}
//possible that this function is not needed in java
public void set_bounds(int x, double L, double U)
{
//double temp_Lb[x];
//double temp_Ub[x];
for (int i = 0; i < x; i++)
{
_lowerBound[0][i] = L;
_upperBound[0][i] = U;
}
}
public static double sphere(double[] value, int d)
{
// sphere function where fmin is at 0
double result = 0;
for (int i = 0; i < d; i++)
{
result += (value[i]*value[i]);
}
return result;
}
public static double squareRoot(double[] value, int d)
{
// find the square root of .3
double result = 0;
for (int i = 0; i < d; i++)
{
result += Math.Abs(.3 - (value[i]*value[i]));
}
return result;
}
public static double RootOfXYEquations(double[] value, int d)
{
// solve for x and y xy = 6 and x+y = 5
double result = 0;
result += Math.Abs(5 - (value[0] + value[1]));
result += Math.Abs(6 - (value[0] * value[1]));
return result;
}
}
static class MathExtensiionns
{
public static double NextGaussian(this Random rand)
{
double u1 = rand.NextDouble(); //these are uniform(0,1) random doubles
double u2 = rand.NextDouble();
double mean = 0, stdDev = 1;
double randStdNormal = Math.Sqrt(-2.0 * Math.Log(u1)) *
Math.Sin(2.0 * Math.PI * u2); //random normal(0,1)
double randNormal =
mean + stdDev * randStdNormal; //random normal(mean,stdDev^2)
return randNormal;
}
}
}
This is my first time here at Stack Overflow, so I'll apologize beforehand if my response is a bit ambiguous or has problems. I just hope this answer helps future visitors to this thread who want to study the bat algorithm via Java.
Anyway, I did look at your code, since I am studying the bat algorithm at the moment.
I tried running it, and it does give results that are far off compared to the Matlab version.
What I noticed is that you just "literally" tried to convert the Matlab code without fully understanding each Matlab line. I wanted to point out everything you missed, but I'm feeling lazy right now, so I will just leave my version of the bat algorithm in Java here.
NOTE: I just made a running bat algorithm in Java, not an efficient, fully debugged, Matlab-equivalent bat algorithm.
import java.util.Random;
public class Mybat {
/**
* #param args the command line arguments
*/
public int n, N_gen, Qmin, Qmax, N_iter, d;
public double A,r,min,max,fnew,fmin;
public double Lb[][],Ub[][],Q[],v[][],Sol[][],S[][],fitness[],temp[],best[];
public Random random;
public static void main(String[] args) {
Mybat x = new Mybat(20,1000,0.5,0.5,5);
}
public Mybat(
int bats,
int generations,
double loud,
double pulse,
int dimension
){
//initialization of variables
n=bats;
N_gen = generations;
A = loud;
r = pulse;
d = dimension;
Random rand = new Random();
//plan to change later and added as parameter
min = -15;
max = 15;
fmin = 0;
//decleration for the bounds
Lb = new double[1][d];
Ub = new double[1][d];
Q = new double[n]; // frequency
v = new double[n][d]; //velocity
//initialize solution array
Sol = new double[n][d];
S = new double[n][d];
fitness = new double[n]; // fitness container
best =new double[d];
temp = new double[d]; //temporary holder for a row in array Sol
for(int i=0;i<n;i++){
// for minimal coding : added initialize Q[]array with '0' as element
Q[i] = 0;
for(int x=0;x<d;x++){
// for minimal coding : added initialize v[][] array with '0' as element
v[i][x] = 0;
//find random double values from LB to UB
Sol[i][x]= (rand.nextDouble()*(max - min)) + min;
temp[x] = Sol[i][x];
//System.out.println("sol["+i+"]["+x+"] = "+Sol[i][x]); //test line
//System.out.println(rand.nextDouble()); //test line
}
fitness[i] = function(temp);
//initialize best and the fmin
if(i==0 || fmin > fitness[i]){
fmin = fitness[i];
for(int x=0;x<d;x++){
best[x] = Sol[i][x];
}
}
System.out.println("fitness["+i+"]"+fitness[i]); //test
}
System.out.println("fmin = "+fmin); //test
// special note to these variables (below)
// change if required for maximum effectivity
Qmin = 0;
Qmax = 2;
N_iter = 1; //number of function evaluation
// bat proper
for(int loop=0;loop<N_iter;loop++){
// loop over all bats/solutions
for(int i=0;i<n;i++){
Q[i] = Qmin+((Qmin-Qmax)*rand.nextDouble());
// loop for velocity
for(int vel=0;vel<d;vel++){
v[i][vel] = v[i][vel]+((Sol[i][vel]-best[vel])*Q[i]);
}
//new solutions
for(int x=0;x<d;x++){
S[i][x] = Sol[i][x] + v[i][x];
}
/**
* RESERVED SPOT for the QUESTIONABLE AREA ON THE
* MATLAB CODE (i think it is not needed for the java equivalent)
*/
// pulse rate
if(rand.nextDouble()>r){
for(int x=0;x<d;x++){
S[i][x] = best[x]+(0.001*rand.nextGaussian());
}
}
//putting current row of Sol to a temp array
for(int x=0;x<d;x++){
temp[x] = S[i][x];
}
fnew = function(temp);
// update if solution is improved, and not too loud
if((fnew<=fitness[i]) && (rand.nextDouble()<A)){
for(int x=0;x<d;x++){
Sol[i][x] = S[i][x];
fitness[i] = fnew;
}
}
//update current best solution
if(fnew<=fmin){
for(int x=0;x<d;x++){
best[x] = S[i][x];
fmin = fnew;
}
}
}
}
System.out.println(" ");
System.out.println("new fitness");
for(int i=0;i<n;i++){
System.out.println("fitness["+i+"]"+fitness[i]);
}
System.out.println("Fmin = "+fmin);
}
//possible that this function is not needed in java
public void set_bounds(int x, double L, double U){
//double temp_Lb[x];
//double temp_Ub[x];
for(int i=0; i<x; i++){
Lb[0][i] = L;
Ub[0][i] = U;
}
}
public double function(double value[]){
// sphere function where fmin is at 0
double result = 0;
for(int i=0;i<d;i++){
result += (value[i]*value[i]);
}
return result;
}
}
I read an ECG byte array from a file. Now I want to detect the QRS complex in the ECG data I read.
How can I achieve this in Java?
I get the byte array from a Lifegain defibrillator (an ECG device). I draw the ECG on Android from these bytes. Now I want to detect the QRS complex (the term used for the time and voltage of one wave of a heartbeat). DATA = LeadData":"-284,-127,-122,17,-35,10,32,10,52,16,49,33,38,69,70,58,45,93,47,88,58,90,149,5,82,-12,-4,40,-34,29,-29,5,-4,-17,-13,-29,-13,-4,-9,-9,-10,-20,-15,-22,-32,-25,-23,-2,-15,-7,-13,-19,-17,-28,-27,-27,-33,-20,-16,-13,-20,-10,-22,-20,-19,-28,-15,-19,-22,-21,-9,-3,-6,-8,-6,-11,-8,-8,-5,-10,-5,-6,-9,-4,-6,3,20,3,14,7,11,10,5,11,5,10,2,10,13,14"
Regards,
shah
If the data you have is the data I think you have, you need to use one of the algorithms to detect your QRS complex.
There are a lot of algorithms out there to detect a QRS complex, one of the easiest is A Moving Average based Filtering System with its Application to Real-time QRS Detection by HC Chen and SW Chen (you can get it on http://www.cinc.org/archives/2003/pdf/585.pdf).
The stages are:
High Pass filtering
Low Pass filtering
Decision making
After the low pass stage, you can see that we now have the peaks we need to detect the QRS complex. The last stage is the decision making stage. In the article you have a formula to implement it.
We need to know when a QRS complex starts, so we need to set a threshold for this. The formula in this implementation is:
threshold = alpha * gamma * peak + (1 - alpha) * threshold
The peak is the local maximum in the window (we usually search the signal with a window of width 250), the threshold is the initial value (the first one can be the firstly found peak), alpha and gamma are randomly created, alpha is greater than 0 and smaller than 1 whereas gamma is 0.15 or 0.20. If the value of the current signal exceeds the threshold, a QRS complex is found.
Here is the source code in Java for low pass, high pass and decision making:
// High pass filter
// y1[n] = 1/M * Sum[m=0, M-1] x[n-m]
// y2[n] = x[n - (M+1)/2]
public static float[] highPass(int[] sig0, int nsamp) {
float[] highPass = new float[nsamp];
int M = 5; // M is recommended to be 5 or 7 according to the paper
float constant = (float) 1/M;
for(int i=0; i<sig0.length; i++) {
float y1 = 0;
float y2 = 0;
int y2_index = i-((M+1)/2);
if(y2_index < 0) {
y2_index = nsamp + y2_index;
}
y2 = sig0[y2_index];
float y1_sum = 0;
for(int j=i; j>i-M; j--) {
int x_index = i - (i-j);
if(x_index < 0) {
x_index = nsamp + x_index;
}
y1_sum += sig0[x_index];
}
y1 = constant * y1_sum;
highPass[i] = y2 - y1;
}
return highPass;
}
// Low pass filter: at position n, store the sum of the squares of the 30 samples in the window
public static float[] lowPass(float[] sig0, int nsamp) {
float[] lowPass = new float[nsamp];
for(int i=0; i<sig0.length; i++) {
float sum = 0;
if(i+30 < sig0.length) {
for(int j=i; j<i+30; j++) {
float current = sig0[j] * sig0[j];
sum += current;
}
}
else if(i+30 >= sig0.length) {
int over = i+30 - sig0.length;
for(int j=i; j<sig0.length; j++) {
float current = sig0[j] * sig0[j];
sum += current;
}
for(int j=0; j<over; j++) {
float current = sig0[j] * sig0[j];
sum += current;
}
}
lowPass[i] = sum;
}
return lowPass;
}
public static int[] QRS(float[] lowPass, int nsamp) {
int[] QRS = new int[nsamp];
double treshold = 0;
for(int i=0; i<200; i++) {
if(lowPass[i] > treshold) {
treshold = lowPass[i];
}
}
int frame = 250;
for(int i=0; i<lowPass.length; i+=frame) {
float max = 0;
int index = 0;
if(i + frame > lowPass.length) {
index = lowPass.length;
}
else {
index = i + frame;
}
for(int j=i; j<index; j++) {
if(lowPass[j] > max) max = lowPass[j];
}
boolean added = false;
for(int j=i; j<index; j++) {
if(lowPass[j] > treshold && !added) {
QRS[j] = 1;
added = true;
}
else {
QRS[j] = 0;
}
}
double gama = (Math.random() > 0.5) ? 0.15 : 0.20;
double alpha = 0.01 + (Math.random() * ((0.1 - 0.01)));
treshold = alpha * gama * max + (1 - alpha) * treshold;
}
return QRS;
}
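For reference, a short usage sketch showing how the three stages chain together. Here leadData is a hypothetical String holding the full comma-separated LeadData values from the device (the detector needs at least a few hundred samples, since QRS() scans the first 200 samples to seed its threshold):
String[] parts = leadData.split(",");
int[] sig0 = new int[parts.length];
for (int i = 0; i < parts.length; i++) {
    sig0[i] = Integer.parseInt(parts[i].trim());
}
float[] hp = highPass(sig0, sig0.length); // stage 1: high pass filter
float[] lp = lowPass(hp, hp.length);      // stage 2: low pass filter (squared 30-sample window)
int[] qrs = QRS(lp, lp.length);           // stage 3: decision making; qrs[j] == 1 marks a detected QRS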
Please follow the links below; I think they will help you.
http://www.cinc.org/archives/2008/pdf/0441.pdf
http://carg.site.uottawa.ca/doc/ELG6163GeoffreyGreen.pdf
http://www.eplimited.com/osea13.pdf
http://mirel.xmu.edu.cn/mirel/public/Teaching/QRSdetection.pdf
http://sourceforge.net/projects/ecgtoolkit-cs/files/ecgtoolkit-cs/ecgtoolkit-cs-2_2/