Measuring sound intensity after analyzing the spectrum? - Java

I am writing a program for a smartphone (on Android). It does the following:
Analyzes the spectrum of a sound using an FFT algorithm.
Measures the intensity of the sound at f = fo (e.g. fo = 18 kHz) from the spectrum obtained in the analysis above.
Calculates the distance from the smartphone to the sound source from this intensity.
After the FFT, I get two arrays (real and imaginary). I calculate the sound intensity at f = 18000 Hz (assume the source at 18000 Hz is unchanged, which makes it easier to measure the intensity), as follows:
The frequency at bin FFT[i] is:
if i <= N/2: i * SamplingFrequency / N
if i >= N/2: (N - i) * SamplingFrequency / N
Therefore, for frequency = 18000 Hz I choose i = 304, and
sound intensity = real_array[304] * real_array[304] + image_array[304] * image_array[304]
However, the measured intensity actually varies a lot, which makes it difficult to estimate the distance, and I have no idea how to explain this.
I would also like to ask: what unit is the intensity I measured above expressed in?
Here is my code:
a. FFT algorithm (I use a 512-point FFT)
import define.define512;
public class fft {
private static float[] W_real;
private static float[] W_img;
private static float[] input_real= new float[512];
private static float[] input_img;
//input_real1 is values from mic(smartphone)
//output is values of sound intensity
public static void FFT(float[] input_real1, float[] output)
{
for(int i =0;i<512;i++) input_real[i] = input_real1[i];
input_img = new float[512];
W_real = define512.W_IMAG;
W_img = define512.W_IMAG;
int[] W_order = define512.ORDER;
float[] output_real = new float[512], output_img = new float[512];
fftradix2(0,511);
//reorder deals with inverse bit
reorder(input_real, input_img, output_real, output_img, W_order, 512);
for(int i =0;i<512;i++)
{
output[i] = sqrt((output_real[i]*output_real[i] + output_img[i]*output_img[i]));
}
}
private static void reorder(float[] in_real,float[] in_imag, float[] out_real,float[] out_imag,int[] order,int N){
for(int i=0;i<N;i++){
out_real[i]=in_real[order[i]];
out_imag[i]=in_imag[order[i]];
}
}
//fft algorithms
private static void fftradix2(int dau,int cuoi)
{
int check = cuoi - dau;
if (check == 1)
{
input_real[dau] = input_real[dau] + input_real[cuoi];
input_img[dau] = input_img[dau] + input_img[cuoi];
input_real[cuoi] = input_real[dau] -2* input_real[cuoi];
input_img[cuoi] = input_img[dau] -2* input_img[cuoi];
}
else
{
int index = 512/(cuoi - dau + 1);
int tg = (cuoi - dau)/2;
fftradix2(dau,(dau+tg));
fftradix2((cuoi-tg),cuoi);
for(int i = dau;i<=(dau+tg);i++)
{
input_real[i] = input_real[i] + input_real[i+tg+1]*W_real[(i-dau)*index] - input_img[i+tg+1]*W_img[(i-dau)*index];
input_img[i] = input_img[i] + input_real[i+tg+1]*W_img[(i-dau)*index] + input_img[i+tg+1]*W_real[(i%(tg+1))*index];
input_real[i+tg+1] = input_real[i] -2* input_real[i+tg+1]*W_real[(i-dau)*index] +2* input_img[i+tg+1]*W_img[(i-dau)*index];
input_img[i+tg+1] = input_img[i] -2* input_real[i+tg+1]*W_img[(i-dau)*index] -2* input_img[i+tg+1]*W_real[(i-dau)*index];
}
}
}
}
b. Code that uses the mic on the smartphone
NumOverlapSample = 800;
NumNewSample = 224;
private static int Fs = 44100;
private byte recorderAudiobuffer[] = new byte [1024];
AudioRecord recorder = new AudioRecord(AudioSource.MIC, Fs, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, 4096);
//start recorder
recorder.startRecording();
timer.schedule(new task_update(), 1000, 10);
class task_update extends TimerTask
{
@Override
public void run() {
// TODO Auto-generated method stub
for(int i=0;i<NumOverlapSample;i++)
recorderAudiobuffer[i] = recorderAudiobuffer[i+NumNewSample];
int bufferRead = recorder.read(recorderAudiobuffer,NumOverlapSample,NumNewSample);
convert.decode(recorderAudiobuffer, N, input);
fft.FFT(input, output);
}
And my source: https://www.box.com/s/zuppzkicymfsuv4kb65p
Thanks for all.

At 18 kHz, microphone type, position and direction, as well as sound reflections from the nearby acoustic environment will strongly influence the sound level.
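On top of that, a single FFT frame is noisy, so the value of one bin will jump around from frame to frame. Averaging the power of the target bin over several frames and working in relative dB usually gives a much more stable reading; note that without a calibrated microphone there is no absolute unit (such as dB SPL) here, only a value proportional to the squared amplitude. A minimal sketch, assuming a 512-point FFT at 44100 Hz and the real/imaginary output arrays from the question (the helper name is hypothetical):
// Hypothetical helper: average the power of the bin nearest targetHz over several FFT frames.
static double averagePowerDb(float[][] realFrames, float[][] imagFrames, int fs, int n, double targetHz) {
    int bin = (int) Math.round(targetHz * n / fs); // 18000 * 512 / 44100 is roughly bin 209
    double sum = 0;
    for (int f = 0; f < realFrames.length; f++) {
        double re = realFrames[f][bin];
        double im = imagFrames[f][bin];
        sum += re * re + im * im;                  // power of that bin, arbitrary units
    }
    double meanPower = sum / realFrames.length;
    return 10.0 * Math.log10(meanPower + 1e-12);   // relative dB (not dB SPL)
}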


Changes required for using non-quantized tflite files in MainActivity.java

This MainActivity.java was written for quantised models and I'm trying to use an unquantised model.
After making the changes mentioned here and here to MainActivity.java, my code is:
public class MainActivity extends AppCompatActivity implements AdapterView.OnItemSelectedListener {
private static final String TAG = "MainActivity";
private Button mRun;
private ImageView mImageView;
private Bitmap mSelectedImage;
private GraphicOverlay mGraphicOverlay;
// Max width (portrait mode)
private Integer mImageMaxWidth;
// Max height (portrait mode)
private Integer mImageMaxHeight;
private final String[] mFilePaths =
new String[]{"mountain.jpg", "tennis.jpg","96580.jpg"};
/**
* Name of the model file hosted with Firebase.
*/
private static final String HOSTED_MODEL_NAME = "mobilenet_v1_224_quant";
private static final String LOCAL_MODEL_ASSET = "retrained_graph_mobilenet_1_224.tflite";
/**
* Name of the label file stored in Assets.
*/
private static final String LABEL_PATH = "labels.txt";
/**
* Number of results to show in the UI.
*/
private static final int RESULTS_TO_SHOW = 3;
/**
* Dimensions of inputs.
*/
private static final int DIM_BATCH_SIZE = 1;
private static final int DIM_PIXEL_SIZE = 3;
private static final int DIM_IMG_SIZE_X = 224;
private static final int DIM_IMG_SIZE_Y = 224;
private static final int IMAGE_MEAN = 128;
private static final float IMAGE_STD = 128.0f;
/**
* Labels corresponding to the output of the vision model.
*/
private List<String> mLabelList;
private final PriorityQueue<Map.Entry<String, Float>> sortedLabels =
new PriorityQueue<>(
RESULTS_TO_SHOW,
new Comparator<Map.Entry<String, Float>>() {
@Override
public int compare(Map.Entry<String, Float> o1, Map.Entry<String, Float>
o2) {
return (o1.getValue()).compareTo(o2.getValue());
}
});
/* Preallocated buffers for storing image data. */
private final int[] intValues = new int[DIM_IMG_SIZE_X * DIM_IMG_SIZE_Y];
/**
* An instance of the driver class to run model inference with Firebase.
*/
private FirebaseModelInterpreter mInterpreter;
/**
* Data configuration of input & output data of model.
*/
private FirebaseModelInputOutputOptions mDataOptions;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
mGraphicOverlay = findViewById(R.id.graphic_overlay);
mImageView = findViewById(R.id.image_view);
Spinner dropdown = findViewById(R.id.spinner);
List<String> items = new ArrayList<>();
for (int i = 0; i < mFilePaths.length; i++) {
items.add("Image " + (i + 1));
}
ArrayAdapter<String> adapter = new ArrayAdapter<>(this, android.R.layout
.simple_spinner_dropdown_item, items);
dropdown.setAdapter(adapter);
dropdown.setOnItemSelectedListener(this);
mLabelList = loadLabelList(this);
mRun = findViewById(R.id.button_run);
mRun.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
runModelInference();
}
});
int[] inputDims = {DIM_BATCH_SIZE, DIM_IMG_SIZE_X, DIM_IMG_SIZE_Y, DIM_PIXEL_SIZE};
int[] outputDims = {DIM_BATCH_SIZE, mLabelList.size()};
try {
mDataOptions =
new FirebaseModelInputOutputOptions.Builder()
.setInputFormat(0, FirebaseModelDataType.FLOAT32, inputDims)
.setOutputFormat(0, FirebaseModelDataType.FLOAT32, outputDims)
.build();
FirebaseModelDownloadConditions conditions = new FirebaseModelDownloadConditions
.Builder()
.requireWifi()
.build();
FirebaseLocalModelSource localModelSource =
new FirebaseLocalModelSource.Builder("asset")
.setAssetFilePath(LOCAL_MODEL_ASSET).build();
FirebaseCloudModelSource cloudSource = new FirebaseCloudModelSource.Builder
(HOSTED_MODEL_NAME)
.enableModelUpdates(true)
.setInitialDownloadConditions(conditions)
.setUpdatesDownloadConditions(conditions) // You could also specify
// different conditions
// for updates
.build();
FirebaseModelManager manager = FirebaseModelManager.getInstance();
manager.registerLocalModelSource(localModelSource);
manager.registerCloudModelSource(cloudSource);
FirebaseModelOptions modelOptions =
new FirebaseModelOptions.Builder()
.setCloudModelName(HOSTED_MODEL_NAME)
.setLocalModelName("asset")
.build();
mInterpreter = FirebaseModelInterpreter.getInstance(modelOptions);
} catch (FirebaseMLException e) {
showToast("Error while setting up the model");
e.printStackTrace();
}
}
private void runModelInference() {
if (mInterpreter == null) {
Log.e(TAG, "Image classifier has not been initialized; Skipped.");
return;
}
// Create input data.
ByteBuffer imgData = convertBitmapToByteBuffer(mSelectedImage, mSelectedImage.getWidth(),
mSelectedImage.getHeight());
try {
FirebaseModelInputs inputs = new FirebaseModelInputs.Builder().add(imgData).build();
// Here's where the magic happens!!
mInterpreter
.run(inputs, mDataOptions)
.addOnFailureListener(new OnFailureListener() {
@Override
public void onFailure(@NonNull Exception e) {
e.printStackTrace();
showToast("Error running model inference");
}
})
.continueWith(
new Continuation<FirebaseModelOutputs, List<String>>() {
@Override
public List<String> then(Task<FirebaseModelOutputs> task) {
float[][] labelProbArray = task.getResult()
.<float[][]>getOutput(0);
List<String> topLabels = getTopLabels(labelProbArray);
mGraphicOverlay.clear();
GraphicOverlay.Graphic labelGraphic = new LabelGraphic
(mGraphicOverlay, topLabels);
mGraphicOverlay.add(labelGraphic);
return topLabels;
}
});
} catch (FirebaseMLException e) {
e.printStackTrace();
showToast("Error running model inference");
}
}
/**
* Gets the top labels in the results.
*/
private synchronized List<String> getTopLabels(float[][] labelProbArray) {
for (int i = 0; i < mLabelList.size(); ++i) {
sortedLabels.add(
new AbstractMap.SimpleEntry<>(mLabelList.get(i), (labelProbArray[0][i] )));
if (sortedLabels.size() > RESULTS_TO_SHOW) {
sortedLabels.poll();
}
}
List<String> result = new ArrayList<>();
final int size = sortedLabels.size();
for (int i = 0; i < size; ++i) {
Map.Entry<String, Float> label = sortedLabels.poll();
result.add(label.getKey() + ":" + label.getValue());
}
Log.d(TAG, "labels: " + result.toString());
return result;
}
/**
* Reads label list from Assets.
*/
private List<String> loadLabelList(Activity activity) {
List<String> labelList = new ArrayList<>();
try (BufferedReader reader =
new BufferedReader(new InputStreamReader(activity.getAssets().open
(LABEL_PATH)))) {
String line;
while ((line = reader.readLine()) != null) {
labelList.add(line);
}
} catch (IOException e) {
Log.e(TAG, "Failed to read label list.", e);
}
return labelList;
}
/**
* Writes Image data into a {@code ByteBuffer}.
*/
private synchronized ByteBuffer convertBitmapToByteBuffer(
Bitmap bitmap, int width, int height) {
ByteBuffer imgData =
ByteBuffer.allocateDirect(
4*DIM_BATCH_SIZE * DIM_IMG_SIZE_X * DIM_IMG_SIZE_Y * DIM_PIXEL_SIZE);
imgData.order(ByteOrder.nativeOrder());
Bitmap scaledBitmap = Bitmap.createScaledBitmap(bitmap, DIM_IMG_SIZE_X, DIM_IMG_SIZE_Y,
true);
imgData.rewind();
scaledBitmap.getPixels(intValues, 0, scaledBitmap.getWidth(), 0, 0,
scaledBitmap.getWidth(), scaledBitmap.getHeight());
// Convert the image to int points.
int pixel = 0;
for (int i = 0; i < DIM_IMG_SIZE_X; ++i) {
for (int j = 0; j < DIM_IMG_SIZE_Y; ++j) {
final int val = intValues[pixel++];
imgData.putFloat((((val >> 16) & 0xFF)-IMAGE_MEAN)/IMAGE_STD);
imgData.putFloat((((val >> 8) & 0xFF)-IMAGE_MEAN)/IMAGE_STD);
imgData.putFloat(((val & 0xFF)-IMAGE_MEAN)/IMAGE_STD);
}
}
return imgData;
}
private void showToast(String message) {
Toast.makeText(getApplicationContext(), message, Toast.LENGTH_SHORT).show();
}
public void onItemSelected(AdapterView<?> parent, View v, int position, long id) {
mGraphicOverlay.clear();
mSelectedImage = getBitmapFromAsset(this, mFilePaths[position]);
if (mSelectedImage != null) {
// Get the dimensions of the View
Pair<Integer, Integer> targetedSize = getTargetedWidthHeight();
int targetWidth = targetedSize.first;
int maxHeight = targetedSize.second;
// Determine how much to scale down the image
float scaleFactor =
Math.max(
(float) mSelectedImage.getWidth() / (float) targetWidth,
(float) mSelectedImage.getHeight() / (float) maxHeight);
Bitmap resizedBitmap =
Bitmap.createScaledBitmap(
mSelectedImage,
(int) (mSelectedImage.getWidth() / scaleFactor),
(int) (mSelectedImage.getHeight() / scaleFactor),
true);
mImageView.setImageBitmap(resizedBitmap);
mSelectedImage = resizedBitmap;
}
}
@Override
public void onNothingSelected(AdapterView<?> parent) {
// Do nothing
}
// Utility functions for loading and resizing images from app asset folder.
public static Bitmap getBitmapFromAsset(Context context, String filePath) {
AssetManager assetManager = context.getAssets();
InputStream is;
Bitmap bitmap = null;
try {
is = assetManager.open(filePath);
bitmap = BitmapFactory.decodeStream(is);
} catch (IOException e) {
e.printStackTrace();
}
return bitmap;
}
// Returns max image width, always for portrait mode. Caller needs to swap width / height for
// landscape mode.
private Integer getImageMaxWidth() {
if (mImageMaxWidth == null) {
// Calculate the max width in portrait mode. This is done lazily since we need to
// wait for a UI layout pass to get the right values. So delay it to first time image
// rendering time.
mImageMaxWidth = mImageView.getWidth();
}
return mImageMaxWidth;
}
// Returns max image height, always for portrait mode. Caller needs to swap width / height for
// landscape mode.
private Integer getImageMaxHeight() {
if (mImageMaxHeight == null) {
// Calculate the max width in portrait mode. This is done lazily since we need to
// wait for a UI layout pass to get the right values. So delay it to first time image
// rendering time.
mImageMaxHeight =
mImageView.getHeight();
}
return mImageMaxHeight;
}
// Gets the targeted width / height.
private Pair<Integer, Integer> getTargetedWidthHeight() {
int targetWidth;
int targetHeight;
int maxWidthForPortraitMode = getImageMaxWidth();
int maxHeightForPortraitMode = getImageMaxHeight();
targetWidth = maxWidthForPortraitMode;
targetHeight = maxHeightForPortraitMode;
return new Pair<>(targetWidth, targetHeight);
}
}
But I'm still getting "Failed to get input dimensions. 0-th input should have 268203 bytes, but found 1072812 bytes" for Inception and "0-th input should have 150528 bytes, but found 602112 bytes" for MobileNet. So there is always a factor of 4.
To see what I've changed, the output of diff original.java changed.java is: (Ignore the line numbers)
32a33,34
> private static final int IMAGE_MEAN = 128;
> private static final float IMAGE_STD = 128.0f;
150,151c152,153
< byte[][] labelProbArray = task.getResult()
< .<byte[][]>getOutput(0);
---
> float[][] labelProbArray = task.getResult()
> .<float[][]>getOutput(0);
170c172
< private synchronized List<String> getTopLabels(byte[][] labelProbArray) {
---
> private synchronized List<String> getTopLabels(float[][] labelProbArray) {
173,174c175
< new AbstractMap.SimpleEntry<>(mLabelList.get(i), (labelProbArray[0][i] &
< 0xff) / 255.0f));
---
> new AbstractMap.SimpleEntry<>(mLabelList.get(i), (labelProbArray[0][i] )));
214c215,216
< DIM_BATCH_SIZE * DIM_IMG_SIZE_X * DIM_IMG_SIZE_Y * DIM_PIXEL_SIZE);
---
> 4*DIM_BATCH_SIZE * DIM_IMG_SIZE_X * DIM_IMG_SIZE_Y * DIM_PIXEL_SIZE);
>
226,228c228,232
< imgData.put((byte) ((val >> 16) & 0xFF));
< imgData.put((byte) ((val >> 8) & 0xFF));
< imgData.put((byte) (val & 0xFF));
---
> imgData.putFloat((((val >> 16) & 0xFF)-IMAGE_MEAN)/IMAGE_STD);
> imgData.putFloat((((val >> 8) & 0xFF)-IMAGE_MEAN)/IMAGE_STD);
> imgData.putFloat(((val & 0xFF)-IMAGE_MEAN)/IMAGE_STD);
This is how the buffer is allocated in the code lab:
ByteBuffer imgData = ByteBuffer.allocateDirect(
DIM_BATCH_SIZE * DIM_IMG_SIZE_X * DIM_IMG_SIZE_Y * DIM_PIXEL_SIZE);
DIM_BATCH_SIZE - A typical usage is for supporting batch processing (if the model supports it). In our sample and probably your test, you feed 1 image at a time and just keep it as 1.
DIM_PIXEL_SIZE - We set 3 in the code lab, which corresponds to r/g/b 1 byte each.
However, it looks like you are using a float model. Then, instead of one byte each for r/g/b, you use a float (4 bytes) to represent each channel (you already figured out this part yourself), so the buffer allocated by the code above is no longer sufficient.
You can follow example here for float models:
https://github.com/tensorflow/tensorflow/blob/25b4086bb5ba1788ceb6032eda58348f6e20a71d/tensorflow/contrib/lite/java/demo/app/src/main/java/com/example/android/tflitecamerademo/ImageClassifierFloatInception.java
To be exact about populating imgData, the allocation formula should be:
ByteBuffer imgData = ByteBuffer.allocateDirect(
DIM_BATCH_SIZE * getImageSizeX() * getImageSizeY() * DIM_PIXEL_SIZE
* getNumBytesPerChannel());
getNumBytesPerChannel() should be 4 in your case.
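As a concrete sketch for a 224x224 float model (using the same constant names as the code in the question):
// Float model: 4 bytes per channel value instead of 1.
int numBytesPerChannel = 4; // float32
ByteBuffer imgData = ByteBuffer.allocateDirect(
        DIM_BATCH_SIZE * DIM_IMG_SIZE_X * DIM_IMG_SIZE_Y * DIM_PIXEL_SIZE * numBytesPerChannel);
imgData.order(ByteOrder.nativeOrder());
// 1 * 224 * 224 * 3 * 4 = 602112 bytes, which matches the "found 602112 bytes" in the error above.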
[Update for the new question, regarding the error below]:
Failed to get input dimensions. 0-th input should have 268203 bytes, but found 1072812 bytes
This is the check that the number of bytes expected by the model equals the number of bytes passed in: 268203 = 299 * 299 * 3 and 1072812 = 4 * 299 * 299 * 3. It looks like you are using a quantized model but feeding it data prepared for a float model. Could you double-check the model you used? To keep things simple, don't specify a cloud model source and use the local model from assets only.
[Update 0628, developer said they trained a float model]:
It could be that your model is wrong; it could also be that you have a cloud model downloaded which overrides your local model. But the error message tells us that the model being loaded is NOT a float model.
To isolate the issue, I'd recommend the following tests:
1) Remove setCloudModelName / registerCloudModelSource from the quick start app (a sketch of the local-only setup follows this list).
2) Play with the official TFLite float model. You will have to download the model mentioned in the comment and change Camera2BasicFragment to use ImageClassifierFloatInception (instead of ImageClassifierQuantizedMobileNet).
3) Still using the same TFLite sample app, switch to your own trained model. Make sure to tune the image size to your values.
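A minimal sketch of step 1, reusing the "asset" local model name from the code in the question and registering no cloud source at all:
// Register only the local model bundled in assets, so no downloaded cloud model can override it.
FirebaseLocalModelSource localModelSource =
        new FirebaseLocalModelSource.Builder("asset")
                .setAssetFilePath(LOCAL_MODEL_ASSET)
                .build();
FirebaseModelManager.getInstance().registerLocalModelSource(localModelSource);
FirebaseModelOptions modelOptions =
        new FirebaseModelOptions.Builder()
                .setLocalModelName("asset")
                .build();
FirebaseModelInterpreter interpreter = FirebaseModelInterpreter.getInstance(modelOptions);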

Changing the color of an ellipse using hashtag data with Twitter4j 3.0.3

Okay, so I don't really know much about writing code yet. I am working on a project that uses Twitter API data. My goal for the project is to use hashtags to represent both good and bad things (for the sake of simplicity, let's use #good and #bad).
I want that hashtag data to modify the color of a simple ellipse to a shade between red and green, depending on the number of #good and #bad tweets.
I like to think of it as a +100/-100 spectrum: each #good tweet is +1, each #bad tweet is -1. At -100 tweets the ellipse is fully red; at +100 tweets it is fully green.
I know this is a little complicated, but it's for an art project I'm doing. I followed a tutorial and currently have the Twitter data showing up in a simple ArrayList of tweets (tutorial at https://www.youtube.com/watch?v=gwS6irtGK-c).
I am using Processing, Java, Twitter4j 3.0.3, and a MacBook Pro with OS X El Capitan 10.11.3.
Any help would be greatly appreciated, even pointing me in the direction of how to code it myself. If you need more information from me, I'll respond as quickly as I see it!
ConfigurationBuilder cb = new ConfigurationBuilder();
Twitter twitterInstance;
Query queryForTwitter;
ArrayList tweets;
void setup() {
cb.setOAuthConsumerKey("****");
cb.setOAuthConsumerSecret("****");
cb.setOAuthAccessToken("****");
cb.setOAuthAccessTokenSecret("****");
cb.setUseSSL(true);
twitterInstance = new TwitterFactory( cb.build()
).getInstance();
queryForTwitter = new Query("#good");
size(640,440);
FetchTweets();
} //setup
void draw() {
background(0);
DrawTweets();
} //draw
void DrawTweets() {
for(int i=0; i<tweets.size(); i++) {
Status t = (Status) tweets.get(i);
String user = t.getUser().getName();
String msg = t.getText();
text(user + ": " + msg,
20,15+i*30-mouseY, width-20, 40);
} //for
} //drawTweets
void FetchTweets(){
try {
QueryResult result = twitterInstance.search(
queryForTwitter );
tweets = (ArrayList) result.getTweets();
} catch(TwitterException te) {
println("Couldn't connect: " +te);
} // end of catch TwitterException
}// end of FetchAndDrawTweets()
SECOND VERSION:
ConfigurationBuilder cb = new ConfigurationBuilder();
Twitter twitterInstance;
Query queryForTwitter;
//ArrayList tweets;
void setup() {
cb.setOAuthConsumerKey("****");
cb.setOAuthConsumerSecret("****");
cb.setOAuthAccessToken("****");
cb.setOAuthAccessTokenSecret("****");
cb.setUseSSL(true);
//twitterInstance = new TwitterFactory( cb.build()
// ).getInstance();
//queryForTwitter = new Query("#feelthebern");
size(640,440);
int numGood = 50;
int numBad = 50;
for (int i = 0; i < numGood; i++) {
tweets.add("#good");
}
for (int i = 0; i < numBad; i++) {
tweets.add("#bad");
}
} //setup
ArrayList<String> tweets = new ArrayList<String>();
//create a function that counts the tweets
//that contain a certain hashtag
int countTweets(String hashtag){
int total = 0;
for(String tweet : tweets){
if(tweet.contains(hashtag)){
total++;
}
}
return total;
}
void draw(){
//count the good and bad tweets
int goodTweets = countTweets("#good");
int badTweets = countTweets("#bad");
//calculate color based on tweet counts
float r = badTweets/100.0 * 255;
float g = goodTweets/100.0 * 255;
float b = 0;
background(r, g, b);
}
You have to break your problem down into smaller steps.
Step 1: Create a function that simply returns an ArrayList of tweets.
Step 2: Create a function that takes that ArrayList and a String value, and returns the number of times that String occurs in the tweets in the ArrayList.
This code assumes you have an ArrayList<String> tweets:
int countTweets(String hashtag){
int total = 0;
for(String tweet : tweets){
if(tweet.contains(hashtag)){
total++;
}
}
return total;
}
Step 3: Calculate the color based on the number of tweets containing each word. You said you'll always have 100 tweets, so you can just divide the tweet count by 100, then multiply by 255 to get the color value.
Putting it all together, it looks like this:
ArrayList<String> tweets = new ArrayList<String>();
void setup() {
//you would actually get these from twitter,
//but for testing let's just fill them ourselves
int numGood = 50;
int numBad = 50;
for (int i = 0; i < numGood; i++) {
tweets.add("#good");
}
for (int i = 0; i < numBad; i++) {
tweets.add("#bad");
}
}
//create a function that counts the tweets
//that contain a certain hashtag
int countTweets(String hashtag){
int total = 0;
for(String tweet : tweets){
if(tweet.contains(hashtag)){
total++;
}
}
return total;
}
void draw(){
//count the good and bad tweets
int goodTweets = countTweets("#good");
int badTweets = countTweets("#bad");
//calculate color based on tweet counts
float r = badTweets/100.0 * 255;
float g = goodTweets/100.0 * 255;
float b = 0;
background(r, g, b);
}
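If you'd rather tint a single ellipse with a true red-to-green blend instead of driving the background channels, one option is Processing's lerpColor(); here's a minimal sketch, assuming the same countTweets() helper as above:
void draw() {
  background(0);
  // score runs from -100 (all #bad) to +100 (all #good)
  int score = countTweets("#good") - countTweets("#bad");
  // map the score to a 0..1 blend amount and clamp it
  float amt = constrain(map(score, -100, 100, 0, 1), 0, 1);
  // blend from full red at -100 to full green at +100
  fill(lerpColor(color(255, 0, 0), color(0, 255, 0), amt));
  ellipse(width/2, height/2, 200, 200);
}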

Playing multiple byte arrays simultaneously in Java

How can you play multiple (audio) byte arrays simultaneously? Each "byte array" is recorded with a TargetDataLine and transferred via a server.
What I've tried so far
Using SourceDataLine:
There is no way to play multiple streams using SourceDataLine, because the write method blocks until the buffer is written. This problem cannot be fixed using threads, because only one SourceDataLine can write concurrently.
Using the AudioPlayer Class:
ByteInputStream stream2 = new ByteInputStream(data, 0, data.length);
AudioInputStream stream = new AudioInputStream(stream2, VoiceChat.format, data.length);
AudioPlayer.player.start(stream);
This just plays noise on the clients.
EDIT
I don't receive the voice packets at the same time; they are not simultaneous, but rather "overlapping".
Apparently Java's Mixer interface was not designed for this.
http://docs.oracle.com/javase/7/docs/api/javax/sound/sampled/Mixer.html:
A mixer is an audio device with one or more lines. It need not be
designed for mixing audio signals.
And indeed, when I try to open multiple lines on the same mixer, this fails with a LineUnavailableException. However, if all your audio recordings have the same audio format, it's quite easy to mix them together manually. For example, if you have 2 inputs:
Convert both to the appropriate data type (for example byte[] for 8-bit audio, short[] for 16-bit, float[] for 32-bit floating point, etc.)
Sum them into another array, making sure the summed values do not exceed the range of the data type (see the clamping sketch below).
Convert the output back to bytes and write that to the SourceDataLine
See also How is audio represented with numbers?
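For example, a minimal sketch of those three steps for 16-bit little-endian PCM, assuming the two input buffers have equal length and the same format (the helper name is mine, not from your code; needs java.nio.ByteBuffer and java.nio.ByteOrder):
// Mix two 16-bit little-endian PCM buffers into one, clamping to the short range.
static byte[] mix16Bit(byte[] a, byte[] b) {
    short[] sa = new short[a.length / 2];
    short[] sb = new short[b.length / 2];
    short[] mixed = new short[sa.length];
    ByteBuffer.wrap(a).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(sa);
    ByteBuffer.wrap(b).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(sb);
    for (int i = 0; i < mixed.length; i++) {
        int sum = sa[i] + sb[i]; // may exceed the 16-bit range
        mixed[i] = (short) Math.max(Short.MIN_VALUE, Math.min(Short.MAX_VALUE, sum));
    }
    byte[] out = new byte[a.length];
    ByteBuffer.wrap(out).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().put(mixed);
    return out; // ready to be written to the SourceDataLine
}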
Here's a sample that mixes down 2 recordings and outputs them as 1 signal, all in 16-bit 48 kHz stereo.
// print all devices (both input and output)
int i = 0;
Mixer.Info[] infos = AudioSystem.getMixerInfo();
for (Mixer.Info info : infos)
System.out.println(i++ + ": " + info.getName());
// select 2 inputs and 1 output
System.out.println("Select input 1: ");
int in1Index = Integer.parseInt(System.console().readLine());
System.out.println("Select input 2: ");
int in2Index = Integer.parseInt(System.console().readLine());
System.out.println("Select output: ");
int outIndex = Integer.parseInt(System.console().readLine());
// ugly java sound api stuff
try (Mixer in1Mixer = AudioSystem.getMixer(infos[in1Index]);
Mixer in2Mixer = AudioSystem.getMixer(infos[in2Index]);
Mixer outMixer = AudioSystem.getMixer(infos[outIndex])) {
in1Mixer.open();
in2Mixer.open();
outMixer.open();
try (TargetDataLine in1Line = (TargetDataLine) in1Mixer.getLine(in1Mixer.getTargetLineInfo()[0]);
TargetDataLine in2Line = (TargetDataLine) in2Mixer.getLine(in2Mixer.getTargetLineInfo()[0]);
SourceDataLine outLine = (SourceDataLine) outMixer.getLine(outMixer.getSourceLineInfo()[0])) {
// audio format 48 kHz 16 bit stereo (signed little endian)
AudioFormat format = new AudioFormat(48000.0f, 16, 2, true, false);
// 4 bytes per frame (16 bit samples stereo)
int frameSize = 4;
int bufferSize = 4800;
int bufferBytes = frameSize * bufferSize;
// buffers for java audio
byte[] in1Bytes = new byte[bufferBytes];
byte[] in2Bytes = new byte[bufferBytes];
byte[] outBytes = new byte[bufferBytes];
// buffers for mixing
short[] in1Samples = new short[bufferBytes / 2];
short[] in2Samples = new short[bufferBytes / 2];
short[] outSamples = new short[bufferBytes / 2];
// how long to record & play
int framesProcessed = 0;
int durationSeconds = 10;
int durationFrames = (int) (durationSeconds * format.getSampleRate());
// open devices
in1Line.open(format, bufferBytes);
in2Line.open(format, bufferBytes);
outLine.open(format, bufferBytes);
in1Line.start();
in2Line.start();
outLine.start();
// start audio loop
while (framesProcessed < durationFrames) {
// record audio
in1Line.read(in1Bytes, 0, bufferBytes);
in2Line.read(in2Bytes, 0, bufferBytes);
// convert input bytes to samples
ByteBuffer.wrap(in1Bytes).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(in1Samples);
ByteBuffer.wrap(in2Bytes).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(in2Samples);
// mix samples - lower volume by 50% since we're mixing 2 streams
for (int s = 0; s < bufferBytes / 2; s++)
outSamples[s] = (short) ((in1Samples[s] + in2Samples[s]) * 0.5);
// convert output samples to bytes
ByteBuffer.wrap(outBytes).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().put(outSamples);
// play audio
outLine.write(outBytes, 0, bufferBytes);
framesProcessed += bufferBytes / frameSize;
}
in1Line.stop();
in2Line.stop();
outLine.stop();
}
}
Alright, I put something together which should get you started. I'll post the full code below, but first I'll try to explain the steps involved.
The interesting part here is to create your own audio "mixer" class which allows consumers of that class to schedule audio blocks at specific points in the (near) future. The specific-point-in-time part is important here: I'm assuming you receive network voices in packets, where each packet needs to start exactly at the end of the previous one in order to play back a continuous sound for a single voice. Also, since you say voices can overlap, I'm assuming (yes, lots of assumptions) a new one can come in over the network while one or more old ones are still playing. So it seems reasonable to allow audio blocks to be scheduled from any thread. Note that there's only one thread actually writing to the data line; it's just that any thread can submit audio packets to the mixer.
So for the submit-audio-packet part we now have this:
private final ConcurrentLinkedQueue<QueuedBlock> scheduledBlocks;
public void mix(long when, short[] block) {
scheduledBlocks.add(new QueuedBlock(when, Arrays.copyOf(block, block.length)));
}
The QueuedBlock class is just used to tag a byte array (the audio buffer) with the "when": the point in time where the block should be played.
Points in time are expressed relative to the current position of the audio stream. It is set to zero when the stream is created and updated with the buffer size each time an audio buffer is written to the dataline:
private final AtomicLong position = new AtomicLong();
public long position() {
return position.get();
}
Apart from all the hassle of setting up the data line, the interesting part of the mixer class is obviously where the mixdown happens. Each scheduled audio block falls into one of 3 cases:
The block has already been played in its entirety. Remove it from the scheduledBlocks list.
The block is scheduled to start at some point in time after the current buffer. Do nothing.
(Part of) the block should be mixed down into the current buffer. Note that the beginning of the block may (or may not) already have been played in previous buffer(s). Similarly, the end of the scheduled block may extend past the end of the current buffer, in which case we mix down the first part of it and leave the rest for the next round, until all of it has been played and the entire block is removed.
Also note that there's no reliable way to start playing audio data immediately: when you submit packets to the mixer, be sure to always have them start at least the duration of 1 audio buffer from now, otherwise you risk losing the beginning of your sound. Here's the mixdown code:
private static final double MIXDOWN_VOLUME = 1.0 / NUM_PRODUCERS;
private final List<QueuedBlock> finished = new ArrayList<>();
private final short[] mixBuffer = new short[BUFFER_SIZE_FRAMES * CHANNELS];
private final byte[] audioBuffer = new byte[BUFFER_SIZE_FRAMES * CHANNELS * 2];
private final AtomicLong position = new AtomicLong();
Arrays.fill(mixBuffer, (short) 0);
long bufferStartAt = position.get();
for (QueuedBlock block : scheduledBlocks) {
int blockFrames = block.data.length / CHANNELS;
// block fully played - mark for deletion
if (block.when + blockFrames <= bufferStartAt) {
finished.add(block);
continue;
}
// block starts after end of current buffer
if (bufferStartAt + BUFFER_SIZE_FRAMES <= block.when)
continue;
// mix in part of the block which overlaps current buffer
int blockOffset = Math.max(0, (int) (bufferStartAt - block.when));
int blockMaxFrames = blockFrames - blockOffset;
int bufferOffset = Math.max(0, (int) (block.when - bufferStartAt));
int bufferMaxFrames = BUFFER_SIZE_FRAMES - bufferOffset;
for (int f = 0; f < blockMaxFrames && f < bufferMaxFrames; f++)
for (int c = 0; c < CHANNELS; c++) {
int bufferIndex = (bufferOffset + f) * CHANNELS + c;
int blockIndex = (blockOffset + f) * CHANNELS + c;
mixBuffer[bufferIndex] += (short)
(block.data[blockIndex]*MIXDOWN_VOLUME);
}
}
scheduledBlocks.removeAll(finished);
finished.clear();
ByteBuffer
.wrap(audioBuffer)
.order(ByteOrder.LITTLE_ENDIAN)
.asShortBuffer()
.put(mixBuffer);
line.write(audioBuffer, 0, audioBuffer.length);
position.addAndGet(BUFFER_SIZE_FRAMES);
And finally, a complete, self-contained sample which spawns a number of threads submitting audio blocks, representing sine waves of random duration and frequency, to the mixer (called AudioConsumer in this sample). Replace the sine waves with incoming network packets and you should be halfway to a solution.
package test;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Line;
import javax.sound.sampled.Mixer;
import javax.sound.sampled.SourceDataLine;
public class Test {
public static final int CHANNELS = 2;
public static final int SAMPLE_RATE = 48000;
public static final int NUM_PRODUCERS = 10;
public static final int BUFFER_SIZE_FRAMES = 4800;
// generates some random sine wave
public static class ToneGenerator {
private static final double[] NOTES = {261.63, 311.13, 392.00};
private static final double[] OCTAVES = {1.0, 2.0, 4.0, 8.0};
private static final double[] LENGTHS = {0.05, 0.25, 1.0, 2.5, 5.0};
private double phase;
private int framesProcessed;
private final double length;
private final double frequency;
public ToneGenerator() {
ThreadLocalRandom rand = ThreadLocalRandom.current();
length = LENGTHS[rand.nextInt(LENGTHS.length)];
frequency = NOTES[rand.nextInt(NOTES.length)] * OCTAVES[rand.nextInt(OCTAVES.length)];
}
// make sound
public void fill(short[] block) {
for (int f = 0; f < block.length / CHANNELS; f++) {
double sample = Math.sin(phase * 2.0 * Math.PI);
for (int c = 0; c < CHANNELS; c++)
block[f * CHANNELS + c] = (short) (sample * Short.MAX_VALUE);
phase += frequency / SAMPLE_RATE;
}
framesProcessed += block.length / CHANNELS;
}
// true if length of tone has been generated
public boolean done() {
return framesProcessed >= length * SAMPLE_RATE;
}
}
// dummy audio producer, based on sinewave generator
// above but could also be incoming network packets
public static class AudioProducer {
final Thread thread;
final AudioConsumer consumer;
final short[] buffer = new short[BUFFER_SIZE_FRAMES * CHANNELS];
public AudioProducer(AudioConsumer consumer) {
this.consumer = consumer;
thread = new Thread(() -> run());
thread.setDaemon(true);
}
public void start() {
thread.start();
}
// repeatedly play random sine and sleep for some time
void run() {
try {
ThreadLocalRandom rand = ThreadLocalRandom.current();
while (true) {
long pos = consumer.position();
ToneGenerator g = new ToneGenerator();
// if we schedule at current buffer position, first part of the tone will be
// missed so have tone start somewhere in the middle of the next buffer
pos += BUFFER_SIZE_FRAMES + rand.nextInt(BUFFER_SIZE_FRAMES);
while (!g.done()) {
g.fill(buffer);
consumer.mix(pos, buffer);
pos += BUFFER_SIZE_FRAMES;
// we can generate audio faster than it's played
// sleep a while to compensate - this more closely
// corresponds to playing audio coming in over the network
double bufferLengthMillis = BUFFER_SIZE_FRAMES * 1000.0 / SAMPLE_RATE;
Thread.sleep((int) (bufferLengthMillis * 0.9));
}
// sleep a while in between tones
Thread.sleep(1000 + rand.nextInt(2000));
}
} catch (Throwable t) {
System.out.println(t.getMessage());
t.printStackTrace();
}
}
}
// audio consumer - plays continuously on a background
// thread, allows audio to be mixed in from arbitrary threads
public static class AudioConsumer {
// audio block with "when to play" tag
private static class QueuedBlock {
final long when;
final short[] data;
public QueuedBlock(long when, short[] data) {
this.when = when;
this.data = data;
}
}
// need not normally be so low but in this example
// we're mixing down a bunch of full scale sinewaves
private static final double MIXDOWN_VOLUME = 1.0 / NUM_PRODUCERS;
private final List<QueuedBlock> finished = new ArrayList<>();
private final short[] mixBuffer = new short[BUFFER_SIZE_FRAMES * CHANNELS];
private final byte[] audioBuffer = new byte[BUFFER_SIZE_FRAMES * CHANNELS * 2];
private final Thread thread;
private final AtomicLong position = new AtomicLong();
private final AtomicBoolean running = new AtomicBoolean(true);
private final ConcurrentLinkedQueue<QueuedBlock> scheduledBlocks = new ConcurrentLinkedQueue<>();
public AudioConsumer() {
thread = new Thread(() -> run());
}
public void start() {
thread.start();
}
public void stop() {
running.set(false);
}
// gets the play cursor. note - this is not accurate and
// must only be used to schedule blocks relative to other blocks
// (e.g., for splitting up continuous sounds into multiple blocks)
public long position() {
return position.get();
}
// put copy of audio block into queue so we don't
// have to worry about caller messing with it afterwards
public void mix(long when, short[] block) {
scheduledBlocks.add(new QueuedBlock(when, Arrays.copyOf(block, block.length)));
}
// better hope mixer 0, line 0 is output
private void run() {
Mixer.Info[] mixerInfo = AudioSystem.getMixerInfo();
try (Mixer mixer = AudioSystem.getMixer(mixerInfo[0])) {
Line.Info[] lineInfo = mixer.getSourceLineInfo();
try (SourceDataLine line = (SourceDataLine) mixer.getLine(lineInfo[0])) {
line.open(new AudioFormat(SAMPLE_RATE, 16, CHANNELS, true, false), BUFFER_SIZE_FRAMES);
line.start();
while (running.get())
processSingleBuffer(line);
line.stop();
}
} catch (Throwable t) {
System.out.println(t.getMessage());
t.printStackTrace();
}
}
// mix down single buffer and offer to the audio device
private void processSingleBuffer(SourceDataLine line) {
Arrays.fill(mixBuffer, (short) 0);
long bufferStartAt = position.get();
// mixdown audio blocks
for (QueuedBlock block : scheduledBlocks) {
int blockFrames = block.data.length / CHANNELS;
// block fully played - mark for deletion
if (block.when + blockFrames <= bufferStartAt) {
finished.add(block);
continue;
}
// block starts after end of current buffer
if (bufferStartAt + BUFFER_SIZE_FRAMES <= block.when)
continue;
// mix in part of the block which overlaps current buffer
// note that block may have already started in the past
// but extends into the current buffer, or that it starts
// in the future but before the end of the current buffer
int blockOffset = Math.max(0, (int) (bufferStartAt - block.when));
int blockMaxFrames = blockFrames - blockOffset;
int bufferOffset = Math.max(0, (int) (block.when - bufferStartAt));
int bufferMaxFrames = BUFFER_SIZE_FRAMES - bufferOffset;
for (int f = 0; f < blockMaxFrames && f < bufferMaxFrames; f++)
for (int c = 0; c < CHANNELS; c++) {
int bufferIndex = (bufferOffset + f) * CHANNELS + c;
int blockIndex = (blockOffset + f) * CHANNELS + c;
mixBuffer[bufferIndex] += (short) (block.data[blockIndex] * MIXDOWN_VOLUME);
}
}
scheduledBlocks.removeAll(finished);
finished.clear();
ByteBuffer.wrap(audioBuffer).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().put(mixBuffer);
line.write(audioBuffer, 0, audioBuffer.length);
position.addAndGet(BUFFER_SIZE_FRAMES);
}
}
public static void main(String[] args) {
System.out.print("Press return to exit...");
AudioConsumer consumer = new AudioConsumer();
consumer.start();
for (int i = 0; i < NUM_PRODUCERS; i++)
new AudioProducer(consumer).start();
System.console().readLine();
consumer.stop();
}
}
You can use the Tritonus library to do software audio mixing (it's old but still works quite well).
Add the dependency to your project:
<dependency>
<groupId>com.googlecode.soundlibs</groupId>
<artifactId>tritonus-all</artifactId>
<version>0.3.7.2</version>
</dependency>
Use org.tritonus.share.sampled.FloatSampleBuffer. Both buffers must have the same AudioFormat before calling #mix.
// TODO instantiate these variables with real data
byte[] audio1, audio2;
AudioFormat af1, af2;
SourceDataLine sdl = AudioSystem.getSourceDataLine(af1);
FloatSampleBuffer fsb1 = new FloatSampleBuffer(audio1, 0, audio1.length, af1);
FloatSampleBuffer fsb2 = new FloatSampleBuffer(audio2, 0, audio2.length, af2);
fsb1.mix(fsb2);
byte[] result = fsb1.convertToByteArray(af1);
sdl.write(result, 0, result.length); // play it

Collision detection in andengine gles2

I'm trying to set up collision detection on the tiles with the cactus property (all the cactuses) in the TMX map example from AndEngine GLES2. I have tried various methods - can anyone give me one that works?
Original Code
Tmxmaps andengine
One suggested solution:
collision detection
Another suggested solution:
from andengine.org
I've tried:
if(pTMXTileProperties.containsTMXProperty("cactus", "true")) {
final Rectangle rect = new Rectangle(pTMXTile.getTileX()+10, pTMXTile.getTileY(),14, 14);
final FixtureDef boxFixtureDef = PhysicsFactory.createFixtureDef(0, 0, 1f);
PhysicsFactory.createBoxBody(mPhysicsWorld, rect, BodyType.StaticBody, boxFixtureDef);
rect.setVisible(false);
mScene.attachChild(rect);
}
This is from AndEngine: Handling collisions with TMX Objects
But I get this error:
Physicsfactory not found
I'm using the TMX example you have there as a basis for my game.
This is the main block of code for collisions:
// Define the block behavior
mPathFinderMap = new IPathFinderMap<TMXLayer>(){
private boolean mCollide;
@Override
public boolean isBlocked(final int pX, final int pY, final TMXLayer pTMXLayer) {
/*
* This is where collisions happen and are detected
*/
mCollide = false;
//Null check. Used since not all tiles have properties
if(pTMXLayer.getTMXTile(pX, pY).getTMXTileProperties(mTiledMap) != null){
//Get tiles with collision property
if(pTMXLayer.getTMXTile(pX, pY).getTMXTileProperties(mTiledMap).containsTMXProperty("COLLISION", "true"))
mCollide = true;
}
if(mTMXmapLoader.getCollideTiles().contains(pTMXLayer.getTMXTile(pX, pY)))
mCollide = true;
return mCollide;
}
};
/*
* This method moves the sprite to the designated location
*/
public void walkTo(TMXTile pFinalPosition) {
if(mHasFinishedPath){
mHasFinishedPath = false;//This prevents overlapping paths when the user double clicks. Used to prevent stutter
//Player coordinates
final float[] lPlayerCordinates = mPlayerSprite.convertLocalToSceneCoordinates(mPlayerSprite.getWidth()/2, mPlayerSprite.getHeight()/2);
// Get the tile the center of the player is currently walking on.
TMXTile lPlayerPosition = SceneManager.mWorldScene.getTouchLayer().getTMXTileAt(lPlayerCordinates[Constants.VERTEX_INDEX_X], lPlayerCordinates[Constants.VERTEX_INDEX_Y]);
mFinalPosition = pFinalPosition;
// Sets the A* path from the player location to the touched location.
if(mPathFinderMap.isBlocked(pFinalPosition.getTileColumn(), pFinalPosition.getTileRow(), SceneManager.mWorldScene.getTouchLayer())){
pFinalPosition = getNextTile(lPlayerPosition, pFinalPosition);
}
// These are the parameters used to determine the
int lFromCol = lPlayerPosition.getTileColumn(); int lFromRow = lPlayerPosition.getTileRow();
int lToCol = pFinalPosition.getTileColumn(); int lToRow = pFinalPosition.getTileRow();
boolean lAllowDiagonal = false;
// Find the path. This needs to be refreshed
AStarPath = mAStarPathFinder.findPath(MAX_SEARCH_DEPTH, mPathFinderMap, 0, 0, mTiledMap.getTileColumns() - 1, mTiledMap.getTileRows() - 1, SceneManager.mWorldScene.getTouchLayer(),
lFromCol, lFromRow, lToCol, lToRow, lAllowDiagonal, mHeuristic, mCostCallback);
//Log.i("AstarPath", "AStarPath " + AStarPath);
//Only loads the path if the AStarPath is not null
Path lPlayerPath = loadPathFound();
//Log.i("AstarPath", "lPlayerPath " + lPlayerPath);
if(lPlayerPath != null)
moveSprite(lPlayerPath);//Moves the sprite along the path
else
mHasFinishedPath = true;//If the path is null the player has not moved. Set the flag to true allows input to effect the sprite
}else{
//Update parameters
mFinalPosition = pFinalPosition;
mWaypointIndex = 0;
}
}
/*
* Updates the path
*/
public void updatePath(TMXTile pFinalPosition) {
//Player coordinates
final float[] lPlayerCordinates = mPlayerSprite.convertLocalToSceneCoordinates(mPlayerSprite.getWidth()/2, mPlayerSprite.getHeight()/2);
// Get the tile the feet of the player are currently walking on.
TMXTile lPlayerPosition = SceneManager.mWorldScene.getTouchLayer().getTMXTileAt(lPlayerCordinates[Constants.VERTEX_INDEX_X], lPlayerCordinates[Constants.VERTEX_INDEX_Y]);
// Sets the A* path from the player location to the touched location.
if(mPathFinderMap.isBlocked(pFinalPosition.getTileColumn(), pFinalPosition.getTileRow(), SceneManager.mWorldScene.getTouchLayer())){
pFinalPosition = getNextTile(lPlayerPosition, pFinalPosition);
}
// Determine the tile locations
int FromCol = lPlayerPosition.getTileColumn();
int FromRow = lPlayerPosition.getTileRow();
int ToCol = pFinalPosition.getTileColumn();
int ToRow = pFinalPosition.getTileRow();
// Find the path. This needs to be refreshed
AStarPath = mAStarPathFinder.findPath(MAX_SEARCH_DEPTH, mPathFinderMap, 0, 0, mTiledMap.getTileColumns()-1, mTiledMap.getTileRows()-1, SceneManager.mWorldScene.getTouchLayer(),
FromCol, FromRow, ToCol, ToRow, false, mHeuristic, mCostCallback);
//Loads the path with the astar specifications
Path lPlayerPath = loadPathFound();
//Moves the sprite along the path
if(lPlayerPath != null){
moveSprite(lPlayerPath);
}else{
//If the path is still null after the path manipulation then the path is finished
mHasFinishedPath = true;
mWaypointIndex = 0;
//mPlayerSprite.stopAnimation();
//AStarPath = null;
}
}
The TMXmapLoader does the rest:
//Get the collision, ext, and changing tiles from the object sets on the map
mCollideTiles = this.getObjectGroupPropertyTiles("COLLIDE", TMXGroupObjects);
mExitTiles = this.getObjectPropertyTiles("EXIT", mTMXObjects);
mChangingTiles = this.getObjectGroupPropertyTiles("CHANGE", TMXGroupObjects);
...
public ArrayList<TMXTile> getCollideTiles(){
return mCollideTiles;
}
...
public ArrayList<TMXTile> getObjectGroupPropertyTiles(String pName, final int pLayer, ArrayList<TMXObjectGroup> pTMXObjectGroups){
ArrayList<TMXTile> ObjectTile = new ArrayList<TMXTile>();
for (final TMXObjectGroup pObjectGroups : pTMXObjectGroups) {
// Iterates through the properties and assigns them to the new variable
for (final TMXObjectGroupProperty pGroupProperties : pObjectGroups.getTMXObjectGroupProperties()) {
//Sees if any of the elements have this condition
if (pGroupProperties.getName().contains(pName)) {
for (final TMXObject pObjectTiles : pObjectGroups.getTMXObjects()) {
int ObjectX = pObjectTiles.getX();
int ObjectY = pObjectTiles.getY();
// Gets the number of rows and columns in the object
int ObjectRows = pObjectTiles.getHeight() / WorldActivity.TILE_HEIGHT;
int ObjectColumns = pObjectTiles.getWidth() / WorldActivity.TILE_WIDTH;
for (int TileRow = 0; TileRow < ObjectRows; TileRow++) {
for (int TileColumn = 0; TileColumn < ObjectColumns; TileColumn++) {
float lObjectTileX = ObjectX + TileColumn * WorldActivity.TILE_WIDTH;
float lObjectTileY = ObjectY + TileRow * WorldActivity.TILE_HEIGHT;
ObjectTile.add(mTMXTiledMap.getTMXLayers().get(pLayer).getTMXTileAt(lObjectTileX, lObjectTileY));
}
}
}
}
}
}
return ObjectTile;
}
I'm not familiar with android development, but the error seems to indicate that PhysicsFactory hasn't been imported. Maybe try adding an import statement like this to the top of your file?
import org.anddev.andengine.extension.physics.box2d.PhysicsFactory;
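One caveat (this is an assumption on my part about which branch you are building against): in the GLES2 branch of AndEngine the packages were renamed from org.anddev.andengine.* to org.andengine.*, so with the Box2D physics extension on your build path the imports would more likely look like:
// GLES2 branch package layout (verify against the AndEngine physics Box2D extension jar you actually have)
import org.andengine.extension.physics.box2d.PhysicsFactory;
import org.andengine.extension.physics.box2d.PhysicsWorld;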

Android app to record sound in real time and identify frequency

I need to develop an app that records frequencies in real time using the phone's mic and then displays them as text. I am posting my code here. The FFT and Complex classes are taken from http://introcs.cs.princeton.edu/java/97data/FFT.java.html and http://introcs.cs.princeton.edu/java/97data/Complex.java.html. The problem is that when I run this on the emulator, the frequency starts from some random value and keeps increasing until 7996, then the whole process repeats. Can someone please help me out?
public class Main extends Activity {
TextView disp;
private static int[] sampleRate = new int[] { 44100, 22050, 11025, 8000 };
short audioData[];
double finalData[];
int bufferSize,srate;
String TAG;
public boolean recording;
AudioRecord recorder;
Complex[] fftArray;
float freq;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
disp = (TextView) findViewById(R.id.display);
Thread t1 = new Thread(new Runnable(){
public void run() {
Log.i(TAG,"Setting up recording");
for (int rate : sampleRate) {
try{
Log.d(TAG, "Attempting rate " + rate);
bufferSize=AudioRecord.getMinBufferSize(rate,AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT)*3; //get the buffer size to use with this audio record
if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
recorder = new AudioRecord (MediaRecorder.AudioSource.MIC,rate,AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT,2048); //instantiate the AudioRecorder
Log.d(TAG, "BufferSize " +bufferSize);
srate = rate;
}
} catch (Exception e) {
Log.e(TAG, rate + "Exception, keep trying.",e);
}
}
bufferSize=2048;
recording=true; //variable to use start or stop recording
audioData = new short [bufferSize]; //short array that pcm data is put into.
Log.i(TAG,"Got buffer size =" + bufferSize);
while (recording) { //loop while recording is needed
Log.i(TAG,"in while 1");
if (recorder.getState()==android.media.AudioRecord.STATE_INITIALIZED) // check to see if the recorder has initialized yet.
if (recorder.getRecordingState()==android.media.AudioRecord.RECORDSTATE_STOPPED)
recorder.startRecording(); //check to see if the Recorder has stopped or is not recording, and make it record.
else {
Log.i(TAG,"in else");
// audiorecord();
finalData=convert_to_double(audioData);
Findfft();
for(int k=0;k<fftArray.length;k++)
{
freq = ((float)srate/(float) fftArray.length) *(float)k;
runOnUiThread(new Runnable(){
public void run()
{
disp.setText("The frequency is " + freq);
if(freq>=15000)
recording = false;
}
});
}
}//else recorder started
} //while recording
if (recorder.getState()==android.media.AudioRecord.RECORDSTATE_RECORDING)
recorder.stop(); //stop the recorder before ending the thread
recorder.release(); //release the recorders resources
recorder=null; //set the recorder to be garbage collected.
}//run
});
t1.start();
}
private void Findfft() {
// TODO Auto-generated method stub
Complex[] fftTempArray = new Complex[bufferSize];
for (int i=0; i<bufferSize; i++)
{
fftTempArray[i] = new Complex(finalData[i], 0);
}
fftArray = FFT.fft(fftTempArray);
}
private double[] convert_to_double(short data[]) {
// TODO Auto-generated method stub
double[] transformed = new double[data.length];
for (int j=0;j<data.length;j++) {
transformed[j] = (double)data[j];
}
return transformed;
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
// Inflate the menu; this adds items to the action bar if it is present.
getMenuInflater().inflate(R.menu.main, menu);
return true;
}
}
Your question has been succinctly answered; however, to further your objectives and complete the loop...
Yes, an FFT is not optimal on limited CPUs for pitch/frequency identification. A better approach is YIN, described here. You may find an implementation in Tarsos.
The issues you will face are the lack of javax.sound.sampled in the Android SDK, and therefore the need to convert the shorts/bytes from AudioRecord to the floats required by the referenced implementations.
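A minimal sketch of that conversion, assuming signed 16-bit PCM from AudioRecord (normalizing to the -1..1 range is a common convention, not something required by a specific library):
// Convert 16-bit PCM samples from AudioRecord.read(short[], ...) into floats in [-1, 1].
static float[] shortsToFloats(short[] pcm, int validSamples) {
    float[] out = new float[validSamples];
    for (int i = 0; i < validSamples; i++) {
        out[i] = pcm[i] / 32768f; // Short.MIN_VALUE is -32768
    }
    return out;
}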
Your problem is right here:
Findfft();
for(int k=0;k<fftArray.length;k++) {
freq = ((float)srate/(float) fftArray.length) *(float)k;
runOnUiThread(new Runnable() {
public void run() {
disp.setText("The frequency is " + freq);
if(freq>=15000) recording = false;
}
});
}
All this for loop does is go through your array of FFT values, convert the array index to a frequency in Hz, and print it.
If you want to output the frequency you're recording, you should at least look at the data in your array: the crudest method would be to calculate the squared magnitude of each bin and find the frequency bin with the largest one, as in the sketch below.
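A minimal sketch of that peak search, assuming the Princeton Complex class from the question (which exposes re() and im()) and looking only at the first half of the spectrum:
// Find the dominant frequency: the bin with the largest squared magnitude in bins 1 .. N/2.
private float dominantFrequency(Complex[] fft, int sampleRate) {
    int peakBin = 1;
    double peakPower = 0;
    for (int k = 1; k <= fft.length / 2; k++) { // skip bin 0, which only holds the DC offset
        double power = fft[k].re() * fft[k].re() + fft[k].im() * fft[k].im();
        if (power > peakPower) {
            peakPower = power;
            peakBin = k;
        }
    }
    return (float) peakBin * sampleRate / fft.length; // convert bin index to Hz
}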
In addition to that, I don't think the FFT algorithm you're using does any precalculations - there are others that do, and seeing as you're developing for a mobile device, you might want to take CPU usage and power use into account.
JTransforms is one library that does use precalculation to lower CPU load, and its documentation is very complete.
You may also find useful information on how to interpret the data returned from the FFT at Wikipedia - no offense, but it looks like you're not quite sure what you're doing, so I'm giving pointers.
Lastly, if you're looking to use this app for musical notes, I seem to remember lots of people saying that an FFT isn't the best way to do that, but I can't remember what is. Maybe someone else can add that bit?
I found this solution after a few days; it's the best I found for getting the frequency in Hz:
Download JTransforms, and this JAR as well (JTransforms needs it).
Then I use this task:
public class MyRecorder extends AsyncTask<Void, short[], Void> {
int blockSize = 2048;// = 256;
private static final int RECORDER_SAMPLERATE = 8000;
private static final int RECORDER_CHANNELS = AudioFormat.CHANNEL_IN_MONO;
private static final int RECORDER_AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
int BufferElements2Rec = 1024; // we want 2048 (2K) bytes; since each element is 2 bytes, we use 1024 elements
int BytesPerElement = 2;
@Override
protected Void doInBackground(Void... params) {
try {
final AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
RECORDER_SAMPLERATE, RECORDER_CHANNELS,
RECORDER_AUDIO_ENCODING, BufferElements2Rec * BytesPerElement);
if (audioRecord == null) {
return null;
}
final short[] buffer = new short[blockSize];
final double[] toTransform = new double[blockSize];
audioRecord.startRecording();
while (started) {
Thread.sleep(100);
final int bufferReadResult = audioRecord.read(buffer, 0, blockSize);
publishProgress(buffer);
}
audioRecord.stop();
audioRecord.release();
} catch (Throwable t) {
Log.e("AudioRecord", "Recording Failed");
}
return null;
}
@Override
protected void onProgressUpdate(short[]... buffer) {
super.onProgressUpdate(buffer);
float freq = calculate(RECORDER_SAMPLERATE, buffer[0]);
}
public static float calculate(int sampleRate, short [] audioData)
{
int numSamples = audioData.length;
int numCrossing = 0;
for (int p = 0; p < numSamples-1; p++)
{
if ((audioData[p] > 0 && audioData[p + 1] <= 0) ||
(audioData[p] < 0 && audioData[p + 1] >= 0))
{
numCrossing++;
}
}
float numSecondsRecorded = (float)numSamples/(float)sampleRate;
float numCycles = numCrossing / 2f; // float division to avoid truncating half a cycle
float frequency = numCycles/numSecondsRecorded;
return frequency;
}
