I am working on an app which uploads a large amount of data. I want to determine the transfer rate of the upload, to show in a notification.
One post suggests using WifiInfo, which will not work for mobile data.
Another post suggests getting the network type to estimate the speed.
I'm not satisfied with the answers in these posts, so I am asking again.
I've seen apps which display the upload transfer rate, as well as some custom ROMs like Resurrection Remix.
How can I determine the transfer rate of these uploads?
You can obtain the amount of transferred traffic using android.net.TrafficStats. Below is an implementation of this idea which measures the up-stream and down-stream transfer rates. Pass TrafficSpeedMeasurer.TrafficType.MOBILE to the TrafficSpeedMeasurer constructor to measure mobile traffic only, or TrafficSpeedMeasurer.TrafficType.ALL to measure overall traffic (WiFi/mobile). Setting SHOW_SPEED_IN_BITS = true in MainActivity switches the reported unit to bits per second.
MainActivity.java
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.widget.TextView;
public class MainActivity extends AppCompatActivity {
private static final boolean SHOW_SPEED_IN_BITS = false;
private TrafficSpeedMeasurer mTrafficSpeedMeasurer;
private TextView mTextView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
mTextView = findViewById(R.id.connection_class);
mTrafficSpeedMeasurer = new TrafficSpeedMeasurer(TrafficSpeedMeasurer.TrafficType.ALL);
mTrafficSpeedMeasurer.startMeasuring();
}
@Override
protected void onDestroy() {
super.onDestroy();
mTrafficSpeedMeasurer.stopMeasuring();
}
@Override
protected void onPause() {
super.onPause();
mTrafficSpeedMeasurer.removeListener();
}
@Override
protected void onResume() {
super.onResume();
mTrafficSpeedMeasurer.registerListener(mStreamSpeedListener);
}
private ITrafficSpeedListener mStreamSpeedListener = new ITrafficSpeedListener() {
@Override
public void onTrafficSpeedMeasured(final double upStream, final double downStream) {
runOnUiThread(new Runnable() {
@Override
public void run() {
String upStreamSpeed = Utils.parseSpeed(upStream, SHOW_SPEED_IN_BITS);
String downStreamSpeed = Utils.parseSpeed(downStream, SHOW_SPEED_IN_BITS);
mTextView.setText("Up Stream Speed: " + upStreamSpeed + "\n" + "Down Stream Speed: " + downStreamSpeed);
}
});
}
};
}
TrafficSpeedMeasurer.java
import android.net.TrafficStats;
import android.os.Handler;
import android.os.HandlerThread;
import android.os.Looper;
import android.os.Message;
import android.os.SystemClock;
public class TrafficSpeedMeasurer {
private ITrafficSpeedListener mTrafficSpeedListener;
private SamplingHandler mHandler;
private TrafficType mTrafficType;
private long mLastTimeReading;
private long mPreviousUpStream = -1;
private long mPreviousDownStream = -1;
public TrafficSpeedMeasurer(TrafficType trafficType) {
mTrafficType = trafficType;
HandlerThread thread = new HandlerThread("ParseThread");
thread.start();
mHandler = new SamplingHandler(thread.getLooper());
}
public void registerListener(ITrafficSpeedListener iTrafficSpeedListener) {
mTrafficSpeedListener = iTrafficSpeedListener;
}
public void removeListener() {
mTrafficSpeedListener = null;
}
public void startMeasuring() {
mHandler.startSamplingThread();
mLastTimeReading = SystemClock.elapsedRealtime();
}
public void stopMeasuring() {
mHandler.stopSamplingThread();
finalReadTrafficStats();
}
private void readTrafficStats() {
// TrafficStats reports cumulative bytes; multiplying by 1024 and dividing by the elapsed time
// in milliseconds below yields an approximation of bytes per second (1024 ~ 1000 ms).
long newBytesUpStream = (mTrafficType == TrafficType.MOBILE ? TrafficStats.getMobileTxBytes() : TrafficStats.getTotalTxBytes()) * 1024;
long newBytesDownStream = (mTrafficType == TrafficType.MOBILE ? TrafficStats.getMobileRxBytes() : TrafficStats.getTotalRxBytes()) * 1024;
long byteDiffUpStream = newBytesUpStream - mPreviousUpStream;
long byteDiffDownStream = newBytesDownStream - mPreviousDownStream;
synchronized (this) {
long currentTime = SystemClock.elapsedRealtime();
double bandwidthUpStream = 0;
double bandwidthDownStream = 0;
if (mPreviousUpStream >= 0) {
bandwidthUpStream = (byteDiffUpStream) * 1.0 / (currentTime - mLastTimeReading);
}
if (mPreviousDownStream >= 0) {
bandwidthDownStream = (byteDiffDownStream) * 1.0 / (currentTime - mLastTimeReading);
}
if (mTrafficSpeedListener != null) {
mTrafficSpeedListener.onTrafficSpeedMeasured(bandwidthUpStream, bandwidthDownStream);
}
mLastTimeReading = currentTime;
}
mPreviousDownStream = newBytesDownStream;
mPreviousUpStream = newBytesUpStream;
}
private void finalReadTrafficStats() {
readTrafficStats();
mPreviousUpStream = -1;
mPreviousDownStream = -1;
}
private class SamplingHandler extends Handler {
private static final long SAMPLE_TIME = 1000;
private static final int MSG_START = 1;
private SamplingHandler(Looper looper) {
super(looper);
}
@Override
public void handleMessage(Message msg) {
switch (msg.what) {
case MSG_START:
readTrafficStats();
sendEmptyMessageDelayed(MSG_START, SAMPLE_TIME);
break;
default:
throw new IllegalArgumentException("Unknown what=" + msg.what);
}
}
void startSamplingThread() {
sendEmptyMessage(SamplingHandler.MSG_START);
}
void stopSamplingThread() {
removeMessages(SamplingHandler.MSG_START);
}
}
public enum TrafficType {
MOBILE,
ALL
}
}
ITrafficSpeedListener.java
public interface ITrafficSpeedListener {
void onTrafficSpeedMeasured(double upStream, double downStream);
}
Utils.java
import java.util.Locale;
public class Utils {
private static final long B = 1;
private static final long KB = B * 1024;
private static final long MB = KB * 1024;
private static final long GB = MB * 1024;
public static String parseSpeed(double bytes, boolean inBits) {
double value = inBits ? bytes * 8 : bytes;
if (value < KB) {
return String.format(Locale.getDefault(), "%.1f " + (inBits ? "b" : "B") + "/s", value);
} else if (value < MB) {
return String.format(Locale.getDefault(), "%.1f K" + (inBits ? "b" : "B") + "/s", value / KB);
} else if (value < GB) {
return String.format(Locale.getDefault(), "%.1f M" + (inBits ? "b" : "B") + "/s", value / MB);
} else {
return String.format(Locale.getDefault(), "%.2f G" + (inBits ? "b" : "B") + "/s", value / GB);
}
}
}
Visual Result
What you're trying to determine is the transfer rate of the bytes being uploaded over your HTTP Client. Obviously, this depends on the HTTP client you're using.
There's no out-of-the-box solution which applies to all HTTP clients used on Android. The Android SDK does not provide any methods for you to determine the transfer rate of a particular upload.
Fortunately, you're using OkHttp, and there is a relatively straightforward way to do this: implement a custom RequestBody and observe the bytes being written to the sink while the request is in flight.
There's a 'recipe' for doing this on the OkHttp Github:
https://github.com/square/okhttp/blob/master/samples/guide/src/main/java/okhttp3/recipes/Progress.java
You could also refer to this StackOverflow question dealing with the exact same topic:
Tracking progress of multipart file upload using OKHTTP
Another here:
OKHTTP 3 Tracking Multipart upload progress
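For reference, here is a minimal, untested sketch of the counting-RequestBody pattern used in that recipe. The Listener interface and class name are placeholders for this example; the idea is simply to wrap the real body, count the bytes as they are written, and derive a rate by sampling that count over time.
import java.io.IOException;
import okhttp3.MediaType;
import okhttp3.RequestBody;
import okio.Buffer;
import okio.BufferedSink;
import okio.ForwardingSink;
import okio.Okio;

public class ProgressRequestBody extends RequestBody {
    public interface Listener {
        void onUpload(long bytesWritten, long contentLength);
    }

    private final RequestBody delegate;
    private final Listener listener;

    public ProgressRequestBody(RequestBody delegate, Listener listener) {
        this.delegate = delegate;
        this.listener = listener;
    }

    @Override
    public MediaType contentType() {
        return delegate.contentType();
    }

    @Override
    public long contentLength() throws IOException {
        return delegate.contentLength();
    }

    @Override
    public void writeTo(BufferedSink sink) throws IOException {
        // Wrap the sink so every chunk written by the delegate body is counted.
        ForwardingSink counting = new ForwardingSink(sink) {
            long total = 0;

            @Override
            public void write(Buffer source, long byteCount) throws IOException {
                super.write(source, byteCount);
                total += byteCount;
                listener.onUpload(total, contentLength());
            }
        };
        BufferedSink buffered = Okio.buffer(counting);
        delegate.writeTo(buffered);
        buffered.flush();
    }
}
To get a transfer rate, sample bytesWritten periodically and divide the delta by the elapsed time.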
I am talking in the context of your app, since that makes it easier to capture the real-time speed of your uploaded data. You don't need any extra libraries or SDK APIs.
You are presumably uploading the data in chunks to the server. So
a) You know the data size of each packet
b) You know the start time before sending the packet / before sending multiple packets
c) You know the end time of those packets from the server response, e.g. status 200
With that you have all parameters to calculate the upload speed
double uploadSpeed = packet.size / ((endTime - startTime) / 1000.0); // bytes per second, with times in milliseconds
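As a concrete sketch, assuming uploadChunk() is a placeholder for whatever call sends one chunk and returns once the server has acknowledged it:
long startTime = System.currentTimeMillis();
uploadChunk(chunk);                               // placeholder: blocks until the server responds (e.g. HTTP 200)
long endTime = System.currentTimeMillis();

double seconds = (endTime - startTime) / 1000.0;
double bytesPerSecond = chunk.length / seconds;   // chunk is assumed to be a byte[]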
EDIT:
Since you are using multipart uploads with OkHttp, you can monitor the number of bytes uploaded: Tracking progress of multipart file upload using OKHTTP. You would replace the packet size with the currently uploaded amount, and sample it at a fixed interval of a few seconds.
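A small sketch of that sampling idea (all names are placeholders): feed it the cumulative uploaded byte count from the progress callback, and it returns the rate since the last sample.
public class UploadSpeedSampler {
    private long lastBytes = 0;
    private long lastTimeMs = System.currentTimeMillis();

    // Call periodically (e.g. every few seconds) with the cumulative uploaded byte count
    // reported by the progress listener; returns bytes per second since the previous call.
    public double sample(long totalBytesUploaded) {
        long now = System.currentTimeMillis();
        double seconds = (now - lastTimeMs) / 1000.0;
        long delta = totalBytesUploaded - lastBytes;
        lastBytes = totalBytesUploaded;
        lastTimeMs = now;
        return seconds > 0 ? delta / seconds : 0;
    }
}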
Related
I would like to use the PocketSphinx library in my Android project, but I'm failing: nothing happens. It doesn't work and I don't get any errors.
This is how I tried it:
Added pocketsphinx-android-5prealpha-release.aar to /app/libs
Added assets.xml to /app
Added the following to /app/build.gradle:
ant.importBuild 'assets.xml'
preBuild.dependsOn(list, checksum)
clean.dependsOn(clean_assets)
Added sync (with all sub-files) into /app/assets
Cloned the following repos into my root-directory:
git clone https://github.com/cmusphinx/sphinxbase
git clone https://github.com/cmusphinx/pocketsphinx
git clone https://github.com/cmusphinx/pocketsphinx-android
Executed gradle build
This is what my code looks like:
import android.app.Service;
import android.content.Intent;
import android.os.AsyncTask;
import android.os.IBinder;
import android.util.Log;
import androidx.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.lang.ref.WeakReference;
import java.util.HashMap;
import ch.yourclick.kitt.R;
import edu.cmu.pocketsphinx.Assets;
import edu.cmu.pocketsphinx.Hypothesis;
import edu.cmu.pocketsphinx.RecognitionListener;
import edu.cmu.pocketsphinx.SpeechRecognizer;
import edu.cmu.pocketsphinx.SpeechRecognizerSetup;
public class SttService extends Service implements RecognitionListener {
private static final String TAG = "SstService";
/* Named searches allow to quickly reconfigure the decoder */
private static final String KWS_SEARCH = "wakeup";
private static final String FORECAST_SEARCH = "forecast";
private static final String DIGITS_SEARCH = "digits";
private static final String PHONE_SEARCH = "phones";
private static final String MENU_SEARCH = "menu";
/* Keyword we are looking for to activate menu */
private static final String KEYPHRASE = "oh mighty computer";
/* Used to handle permission request */
private static final int PERMISSIONS_REQUEST_RECORD_AUDIO = 1;
private SpeechRecognizer recognizer;
private HashMap<String, Integer> captions;
public SttService() {
// Prepare the data for UI
captions = new HashMap<>();
captions.put(KWS_SEARCH, R.string.kws_caption);
captions.put(MENU_SEARCH, R.string.menu_caption);
captions.put(DIGITS_SEARCH, R.string.digits_caption);
captions.put(PHONE_SEARCH, R.string.phone_caption);
captions.put(FORECAST_SEARCH, R.string.forecast_caption);
Log.e(TAG, "SttService: Preparing the recognition");
// Recognizer initialization is a time-consuming operation and it involves IO,
// so we execute it in async task
new SetupTask(this).execute();
}
private static class SetupTask extends AsyncTask<Void, Void, Exception> {
WeakReference<SttService> activityReference;
SetupTask(SttService activity) {
this.activityReference = new WeakReference<>(activity);
}
@Override
protected Exception doInBackground(Void... params) {
try {
Assets assets = new Assets(activityReference.get());
File assetDir = assets.syncAssets();
activityReference.get().setupRecognizer(assetDir);
} catch (IOException e) {
return e;
}
return null;
}
@Override
protected void onPostExecute(Exception result) {
if (result != null) {
Log.e(TAG, "onPostExecute: Failed to init recognizer " + result);
} else {
activityReference.get().switchSearch(KWS_SEARCH);
}
}
}
@Override
public void onDestroy() {
super.onDestroy();
if (recognizer != null) {
recognizer.cancel();
recognizer.shutdown();
}
}
@Nullable
@Override
public IBinder onBind(Intent intent) {
return null;
}
/**
* In partial result we get quick updates about current hypothesis. In
* keyword spotting mode we can react here, in other modes we need to wait
* for final result in onResult.
*/
@Override
public void onPartialResult(Hypothesis hypothesis) {
if (hypothesis == null)
return;
String text = hypothesis.getHypstr();
if (text.equals(KEYPHRASE))
switchSearch(MENU_SEARCH);
else if (text.equals(DIGITS_SEARCH))
switchSearch(DIGITS_SEARCH);
else if (text.equals(PHONE_SEARCH))
switchSearch(PHONE_SEARCH);
else if (text.equals(FORECAST_SEARCH))
switchSearch(FORECAST_SEARCH);
else
Log.e(TAG, "onPartialResult: " + text);
}
/**
* This callback is called when we stop the recognizer.
*/
@Override
public void onResult(Hypothesis hypothesis) {
if (hypothesis != null) {
String text = hypothesis.getHypstr();
Log.e(TAG, "onResult: " + text);
}
}
@Override
public void onBeginningOfSpeech() {
}
/**
* We stop recognizer here to get a final result
*/
@Override
public void onEndOfSpeech() {
if (!recognizer.getSearchName().equals(KWS_SEARCH))
switchSearch(KWS_SEARCH);
}
private void switchSearch(String searchName) {
recognizer.stop();
// If we are not spotting, start listening with timeout (10000 ms or 10 seconds).
if (searchName.equals(KWS_SEARCH))
recognizer.startListening(searchName);
else
recognizer.startListening(searchName, 10000);
String caption = getResources().getString(captions.get(searchName));
Log.e(TAG, "switchSearch: "+ caption);
}
private void setupRecognizer(File assetsDir) throws IOException {
// The recognizer can be configured to perform multiple searches
// of different kind and switch between them
recognizer = SpeechRecognizerSetup.defaultSetup()
.setAcousticModel(new File(assetsDir, "en-us-ptm"))
.setDictionary(new File(assetsDir, "cmudict-en-us.dict"))
.setRawLogDir(assetsDir) // To disable logging of raw audio comment out this call (takes a lot of space on the device)
.getRecognizer();
recognizer.addListener(this);
/* In your application you might not need to add all those searches.
They are added here for demonstration. You can leave just one.
*/
// Create keyword-activation search.
recognizer.addKeyphraseSearch(KWS_SEARCH, KEYPHRASE);
// Create grammar-based search for selection between demos
File menuGrammar = new File(assetsDir, "menu.gram");
recognizer.addGrammarSearch(MENU_SEARCH, menuGrammar);
// Create grammar-based search for digit recognition
File digitsGrammar = new File(assetsDir, "digits.gram");
recognizer.addGrammarSearch(DIGITS_SEARCH, digitsGrammar);
// Create language model search
File languageModel = new File(assetsDir, "weather.dmp");
recognizer.addNgramSearch(FORECAST_SEARCH, languageModel);
// Phonetic search
File phoneticModel = new File(assetsDir, "en-phone.dmp");
recognizer.addAllphoneSearch(PHONE_SEARCH, phoneticModel);
}
@Override
public void onError(Exception error) {
Log.e(TAG, "onError: " + error.getMessage());
}
@Override
public void onTimeout() {
switchSearch(KWS_SEARCH);
}
}
My code is almost the same as pocketsphinx-android-demo. The only differences are that I am doing this in a service class instead of an Activity, and I am not asking the user for microphone permission since I already do that in the MainActivity. Well, my code has some warnings but no errors.
When I run my app, I get this message (see the full stack trace):
E/SstService: switchSearch: To start demonstration say "oh mighty computer".
But when I say "oh mighty computer" (or anything else), nothing happens. I don't even get an error. So I have no idea where I am stuck and what I am doing wrong.
If there is someone familiar with that library, any help will be appreciated!
I tried making a synth, and it works and I can play music with it. However, the first synth that I made had delay and you couldn't play fast songs. So I tried again, using the SourceDataLine.flush() method to speed it up. Well, it somewhat fixes it, but the delay is too much. I also tried reducing the sample rate, but the delay is still too much.
Edit: it turns out you can comment out the line keyStateInterface.setFlush(false);
it improves the delay, however you still can't play fast songs.
Here is the code:
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.SourceDataLine;
public class SoundLine implements Runnable{
KeyStateInterface keyStateInterface;
public SoundLine(KeyStateInterface arg){
keyStateInterface=arg;
}
@Override
public void run() {
AudioFormat audioFormat = new AudioFormat(44100,8,1,true,false);
try {
SourceDataLine sourceDataLine = AudioSystem.getSourceDataLine(audioFormat);
sourceDataLine.open(audioFormat);
sourceDataLine.start();
SynthMain synthMain = new SynthMain();
int v = 0;
while (true) {
int bytesAvailable = sourceDataLine.available();
if (bytesAvailable > 0) {
int sampling = 256/(64);
byte[] bytes = new byte[sampling];
for (int i = 0; i < sampling; i++) {
//bytes[i] = (byte) (Math.sin(angle) * 127f);
float t = (float) (synthMain.makeSound((double)v,44100,keyStateInterface)* 127f);
bytes[i] = (byte) (t);
v += 1;
}
if(keyStateInterface.getFlush()){
sourceDataLine.flush();
}
sourceDataLine.write(bytes, 0, sampling);
//if(!keyStateInterface.isCacheKeysSame())sourceDataLine.flush();
//System.out.println(bytesWritten);
} else {
Thread.sleep(1);
}
//System.out.println(bytesAvailable);
//System.out.println();
//if((System.currentTimeMillis()-mil)%50==0)freq+=0.5;
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
public class SynthMain {
double[] noteFrequency = {
466.1637615181,
493.8833012561,
523.2511306012,
554.3652619537,
587.3295358348,
622.2539674442,
659.2551138257,
698.4564628660,
739.9888454233,
783.9908719635,
830.6093951599,
880.0000000000,
932.3275230362,
987.7666025122,
1046.5022612024,
1108.7305239075,
1174.6590716696,
1244.5079348883,
1318.5102276515,
1396.9129257320,
1479.9776908465,
1567.9817439270,
1661.2187903198,
1760.0000000000,
1864.6550460724,
1975.5332050245,
2093.0045224048,
2217.4610478150,
2349.3181433393,
2489.0158697766,
2637.0204553030,
2793.8258514640,
2959.9553816931,
3135.9634878540,
3322.4375806396,
3520.0000000000,
3729.3100921447,
};
boolean[] keys = new boolean[noteFrequency.length];
public double makeSound(double dTime,double SampleRate,KeyStateInterface keyStateInterface){
if(keyStateInterface.getSizeOfMidiKey()>0){
keyStateInterface.setFlush(true);
for(int i=0;i<keyStateInterface.getSizeOfMidiKey();i++) {
KeyRequest keyRequest = keyStateInterface.popMidiKey();
if(keyRequest.getCommand()==-112){
if(keyRequest.getVelocity()>0)keys[keyRequest.getArg1()] = true;
if(keyRequest.getVelocity()<1)keys[keyRequest.getArg1()] = false;
System.out.println(keyRequest.getVelocity());
}
}
}else{
keyStateInterface.setFlush(false);
}
//System.out.println("makeSound");
double a = 0.0;
for(int i=0;i<keys.length;i++){
if(keys[i]){
a+=Oscillate(dTime,noteFrequency[i],(int)SampleRate);
}
}
return a*0.4;
}
public double Oscillate(double dTime,double dFreq,int sampleRate){
double period = (double)sampleRate / dFreq;
return Math.sin(2.0 * Math.PI * (int)dTime / period);
}
}
import java.util.ArrayList;
import java.util.Stack;
public class KeyState implements KeyStateInterface{
boolean isFlush;
ArrayList<KeyRequest> keyRequest = new ArrayList<KeyRequest>();
ArrayList<KeyRequest> midiKeyRequest = new ArrayList<KeyRequest>();
@Override
public void pushKey(int keyCode, boolean press) {
keyRequest.add(new KeyRequest(KeyRequest.KEY,keyCode,press));
}
@Override
public void pushMidiKey(int command, int arg1, int velocity) {
midiKeyRequest.add(new KeyRequest(KeyRequest.MIDI_KEY,command,arg1,velocity));
}
@Override
public KeyRequest popKey() {
// remove and return the oldest pending key request
KeyRequest t = keyRequest.get(0);
keyRequest.remove(0);
return t;
}
@Override
public KeyRequest popMidiKey() {
// remove and return the oldest pending MIDI key request
KeyRequest t = midiKeyRequest.get(0);
midiKeyRequest.remove(0);
return t;
}
@Override
public int getSizeOfKey() {
return keyRequest.size();
}
@Override
public int getSizeOfMidiKey() {
return midiKeyRequest.size();
}
@Override
public boolean getFlush() {
boolean v = isFlush;
isFlush = false;
return v;
}
@Override
public void setFlush(boolean arg) {
isFlush=arg;
}
}
I haven't dug deep into your code, but perhaps the following info will be useful.
The SourceDataLine.write() method uses a blocking queue internally. It will only progress as fast as the data can be processed. So, there is no need to test for available capacity before populating and shipping bytes.
I'd give the SDL thread a priority of 10, since most of its time is spent in a blocked state anyway.
Also, I'd leave the line open and running. I first got that advice from Neil Smith of Praxis Live. There is a cost associated with continually rebuilding it. And it looks to me like you are creating a new SDL write for every 4 bytes of audio data, which would be highly inefficient. I suspect that shipping somewhere in the range of 256 bytes to 8K on a line that is left open would be a better choice, but I don't have hard facts to back up that opinion. Neil wrote about having all the transporting arrays be the same size (e.g., the array of data produced by the synth be the same size as the SDL write).
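A rough sketch of that shape, reusing the SynthMain and KeyStateInterface types from your code; the buffer size is just an example and exception handling is minimal:
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.SourceDataLine;

public class AudioLoop implements Runnable {
    private final SynthMain synth = new SynthMain();   // synth and key state as in the question
    private final KeyStateInterface keys;

    public AudioLoop(KeyStateInterface keys) {
        this.keys = keys;
    }

    @Override
    public void run() {
        AudioFormat format = new AudioFormat(44100, 8, 1, true, false);
        try {
            // Open the line once and leave it running; write() blocks until the
            // line can accept data, which paces the loop without polling available().
            SourceDataLine line = AudioSystem.getSourceDataLine(format);
            line.open(format);
            line.start();
            byte[] buffer = new byte[1024];   // somewhere in the 256..8K range suggested above
            int v = 0;
            while (true) {
                for (int i = 0; i < buffer.length; i++) {
                    buffer[i] = (byte) (synth.makeSound(v++, 44100, keys) * 127f);
                }
                line.write(buffer, 0, buffer.length);
            }
        } catch (LineUnavailableException e) {
            e.printStackTrace();
        }
    }
}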
I've made a real-time theremin with Java, where the latency includes the task of reading the mouse click and position, then sending that to the synth that is generating the audio data. I wouldn't claim that my latency is down to a precision that allows "in the pocket" starts and stops to notes, but it is still pretty good. I suspect further optimization is possible on my end.
I think Neil (mentioned earlier) has had better results. He's spoken of achieving latencies in the range of 5 milliseconds and less, as far back as 2011.
I'm using google/grafika's examples to decode, transform, and encode a video clip back to a file. The transformation is downscaling and translating, done via a shader stored in Texture2dProgram. My main activity is based on CameraCaptureActivity. The catch is that I'm placing two videos on a single texture at the same time, side by side. I would like to delay one of them by a given number of frames. Also note that I don't need a display preview while encoding.
My best idea so far was to change timestamps while advancing through frames. In TextureMovieEncoder, I'm sending information about frames, including the timestamp at which they have to be placed in the resulting video. It takes place in frameAvailable(), where I'm sending information about two frames at once (left and right). The idea was to increase the timestamp of one of them. The problem is that the resulting video is distorted, so I don't know if my approach is feasible. TextureMovieEncoder is posted below.
package com.android.grafika;
import android.graphics.SurfaceTexture;
import android.opengl.EGLContext;
import android.opengl.GLES20;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.util.Log;
import com.android.grafika.gles.EglCore;
import com.android.grafika.gles.FullFrameRect;
import com.android.grafika.gles.Texture2dProgram;
import com.android.grafika.gles.WindowSurface;
import java.io.File;
import java.io.IOException;
import java.lang.ref.WeakReference;
/**
* Encode a movie from frames rendered from an external texture image.
* <p>
* The object wraps an encoder running on a dedicated thread. The various control messages
* may be sent from arbitrary threads (typically the app UI thread). The encoder thread
* manages both sides of the encoder (feeding and draining); the only external input is
* the GL texture.
* <p>
* The design is complicated slightly by the need to create an EGL context that shares state
* with a view that gets restarted if (say) the device orientation changes. When the view
* in question is a GLSurfaceView, we don't have full control over the EGL context creation
* on that side, so we have to bend a bit backwards here.
* <p>
* To use:
* <ul>
* <li>create TextureMovieEncoder object
* <li>create an EncoderConfig
* <li>call TextureMovieEncoder#startRecording() with the config
* <li>call TextureMovieEncoder#setTextureId() with the texture object that receives frames
* <li>for each frame, after latching it with SurfaceTexture#updateTexImage(),
* call TextureMovieEncoder#frameAvailable().
* </ul>
*
* TODOO: tweak the API (esp. textureId) so it's less awkward for simple use cases.
*/
public class TextureMovieEncoder implements Runnable {
private static final String TAG = MainActivity.TAG;
private static final boolean VERBOSE = false;
private static final long timestampCorrection = 1000000000;
private long timestampCorected;
private static final int MSG_START_RECORDING = 0;
private static final int MSG_STOP_RECORDING = 1;
private static final int MSG_FRAME_AVAILABLE = 2;
private static final int MSG_SET_TEXTURE_ID = 3;
private static final int MSG_UPDATE_SHARED_CONTEXT = 4;
private static final int MSG_QUIT = 5;
private boolean measure_started = false;
private long startTime = -1;
private int cycle = 0;
private long handleFrameTime = 0;
private long last_timestamp = -1;
private float [] transform;
private long last_orig_timestamp = -1;
public long getFrame() {
return frame;
}
private long frame = 0;
private long average_diff = 0;
private long step = 40000000;
private long actTimestamp = 0;
private boolean shouldStop = false;
public void setmSpeedCallback(SpeedControlCallback mSpeedCallback) {
this.mSpeedCallback = mSpeedCallback;
}
private SpeedControlCallback mSpeedCallback;
// ----- accessed exclusively by encoder thread -----
private WindowSurface mInputWindowSurface;
private EglCore mEglCore;
private FullFrameRect mFullScreen;
private int mTextureId;
private VideoEncoderCore mVideoEncoder;
// ----- accessed by multiple threads -----
private volatile EncoderHandler mHandler;
private Object mReadyFence = new Object(); // guards ready/running
private boolean mReady;
private boolean mRunning;
/**
* Encoder configuration.
* <p>
* Object is immutable, which means we can safely pass it between threads without
* explicit synchronization (and don't need to worry about it getting tweaked out from
* under us).
* <p>
* TODO: make frame rate and iframe interval configurable? Maybe use builder pattern
* with reasonable defaults for those and bit rate.
*/
public static class EncoderConfig {
final File mOutputFile;
final int mWidth;
final int mHeight;
final int mBitRate;
final EGLContext mEglContext;
public EncoderConfig(File outputFile, int width, int height, int bitRate,
EGLContext sharedEglContext) {
mOutputFile = outputFile;
mWidth = width;
mHeight = height;
mBitRate = bitRate;
mEglContext = sharedEglContext;
}
@Override
public String toString() {
return "EncoderConfig: " + mWidth + "x" + mHeight + " #" + mBitRate +
" to '" + mOutputFile.toString() + "' ctxt=" + mEglContext;
}
}
/**
* Tells the video recorder to start recording. (Call from non-encoder thread.)
* <p>
* Creates a new thread, which will create an encoder using the provided configuration.
* <p>
* Returns after the recorder thread has started and is ready to accept Messages. The
* encoder may not yet be fully configured.
*/
public void startRecording(EncoderConfig config) {
Log.d(TAG, "Encoder: startRecording()");
synchronized (mReadyFence) {
if (mRunning) {
Log.w(TAG, "Encoder thread already running");
return;
}
mRunning = true;
new Thread(this, "TextureMovieEncoder").start();
while (!mReady) {
try {
mReadyFence.wait();
} catch (InterruptedException ie) {
// ignore
}
}
}
mHandler.sendMessage(mHandler.obtainMessage(MSG_START_RECORDING, config));
}
/**
* Tells the video recorder to stop recording. (Call from non-encoder thread.)
* <p>
* Returns immediately; the encoder/muxer may not yet be finished creating the movie.
* <p>
* TODO: have the encoder thread invoke a callback on the UI thread just before it shuts down
* so we can provide reasonable status UI (and let the caller know that movie encoding
* has completed).
*/
public void stopRecording() {
//mHandler.sendMessage(mHandler.obtainMessage(MSG_STOP_RECORDING));
//mHandler.sendMessage(mHandler.obtainMessage(MSG_QUIT));
// We don't know when these will actually finish (or even start). We don't want to
// delay the UI thread though, so we return immediately.
shouldStop = true;
Log.d(TAG, "Shout down flag set up.");
}
/**
* Returns true if recording has been started.
*/
public boolean isRecording() {
synchronized (mReadyFence) {
return mRunning;
}
}
/**
* Tells the video recorder to refresh its EGL surface. (Call from non-encoder thread.)
*/
public void updateSharedContext(EGLContext sharedContext) {
mHandler.sendMessage(mHandler.obtainMessage(MSG_UPDATE_SHARED_CONTEXT, sharedContext));
}
/**
* Tells the video recorder that a new frame is available. (Call from non-encoder thread.)
* <p>
* This function sends a message and returns immediately. This isn't sufficient -- we
* don't want the caller to latch a new frame until we're done with this one -- but we
* can get away with it so long as the input frame rate is reasonable and the encoder
* thread doesn't stall.
* <p>
* TODO: either block here until the texture has been rendered onto the encoder surface,
* or have a separate "block if still busy" method that the caller can execute immediately
* before it calls updateTexImage(). The latter is preferred because we don't want to
* stall the caller while this thread does work.
*/
public void frameAvailable(SurfaceTexture st) {
synchronized (mReadyFence) {
if (!mReady) {
return;
}
}
transform = new float[16]; // TODOO - avoid alloc every frame
st.getTransformMatrix(transform);
long timestamp = st.getTimestamp();
// if first frame
if (last_timestamp < 0) {
if (!measure_started) {
startTime = System.currentTimeMillis();
measure_started = true;
}
last_timestamp = timestamp;
last_orig_timestamp = timestamp;
}
else {
// HARDCODED FRAME NUMBER :(
// if playback finished or frame number reached
if ((frame == 200) || shouldStop) {
if (measure_started) {
long stopTime = System.currentTimeMillis();
long elapsedTime = stopTime - startTime;
Log.d(TAG, "Rendering time: " + (double)elapsedTime * 0.001 + "[s]");
Log.d(TAG, "HandlingFrame time: " + (double)(stopTime - handleFrameTime) * 0.001 + "[s]");
measure_started = false;
}
mHandler.sendMessage(mHandler.obtainMessage(MSG_STOP_RECORDING));
mHandler.sendMessage(mHandler.obtainMessage(MSG_QUIT));
return;
}
else if (timestamp == 0) {
// Seeing this after device is toggled off/on with power button. The
// first frame back has a zero timestamp.
//
// MPEG4Writer thinks this is cause to abort() in native code, so it's very
// important that we just ignore the frame.
Log.w(TAG, "HEY: got SurfaceTexture with timestamp of zero");
return;
}
// this is workaround for duplicated timestamp
// might cause troubles with some videos
else if ((timestamp == last_orig_timestamp)) {
return;
}
else {
frame++;
mHandler.sendMessage(mHandler.obtainMessage(MSG_FRAME_AVAILABLE,
(int) (actTimestamp >> 32), (int) actTimestamp, transform));
timestampCorected = actTimestamp + timestampCorrection;
mHandler.sendMessage(mHandler.obtainMessage(MSG_FRAME_AVAILABLE,
(int) (timestampCorected >> 32), (int) timestampCorected, transform));
actTimestamp += step;
}
last_orig_timestamp = timestamp;
}
}
/**
* Calculates the 'average' difference between frames.
* The result is based on the first 50 frames.
* Should be called in frameAvailable().
*
* @param timestamp actual frame timestamp
*/
private void calcAndShowAverageDiff(long timestamp) {
if ((frame < 50) && (frame > 0)) {
average_diff += timestamp - last_timestamp;
last_timestamp = timestamp;
}
if (frame == 50) {
average_diff /= frame;
Log.d(TAG, "Average timestamp difference: " + Long.toString(average_diff));
}
}
/**
* Tells the video recorder what texture name to use. This is the external texture that
* we're receiving camera previews in. (Call from non-encoder thread.)
* <p>
* TODOO: do something less clumsy
*/
public void setTextureId(int id) {
synchronized (mReadyFence) {
if (!mReady) {
return;
}
}
mHandler.sendMessage(mHandler.obtainMessage(MSG_SET_TEXTURE_ID, id, 0, null));
}
/**
* Encoder thread entry point. Establishes Looper/Handler and waits for messages.
* <p>
* @see java.lang.Thread#run()
*/
@Override
public void run() {
// Establish a Looper for this thread, and define a Handler for it.
Looper.prepare();
synchronized (mReadyFence) {
mHandler = new EncoderHandler(this);
mReady = true;
mReadyFence.notify();
}
Looper.loop();
Log.d(TAG, "Encoder thread exiting");
synchronized (mReadyFence) {
mReady = mRunning = false;
mHandler = null;
}
}
/**
* Handles encoder state change requests. The handler is created on the encoder thread.
*/
private static class EncoderHandler extends Handler {
private WeakReference<TextureMovieEncoder> mWeakEncoder;
public EncoderHandler(TextureMovieEncoder encoder) {
mWeakEncoder = new WeakReference<TextureMovieEncoder>(encoder);
}
@Override // runs on encoder thread
public void handleMessage(Message inputMessage) {
int what = inputMessage.what;
Object obj = inputMessage.obj;
TextureMovieEncoder encoder = mWeakEncoder.get();
if (encoder == null) {
Log.w(TAG, "EncoderHandler.handleMessage: encoder is null");
return;
}
switch (what) {
case MSG_START_RECORDING:
encoder.handleStartRecording((EncoderConfig) obj);
break;
case MSG_STOP_RECORDING:
encoder.handleStopRecording();
break;
case MSG_FRAME_AVAILABLE:
long timestamp = (((long) inputMessage.arg1) << 32) |
(((long) inputMessage.arg2) & 0xffffffffL);
encoder.handleFrameAvailable((float[]) obj, timestamp);
break;
case MSG_SET_TEXTURE_ID:
encoder.handleSetTexture(inputMessage.arg1);
break;
case MSG_UPDATE_SHARED_CONTEXT:
encoder.handleUpdateSharedContext((EGLContext) inputMessage.obj);
break;
case MSG_QUIT:
Looper.myLooper().quit();
break;
default:
throw new RuntimeException("Unhandled msg what=" + what);
}
}
}
/**
* Starts recording.
*/
private void handleStartRecording(EncoderConfig config) {
Log.d(TAG, "handleStartRecording " + config);
prepareEncoder(config.mEglContext, config.mWidth, config.mHeight, config.mBitRate,
config.mOutputFile);
}
/**
* Handles notification of an available frame.
* <p>
* The texture is rendered onto the encoder's input surface, along with a moving
* box (just because we can).
* <p>
* @param transform The texture transform, from SurfaceTexture.
* @param timestampNanos The frame's timestamp, from SurfaceTexture.
*/
private void handleFrameAvailable(float[] transform, long timestampNanos) {
if (VERBOSE) Log.d(TAG, "handleFrameAvailable tr=" + transform);
if (cycle == 1) {
mVideoEncoder.drainEncoder(false);
mFullScreen.drawFrame(mTextureId, transform, 1.0f);
}
else {
mFullScreen.drawFrame(mTextureId, transform, -1.0f);
}
mInputWindowSurface.setPresentationTime(timestampNanos);
mInputWindowSurface.swapBuffers();
if (cycle == 1) {
mSpeedCallback.setCanRelease(true);
cycle = 0;
} else
cycle++;
}
/**
* Handles a request to stop encoding.
*/
private void handleStopRecording() {
Log.d(TAG, "handleStopRecording");
mVideoEncoder.drainEncoder(true);
releaseEncoder();
}
/**
* Sets the texture name that SurfaceTexture will use when frames are received.
*/
private void handleSetTexture(int id) {
//Log.d(TAG, "handleSetTexture " + id);
mTextureId = id;
}
/**
* Tears down the EGL surface and context we've been using to feed the MediaCodec input
* surface, and replaces it with a new one that shares with the new context.
* <p>
* This is useful if the old context we were sharing with went away (maybe a GLSurfaceView
* that got torn down) and we need to hook up with the new one.
*/
private void handleUpdateSharedContext(EGLContext newSharedContext) {
Log.d(TAG, "handleUpdatedSharedContext " + newSharedContext);
// Release the EGLSurface and EGLContext.
mInputWindowSurface.releaseEglSurface();
mFullScreen.release(false);
mEglCore.release();
// Create a new EGLContext and recreate the window surface.
mEglCore = new EglCore(newSharedContext, EglCore.FLAG_RECORDABLE);
mInputWindowSurface.recreate(mEglCore);
mInputWindowSurface.makeCurrent();
// Create new programs and such for the new context.
mFullScreen = new FullFrameRect(
new Texture2dProgram(Texture2dProgram.ProgramType.TEXTURE_SBS));
}
private void prepareEncoder(EGLContext sharedContext, int width, int height, int bitRate,
File outputFile) {
try {
mVideoEncoder = new VideoEncoderCore(width, height, bitRate, outputFile);
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
mEglCore = new EglCore(sharedContext, EglCore.FLAG_RECORDABLE);
mInputWindowSurface = new WindowSurface(mEglCore, mVideoEncoder.getInputSurface(), true);
mInputWindowSurface.makeCurrent();
mFullScreen = new FullFrameRect(
new Texture2dProgram(Texture2dProgram.ProgramType.TEXTURE_SBS));
}
private void releaseEncoder() {
mVideoEncoder.release();
if (mInputWindowSurface != null) {
mInputWindowSurface.release();
mInputWindowSurface = null;
}
if (mFullScreen != null) {
mFullScreen.release(false);
mFullScreen = null;
}
if (mEglCore != null) {
mEglCore.release();
mEglCore = null;
}
}
/**
* Draws a box, with position offset.
*/
private void drawBox(int posn) {
final int width = mInputWindowSurface.getWidth();
int xpos = (posn * 4) % (width - 50);
GLES20.glEnable(GLES20.GL_SCISSOR_TEST);
GLES20.glScissor(xpos, 0, 100, 100);
GLES20.glClearColor(1.0f, 0.0f, 1.0f, 1.0f);
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
GLES20.glDisable(GLES20.GL_SCISSOR_TEST);
}
}
Is my idea viable? Or is there better/correct way to delay one of the videos?
It appears that my first idea with switching timestamps was invalid.
Following Fadden's suggestion, I've successfully created the delay by using two decoders. I modified the code of grafika's MoviePlayer so it contains two extractor-decoder pairs. The decoders have separate output textures, and the extraction loops run in separate threads. I thought this approach would cause a heavy drop in performance, but it didn't; performance is still acceptable for my needs.
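For illustration, here is a stripped-down sketch of that structure. It is not the actual MoviePlayer modification; file paths, surfaces, and the feed/drain loop are placeholders, and error handling is minimal.
import android.media.MediaCodec;
import android.media.MediaExtractor;
import android.media.MediaFormat;
import android.view.Surface;

public class DualDecoder {
    // Each clip gets its own extractor/decoder pair rendering to its own output Surface
    // (e.g. built from the SurfaceTexture that feeds the side-by-side shader).
    public void start(final String leftPath, final String rightPath,
                      final Surface leftSurface, final Surface rightSurface) {
        new Thread(new Runnable() {
            @Override public void run() { decodeLoop(leftPath, leftSurface); }
        }).start();
        new Thread(new Runnable() {
            @Override public void run() { decodeLoop(rightPath, rightSurface); }
        }).start();
    }

    private void decodeLoop(String path, Surface surface) {
        try {
            MediaExtractor extractor = new MediaExtractor();
            extractor.setDataSource(path);
            int track = findVideoTrack(extractor);
            extractor.selectTrack(track);
            MediaFormat format = extractor.getTrackFormat(track);
            MediaCodec decoder = MediaCodec.createDecoderByType(format.getString(MediaFormat.KEY_MIME));
            decoder.configure(format, surface, null, 0);
            decoder.start();
            // Placeholder: feed input buffers from the extractor and drain output buffers,
            // releasing them with releaseOutputBuffer(index, true) to render to the surface.
            // Delaying one clip is then a matter of starting (or feeding) this loop later.
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    private int findVideoTrack(MediaExtractor extractor) {
        for (int i = 0; i < extractor.getTrackCount(); i++) {
            String mime = extractor.getTrackFormat(i).getString(MediaFormat.KEY_MIME);
            if (mime != null && mime.startsWith("video/")) return i;
        }
        throw new RuntimeException("No video track found");
    }
}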
I'm making a simple libGDX game. I have lag (the game stops for 0.5 sec) when I use the sound.play() method.
Edit: this bug appears on Android 4.0; on 2.3 everything runs fine.
I play the sound with this code:
if(CollisionDetector.detect(touchArea, hoodie.getTouchArea())){
GameScreen.totalScore++;
setPosition();
System.out.println("played");
Assets.eatSound.play();
}
And I use this method to load the sound:
static long waitForLoadCompleted(Sound sound,float volume) {
long id;
while ((id = sound.play(volume)) == -1) {
long t = TimeUtils.nanoTime();
while (TimeUtils.nanoTime() - t < 100000000);
}
return id;
}
What am I doing wrong? Or what can I do to fix this lag?
Edit:
I have just tried calling sound.play() from a thread, but it also doesn't work:
new Thread(new Runnable() {
@Override
public void run() {
// do something important here, asynchronously to the rendering thread
// post a Runnable to the rendering thread that processes the result
Gdx.app.postRunnable(new Runnable() {
@Override
public void run() {
// process the result, e.g. add it to an Array<Result> field of the ApplicationListener.
eatSound2.play();
}
});
}
}).start();
My Sound asset class looks like this, but I still have lag with the sound.
package com.redHoodie;
import com.badlogic.gdx.Gdx;
import com.badlogic.gdx.audio.Sound;
import com.badlogic.gdx.utils.Disposable;
public class SoundEffect implements Disposable {
private static final int WaitLimit = 1000;
private static final int ThrottleMs = 100;
Sound eatSound;
Sound endSound;
public SoundEffect(){
eatSound = Gdx.audio.newSound(Gdx.files.internal("eatSound.ogg"));
endSound = Gdx.audio.newSound(Gdx.files.internal("sadend.wav"));
checkedPlay(eatSound);
}
protected long checkedPlay (Sound sound) {
return checkedPlay(sound, 1);
}
protected long checkedLoop (Sound sound) {
return checkedLoop(sound, 1);
}
protected long checkedPlay (Sound sound, float volume) {
int waitCounter = 0;
long soundId = 0;
boolean ready = false;
while (!ready && waitCounter < WaitLimit) {
soundId = sound.play(volume);
ready = (soundId != 0);
waitCounter++;
try {
Thread.sleep(ThrottleMs);
} catch (InterruptedException e) {
}
}
return soundId;
}
protected long checkedLoop (Sound sound, float volume) {
int waitCounter = 0;
long soundId = 0;
boolean ready = false;
while (!ready && waitCounter < WaitLimit) {
soundId = sound.loop(volume);
ready = (soundId != 0);
waitCounter++;
try {
Thread.sleep(ThrottleMs);
} catch (InterruptedException e) {
}
}
return soundId;
}
@Override
public void dispose() {
// TODO Auto-generated method stub
}
}
I had the same problem. It was because my .mp3 file was too short. Mine was 0.167 seconds long. I added 1.2 seconds of silence with Audacity, and it fixed the problem.
Lately I ran into the same issue (except I'm using wav instead of mp3 files). My app was lagging when I played many (like 10 or 20) sounds at the same time (in the same render method). I "solved" this by playing only one sound at a time. Generally it's hard to distinguish many sounds playing at the same time anyway. Also, on desktop it works fine, but the problem appears on Android (9 or 8).
If someone is still facing this issue like me, there is an alternative solution with one limitation: there is no option to use the sound id.
You can change the default libGDX behavior and use AsynchronousAndroidAudio by overriding this method in your AndroidLauncher class:
@Override
public AndroidAudio createAudio(Context context, AndroidApplicationConfiguration config) {
return new AsynchronousAndroidAudio(context, config);
}
See the official documentation for more info and also the pull request
Also, if for any reason you need the sound id, you can take this implementation as an example and find a workaround for your project.
The fix is available starting from libGDX 1.9.12.
I have to upload data to a server. I am using a service that runs in the same process as my application. Should I use a separate thread for the upload process, or should I use an AsyncTask to upload the data to the server?
More specifically, can I use an AsyncTask inside a service class? And should I? This service should always be running in memory in order to send data to the server every 5 seconds.
There is no problem with using an AsyncTask in a service.
NOTE / FIX: I was wrong when I said the service runs in the background; that only applies to IntentService. As noted in the comments and in the documentation, a service does not create its own thread:
Caution: A service runs in the main thread of its hosting process—the service does not create its own thread and does not run in a separate process (unless you specify otherwise). This means that, if your service is going to do any CPU intensive work or blocking operations (such as MP3 playback or networking), you should create a new thread within the service to do that work.
That means you must use an AsyncTask (or another thread in any case) to perform your upload task.
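For instance, here is a minimal sketch of that arrangement. UploadService, UploadTask, and the upload body itself are placeholders; the task is simply re-scheduled every 5 seconds with a Handler on the main thread.
import android.app.Service;
import android.content.Intent;
import android.os.AsyncTask;
import android.os.Handler;
import android.os.IBinder;

public class UploadService extends Service {
    private final Handler handler = new Handler();
    private final Runnable tick = new Runnable() {
        @Override
        public void run() {
            new UploadTask().execute();
            handler.postDelayed(this, 5000);   // run again in 5 seconds
        }
    };

    @Override
    public int onStartCommand(Intent intent, int flags, int startId) {
        handler.post(tick);
        return START_STICKY;
    }

    @Override
    public void onDestroy() {
        handler.removeCallbacks(tick);
        super.onDestroy();
    }

    @Override
    public IBinder onBind(Intent intent) {
        return null;
    }

    private static class UploadTask extends AsyncTask<Void, Void, Void> {
        @Override
        protected Void doInBackground(Void... params) {
            // placeholder: open the connection and upload the pending data here
            return null;
        }
    }
}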
Yes you can; the code below will run every 5 seconds. Use your regular connection code for the sending part.
import android.app.IntentService;
import android.content.Intent;
import java.util.Timer;
import java.util.TimerTask;
public class AsyncTaskInServiceService extends IntentService {
public AsyncTaskInServiceService() {
super("AsyncTaskInServiceService");
}
@Override
public void onCreate() {
// TODO Auto-generated method stub
super.onCreate();
}
@Override
protected void onHandleIntent(Intent intent) {
final Timer t = new Timer();
t.scheduleAtFixedRate(new TimerTask() {
@Override
public void run() {
// Connect to the server and upload the data here
try {
// upload / JSON handling code goes here
} catch (Exception e) {
e.printStackTrace();
}
}
}, 0, 5000);
}
}
Use AsyncTask in a service in android
package com.emergingandroidtech.Services;
import android.app.Service;
import android.content.Intent;
import android.os.IBinder;
import android.util.Log;
import android.widget.Toast;
import java.net.MalformedURLException;
import java.net.URL;
import android.os.AsyncTask;
public class MyService extends Service
{
@Override
public IBinder onBind(Intent arg0)
{
return null;
}
@Override
public int onStartCommand(Intent intent,int flags,int startId)
{
//We want this service to continue running until it is explicitly
//stopped,so return sticky.
Toast.makeText(this, "ServiceStarted", Toast.LENGTH_LONG).show();
try
{
new DoBackgroundTask().execute(
new URL("http://www.google.com/somefiles.pdf"),
new URL("http://emergingandroidtech.blogspot.in"));
}
catch (MalformedURLException e)
{
e.printStackTrace();
}
return START_STICKY;
}
@Override
public void onDestroy()
{
super.onDestroy();
Toast.makeText(this, "ServiceDestroyed", Toast.LENGTH_LONG).show();
}
private int DownloadFile(URL url)
{
try
{
//---simulate taking sometime to download a file---
Thread.sleep(5000);
}
catch(InterruptedException e)
{
e.printStackTrace();
}
//---return an arbitrary number representing
//the size of the file downloaded---
return 100;
}
private class DoBackgroundTask extends AsyncTask<URL, Integer, Long>
{
protected Long doInBackground(URL... urls)
{
int count = urls.length;
long totalBytesDownloaded = 0;
for (int i = 0; i < count; i++)
{
totalBytesDownloaded += DownloadFile(urls[i]);
//---calculate percentage downloaded and
// report its progress---
publishProgress((int) (((i+1) / (float) count) * 100));
}
return totalBytesDownloaded;
}
protected void onProgressUpdate(Integer... progress)
{
Log.d("Downloading files", String.valueOf(progress[0]) + "% downloaded");
Toast.makeText(getBaseContext(), String.valueOf(progress[0]) + "% downloaded", Toast.LENGTH_LONG).show();
}
protected void onPostExecute(Long result)
{
Toast.makeText(getBaseContext(), "Downloaded " + result + " bytes", Toast.LENGTH_LONG).show();
stopSelf();
}
}
}
Try this, it may work.
Thank you.