I have a Java application that I want to turn into an executable jar. I am using JMF in this application, and I can't seem to get the sound files working right...
I create the jar using
jar cvfm jarname.jar manifest.txt *.class *.gif *.wav
So, all the sound files get put inside the jar, and in the code, I am creating the Players using
Player player = Manager.createPlayer(ClassName.class.getResource("song1.wav"));
The jar is on my desktop, and when I attempt to run it, this exception occurs:
javax.media.NoPlayerException: Cannot find a Player for :jar:file:/C:/Users/Pojo/Desktop/jarname.jar!/song1.wav
...It's not getting IOExceptions, so it seems to at least be finding the file itself all right.
Also, before I used the getResource, I used to have it like this:
Player player = Manager.createPlayer(new File("song1.wav").toURL());
and it was playing fine, so I know nothing is wrong with the sound file itself.
The reason I am trying to switch to this method instead of the File method is so that the sound files can be packaged inside the jar itself rather than having to sit alongside it in a directory.
This is a far cry from production code, but this seems to resolve the runtime exceptions (though it's not actually wired up to play anything yet):
import javax.media.Manager;
import javax.media.Player;
import javax.media.protocol.URLDataSource;
// ...
URL url = JmfTest.class.getResource("song1.wav");
System.out.println("url: " + url);
URLDataSource uds = new URLDataSource(url);
uds.connect();
Player player = Manager.createPlayer(uds);
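For completeness, to actually hear output the player would still need to be started; JMF's Player.start() realizes and prefetches as needed:
player.start();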
New solution:
First, a custom DataSource class that returns a SourceStream that implements Seekable is needed:
package com.ziesemer.test;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.net.JarURLConnection;
import java.net.URL;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import javax.media.Duration;
import javax.media.MediaLocator;
import javax.media.Time;
import javax.media.protocol.ContentDescriptor;
import javax.media.protocol.PullDataSource;
import javax.media.protocol.PullSourceStream;
import javax.media.protocol.Seekable;
/**
* @author Mark A. Ziesemer
* <www.ziesemer.com>
*/
public class JarDataSource extends PullDataSource{
protected JarURLConnection conn;
protected ContentDescriptor contentType;
protected JarPullSourceStream[] sources;
protected boolean connected;
public JarDataSource(URL url) throws IOException{
setLocator(new MediaLocator(url));
connected = false;
}
@Override
public PullSourceStream[] getStreams(){
return sources;
}
@Override
public void connect() throws IOException{
conn = (JarURLConnection)getLocator().getURL().openConnection();
conn.connect();
connected = true;
JarFile jf = conn.getJarFile();
JarEntry je = jf.getJarEntry(conn.getEntryName());
String mimeType = conn.getContentType();
if(mimeType == null){
mimeType = ContentDescriptor.CONTENT_UNKNOWN;
}
contentType = new ContentDescriptor(ContentDescriptor.mimeTypeToPackageName(mimeType));
sources = new JarPullSourceStream[1];
sources[0] = new JarPullSourceStream(jf, je, contentType);
}
@Override
public String getContentType(){
return contentType.getContentType();
}
@Override
public void disconnect(){
if(connected){
try{
sources[0].close();
}catch(IOException e){
e.printStackTrace();
}
connected = false;
}
}
@Override
public void start() throws IOException{
// Nothing to do.
}
@Override
public void stop() throws IOException{
// Nothing to do.
}
@Override
public Time getDuration(){
return Duration.DURATION_UNKNOWN;
}
@Override
public Object[] getControls(){
return new Object[0];
}
@Override
public Object getControl(String controlName){
return null;
}
protected class JarPullSourceStream implements PullSourceStream, Seekable, Closeable{
protected final JarFile jarFile;
protected final JarEntry jarEntry;
protected final ContentDescriptor type;
protected InputStream stream;
protected long position;
public JarPullSourceStream(JarFile jarFile, JarEntry jarEntry, ContentDescriptor type) throws IOException{
this.jarFile = jarFile;
this.jarEntry = jarEntry;
this.type = type;
this.stream = jarFile.getInputStream(jarEntry);
}
@Override
public ContentDescriptor getContentDescriptor(){
return type;
}
@Override
public long getContentLength(){
return jarEntry.getSize();
}
@Override
public boolean endOfStream(){
// End of stream once every byte of the entry has been read.
return position >= getContentLength();
}
@Override
public Object[] getControls(){
return new Object[0];
}
@Override
public Object getControl(String controlType){
return null;
}
@Override
public boolean willReadBlock(){
if(endOfStream()){
return true;
}
try{
return stream.available() == 0;
}catch(IOException e){
return true;
}
}
@Override
public int read(byte[] buffer, int offset, int length) throws IOException{
int read = stream.read(buffer, offset, length);
// read() returns -1 at end of stream; only count bytes actually read.
if(read > 0){
position += read;
}
return read;
}
@Override
public long seek(long where){
try{
if(where < position){
stream.close();
stream = jarFile.getInputStream(jarEntry);
position = 0;
}
long skip = where - position;
while(skip > 0){
long skipped = stream.skip(skip);
skip -= skipped;
position += skipped;
}
}catch(IOException ioe){
// Made a best effort.
ioe.printStackTrace();
}
return position;
}
@Override
public long tell(){
return position;
}
@Override
public boolean isRandomAccess(){
return true;
}
@Override
public void close() throws IOException{
try{
stream.close();
}finally{
jarFile.close();
}
}
}
}
Then, the above custom data source is used to create a player, and a ControllerListener is added to cause the player to loop:
package com.ziesemer.test;
import java.net.URL;
import javax.media.ControllerEvent;
import javax.media.ControllerListener;
import javax.media.EndOfMediaEvent;
import javax.media.Manager;
import javax.media.Player;
import javax.media.Time;
/**
* @author Mark A. Ziesemer
* <www.ziesemer.com>
*/
public class JmfTest{
public static void main(String[] args) throws Exception{
URL url = JmfTest.class.getResource("Test.wav");
JarDataSource jds = new JarDataSource(url);
jds.connect();
final Player player = Manager.createPlayer(jds);
player.addControllerListener(new ControllerListener(){
@Override
public void controllerUpdate(ControllerEvent ce){
if(ce instanceof EndOfMediaEvent){
player.setMediaTime(new Time(0));
player.start();
}
}
});
player.start();
}
}
Note that without the custom data source, JMF tries repeatedly to seek back to the beginning - but fails, and eventually gives up. This can be seen by debugging with the same ControllerListener, which will receive several events for each attempt (a sketch of such a listener follows).
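For reference, a sketch of such a debugging listener, which just logs every event the player dispatches:
player.addControllerListener(new ControllerListener(){
@Override
public void controllerUpdate(ControllerEvent ce){
// Without the custom DataSource, the repeated restart attempts
// show up here as a burst of events before JMF gives up.
System.out.println(ce);
}
});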
Or, using the MediaPlayer approach to loop (that you mentioned in my previous answer):
package com.ziesemer.test;
import java.net.URL;
import javax.media.Manager;
import javax.media.Player;
import javax.media.bean.playerbean.MediaPlayer;
/**
* @author Mark A. Ziesemer
* <www.ziesemer.com>
*/
public class JmfTest{
public static void main(String[] args) throws Exception{
URL url = JmfTest.class.getResource("Test.wav");
JarDataSource jds = new JarDataSource(url);
jds.connect();
final Player player = Manager.createPlayer(jds);
MediaPlayer mp = new MediaPlayer();
mp.setPlayer(player);
mp.setPlaybackLoop(true);
mp.start();
}
}
Again, I would not consider this production-ready code (could use some more Javadocs and logging, etc.), but it is tested and working (Java 1.6), and should meet your needs nicely.
Merry Christmas, and happy holidays!
Manager.createPlayer(this.getClass().getResource("/song1.wav"));
That will work if the song1.wav is in the root of a Jar that is on the run-time class-path of the application.
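For illustration, the difference between a relative and an absolute resource path (a leading slash resolves from the jar root rather than from the class's package):
// Relative: resolved against the package of ClassName.
URL relative = ClassName.class.getResource("song1.wav");
// Absolute: resolved from the root of the jar / class-path.
URL absolute = ClassName.class.getResource("/song1.wav");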
Related
Unable to use StreamingFileSink and store incoming events in compressed fashion.
I am trying to use StreamingFileSink to write an unbounded event stream to S3. In the process, I would like to compress the data to make better use of the available storage.
I wrote a compressed string writer by borrowing some code from Flink's SequenceFileWriterFactory. It fails with the exception described below.
If I try to use BucketingSink, it works great.
Using BucketingSink, I approached the compressed string write as below. Again, I borrowed this code from some other pull request.
import org.apache.flink.streaming.connectors.fs.StreamWriterBase;
import org.apache.flink.streaming.connectors.fs.Writer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import java.io.IOException;
public class CompressionStringWriter<T> extends StreamWriterBase<T> implements Writer<T> {
private static final long serialVersionUID = 3231207311080446279L;
private String codecName;
private String separator;
public String getCodecName() {
return codecName;
}
public String getSeparator() {
return separator;
}
private transient CompressionOutputStream compressedOutputStream;
public CompressionStringWriter(String codecName, String separator) {
this.codecName = codecName;
this.separator = separator;
}
public CompressionStringWriter(String codecName) {
this(codecName, System.lineSeparator());
}
protected CompressionStringWriter(CompressionStringWriter<T> other) {
super(other);
this.codecName = other.codecName;
this.separator = other.separator;
}
@Override
public void open(FileSystem fs, Path path) throws IOException {
super.open(fs, path);
Configuration conf = fs.getConf();
CompressionCodecFactory codecFactory = new CompressionCodecFactory(conf);
CompressionCodec codec = codecFactory.getCodecByName(codecName);
if (codec == null) {
throw new RuntimeException("Codec " + codecName + " not found");
}
Compressor compressor = CodecPool.getCompressor(codec, conf);
compressedOutputStream = codec.createOutputStream(getStream(), compressor);
}
@Override
public void close() throws IOException {
if (compressedOutputStream != null) {
compressedOutputStream.close();
compressedOutputStream = null;
} else {
super.close();
}
}
@Override
public void write(Object element) throws IOException {
getStream();
compressedOutputStream.write(element.toString().getBytes());
compressedOutputStream.write(this.separator.getBytes());
}
@Override
public CompressionStringWriter<T> duplicate() {
return new CompressionStringWriter<>(this);
}
}
BucketingSink<DeviceEvent> bucketingSink = new BucketingSink<>("s3://"+ this.bucketName + "/" + this.objectPrefix);
bucketingSink
.setBucketer(new OrgIdBasedBucketAssigner())
.setWriter(new CompressionStringWriter<DeviceEvent>("Gzip", "\n"))
.setPartPrefix("file-")
.setPartSuffix(".gz")
.setBatchSize(1_500_000);
The one with BucketingSink works.
Now my code snippets using StreamingFileSink involves the below set of code.
import org.apache.flink.api.common.serialization.BulkWriter;
import java.io.IOException;
public class CompressedStringBulkWriter<T> implements BulkWriter<T> {
private final CompressedStringWriter compressedStringWriter;
public CompressedStringBulkWriter(final CompressedStringWriter compressedStringWriter) {
this.compressedStringWriter = compressedStringWriter;
}
@Override
public void addElement(T element) throws IOException {
this.compressedStringWriter.write(element);
}
@Override
public void flush() throws IOException {
this.compressedStringWriter.flush();
}
@Override
public void finish() throws IOException {
this.compressedStringWriter.close();
}
}
import org.apache.flink.api.common.serialization.BulkWriter;
import org.apache.flink.core.fs.FSDataOutputStream;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
public class CompressedStringBulkWriterFactory<T> implements BulkWriter.Factory<T> {
private SerializableHadoopConfiguration serializableHadoopConfiguration;
public CompressedStringBulkWriterFactory(final Configuration hadoopConfiguration) {
this.serializableHadoopConfiguration = new SerializableHadoopConfiguration(hadoopConfiguration);
}
@Override
public BulkWriter<T> create(FSDataOutputStream out) throws IOException {
return new CompressedStringBulkWriter(new CompressedStringWriter(out, serializableHadoopConfiguration.get(), "Gzip", "\n"));
}
}
import org.apache.flink.core.fs.FSDataOutputStream;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.fs.hdfs.HadoopFileSystem;
import org.apache.flink.util.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.Serializable;
public class CompressedStringWriter<T> implements Serializable {
private static final Logger LOG = LoggerFactory.getLogger(CompressedStringWriter.class);
private static final long serialVersionUID = 2115292142239557448L;
private String separator;
private transient CompressionOutputStream compressedOutputStream;
public CompressedStringWriter(FSDataOutputStream out, Configuration hadoopConfiguration, String codecName, String separator) {
this.separator = separator;
try {
Preconditions.checkNotNull(hadoopConfiguration, "Unable to determine hadoop configuration using path");
CompressionCodecFactory codecFactory = new CompressionCodecFactory(hadoopConfiguration);
CompressionCodec codec = codecFactory.getCodecByName(codecName);
Preconditions.checkNotNull(codec, "Codec " + codecName + " not found");
LOG.info("The codec name that was loaded from hadoop {}", codec);
Compressor compressor = CodecPool.getCompressor(codec, hadoopConfiguration);
this.compressedOutputStream = codec.createOutputStream(out, compressor);
LOG.info("Setup a compressor for codec {} and compressor {}", codec, compressor);
} catch (IOException ex) {
throw new RuntimeException("Unable to compose a hadoop compressor for the path", ex);
}
}
public void flush() throws IOException {
if (compressedOutputStream != null) {
compressedOutputStream.flush();
}
}
public void close() throws IOException {
if (compressedOutputStream != null) {
compressedOutputStream.close();
compressedOutputStream = null;
}
}
public void write(T element) throws IOException {
compressedOutputStream.write(element.toString().getBytes());
compressedOutputStream.write(this.separator.getBytes());
}
}
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
public class SerializableHadoopConfiguration implements Serializable {
private static final long serialVersionUID = -1960900291123078166L;
private transient Configuration hadoopConfig;
SerializableHadoopConfiguration(Configuration hadoopConfig) {
this.hadoopConfig = hadoopConfig;
}
Configuration get() {
return this.hadoopConfig;
}
// --------------------
private void writeObject(ObjectOutputStream out) throws IOException {
this.hadoopConfig.write(out);
}
private void readObject(ObjectInputStream in) throws IOException {
final Configuration config = new Configuration();
config.readFields(in);
if (this.hadoopConfig == null) {
this.hadoopConfig = config;
}
}
}
My actual Flink job:
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
Properties kinesisConsumerConfig = new Properties();
...
...
DataStream<DeviceEvent> kinesis =
env.addSource(new FlinkKinesisConsumer<>(this.streamName, new DeviceEventSchema(), kinesisConsumerConfig)).name("source")
.setParallelism(16)
.setMaxParallelism(24);
final StreamingFileSink<DeviceEvent> bulkCompressStreamingFileSink = StreamingFileSink.<DeviceEvent>forBulkFormat(
path,
new CompressedStringBulkWriterFactory<>(
BucketingSink.createHadoopFileSystem(
new Path("s3a://"+ this.bucketName + "/" + this.objectPrefix),
null).getConf()))
.withBucketAssigner(new OrgIdBucketAssigner())
.build();
deviceEventDataStream.addSink(bulkCompressStreamingFileSink).name("bulkCompressStreamingFileSink").setParallelism(16);
env.execute();
I expect data to be saved in S3 as multiple files. Unfortunately no files are being created.
In the logs, I see the below exception:
2019-05-15 22:17:20,855 INFO org.apache.flink.runtime.taskmanager.Task - Sink: bulkCompressStreamingFileSink (11/16) (c73684c10bb799a6e0217b6795571e22) switched from RUNNING to FAILED.
java.lang.Exception: Could not perform checkpoint 1 for operator Sink: bulkCompressStreamingFileSink (11/16).
at org.apache.flink.streaming.runtime.tasks.StreamTask.triggerCheckpointOnBarrier(StreamTask.java:595)
at org.apache.flink.streaming.runtime.io.BarrierBuffer.notifyCheckpoint(BarrierBuffer.java:396)
at org.apache.flink.streaming.runtime.io.BarrierBuffer.processBarrier(BarrierBuffer.java:292)
at org.apache.flink.streaming.runtime.io.BarrierBuffer.getNextNonBlocked(BarrierBuffer.java:200)
at org.apache.flink.streaming.runtime.io.StreamInputProcessor.processInput(StreamInputProcessor.java:209)
at org.apache.flink.streaming.runtime.tasks.OneInputStreamTask.run(OneInputStreamTask.java:105)
at org.apache.flink.streaming.runtime.tasks.StreamTask.invoke(StreamTask.java:300)
at org.apache.flink.runtime.taskmanager.Task.run(Task.java:711)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.Exception: Could not complete snapshot 1 for operator Sink: bulkCompressStreamingFileSink (11/16).
at org.apache.flink.streaming.api.operators.AbstractStreamOperator.snapshotState(AbstractStreamOperator.java:422)
at org.apache.flink.streaming.runtime.tasks.StreamTask$CheckpointingOperation.checkpointStreamOperator(StreamTask.java:1113)
at org.apache.flink.streaming.runtime.tasks.StreamTask$CheckpointingOperation.executeCheckpointing(StreamTask.java:1055)
at org.apache.flink.streaming.runtime.tasks.StreamTask.checkpointState(StreamTask.java:729)
at org.apache.flink.streaming.runtime.tasks.StreamTask.performCheckpoint(StreamTask.java:641)
at org.apache.flink.streaming.runtime.tasks.StreamTask.triggerCheckpointOnBarrier(StreamTask.java:586)
... 8 more
Caused by: java.io.IOException: Stream closed.
at org.apache.flink.fs.s3.common.utils.RefCountedFile.requireOpened(RefCountedFile.java:117)
at org.apache.flink.fs.s3.common.utils.RefCountedFile.write(RefCountedFile.java:74)
at org.apache.flink.fs.s3.common.utils.RefCountedBufferingFileStream.flush(RefCountedBufferingFileStream.java:105)
at org.apache.flink.fs.s3.common.writer.S3RecoverableFsDataOutputStream.closeAndUploadPart(S3RecoverableFsDataOutputStream.java:199)
at org.apache.flink.fs.s3.common.writer.S3RecoverableFsDataOutputStream.closeForCommit(S3RecoverableFsDataOutputStream.java:166)
at org.apache.flink.streaming.api.functions.sink.filesystem.PartFileWriter.closeForCommit(PartFileWriter.java:71)
at org.apache.flink.streaming.api.functions.sink.filesystem.BulkPartWriter.closeForCommit(BulkPartWriter.java:63)
at org.apache.flink.streaming.api.functions.sink.filesystem.Bucket.closePartFile(Bucket.java:239)
at org.apache.flink.streaming.api.functions.sink.filesystem.Bucket.prepareBucketForCheckpointing(Bucket.java:280)
at org.apache.flink.streaming.api.functions.sink.filesystem.Bucket.onReceptionOfCheckpoint(Bucket.java:253)
at org.apache.flink.streaming.api.functions.sink.filesystem.Buckets.snapshotActiveBuckets(Buckets.java:244)
at org.apache.flink.streaming.api.functions.sink.filesystem.Buckets.snapshotState(Buckets.java:235)
at org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink.snapshotState(StreamingFileSink.java:347)
at org.apache.flink.streaming.util.functions.StreamingFunctionUtils.trySnapshotFunctionState(StreamingFunctionUtils.java:118)
at org.apache.flink.streaming.util.functions.StreamingFunctionUtils.snapshotFunctionState(StreamingFunctionUtils.java:99)
at org.apache.flink.streaming.api.operators.AbstractUdfStreamOperator.snapshotState(AbstractUdfStreamOperator.java:90)
at org.apache.flink.streaming.api.operators.AbstractStreamOperator.snapshotState(AbstractStreamOperator.java:395)
So I am wondering, what am I missing?
I am using AWS EMR latest (5.23).
In CompressedStringBulkWriter#close(), you are calling the close() method on the CompressionOutputStream, which also closes the underlying stream, i.e. Flink's FSDataOutputStream. That stream has to remain open for checkpointing to be done properly by Flink's internals in order to guarantee a recoverable stream. That is why you are getting:
Caused by: java.io.IOException: Stream closed.
at org.apache.flink.fs.s3.common.utils.RefCountedFile.requireOpened(RefCountedFile.java:117)
at org.apache.flink.fs.s3.common.utils.RefCountedFile.write(RefCountedFile.java:74)
at org.apache.flink.fs.s3.common.utils.RefCountedBufferingFileStream.flush(RefCountedBufferingFileStream.java:105)
at org.apache.flink.fs.s3.common.writer.S3RecoverableFsDataOutputStream.closeAndUploadPart(S3RecoverableFsDataOutputStream.java:199)
at org.apache.flink.fs.s3.common.writer.S3RecoverableFsDataOutputStream.closeForCommit(S3RecoverableFsDataOutputStream.java:166)
So instead of compressedOutputStream.close(), use compressedOutputStream.finish(), which just flushes everything in the buffer to the output stream without closing it. BTW, there is an inbuilt HadoopCompressionBulkWriter available in the latest version of Flink; you can also use that.
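A minimal sketch of that change, assuming a finish() method is added to CompressedStringWriter that delegates to the codec stream (names follow the code above):
// In CompressedStringWriter:
public void finish() throws IOException {
if (compressedOutputStream != null) {
// Writes any pending compressed data and the trailer,
// but leaves Flink's FSDataOutputStream open.
compressedOutputStream.finish();
}
}
// In CompressedStringBulkWriter:
@Override
public void finish() throws IOException {
this.compressedStringWriter.finish();
}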
I tried to create the Xtext calculator DSL as guided in the README in this repo. It is an Xtext DSL with a language server and LSP4E implementation.
I downloaded the repo and opened it in the Eclipse IDE. Under the main project, there are 2 sub-projects: org.xtext.calc.parent (the Xtext project) and org.xtext.calc.lsp4e (the LSP4E implementation project).
In the org.xtext.calc.lsp4e project's src folder there are 3 Java files named: Activator, CalculatorLanguageServer, SocketStreamConnectionProvider. In the latter two, I get an error which I cannot resolve.
Below are the two files:
1.) CalculatorLanguageServer.java
package org.xtext.calc.lsp4e;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import org.eclipse.core.runtime.FileLocator;
import org.eclipse.core.runtime.Platform;
import org.eclipse.lsp4e.server.ProcessStreamConnectionProvider;
import org.eclipse.lsp4e.server.StreamConnectionProvider;
import org.eclipse.lsp4j.jsonrpc.messages.Message;
import org.eclipse.lsp4j.services.LanguageServer;
import org.osgi.framework.Bundle;
public class CalculatorLanguageServer implements StreamConnectionProvider {
private final static boolean SOCKET_MODE = true;
private StreamConnectionProvider delegate;
public CalculatorLanguageServer() {
if (SOCKET_MODE) {
this.delegate = new SocketStreamConnectionProvider(5007) {
};
} else {
List<String> commands = new ArrayList<>();
commands.add("java");
commands.add("-Xdebug");
commands.add("-Xrunjdwp:server=y,transport=dt_socket,address=4001,suspend=n,quiet=y");
commands.add("-jar");
Bundle bundle = Activator.getDefault().getBundle();
URL resource = bundle.getResource("/language-server/calculator-language-server-jar");
try {
commands.add(new File(FileLocator.resolve(resource).toURI()).getAbsolutePath());
} catch (Exception e) {
throw new IllegalStateException(e);
}
this.delegate = new ProcessStreamConnectionProvider(commands, Platform.getLocation().toOSString()) {};
}
}
public void start() throws IOException {
delegate.start();
}
public InputStream getInputStream() {
return delegate.getInputStream();
}
public OutputStream getOutputStream() {
return delegate.getOutputStream();
}
public Object getInitializationOptions(URI rootUri) {
return delegate.getInitializationOptions(rootUri);
}
public void stop() {
delegate.stop();
}
public void handleMessage(Message message, LanguageServer languageServer, URI rootURI) {
delegate.handleMessage(message, languageServer, rootURI);
}
@Override
public String toString() {
return "Calculator Language Server: " + super.toString();
}
}
2.) SocketStreamConnectionProvider.java
package org.xtext.calc.lsp4e;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import org.eclipse.lsp4e.server.StreamConnectionProvider;
public class SocketStreamConnectionProvider implements StreamConnectionProvider {
private int port;
private Socket socket;
private InputStream inputStream;
private OutputStream outputStream;
public SocketStreamConnectionProvider(int port) {
this.port = port;
}
@Override
public void start() throws IOException {
this.socket = new Socket("localhost", port);
inputStream = new BufferedInputStream(socket.getInputStream());
outputStream = new BufferedOutputStream(socket.getOutputStream());
}
@Override
public InputStream getInputStream() {
return inputStream;
}
@Override
public OutputStream getOutputStream() {
return outputStream;
}
@Override
public void stop() {
if (socket != null) {
try {
socket.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
In both these files, I get an error in the class names:
1.) The type CalculatorLanguageServer must implement the inherited abstract method StreamConnectionProvider.getErrorStream()
2.) The type SocketStreamConnectionProvider must implement the inherited abstract method StreamConnectionProvider.getErrorStream()
How do I resolve these errors?
Thanks!
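The error means the LSP4E version in use declares an abstract getErrorStream() on StreamConnectionProvider that both classes still have to implement. A minimal sketch of one way to satisfy it (hedged; whether returning null is acceptable depends on the LSP4E version):
// In CalculatorLanguageServer, delegate like the other methods:
public InputStream getErrorStream() {
return delegate.getErrorStream();
}
// In SocketStreamConnectionProvider: a plain socket has no separate
// error channel, so there is little choice but to return null.
@Override
public InputStream getErrorStream() {
return null;
}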
I need some help. I'm making a program like a file manager, and in it I need to make simultaneous file copies. For that I use a SwingWorker to show the progress of the copies in a JProgressBar, but I need to know how to add more files to copy to a task that is already running with the same destination.
This is my class that extends SwingWorker. In my main program I select some files or folders to copy to one destination. What I need is to be able to add more files to the CopyItem ArrayList while the CopyTask is working.
Please help and sorry about my english.
import java.awt.Dimension;
import java.awt.Toolkit;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
import javax.swing.JDialog;
import javax.swing.JOptionPane;
import javax.swing.JProgressBar;
import javax.swing.SwingWorker;
import xray.XRAYView;
public class CopyTask extends SwingWorker<Void, Integer>
{
ArrayList<CopyItem>copia;
private long totalBytes = 0L;
private long copiedBytes = 0L;
JProgressBar progressAll;
JProgressBar progressCurrent;
boolean override=true;
boolean overrideall=false;
public CopyTask(ArrayList<CopyItem>copia,JProgressBar progressAll,JProgressBar progressCurrent)
{
this.copia=copia;
this.progressAll=progressAll;
this.progressCurrent=progressCurrent;
progressAll.setValue(0);
progressCurrent.setValue(0);
totalBytes=retrieveTotalBytes(copia);
}
public void AgregarCopia(ArrayList<CopyItem>addcopia)throws Exception{
copia.addAll(copia.size(), addcopia);
totalBytes=retrieveTotalBytes(addcopia)+totalBytes;
System.out.println("AL AGREGAR: "+copia.size()+" Tamaño"+totalBytes);
}
public File getDriveDest(){
File dest=new File(copia.get(0).getOrigen().getPath().split("\\\\")[0]); // split() takes a regex, so a literal backslash must be escaped as "\\\\"
return dest;
}
@Override
public Void doInBackground() throws Exception
{
for(CopyItem cop:copia){
File ori=cop.getOrigen();
File des=new File(cop.getDestino().getPath());
if(!des.exists()){
des.mkdirs();
}
if(!overrideall){
override =true;
}
File para=new File(cop.getDestino().getPath()+"\\"+ori.getName());
copyFiles(ori, para);
}
return null;
}
@Override
public void process(List<Integer> chunks)
{
for(int i : chunks)
{
progressCurrent.setValue(i);
}
}
@Override
public void done()
{
setProgress(100);
}
private long retrieveTotalBytes(ArrayList<CopyItem>fich)
{
long size=0;
for(CopyItem cop: fich)
{
size += cop.getOrigen().length();
}
return size;
}
private void copyFiles(File sourceFile, File targetFile) throws IOException
{
if(overrideall==false){
if(targetFile.exists() && !targetFile.isDirectory()){
String []options={"Si a Todos","Si","No a Ninguno","No"};
int seleccion=JOptionPane.showOptionDialog(null, "El fichero \n"+targetFile+" \n se encuentra en el equipo, \n¿Desea sobreescribirlo?", "Colisión de ficheros", JOptionPane.DEFAULT_OPTION, JOptionPane.WARNING_MESSAGE, null, options, null);
switch(seleccion){
case 0:
override=true;
overrideall=true;
break;
case 1:
override=true;
overrideall=false;
break;
case 2:
override =false;
overrideall=true;
break;
case 3:
override =false;
overrideall=false;
break;
}
}
}
if(override || !targetFile.exists()){
FileInputStream LeeOrigen= new FileInputStream(sourceFile);
OutputStream Salida = new FileOutputStream(targetFile);
byte[] buffer = new byte[1024];
int tamaño;
long fileBytes = sourceFile.length();
long totalBytesCopied = 0;
while ((tamaño = LeeOrigen.read(buffer)) > 0) {
Salida.write(buffer, 0, tamaño);
totalBytesCopied += tamaño;
copiedBytes+= tamaño;
setProgress((int)Math.round(((double)copiedBytes / (double)totalBytes) * 100)); // no ++ here: copiedBytes is already updated above
int progress = (int)Math.round(((double)totalBytesCopied / (double)fileBytes) * 100);
publish(progress);
}
Salida.close();
LeeOrigen.close();
publish(100);
}
}
}
Here is CopyItem class
import java.io.File;
public class CopyItem {
File origen;
File destino;
String root;
public CopyItem(File origen, File destino) {
this.origen = origen;
this.destino = destino;
}
public CopyItem(File origen, File destino, String root) {
this.origen = origen;
this.destino = destino;
this.root = root;
}
public String getRoot() {
return root;
}
public void setRoot(String root) {
this.root = root;
}
public File getOrigen() {
return origen;
}
public void setOrigen(File origen) {
this.origen = origen;
}
public File getDestino() {
return destino;
}
public void setDestino(File destino) {
this.destino = destino;
}
@Override
public String toString() {
return super.toString(); //To change body of generated methods, choose Tools | Templates.
}
}
Yes, you can add the files directly to the source list (the list containing the files to be copied), but you need to synchronize your code because adding more files will happen on a different thread (the UI thread). Another way is to implement producer/consumer using a BlockingQueue, as sketched below.
The Consumer class runs in a separate Thread or SwingWorker while copying files is in progress.
The Producer class runs on the UI Thread (selecting more files).
Both should have access to the BlockingQueue (containing the files to be copied). BlockingQueue implementations are thread-safe according to the documentation, and they have the advantage of blocking execution while waiting for files to be added, which is very useful if you don't know when the files will be added.
I prefer using a Thread Pool to manage the thread executions (optional).
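A minimal sketch of that producer/consumer idea, assuming the CopyItem class shown above (the CopyQueue name and the POISON sentinel are illustrative, not part of your code):
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
public class CopyQueue {
// Sentinel ("poison pill") telling the consumer that no more items will come.
public static final CopyItem POISON = new CopyItem(null, null);
private final BlockingQueue<CopyItem> queue = new LinkedBlockingQueue<>();
// Producer side: called from the UI thread whenever more files are selected.
public void add(CopyItem item) throws InterruptedException {
queue.put(item);
}
// Consumer side: called from the SwingWorker; take() blocks until work arrives.
public void consume() throws InterruptedException {
CopyItem item;
while ((item = queue.take()) != POISON) {
// copy item.getOrigen() to item.getDestino() here
}
}
}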
As of now, I am using AudioPlayer to create sound effects for my game, and I am struggling to find an easy way to reset the audio before I begin playing it again. Currently I am just reloading the AudioStream entirely. Here's my code; any suggestions are welcome.
package resources;
import java.io.File;
import java.io.FileInputStream;
import java.util.HashMap;
import javax.sound.sampled.Clip;
import sun.audio.AudioPlayer;
import sun.audio.AudioStream;
public class Audio {
private static HashMap<String, AudioStream> sounds = new HashMap<String, AudioStream>();
private static HashMap<String, Long> times = new HashMap<String, Long>();
Clip c;
public static AudioStream getAudio(String s){
if(sounds.containsKey(s))
return sounds.get(s);
return null;
}
public static void loadAllAudio(){
File f = new File("res/sounds");
String[] files = f.list();
for(String s:files){
if(s.endsWith(".wav")){
loadAudio(s);
System.out.println("Loaded Audio: "+s);
}
}
}
public static double getSoundLength(String sound){
return getAudio(sound).getLength()/192000.0;
}
public static void playSound(String name){
resetSound(name);
times.put(name, System.nanoTime());
AudioPlayer.player.start(sounds.get(name));
}
public static void resetSound(String name){
loadAudio(name);
}
public static void stopSound(String sound){
AudioPlayer.player.stop(sounds.get(sound));
times.put(sound, System.nanoTime());
}
private static void loadAudio(String name){
try {
File f = new File("res/sounds/"+name);
sounds.put(name, new AudioStream(new FileInputStream(f)));
times.put(name, 0L);
} catch (Exception e) {}
}
public static double getTime(String sound) {
long time = times.get(sound);
if(time==0)
return 0;
return (System.nanoTime()-time)/1E9;
}
}
The line I would like to replace is:
public static void playSound(String name){
---------resetSound(name);--------- THIS LINE
times.put(name, System.nanoTime());
AudioPlayer.player.start(sounds.get(name));
}
You can use JLayer, which supports .mp3.
An example of how to play sound with JLayer:
new Thread(()->{
try {
FileInputStream file = new FileInputStream("path ..../audio.mp3"); //initialize the FileInputStream
Player player = new Player(file); //initialize the JLayer player (javazoom.jl.player.Player)
player.play(); //start the player
} catch (Exception e) {
e.printStackTrace();
}
}).start();
Note: I am using a separate Thread because otherwise the app will freeze.
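Also note that JLayer's Player cannot be rewound, so to "reset" a sound (as in the original question) you simply create a fresh Player over a new stream for each play; a hedged sketch:
// Sketch: re-create the Player per play; JLayer has no seek/reset API.
void play(String path) {
new Thread(() -> {
try (FileInputStream in = new FileInputStream(path)) {
new Player(in).play();
} catch (Exception e) {
e.printStackTrace();
}
}).start();
}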
I've got the following code for a file upload with Apache's HTTP-Client (org.apache.http.client):
public static void main(String[] args) throws Exception
{
String fileName = "test.avi";
File file = new File(fileName);
String serverResponse = null;
HttpParams params = new BasicHttpParams();
params.setParameter(HttpProtocolParams.USE_EXPECT_CONTINUE, true);
HttpProtocolParams.setVersion(params, HttpVersion.HTTP_1_1);
HttpClient client = new DefaultHttpClient(params);
HttpPut put = new HttpPut("http://localhost:8080/" + fileName);
FileEntity fileEntity = new FileEntity(file, "binary/octet-stream");
put.setEntity(fileEntity);
HttpResponse response = client.execute(put);
HttpEntity entity = response.getEntity();
if (entity != null)
{
serverResponse = EntityUtils.toString(entity);
System.out.println(serverResponse);
}
}
It works quite well, but now I want to have a progress bar which shows the progress of the file upload. How can this be done? I found a code snippet at File Upload with Java (with progress bar), but it is designed for Apache HTTP Client 3 (org.apache.commons.httpclient) and the RequestEntity class does not exist in Apache HTTP Client 4. ;(
Maybe one of you has an approach?
Many greetings
Benny
I introduced a derived FileEntity that just counts the written bytes.
It uses OutputStreamProgress that does the actual counting (kind of a decorator to the actual OutputStream).
The advantage of this (and decoration in general) is that I do not need to copy the actual implementation, like the actual copying from the file stream to the output stream. I can also switch to a different (newer) implementation, like the NFileEntity.
Enjoy...
FileEntity.java
public class FileEntity extends org.apache.http.entity.FileEntity {
private OutputStreamProgress outstream;
public FileEntity(File file, String contentType) {
super(file, contentType);
}
@Override
public void writeTo(OutputStream outstream) throws IOException {
this.outstream = new OutputStreamProgress(outstream);
super.writeTo(this.outstream);
}
/**
* Progress: 0-100
*/
public int getProgress() {
if (outstream == null) {
return 0;
}
long contentLength = getContentLength();
if (contentLength <= 0) { // Prevent division by zero and negative values
return 0;
}
long writtenLength = outstream.getWrittenLength();
return (int) (100*writtenLength/contentLength);
}
}
OutputStreamProgress.java
public class OutputStreamProgress extends OutputStream {
private final OutputStream outstream;
private volatile long bytesWritten=0;
public OutputStreamProgress(OutputStream outstream) {
this.outstream = outstream;
}
@Override
public void write(int b) throws IOException {
outstream.write(b);
bytesWritten++;
}
@Override
public void write(byte[] b) throws IOException {
outstream.write(b);
bytesWritten += b.length;
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
outstream.write(b, off, len);
bytesWritten += len;
}
@Override
public void flush() throws IOException {
outstream.flush();
}
@Override
public void close() throws IOException {
outstream.close();
}
public long getWrittenLength() {
return bytesWritten;
}
}
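For completeness, a hedged sketch of how getProgress() might be polled while the request runs on a background thread (the Swing Timer and the background thread are my assumption, not part of the answer above; file, client, put and progressBar are taken from the question's context):
FileEntity entity = new FileEntity(file, "binary/octet-stream");
put.setEntity(entity);
// Poll the entity's progress on the EDT while the upload runs elsewhere.
javax.swing.Timer timer = new javax.swing.Timer(200,
e -> progressBar.setValue(entity.getProgress()));
timer.start();
new Thread(() -> {
try {
client.execute(put);
} catch (IOException ex) {
ex.printStackTrace();
} finally {
timer.stop();
}
}).start();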
A new version using the package org.apache.commons.io.output from commons-io (2.4) and its class CountingOutputStream.
I changed the initial code to reflect my project's need to use a multipart form as input and the POST method (this due to the requirements imposed by the server side).
Consider that the delta for a large file corresponds in my tests to 4096 bytes. This means that the listener method counterChanged() is called for every 4096 bytes of transferred data, which is acceptable for my use case.
The method looks like:
public void post(String url, File sendFile) throws IOException { // declared because StringBody and execute() can throw IOException
HttpParams params = new BasicHttpParams();
params.setParameter(HttpProtocolParams.USE_EXPECT_CONTINUE, true);
HttpProtocolParams.setVersion(params, HttpVersion.HTTP_1_1);
HttpClient client = new DefaultHttpClient(params);
HttpPost post = new HttpPost(url + "/" + sendFile.getName());
MultipartEntity multiEntity = new MultipartEntity();
MyFileBody fileBody = new MyFileBody(sendFile);
fileBody.setListener(new IStreamListener(){
@Override
public void counterChanged(int delta) {
// do something
System.out.println(delta);
}});
multiEntity.addPart("file", fileBody);
StringBody stringBody = new StringBody(sendFile.getName());
multiEntity.addPart("fileName", stringBody);
post.setEntity(multiEntity);
HttpResponse response = client.execute(post);
}
The class MyFileBody becomes:
public class MyFileBody extends FileBody {
private IStreamListener listener;
public MyFileBody(File file) {
super(file);
}
@Override
public void writeTo(OutputStream out) throws IOException {
CountingOutputStream output = new CountingOutputStream(out) {
@Override
protected void beforeWrite(int n) {
if (listener != null && n != 0)
listener.counterChanged(n);
super.beforeWrite(n);
}
};
super.writeTo(output);
}
public void setListener(IStreamListener listener) {
this.listener = listener;
}
public IStreamListener getListener() {
return listener;
}
}
Finally, the listener interface looks like:
public interface IStreamListener {
void counterChanged(int delta);
}
This answer extends kilaka's answer by adding a simple listener to the OutputStreamProgress.java class instead of having the public getProgress() method (I'm honestly not sure how you are supposed to call the getProgress() method, since the thread will be executing inside of HttpClient's code the entire time you might want to call it!).
Please note you'll need to extend the entity class for each entity type you want to use, and when you write your HttpClient code, you'll need to create the entity of that new type.
I wrote a very basic write listener that implements the WriteListener interface. This is where you'll add your logic to do something with the write reports from the OutputStreamProgress, something like updating a progress bar :)
Big thanks to kilaka for using the decorator idea to sneak in a counting outstream.
WriteListener.java
public interface WriteListener {
void registerWrite(long amountOfBytesWritten);
}
OutputStreamProgress.java
import java.io.IOException;
import java.io.OutputStream;
public class OutputStreamProgress extends OutputStream {
private final OutputStream outstream;
private long bytesWritten=0;
private final WriteListener writeListener;
public OutputStreamProgress(OutputStream outstream, WriteListener writeListener) {
this.outstream = outstream;
this.writeListener = writeListener;
}
@Override
public void write(int b) throws IOException {
outstream.write(b);
bytesWritten++;
writeListener.registerWrite(bytesWritten);
}
@Override
public void write(byte[] b) throws IOException {
outstream.write(b);
bytesWritten += b.length;
writeListener.registerWrite(bytesWritten);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
outstream.write(b, off, len);
bytesWritten += len;
writeListener.registerWrite(bytesWritten);
}
@Override
public void flush() throws IOException {
outstream.flush();
}
@Override
public void close() throws IOException {
outstream.close();
}
}
BasicWriteListener.java
public class BasicWriteListener implements WriteListener {
public BasicWriteListener() {
// TODO Auto-generated constructor stub
}
public void registerWrite(long amountOfBytesWritten) {
System.out.println(amountOfBytesWritten);
}
}
MultipartEntityWithProgressBar.java
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.Charset;
import org.apache.http.entity.mime.HttpMultipartMode;
import org.apache.http.entity.mime.MultipartEntity;
public class MultipartEntityWithProgressBar extends MultipartEntity {
private OutputStreamProgress outstream;
private WriteListener writeListener;
@Override
public void writeTo(OutputStream outstream) throws IOException {
this.outstream = new OutputStreamProgress(outstream, writeListener);
super.writeTo(this.outstream);
}
public MultipartEntityWithProgressBar(WriteListener writeListener)
{
super();
this.writeListener = writeListener;
}
public MultipartEntityWithProgressBar(HttpMultipartMode mode, WriteListener writeListener)
{
super(mode);
this.writeListener = writeListener;
}
public MultipartEntityWithProgressBar(HttpMultipartMode mode, String boundary, Charset charset, WriteListener writeListener)
{
super(mode, boundary, charset);
this.writeListener = writeListener;
}
// Left in for clarity to show where I took from kilaka's answer
// /**
// * Progress: 0-100
// */
// public int getProgress() {
// if (outstream == null) {
// return 0;
// }
// long contentLength = getContentLength();
// if (contentLength <= 0) { // Prevent division by zero and negative values
// return 0;
// }
// long writtenLength = outstream.getWrittenLength();
// return (int) (100*writtenLength/contentLength);
// }
}
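A possible usage sketch (assuming the classes above and the HttpPost named post from earlier in this thread):
WriteListener listener = new BasicWriteListener();
MultipartEntityWithProgressBar entity = new MultipartEntityWithProgressBar(listener);
entity.addPart("file", new FileBody(file));
post.setEntity(entity);
HttpResponse response = client.execute(post);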
Hello guys!
I solved the problem myself and made a simple example of it.
If there are any questions, feel free to ask.
Here we go!
ApplicationView.java
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.File;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.swing.JButton;
import javax.swing.JFrame;
import javax.swing.JPanel;
import javax.swing.JProgressBar;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.HttpVersion;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.HttpParams;
import org.apache.http.params.HttpProtocolParams;
import org.apache.http.util.EntityUtils;
public class ApplicationView implements ActionListener
{
File file = new File("C:/Temp/my-upload.avi");
JProgressBar progressBar = null;
public ApplicationView()
{
super();
}
public void createView()
{
JFrame frame = new JFrame("File Upload with progress bar - Example");
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
frame.setBounds(0, 0, 300, 200);
frame.setVisible(true);
progressBar = new JProgressBar(0, 100);
progressBar.setBounds(20, 20, 200, 30);
progressBar.setStringPainted(true);
progressBar.setVisible(true);
JButton button = new JButton("upload");
button.setBounds(progressBar.getX(),
progressBar.getY() + progressBar.getHeight() + 20,
100,
40);
button.addActionListener(this);
JPanel panel = (JPanel) frame.getContentPane();
panel.setLayout(null);
panel.add(progressBar);
panel.add(button);
panel.setVisible(true);
}
public void actionPerformed(ActionEvent e)
{
try
{
sendFile(this.file, this.progressBar);
}
catch (Exception ex)
{
System.out.println(ex.getLocalizedMessage());
}
}
private void sendFile(File file, JProgressBar progressBar) throws Exception
{
String serverResponse = null;
HttpParams params = new BasicHttpParams();
params.setParameter(HttpProtocolParams.USE_EXPECT_CONTINUE, true);
HttpProtocolParams.setVersion(params, HttpVersion.HTTP_1_1);
HttpClient client = new DefaultHttpClient(params);
HttpPut put = new HttpPut("http://localhost:8080/" + file.getName());
ProgressBarListener listener = new ProgressBarListener(progressBar);
FileEntityWithProgressBar fileEntity = new FileEntityWithProgressBar(file, "binary/octet-stream", listener);
put.setEntity(fileEntity);
HttpResponse response = client.execute(put);
HttpEntity entity = response.getEntity();
if (entity != null)
{
serverResponse = EntityUtils.toString(entity);
System.out.println(serverResponse);
}
}
}
FileEntityWithProgressBar.java
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.http.entity.AbstractHttpEntity;
/**
* File entity which supports a progress bar.<br/>
* Based on "org.apache.http.entity.FileEntity".
* @author Benny Neugebauer (www.bennyn.de)
*/
public class FileEntityWithProgressBar extends AbstractHttpEntity implements Cloneable
{
protected final File file;
private final ProgressBarListener listener;
private long transferredBytes;
public FileEntityWithProgressBar(final File file, final String contentType, ProgressBarListener listener)
{
super();
if (file == null)
{
throw new IllegalArgumentException("File may not be null");
}
this.file = file;
this.listener = listener;
this.transferredBytes = 0;
setContentType(contentType);
}
public boolean isRepeatable()
{
return true;
}
public long getContentLength()
{
return this.file.length();
}
public InputStream getContent() throws IOException
{
return new FileInputStream(this.file);
}
public void writeTo(final OutputStream outstream) throws IOException
{
if (outstream == null)
{
throw new IllegalArgumentException("Output stream may not be null");
}
InputStream instream = new FileInputStream(this.file);
try
{
byte[] tmp = new byte[4096];
int l;
while ((l = instream.read(tmp)) != -1)
{
outstream.write(tmp, 0, l);
this.transferredBytes += l;
this.listener.updateTransferred(this.transferredBytes);
}
outstream.flush();
}
finally
{
instream.close();
}
}
public boolean isStreaming()
{
return false;
}
@Override
public Object clone() throws CloneNotSupportedException
{
return super.clone();
}
}
ProgressBarListener.java
import javax.swing.JProgressBar;
public class ProgressBarListener
{
private int transferedMegaBytes = 0;
private JProgressBar progressBar = null;
public ProgressBarListener()
{
super();
}
public ProgressBarListener(JProgressBar progressBar)
{
this();
this.progressBar = progressBar;
}
public void updateTransferred(long transferedBytes)
{
transferedMegaBytes = (int) (transferedBytes / 1048576);
this.progressBar.setValue(transferedMegaBytes);
this.progressBar.paint(progressBar.getGraphics());
System.out.println("Transferred: " + transferedMegaBytes + " Megabytes.");
}
}
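One caveat: updateTransferred() runs on the thread executing the HTTP request, and painting via getGraphics() bypasses Swing's normal paint pipeline. A safer variant (my suggestion, not part of the original example) marshals the update onto the Event Dispatch Thread:
public void updateTransferred(long transferedBytes)
{
final int megabytes = (int) (transferedBytes / 1048576);
javax.swing.SwingUtilities.invokeLater(() -> progressBar.setValue(megabytes));
}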
Happy Coding!