Serialization of external library classes - Java

I am using Redis in one of our projects. Redis needs objects to be serialized in order to be persisted, so I want to understand how to deal with classes that refer to external library classes (StandardServletEnvironment in my case) which don't implement Serializable and which we can't modify. I am getting a NotSerializableException in these cases.

If you want to store user-defined Java objects in Redis, serialization is the suitable option. However, Java's native serialization comes with drawbacks like the one you faced, and it is also quite slow. I faced the same kind of problem; after a long search I came up with the solution of using Kryo serialization. Kryo doesn't require classes to implement Serializable, and it is considerably faster than Java's native serialization.
P.S.: If you don't want to use Kryo and prefer Java's built-in serialization, create a serializable class, copy your object's state into it, and serialize that instead.
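A minimal sketch of that wrapper approach (EnvironmentSnapshot and its field are made up for illustration; the idea is to copy only the state you need out of the non-serializable object before persisting it):
import java.io.Serializable;
import java.util.Arrays;

// Illustrative holder: extract the relevant state from the
// non-serializable object (e.g. StandardServletEnvironment)
// into a plain serializable POJO.
public class EnvironmentSnapshot implements Serializable {
    private static final long serialVersionUID = 1L;
    private final String[] activeProfiles;

    public EnvironmentSnapshot(String[] activeProfiles) {
        this.activeProfiles = Arrays.copyOf(activeProfiles, activeProfiles.length);
    }

    public String[] getActiveProfiles() {
        return Arrays.copyOf(activeProfiles, activeProfiles.length);
    }
}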
I hope this helps.

As Praga stated, Kryo is a good solution for (de)serialization of objects that do not implement the Serializable interface. Here is some sample code for serialization with Kryo; hope it helps:
Kryo kryo = new Kryo();

private byte[] encode(Object obj) {
    ByteArrayOutputStream objStream = new ByteArrayOutputStream();
    Output objOutput = new Output(objStream);
    kryo.writeClassAndObject(objOutput, obj);
    objOutput.close();
    return objStream.toByteArray();
}

private <T> T decode(byte[] bytes) {
    return (T) kryo.readClassAndObject(new Input(bytes));
}
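A quick round trip with the helpers above (a sketch; User is a made-up POJO and the helpers are assumed to be called from the same class). Note that a Kryo instance is not thread-safe, so a shared field like the one above needs external synchronization (or a ThreadLocal<Kryo>) when used from multiple threads:
// Serialize to bytes, then deserialize; the generic decode helper
// infers the target type from the assignment.
User original = new User("alice");
byte[] bytes = encode(original);
User restored = decode(bytes);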
Maven dependency:
<dependency>
    <groupId>com.esotericsoftware</groupId>
    <artifactId>kryo</artifactId>
    <version>4.0.1</version>
</dependency>
Full implementation for Redis integration:
RedisInterface:
public class RedisInterface {

    static final Logger logger = LoggerFactory.getLogger(RedisInterface.class);

    private static RedisInterface instance = null;

    public static RedisInterface getInstance() {
        if (instance == null)
            createInstance();
        return instance;
    }

    private static synchronized void createInstance() {
        if (instance == null) // in case of multi-threaded access
            instance = new RedisInterface();
    }

    JedisConfig jedis = new JedisConfig();

    public boolean setAttribute(String key, Object value) {
        return this.setAttribute(key, key, value);
    }

    public boolean setAttribute(String key, Object value, int expireSeconds) {
        return this.setAttribute(key, key, value, expireSeconds);
    }

    public boolean setAttribute(String key, String field, Object value) {
        int expireSeconds = 20 * 60; // 20 minutes
        return this.setAttribute(key, field, value, expireSeconds);
    }

    public boolean setAttribute(String key, String field, Object value, int expireSeconds) {
        try {
            if (key == null || "".equals(key) || field == null || "".equals(field))
                return false;
            byte[] keyBytes = key.getBytes();
            byte[] fieldBytes = field.getBytes();
            byte[] valueBytes = encode(value);
            long start = new Date().getTime();
            jedis.set(keyBytes, fieldBytes, valueBytes, expireSeconds);
            long end = new Date().getTime();
            long waitTime = end - start;
            logger.info("{} key saved to redis in {} milliseconds with timeout: {} seconds",
                    new Object[] { key, waitTime, expireSeconds });
            return true;
        } catch (Exception e) {
            logger.error("error on saving object to redis. key: " + key, e);
            return false;
        }
    }

    public <T> T getAttribute(String key) {
        return this.getAttribute(key, key);
    }

    public <T> T getAttribute(String key, String field) {
        try {
            if (key == null || "".equals(key) || field == null || "".equals(field))
                return null;
            byte[] keyBytes = key.getBytes();
            byte[] fieldBytes = field.getBytes();
            long start = new Date().getTime();
            byte[] valueBytes = jedis.get(keyBytes, fieldBytes);
            T o = null;
            if (valueBytes != null && valueBytes.length > 0)
                o = decode(valueBytes);
            long end = new Date().getTime();
            long waitTime = end - start;
            logger.info("{} key read operation from redis in {} milliseconds. key found?: {}",
                    new Object[] { key, waitTime, (o != null) });
            return o;
        } catch (Exception e) {
            logger.error("error on getting object from redis. key: " + key, e);
            return null;
        }
    }

    Kryo kryo = new Kryo();

    private byte[] encode(Object obj) {
        ByteArrayOutputStream objStream = new ByteArrayOutputStream();
        Output objOutput = new Output(objStream);
        kryo.writeClassAndObject(objOutput, obj);
        objOutput.close();
        return objStream.toByteArray();
    }

    private <T> T decode(byte[] bytes) {
        return (T) kryo.readClassAndObject(new Input(bytes));
    }
}
JedisConfig:
public class JedisConfig implements Closeable {

    private Pool<Jedis> jedisPool = null;

    private synchronized void initializePool() {
        if (jedisPool != null)
            return;
        JedisPoolConfig poolConfig = new JedisPoolConfig();
        poolConfig.setMaxTotal(Integer.parseInt(Config.REDIS_MAX_ACTIVE_CONN)); // maximum active connections
        poolConfig.setMaxIdle(Integer.parseInt(Config.REDIS_MAX_IDLE_CONN)); // maximum idle connections
        poolConfig.setMaxWaitMillis(Long.parseLong(Config.REDIS_MAX_WAIT_MILLIS)); // max wait time for a new connection (before throwing an exception)
        if ("true".equals(Config.REDIS_SENTINEL_ACTIVE)) {
            String[] sentinelsArray = Config.REDIS_SENTINEL_HOST_LIST.split(",");
            Set<String> sentinels = new HashSet<String>();
            for (String sentinel : sentinelsArray) {
                sentinels.add(sentinel);
            }
            String masterName = Config.REDIS_SENTINEL_MASTER_NAME;
            jedisPool = new JedisSentinelPool(masterName, sentinels, poolConfig,
                    Integer.parseInt(Config.REDIS_CONN_TIMEOUT));
        } else {
            jedisPool = new JedisPool(poolConfig,
                    Config.REDIS_IP,
                    Integer.parseInt(Config.REDIS_PORT),
                    Integer.parseInt(Config.REDIS_CONN_TIMEOUT));
        }
    }

    protected Jedis getJedis() {
        if (jedisPool == null)
            initializePool();
        Jedis jedis = jedisPool.getResource();
        return jedis;
    }

    public Long set(final byte[] key, final byte[] field, final byte[] value, int expireSeconds) {
        Jedis redis = null;
        Long ret = 0L;
        try {
            redis = getJedis();
            ret = redis.hset(key, field, value);
            redis.expire(key, expireSeconds);
        } finally {
            if (redis != null)
                redis.close();
        }
        return ret;
    }

    public byte[] get(final byte[] key, final byte[] field) {
        Jedis redis = null;
        byte[] valueBytes = null;
        try {
            redis = getJedis();
            valueBytes = redis.hget(key, field);
        } finally {
            if (redis != null)
                redis.close();
        }
        return valueBytes;
    }

    @Override
    public void close() throws IOException {
        if (jedisPool != null)
            jedisPool.close();
    }
}
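Usage might then look like this (a sketch; the key name and the User object are illustrative):
// Store with the default 20-minute expiry, then read it back.
User user = new User("alice"); // any Kryo-serializable object
RedisInterface redis = RedisInterface.getInstance();
redis.setAttribute("session:42", user);
User cached = redis.getAttribute("session:42");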

Related

Kafka Connect JsonConverter with validation

I'm trying to create a Kafka Connect value converter which wraps invalid JSON records in a valid JSON object.
I'm reading the values from Kinesis (using KinesisSourceConnector), so the input is base64-encoded.
My implementation tries to process the input through ByteArrayConverter, which decodes the data and delegates the output to JsonConverter, as follows (decode is initialized to true in the configure method):
private final Converter delegate = new JsonConverter();
private final Converter decoder = new ByteArrayConverter();
private boolean decode = false;

@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
    try {
        String decoded = new String(decoder.fromConnectData(topic, schema, value));
        LOG.info("decoded string\n" + decoded);
        if (decode) {
            byte[] bytes = decoder.fromConnectData(topic, schema, value);
            return delegate.fromConnectData(topic, schema, bytes);
        }
        return delegate.fromConnectData(topic, schema, value);
    } catch (Exception e) {
        LOG.error("something went wrong", e);
        return delegate.fromConnectData(topic, schema,
                wrapInvalidJson(new String(decoder.fromConnectData(topic, schema, value))));
    }
}
When I print the decoded string it looks OK (a decoded JSON string), but when I consume the output topic it looks like base64 again, and I'm not sure what I am missing.
Not sure it is optimal, but I went with this approach:
private final Converter delegate = new JsonConverter();
private final Converter decoder = new ByteArrayConverter();
private final Converter stringConverter = new StringConverter();
private final ObjectMapper mapper = new ObjectMapper();
private boolean decode = false;

@Override
public void configure(Map<String, ?> configs, boolean isKey) {
    delegate.configure(Collections.singletonMap("schemas.enable", false), false);
    if (configs.containsKey("ni.decode.data") && Boolean.valueOf((String) configs.get("ni.decode.data"))) {
        decode = true;
    }
}

@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
    if (decode) {
        String decoded = new String(decoder.fromConnectData(topic, schema, value));
        try {
            return mapper.readTree(decoded).toString().getBytes();
        } catch (Exception e) {
            return wrapInvalidJson(decoded).getBytes();
        }
    } else {
        try {
            return delegate.fromConnectData(topic, schema, value);
        } catch (Exception e) {
            byte[] msg = stringConverter.fromConnectData(topic, schema, value);
            return wrapInvalidJson(new String(msg)).getBytes();
        }
    }
}

Create objects in GenericObjectPool

I'm doing research on GenericObjectPool by putting Cipher in a pool so it can be reused.
GenericObjectPool<Cipher> pool;
CipherFactory factory = new CipherFactory();
this.pool = new GenericObjectPool<Cipher>(factory);
pool.setMaxTotal(10);
pool.setBlockWhenExhausted(true);
pool.setMaxWaitMillis(30 * 1000);
CipherFactory
public class CipherFactory extends BasePooledObjectFactory<Cipher> {

    private boolean running = false;

    @Override
    public Cipher create() throws Exception {
        return Cipher.getInstance("DESede/CBC/NoPadding");
    }

    @Override
    public PooledObject<Cipher> wrap(Cipher arg0) {
        return new DefaultPooledObject<Cipher>(arg0);
    }

    @Override
    public boolean validateObject(PooledObject<Cipher> p) {
        // Ensures that the instance is safe to be returned by the pool
        return true;
    }

    @Override
    public void destroyObject(PooledObject<Cipher> p) {
        // Destroys an instance no longer needed by the pool.
        System.out.println("destroying");
    }

    @Override
    public void activateObject(PooledObject<Cipher> p) throws Exception {
        // Reinitialize an instance to be returned by the pool
        setRunning(true);
    }

    @Override
    public void passivateObject(PooledObject<Cipher> p) throws Exception {
        // Reset the object after it returns to the pool
        setRunning(false);
    }

    public void setRunning(boolean running) {
        this.running = running;
    }
}
This is how I use the ObjectPool in my Example class:
public Key a(byte[] afyte) throws Exception {
    Cipher cipher = null;
    cipher = pool.borrowObject(); // get the object from the pool
    try {
        System.out.println("****************** After borrow ****************");
        printPool();
        cipher.init(Cipher.DECRYPT_MODE, mkkey, algParamSpec);
        byte[] de = cipher.doFinal(afyte);
        SecretKey mk = new SecretKeySpec(de, "DESede");
        return mk;
    } catch (Exception e) {
        pool.invalidateObject(cipher);
        cipher = null;
    } finally {
        if (null != cipher) {
            pool.returnObject(cipher);
            System.out.println("****************** After return ****************");
            printPool();
        }
    }
    return (Key) cipher;
}
printPool
public void printPool() {
    System.out.println("Pool for cipher with instances DESede/CBC/NoPadding");
    System.out.println("Active [" + pool.getNumActive() + "]"); // number of instances currently borrowed from this pool
    System.out.println("Idle [" + pool.getNumIdle() + "]"); // number of instances currently idle in this pool
    System.out.println("Total Created [" + pool.getCreatedCount() + "]");
}
Am I on the right path? Is it possible to increase the pool size?
Edit
The answer from @http works fine for me. But if I have another method encryptECB(Key key, byte[] b), how should I write it?
Any help would be appreciated!
You are on the right track. When constructing the GenericObjectPool, you can use the constructor that accepts a GenericObjectPoolConfig object which contains all the configuration values for your object pool. The example below would let your pool grow to 20 connections before it was exhausted...
GenericObjectPoolConfig config = new GenericObjectPoolConfig();
config.setMinIdle(2);
config.setMaxIdle(5);
config.setMaxTotal(20);
GenericObjectPool<Cipher> pool;
CipherFactory factory = new CipherFactory();
this.pool = new GenericObjectPool<Cipher>(factory, config);
GenericObjectPoolConfig also has a setBlockWhenExhausted method to specify the behaviour when the pool has reached the maxTotal connections. See https://commons.apache.org/proper/commons-pool/apidocs/org/apache/commons/pool2/impl/BaseObjectPoolConfig.html#setBlockWhenExhausted-boolean- for details.
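For example (a sketch; the values are illustrative):
// Block callers for up to 30 seconds waiting for a returned instance
// when the pool is exhausted, instead of failing immediately.
config.setBlockWhenExhausted(true);
config.setMaxWaitMillis(30 * 1000L);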
A pattern I implement when using commons pool is to create 2 interfaces, one for your pooled object and one for your factory...
public interface PooledCipher extends java.io.Closeable {
    byte[] doFinal(byte[] bytes) throws Exception;
    SecretKeySpec getSecretKeySpec(byte[] bytes) throws Exception;
}

public interface CipherFactory {
    PooledCipher getCipher() throws Exception;
    void close();
}
CipherFactory implementation...
public class CipherFactoryImpl extends BasePooledObjectFactory<PooledCipher>
        implements CipherFactory {

    private final GenericObjectPoolConfig config;
    private final GenericObjectPool<PooledCipher> pool;
    private final String transformation;
    private final int opmode;
    private final Key key;
    private final AlgorithmParameters params;
    private final String secretKeySpecAlgorithm;

    public CipherFactoryImpl(GenericObjectPoolConfig config, String transformation, int opmode,
            Key key, AlgorithmParameters params, String secretKeySpecAlgorithm) {
        this.config = config;
        this.pool = new GenericObjectPool<PooledCipher>(this, config);
        this.transformation = transformation;
        this.opmode = opmode;
        this.key = key;
        this.params = params;
        this.secretKeySpecAlgorithm = secretKeySpecAlgorithm;
    }

    @Override
    public PooledCipher create() throws Exception {
        return new PooledCipherImpl(pool, transformation, opmode, key, params, secretKeySpecAlgorithm);
    }

    @Override
    public PooledCipher getCipher() throws Exception {
        return pool.borrowObject();
    }

    @Override
    public void destroyObject(PooledObject<PooledCipher> p) throws Exception {
        try {
            PooledCipherImpl cipherImpl = (PooledCipherImpl) p.getObject();
            // do whatever you need with cipherImpl to destroy it
        } finally {
            super.destroyObject(p);
        }
    }

    @Override
    public void close() {
        pool.close();
    }

    @Override
    public PooledObject<PooledCipher> wrap(PooledCipher cipher) {
        return new DefaultPooledObject<PooledCipher>(cipher);
    }
}
PooledCipher implementation...
public class PooledCipherImpl implements PooledCipher {

    private final ObjectPool<PooledCipher> pool;
    private final Cipher cipher;
    private final String secretKeySpecAlgorithm;
    private boolean destroyOnClose = false;

    public PooledCipherImpl(ObjectPool<PooledCipher> pool, String transformation, int opmode,
            Key key, AlgorithmParameters params, String secretKeySpecAlgorithm) throws Exception {
        this.pool = pool;
        this.cipher = Cipher.getInstance(transformation);
        this.cipher.init(opmode, key, params);
        this.secretKeySpecAlgorithm = secretKeySpecAlgorithm;
    }

    @Override
    public byte[] doFinal(byte[] bytes) throws Exception {
        try {
            return cipher.doFinal(bytes);
        } catch (Exception e) {
            destroyOnClose = true;
            throw e;
        }
    }

    @Override
    public SecretKeySpec getSecretKeySpec(byte[] bytes) throws Exception {
        return new SecretKeySpec(doFinal(bytes), secretKeySpecAlgorithm);
    }

    @Override
    public void close() throws IOException {
        try {
            if (destroyOnClose) {
                pool.destroyObject(this);
            } else {
                pool.returnObject(this);
            }
        } catch (Exception e) {
            throw new IOException(e);
        }
    }
}
Then you construct your CipherFactory like this...
String transformation = "DESede/CBC/NoPadding";
String secretKeySpecAlgorithm = "DESede";
GenericObjectPoolConfig poolConfig = new GenericObjectPoolConfig();
// set up the poolConfig here
poolConfig.setMaxTotal(20);
CipherFactory cipherFactory = new CipherFactoryImpl(poolConfig, transformation, Cipher.DECRYPT_MODE, mkkey, algParamSpec, secretKeySpecAlgorithm);
And use it like this...
public Key unwrapKey(byte[] tmkByte) throws Exception {
    try (PooledCipher cipher = cipherFactory.getCipher()) {
        return cipher.getSecretKeySpec(tmkByte);
    }
}
Also you can reuse the PooledCipher and CipherFactory interfaces to create other implementations, such as JCA.

Is the following code thread safe?

I think I have implemented the double-checked locking pattern, but I am not sure whether it is safe or works as intended. Any other logic to implement the same would be really helpful.
public class OnProperties {

    private static String dfltPropertyFile = "on.properties";
    private static long refreshSecs = 120L;
    private static Properties props;
    private static long lastReadTimestamp = 0;

    public static String getProperty(String propertyName, String dfltValue) {
        long currentTimestamp = System.currentTimeMillis() / 1000L;
        if (props == null
                || (refreshSecs > 0 && (currentTimestamp - lastReadTimestamp) > refreshSecs)) {
            synchronized (props) {
                if (props == null
                        || (refreshSecs > 0 && (currentTimestamp - lastReadTimestamp) > refreshSecs)) {
                    lastReadTimestamp = currentTimestamp;
                    try {
                        loadProperties(dfltPropertyFile);
                        refreshSecs = getProperty("on.properties.refresh", 120L);
                        if (refreshSecs < 0L) {
                            refreshSecs = 0L;
                        }
                    } catch (Exception e) {
                        refreshSecs = 600L;
                    }
                }
            }
        }
        if (props == null) {
            return dfltValue;
        }
        String propertyValue = props.getProperty(propertyName, dfltValue);
        return propertyValue;
    }

    public static boolean getProperty(String propertyName, boolean dfltValue) {
        boolean value = dfltValue;
        String strValue = getProperty(propertyName, (String) null);
        if (strValue != null) {
            try {
                value = Boolean.parseBoolean(strValue);
            } catch (NumberFormatException e) {
                // just keep the default
            }
        }
        return value;
    }

    private static void loadProperties(String p_propertiesFile)
            throws java.io.IOException, java.io.FileNotFoundException {
        InputStream fileStream = new FileInputStream(p_propertiesFile);
        props = new Properties();
        props.load(fileStream);
        fileStream.close();
    }
}
Multiple threads frequently access the getProperty method, as follows:
extDebug = OnProperties.getProperty("on.extdebug", false);
Atomic values guarantee that every thread always sees the complete, latest value. This prevents a number of multi-threading issues in this case. A bit of synchronization is still required, but it can be limited to a minimum. See my implementation below:
import java.io.File;
import java.io.FileInputStream;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

public class OnProperties {

    private static int refreshIntervalDefaultSecs;
    private static int refreshIntervalOnErrorSecs;

    static {
        setRefreshInterval(120);
    }

    private static final AtomicReference<Properties> propsRef = new AtomicReference<Properties>(new Properties());
    private static final AtomicLong nextPropsLoad = new AtomicLong(0L);
    private static final Object loadLock = new Object();
    private static String dfltPropertyFile = "on.properties";

    public static String getProperty(String key, String defaultValue) {
        String value = getProperty(key);
        if (value == null) {
            value = defaultValue;
        }
        return value;
    }

    private static String getProperty(String key) {
        reloadWhenNeeded();
        return propsRef.get().getProperty(key);
    }

    private static void reloadWhenNeeded() {
        long now = System.currentTimeMillis();
        if (now > nextPropsLoad.get()) {
            boolean reload = false;
            synchronized (loadLock) {
                if (now > nextPropsLoad.get()) {
                    // need loadLock because there is time between the previous get()
                    // and the next set()
                    updateNextPropsLoad(now, refreshIntervalDefaultSecs);
                    reload = true;
                }
            }
            if (reload) {
                reloadProps(now);
            }
        }
    }

    private static void updateNextPropsLoad(long now, int nextRefreshSecs) {
        nextPropsLoad.set(now + nextRefreshSecs * 1000);
    }

    private static void reloadProps(long now) {
        Properties p = new Properties();
        FileInputStream in = null;
        System.out.println("Reloading from " + new File(dfltPropertyFile).getAbsolutePath());
        try {
            p.load(in = new FileInputStream(new File(dfltPropertyFile)));
            propsRef.set(p);
            setRefreshInterval(getProperty("on.properties.refresh", 120));
            updateNextPropsLoad(now, refreshIntervalDefaultSecs);
        } catch (Exception e) {
            updateNextPropsLoad(now, refreshIntervalOnErrorSecs);
        } finally {
            try {
                if (in != null) in.close();
            } catch (Exception e) {
                updateNextPropsLoad(now, refreshIntervalOnErrorSecs);
            }
        }
    }

    private static void setRefreshInterval(int refreshSecs) {
        if (refreshSecs < 1) {
            refreshSecs = 120;
        }
        refreshIntervalDefaultSecs = refreshSecs;
        refreshIntervalOnErrorSecs = 5 * refreshSecs;
    }

    public static boolean getProperty(String key, boolean defaultValue) {
        boolean value = defaultValue;
        String svalue = getProperty(key);
        if (svalue != null) {
            try {
                value = Boolean.valueOf(svalue);
            } catch (Exception ignored) {}
        }
        return value;
    }

    public static int getProperty(String key, int defaultValue) {
        int value = defaultValue;
        String svalue = getProperty(key);
        if (svalue != null) {
            try {
                value = Integer.valueOf(svalue);
            } catch (Exception ignored) {}
        }
        return value;
    }

    public static void main(String[] args) {
        System.out.println("Refresh value from file: " + getProperty("on.properties.refresh", 120));
        System.out.println("No reload " + getProperty("does.not.exist", true));
        System.out.println("Next reload after " + ((nextPropsLoad.get() - System.currentTimeMillis()) / 1000) + " seconds.");
    }
}
One drawback of this implementation is that the thread selected to reload the properties from file gets slowed down. A better approach would be to create a 'watchdog' thread/scheduled task that checks every (for example) five seconds whether the properties file has a changed modification date and then triggers a reload (in which case the AtomicReference for the Properties still comes in handy).
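A rough sketch of that watchdog idea, reusing the propsRef and dfltPropertyFile fields from the class above (the schedule and error handling are illustrative):
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

private static final ScheduledExecutorService watchdog =
        Executors.newSingleThreadScheduledExecutor();

static {
    watchdog.scheduleWithFixedDelay(new Runnable() {
        private long lastModified = 0L;
        public void run() {
            File f = new File(dfltPropertyFile);
            long mod = f.lastModified();
            if (mod == lastModified) return; // unchanged, nothing to do
            Properties p = new Properties();
            FileInputStream in = null;
            try {
                in = new FileInputStream(f);
                p.load(in);
                propsRef.set(p); // atomic swap; readers never block
                lastModified = mod;
            } catch (Exception e) {
                // keep serving the previous snapshot on error
            } finally {
                try { if (in != null) in.close(); } catch (Exception ignored) {}
            }
        }
    }, 0, 5, TimeUnit.SECONDS);
}
With this in place, getProperty would no longer need reloadWhenNeeded at all; it would simply read propsRef.get().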
Also keep in mind that there is a logical threading issue: if property values are interrelated (i.e. one value is only correct if another value is also updated), a reload could present a thread with a mix of old and new values that should not be combined. The only way around that is to keep a reference to one set of properties in the methods that use the interrelated values (and a class like this, with static methods and variables, is not handy in that situation).
It is not safe: you have multiple variables which are read in a way that is not thread safe (i.e. access is not synchronized and they are not volatile).
It appears the workflow is mostly reads with a few writes. I would suggest using a ReentrantReadWriteLock to synchronize access.
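A minimal sketch of that suggestion, borrowing the field names from the question (needsReload() is a hypothetical helper wrapping the refresh-interval check):
import java.util.concurrent.locks.ReentrantReadWriteLock;

private static final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();

public static String getProperty(String propertyName, String dfltValue) {
    // Common case: many concurrent readers, no reload needed.
    rwLock.readLock().lock();
    try {
        if (props != null && !needsReload()) {
            return props.getProperty(propertyName, dfltValue);
        }
    } finally {
        rwLock.readLock().unlock();
    }
    // Rare case: a single writer reloads while readers wait briefly.
    rwLock.writeLock().lock();
    try {
        if (props == null || needsReload()) { // re-check under the write lock
            loadProperties(dfltPropertyFile);
            lastReadTimestamp = System.currentTimeMillis() / 1000L;
        }
        return props.getProperty(propertyName, dfltValue);
    } catch (Exception e) {
        return dfltValue;
    } finally {
        rwLock.writeLock().unlock();
    }
}
Because every access to props and lastReadTimestamp happens under one of the two locks, neither field needs to be volatile here.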
To have this working correctly with double-checked locking you must do two things:
private static Properties props must be declared volatile;
as already mentioned, synchronized(props) won't work when props is null - you need to declare a dedicated lock object field:
private static final Object propsLockObject = new Object();
...
synchronized (propsLockObject) {
...
P.S. lastReadTimestamp also won't work unless it is declared volatile, though that is no longer about double-checked locking.
To reload the properties, you don't need to re-initialize the props variable; initializing the properties in the declaration statement itself will do. This solves the problem of synchronizing on null.
Remove the re-initialization of props in the loadProperties block.
Remove the props == null check outside and inside the synchronized block.
Once that is done, your code will work exactly the way you want.
public class OnProperties {

    private static String dfltPropertyFile = "on.properties";
    private static long refreshSecs = 120L;
    private static Properties props = new Properties();
    private static long lastReadTimestamp = 0;

    public static String getProperty(String propertyName, String dfltValue) {
        long currentTimestamp = System.currentTimeMillis() / 1000L;
        if (refreshSecs > 0 && (currentTimestamp - lastReadTimestamp) > refreshSecs) {
            synchronized (props) {
                if (refreshSecs > 0 && (currentTimestamp - lastReadTimestamp) > refreshSecs) {
                    lastReadTimestamp = currentTimestamp;
                    try {
                        loadProperties(dfltPropertyFile);
                        refreshSecs = getProperty("on.properties.refresh", 120L);
                        if (refreshSecs < 0L) {
                            refreshSecs = 0L;
                        }
                    } catch (Exception e) {
                        refreshSecs = 600L;
                    }
                }
            }
        }
        String propertyValue = props.getProperty(propertyName, dfltValue);
        return propertyValue;
    }

    public static boolean getProperty(String propertyName, boolean dfltValue) {
        boolean value = dfltValue;
        String strValue = getProperty(propertyName, (String) null);
        if (strValue != null) {
            try {
                value = Boolean.parseBoolean(strValue);
            } catch (NumberFormatException e) {
                // just keep the default
            }
        }
        return value;
    }

    private static void loadProperties(String p_propertiesFile)
            throws java.io.IOException, java.io.FileNotFoundException {
        InputStream fileStream = new FileInputStream(p_propertiesFile);
        props.load(fileStream);
        fileStream.close();
    }
}
Please accept that the double-checked locking idiom is broken and does not work (i.e. does not synchronize properly). Even if you make it work using volatile (at the right place), it is far too complex for what you get.
So my suggestion: Simply synchronize everything. Then try and measure. If you find out that OnProperties is the bottleneck, consider more powerful/clever synchronization techniques and come back if necessary:
public class OnProperties {

    /* some private fields here */

    public static synchronized String getProperty(String propertyName, String dfltValue) {
        reloadPropertiesIfNecessary();
        return props.getProperty(propertyName, dfltValue);
    }

    /* other public methods using getProperty come here */

    private static void reloadPropertiesIfNecessary() {
        if (/* check timestamp etc. */) {
            loadProperties(dfltPropertyFile);
            // update timestamp etc.
        }
    }

    private static void loadProperties(String filename) throws IOException {
        try (InputStream stream = new FileInputStream(filename)) {
            props = new Properties();
            props.load(stream);
        }
    }
}

Dcm4Che - getting images from pacs

I've got the following problem: I have to write a small application that connects to a PACS and gets images. I decided to use the dcm4che toolkit. I've written the following code:
public class Dcm4 {

    /**
     * @param args the command line arguments
     */
    public static void main(String[] args) {
        // TODO code application logic here
        DcmQR dcmqr = new MyDcmQR("server");
        dcmqr.setCalledAET("server", true);
        dcmqr.setRemoteHost("213.165.94.158");
        dcmqr.setRemotePort(104);
        dcmqr.getKeys();
        dcmqr.setDateTimeMatching(true);
        dcmqr.setCFind(true);
        dcmqr.setCGet(true);
        dcmqr.setQueryLevel(MyDcmQR.QueryRetrieveLevel.IMAGE);
        dcmqr.addMatchingKey(Tag.toTagPath("PatientID"), "2011");
        dcmqr.addMatchingKey(Tag.toTagPath("StudyInstanceUID"), "1.2.276.0.7230010.3.1.2.669896852.2528.1325171276.917");
        dcmqr.addMatchingKey(Tag.toTagPath("SeriesInstanceUID"), "1.2.276.0.7230010.3.1.3.669896852.2528.1325171276.916");
        dcmqr.configureTransferCapability(true);
        List<DicomObject> result = null;
        byte[] imgTab = null;
        BufferedImage bImage = null;
        try {
            dcmqr.start();
            System.out.println("started");
            dcmqr.open();
            System.out.println("opened");
            result = dcmqr.query();
            System.out.println("queried");
            dcmqr.get(result);
            System.out.println("List Size = " + result.size());
            for (DicomObject dco : result) {
                System.out.println(dco);
                dcmTools.toByteArray(dco);
                System.out.println("end parsing");
            }
        } catch (Exception e) {
            System.out.println("error " + e);
        }
        try {
            dcmqr.stop();
            dcmqr.close();
        } catch (Exception e) {
        }
        System.out.println("done");
    }
}
Everything seems to be fine until I call dcmTools.toByteArray(dco). The output up to the toByteArray() call looks like this:
List Size = 1
(0008,0052) CS #6 [IMAGE] Query/Retrieve Level
(0008,0054) AE #6 [server] Retrieve AE Title
(0020,000E) UI #54 [1.2.276.0.7230010.3.1.3.669896852.2528.1325171276.916] Series Instance UID
Source of ToByteArray:
public static byte[] toByteArray(DicomObject obj) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    BufferedOutputStream bos = new BufferedOutputStream(baos);
    DicomOutputStream dos = new DicomOutputStream(bos);
    dos.writeDicomFile(obj);
    dos.close();
    byte[] data = baos.toByteArray();
    return data;
}
After calling toByteArray I get this output:
error java.lang.IllegalArgumentException: Missing (0002,0010) Transfer Syntax UID
I've found some information on other forums, and it seems like the DcmQR.get() method doesn't send the image data. Is it possible to force DcmQR to do so? I've read that the problem is in or around the DcmQR.createStorageService() method, but I haven't found the solution. Please help me!
Hello cneller!
I've made the changes you suggested: I added setMoveDest and setStoreDestination, and the DicomObjects are stored in the destination I added - it looks great. Then I tried to write a response handler based on FutureDimseRSP, which is used in the Association.cget method:
public class MyDimseRSP extends DimseRSPHandler implements DimseRSP {

    private MyEntry entry = new MyEntry(null, null);
    private boolean finished;
    private int autoCancel;
    private IOException ex;

    @Override
    public synchronized void onDimseRSP(Association as, DicomObject cmd, DicomObject data) {
        super.onDimseRSP(as, cmd, data);
        MyEntry last = entry;
        while (last.next != null)
            last = last.next;
        last.next = new MyEntry(cmd, data);
        if (CommandUtils.isPending(cmd)) {
            if (autoCancel > 0 && --autoCancel == 0)
                try {
                    super.cancel(as);
                } catch (IOException e) {
                    ex = e;
                }
        } else {
            finished = true;
        }
        notifyAll();
    }

    @Override
    public synchronized void onClosed(Association as) {
        if (!finished) {
            // ex = as.getException();
            ex = null;
            if (ex == null) {
                ex = new IOException("Association to " + as.getRemoteAET()
                        + " closed before receive of outstanding DIMSE RSP");
            }
            notifyAll();
        }
    }

    public final void setAutoCancel(int autoCancel) {
        this.autoCancel = autoCancel;
    }

    @Override
    public void cancel(Association a) throws IOException {
        if (ex != null)
            throw ex;
        if (!finished)
            super.cancel(a);
    }

    public DicomObject getDataset() {
        return entry.command;
    }

    public DicomObject getCommand() {
        return entry.dataset;
    }

    public MyEntry getEntry() {
        return entry;
    }

    public synchronized boolean next() throws IOException, InterruptedException {
        if (entry.next == null) {
            if (finished)
                return false;
            while (entry.next == null && ex == null)
                wait();
            if (ex != null)
                throw ex;
        }
        entry = entry.next;
        return true;
    }
}
Here is the MyEntry code:
public class MyEntry {

    final DicomObject command;
    final DicomObject dataset;
    MyEntry next;

    public MyEntry(DicomObject command, DicomObject dataset) {
        this.command = command;
        this.dataset = dataset;
    }

    public DicomObject getCommand() {
        return command;
    }

    public DicomObject getDataset() {
        return dataset;
    }

    public MyEntry getNext() {
        return next;
    }

    public void setNext(MyEntry next) {
        this.next = next;
    }
}
Then I retyped the get method from DcmQR as follows:
public void getObject(DicomObject obj, DimseRSPHandler rspHandler) throws IOException, InterruptedException {
    TransferCapability tc = selectTransferCapability(qrlevel.getGetClassUids());
    MyDimseRSP myRsp = new MyDimseRSP();
    if (tc == null)
        throw new NoPresentationContextException(UIDDictionary
                .getDictionary().prompt(qrlevel.getGetClassUids()[0])
                + " not supported by " + remoteAE.getAETitle());
    String cuid = tc.getSopClass();
    String tsuid = selectTransferSyntax(tc);
    DicomObject key = obj.subSet(MOVE_KEYS);
    assoc.cget(cuid, priority, key, tsuid, rspHandler);
    assoc.waitForDimseRSP();
}
As the second argument of this method I used an instance of my response handler (MyDimseRSP). When I run my code I get null values for the command and dataset of my response handler. In the next entry only command is not null, and of course it's not the DicomObject I need. What am I doing wrong?
You're going to have to step through the code a bit (including the DCM4CHE toolkit code). I suspect you are using the default response handler, which just counts the number of completed operations, and doesn't actually store the image data from the get command.
Clearly, your for loop, below, is looping over the results of the find operation, not the get (which needs to be handled in the response handler).
for(DicomObject dco:result)
I expect you will have to override the response handler to write your DICOM files appropriately. See also the DcmRcv class for writing DICOM files from the DicomObject you'll receive.
From your edits above, I assume you are just trying to get the raw DICOM instance data (not the command that stored it). What about a response handler roughly like:
List<DicomObject> dataList = new ArrayList<DicomObject>();

@Override
public void onDimseRSP(Association as, DicomObject cmd, DicomObject data) {
    if (shouldAdd(as, cmd)) {
        dataList.add(data);
    }
}
Watch out for large lists, but it should get you the data in memory.

How to make sure a file is loaded from disk only once by multiple threads in Java

There is a file which could be accessed by multiple threads in random order. How can I make sure the file is loaded from disk only once, to reduce file I/O costs, in Java?
Thanks.
I would do it this way
private final Map<File, byte[]> cache = new HashMap<File, byte[]>();

public synchronized byte[] readFile(File file) throws IOException {
    byte[] content = cache.get(file);
    if (content == null) {
        content = Files.readAllBytes(file.toPath());
        cache.put(file, content);
    }
    return content;
}
UPDATE
This version is supposed to allow asynchronous reading:
private final Map<File, byte[]> cache = new HashMap<File, byte[]>();
private final Map<File, Object> locks = new HashMap<File, Object>();

public byte[] readFile(File file) throws IOException {
    Object lock = getLock(file);
    synchronized (lock) {
        byte[] content = cache.get(file);
        if (content == null) {
            content = Files.readAllBytes(file.toPath());
            cache.put(file, content);
        }
        return content;
    }
}

private synchronized Object getLock(File file) {
    Object lock = locks.get(file);
    if (lock == null) {
        lock = new Object();
        locks.put(file, lock);
    }
    return lock;
}
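For comparison, the same load-once guarantee can be written more compactly on Java 8+ with ConcurrentHashMap.computeIfAbsent, which runs the loader at most once per key while concurrent callers for the same file wait (a sketch, not the answerer's code):
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.util.concurrent.ConcurrentHashMap;

private final ConcurrentHashMap<File, byte[]> cache = new ConcurrentHashMap<File, byte[]>();

public byte[] readFile(File file) throws IOException {
    try {
        // computeIfAbsent is atomic per key: the loading function runs
        // at most once per file; other callers block only for that key.
        return cache.computeIfAbsent(file, f -> {
            try {
                return Files.readAllBytes(f.toPath());
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        });
    } catch (UncheckedIOException e) {
        throw e.getCause();
    }
}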
Well, you could keep track of the files you are currently accessing...
public static final long FILE_LOCKED = -1;
private static Set<File> lockedFiles = new HashSet<File>();
private byte[] fileContent;

private synchronized boolean acquireFileLock(File file) {
    if (lockedFiles.contains(file)) {
        return false;
    }
    lockedFiles.add(file);
    return true;
}

public long readFile(File file) {
    if (acquireFileLock(file)) {
        // read your file into fileContent here and count the bytes read
        lockedFiles.remove(file);
        return bytesRead;
    } else {
        return FILE_LOCKED;
    }
}
