Calculate delta offsets in Kafka (Java)

In a Spring project I use Kafka, and I now want to write a method that takes a topic name and a group id as parameters
and calculates the difference between the last offsets of the topic partitions and the offsets consumed by the group.
I already get the last offsets;
now I need the consumed offsets to compute the difference.
public ResponseEntity<Offsets> deltaOffsets(@RequestParam(name = "groupId") String groupId, @RequestParam(name = "topic") String topic) {
Map<String, Object> properties = (Map) kafkaLocalConsumerConfig.get("kafkaLocalConsumerConfig");
properties.put("group.id", groupId);
properties.put("enable.auto.commit", "true");
List<TopicPartition> partition = new ArrayList<>();
KafkaConsumer<String, RefentialToReload> kafkaLocalConsumer = new KafkaConsumer<>(properties);
Map<String, List<PartitionInfo>> topics = kafkaLocalConsumer.listTopics();
List<PartitionInfo> partitionInfos = topics.get(topic);
if (partitionInfos == null) {
log.warn("Partition information was not found for topic {}", topic);
}
else {
for (PartitionInfo partitionInfo : partitionInfos) {
TopicPartition topicPartition = new TopicPartition(topic, partitionInfo.partition());
partition.add(topicPartition);
log.info("partition assigned to kafkaLocalConsumer");
}
}
// get last offsets of the topic partitions (pass the built list; assignment() stays empty because assign() is never called)
Map<TopicPartition, Long> endOffsetsByPartition = kafkaLocalConsumer.endOffsets(partition);
// here I need to get the consumed offsets
}
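For the consumed offsets themselves, one option is to ask the same consumer for the group's committed positions (it is configured with the requested group.id); a minimal sketch completing the snippet above, where committed() may return null for partitions the group has never committed:
// delta = end offset minus the group's committed offset, per partition
Map<TopicPartition, Long> delta = new HashMap<>();
for (TopicPartition tp : partition) {
    OffsetAndMetadata committed = kafkaLocalConsumer.committed(tp);
    long consumed = committed != null ? committed.offset() : 0L;
    Long end = endOffsetsByPartition.get(tp);
    delta.put(tp, (end != null ? end : 0L) - consumed);
}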

beginningOffsets() returns the first offsets, not the last.
You can use an AdminClient; here is an example that displays the current and end offsets:
@Bean
public ApplicationRunner runner(KafkaAdmin admin, ConsumerFactory<String, String> cf) throws Exception {
return args -> {
try (
AdminClient client = AdminClient.create(admin.getConfig());
Consumer<String, String> consumer = cf.createConsumer("group", "clientId", "");
) {
Collection<ConsumerGroupListing> groups = client.listConsumerGroups()
.all()
.get(10, TimeUnit.SECONDS);
groups.forEach(group -> {
Map<TopicPartition, OffsetAndMetadata> map = null;
try {
map = client.listConsumerGroupOffsets(group.groupId())
.partitionsToOffsetAndMetadata()
.get(10, TimeUnit.SECONDS);
}
catch (InterruptedException e) {
e.printStackTrace();
Thread.currentThread().interrupt();
}
catch (ExecutionException e) {
e.printStackTrace();
}
catch (TimeoutException e) {
e.printStackTrace();
}
Map<TopicPartition, Long> endOffsets = consumer.endOffsets(map.keySet());
map.forEach((tp, off) -> {
System.out.println("group: " + group + " tp: " + tp
+ " current offset: " + off.offset()
+ " end offset: " + endOffsets.get(tp));
});
});
}
};
}
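To get the delta the question asks for, subtract the committed offset from the end offset; a small follow-up sketch using the same names as the example above:
map.forEach((tp, off) -> {
    long lag = endOffsets.get(tp) - off.offset();
    System.out.println("group: " + group.groupId() + " tp: " + tp + " lag: " + lag);
});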

Related

postForEntity data is not being saved in the database

public UaaGroup createGroup() {
String requestUrl = appConfig.getUaa().getBase_url() + "/Groups";
LOGGER.info("requestUrl : {}", requestUrl);
UaaGroup uaaGroup = new UaaGroup();
uaaGroup.setDescription("description");
uaaGroup.setDisplayName(UUID.randomUUID().toString());
LOGGER.info("DisplayName before rest call : {}", uaaGroup.getDisplayName());
try {
ResponseEntity<UaaGroup> responseEntity = restTemplate.postForEntity(requestUrl, uaaGroup, UaaGroup.class,
"");
uaaGroup = responseEntity.getBody();
LOGGER.info("UaaGroupServiceImpl.createGroup: uaaGroup={}", responseEntity.getBody().toString());
return uaaGroup;
} catch (Exception e) {
LOGGER.error("Create UAA Group failed: {}", e);
throw e;
}
}
public UaaGroup updateGroup(String groupId, GroupRequest groupRequest) {
String requestUrl = appConfig.getUaa().getBase_url() + "/Groups/{groupId}";
UaaGroup uaaGroup = new UaaGroup();
if (!Strings.isNullOrEmpty(groupId)) {
String displayName = "eid-" + groupRequest.getEnterpriseId() + '-' + "gid-" + groupId + '-'
+ groupRequest.getRole();
String description = groupRequest.getEnterpriseName() + ":" + groupRequest.getName();
uaaGroup.setDisplayName(displayName);
uaaGroup.setDescription(description);
try {
HttpEntity<UaaGroup> entity = new HttpEntity<UaaGroup>(uaaGroup);
ResponseEntity<UaaGroup> responseEntity = restTemplate.exchange(requestUrl, HttpMethod.PUT, entity,
UaaGroup.class, groupId);
uaaGroup = responseEntity.getBody();
LOGGER.info("Updated Group", responseEntity.getBody().toString());
return uaaGroup;
} catch (Exception e) {
LOGGER.info("Failed to update the Group: {}", e.getMessage());
}
}
return uaaGroup;
}
@Override
public UaaGroup handleGroup(GroupRequest request) {
UaaGroup uaaGroup = this.createGroup();
LOGGER.info("handleGroup() createdGroup: {}", uaaGroup);
UaaGroupList uaaGroupList = uaaService.listUaaGroups(); // newly created group is not displaying here
String groupId = "";
if (uaaGroup != null) {
for (UaaGroupList.Resources resources : uaaGroupList.getResources()) {
if (uaaGroup.getDisplayName().equals(resources.getDisplayName())) {
groupId = resources.getId();
LOGGER.info("groupId: {}", groupId);
}
}
}
// if (Strings.isNullOrEmpty(groupId)) {
// UaaGroup uaagroup = createGroup();
// uaaGroupList = uaaService.listUaaGroups();
// if (uaaGroupList != null) {
// for (UaaGroupList.Resources resources : uaaGroupList.getResources()) {
// if (uaagroup.getDisplayName().equals(resources.getDisplayName())) {
// groupId = resources.getId();
// LOGGER.info("Uaa User Group Id found: {}", groupId);
// }
// }
// if (Strings.isNullOrEmpty(groupId)) {
// // this should never happen...
// LOGGER.error("Failed to create UAA Group : {}");
// }
// }
// }
// }
if (!Strings.isNullOrEmpty(groupId)) {
LOGGER.info("groupId: {}", groupId);
uaaGroup = updateGroup(groupId, request);
LOGGER.info("updatedGroup: {}", uaaGroup);
return uaaGroup;
}
return uaaGroup; // every time I am getting only the createGroup object
}
While creating a group, I first create it with a random UUID as the display name, and then use that UUID to look up the groupId. In my case, after creating the group I am not able to see the newly created group in the list of groups.
In the handleGroup() method I always get the created group object, but that created group is not showing up in the list of groups.
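As a side note, if the UAA create response already carries the server-assigned id (the posted UaaGroup class is not shown, so this is an assumption), the round trip through listUaaGroups() could be skipped entirely; a minimal sketch:
// hypothetical: take the id straight from the create response instead of re-listing groups
UaaGroup created = this.createGroup();
String groupId = created != null ? created.getId() : null; // assumes UaaGroup exposes getId()
if (!Strings.isNullOrEmpty(groupId)) {
    return updateGroup(groupId, request);
}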

Kafka pipe example doesn't pipe all records from topic-a to topic-b

I use the consumer and producer example called "KafkaConsumerProducerDemo" provided by Kafka, and the example streams app called "pipe", also provided by Kafka.
I want to achieve the following:
- the Producer writes to topic1
- the pipe app consumes from topic1 and produces to topic2
- the Consumer consumes from topic2.
All I changed are the topics:
Consumer: consumerThread = new Consumer(KafkaProperties.TOPIC2, "DemoConsumer", false, 500, latch);
pipe app: builder.stream("topic1").to("topic2");
Producer: producerThread = new Producer(KafkaProperties.TOPIC, isAsync, null, false, 500, latch);
As a result, the pipe app didn't transfer all records from topic1 to topic2: there are 500 records in topic1, but only 127 in topic2.
Besides, I've tried connecting my Producer directly to the Consumer, and that works fine, so I think the problem is in the pipe app.
Here is the code:
public class Pipe {
public static void main(String[] args) throws Exception {
Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-pipe");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
final StreamsBuilder builder = new StreamsBuilder();
builder.stream("streams-plaintext-input").to("streams-pipe-output");
final Topology topology = builder.build();
final KafkaStreams streams = new KafkaStreams(topology, props);
final CountDownLatch latch = new CountDownLatch(1);
//System.out.print(topology);
System.out.println(topology.describe());
// attach shutdown handler to catch control-c
Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
@Override
public void run() {
streams.close();
latch.countDown();
}
});
try {
streams.start();
latch.await();
} catch (Throwable e) {
System.exit(1);
}
System.exit(0);
}
}
public class KafkaConsumerDemo {
public static void main(String[] args) {
SampleConsumer consumerThread = new SampleConsumer("streams-pipe-output");
consumerThread.start();
}
}
public class SampleConsumer extends ShutdownableThread {
private final KafkaConsumer<Integer, String> consumer;
private final String topic;
public static final String KAFKA_SERVER_URL = "localhost";
public static final int KAFKA_SERVER_PORT = 9092;
public static final String CLIENT_ID = "SampleConsumer";
public SampleConsumer(String topic){
super("KafkaConsumerExample", false);
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_SERVER_URL + ":" + KAFKA_SERVER_PORT);
props.put(ConsumerConfig.GROUP_ID_CONFIG, CLIENT_ID);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.IntegerDeserializer");
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
consumer = new KafkaConsumer<>(props);
this.topic = topic;
}
@Override
public void doWork() {
consumer.subscribe(Collections.singletonList(this.topic));
ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofSeconds(1));
for (ConsumerRecord<Integer, String> record : records) {
System.out.println("Received message: (" + record.key() + ", " + record.value() + ") at offset " + record.offset());
}
}
@Override
public String name() {
return null;
}
@Override
public boolean isInterruptible() {
return false;
}
}
public class KafkaProducerDemo {
public static final String TOPIC = "streams-plaintext-input";
public static void main(String[] args) {
boolean isAsync = false;
SampleProducer producerThread = new SampleProducer(TOPIC, isAsync);
// start the producer
producerThread.start();
}
}
public class SampleProducer extends Thread {
private final KafkaProducer<Integer, String> producer;
private final String topic;
private final Boolean isAsync;
public static final String KAFKA_SERVER_URL = "localhost";
public static final int KAFKA_SERVER_PORT = 9092;
public static final String CLIENT_ID = "SampleProducer";
public SampleProducer(String topic, Boolean isAsync) {
Properties properties = new Properties();
properties.put("bootstrap.servers", KAFKA_SERVER_URL + ":" + KAFKA_SERVER_PORT);
properties.put("client.id", CLIENT_ID);
properties.put("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
producer = new KafkaProducer<>(properties);
this.topic = topic;
this.isAsync = isAsync;
}
public void run() {
int messageNo = 1;
while (true) {
String messageStr = "Message_try" + messageNo;
long startTime = System.currentTimeMillis();
if (isAsync) { // Send asynchronously
producer.send(new ProducerRecord<>(topic,
messageNo,
messageStr), new DemoCallBack(startTime, messageNo, messageStr));
} else { // Send synchronously
try {
producer.send(new ProducerRecord<>(topic,
messageNo,
messageStr)).get();
System.out.println("Sent message: (" + messageNo + ", " + messageStr + ")");
} catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
// handle the exception
}
}
++messageNo;
}
}
}
class DemoCallBack implements Callback {
private final long startTime;
private final int key;
private final String message;
public DemoCallBack(long startTime, int key, String message) {
this.startTime = startTime;
this.key = key;
this.message = message;
}
/**
* onCompletion method will be called when the record sent to the Kafka Server has been acknowledged.
*
* @param metadata The metadata contains the partition and offset of the record. Null if an error occurred.
* @param exception The exception thrown during processing of this record. Null if no error occurred.
*/
public void onCompletion(RecordMetadata metadata, Exception exception) {
long elapsedTime = System.currentTimeMillis() - startTime;
if (metadata != null) {
System.out.println(
"message(" + key + ", " + message + ") sent to partition(" + metadata.partition() +
"), " +
"offset(" + metadata.offset() + ") in " + elapsedTime + " ms");
} else {
exception.printStackTrace();
}
}
}
Producer, consumer, and streams app should all use the same type of serde.
After changing the key serializer from String to Integer, it worked.
Thanks to @cricket_007, who helped me solve the problem, and @Cahit Gungor, who helped me improve my wording!
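Concretely, since the sample producer writes Integer keys and String values while the Pipe class above sets String serdes as the defaults, the topology has to read and write with matching serdes; a minimal sketch of that change:
// match the producer's IntegerSerializer / StringSerializer instead of the String defaults
builder.stream("streams-plaintext-input", Consumed.with(Serdes.Integer(), Serdes.String()))
    .to("streams-pipe-output", Produced.with(Serdes.Integer(), Serdes.String()));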

Read from Splunk source and write to topic: same record written repeatedly, latest records not pulled

The same record is being written to the topic, and the latest records are not being pulled from Splunk. The time parameters are set in the start() method to pull the last one minute of data. Any inputs?
Currently I don't set an offset from the source. When poll() runs each time, does it look up the source offset and then poll? Can a timestamp from the logs be used as the offset?
@Override
public List<SourceRecord> poll() throws InterruptedException {
List<SourceRecord> results = new ArrayList<>();
Map<String, String> recordProperties = new HashMap<String, String>();
while (true) {
try {
String line = null;
InputStream stream = job.getResults(previewArgs);
String earlierKey = null;
String value = null;
ResultsReaderCsv csv = new ResultsReaderCsv(stream);
HashMap<String, String> event;
while ((event = csv.getNextEvent()) != null) {
for (String key : event.keySet()) {
if (key.equals("rawlogs")) {
recordProperties.put("rawlogs", event.get(key));
results.add(extractRecord(Splunklog.SplunkLogSchema(), line, recordProperties));
return results;
}
}
}
csv.close();
stream.close();
Thread.sleep(500);
} catch(Exception ex) {
System.out.println("Exception occurred : " + ex);
}
}
}
private SourceRecord extractRecord(Schema schema, String line, Map<String, String> recordProperties) {
Map<String, String> sourcePartition = Collections.singletonMap(FILENAME_FIELD, FILENAME);
Map<String, String> sourceOffset = Collections.singletonMap(POSITION_FIELD, recordProperties.get(OFFSET_KEY));
return new SourceRecord(sourcePartition, sourceOffset, TOPIC_NAME, schema, recordProperties);
}
@Override
public void start(Map<String, String> properties) {
try {
config = new SplunkSourceTaskConfig(properties);
} catch (ConfigException e) {
throw new ConnectException("Couldn't start SplunkSourceTask due to configuration error", e);
}
HttpService.setSslSecurityProtocol(SSLSecurityProtocol.TLSv1_2);
Service service = new Service("splnkip", port);
String credentials = "user:pwd";
String basicAuthHeader = Base64.encode(credentials.getBytes());
service.setToken("Basic " + basicAuthHeader);
String startOffset = readOffset();
JobArgs jobArgs = new JobArgs();
if (startOffset != null) {
log.info("-------------------------------task OFFSET!NULL ");
jobArgs.setExecutionMode(JobArgs.ExecutionMode.BLOCKING);
jobArgs.setSearchMode(JobArgs.SearchMode.NORMAL);
jobArgs.setEarliestTime(startOffset);
jobArgs.setLatestTime("now");
jobArgs.setStatusBuckets(300);
} else {
log.info("-------------------------------task OFFSET=NULL ");
jobArgs.setExecutionMode(JobArgs.ExecutionMode.BLOCKING);
jobArgs.setSearchMode(JobArgs.SearchMode.NORMAL);
jobArgs.setEarliestTime("+419m");
jobArgs.setLatestTime("+420m");
jobArgs.setStatusBuckets(300);
}
String mySearch = "search host=search query";
job = service.search(mySearch, jobArgs);
while (!job.isReady()) {
try {
Thread.sleep(500);
} catch (InterruptedException ex) {
log.error("Exception occurred while waiting for job to start: " + ex);
}
}
previewArgs = new JobResultsPreviewArgs();
previewArgs.put("output_mode", "csv");
stop = new AtomicBoolean(false);
}
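On the offset question: poll() does not look anything up by itself. Connect persists whatever map is passed as the sourceOffset of each SourceRecord, and the task has to read it back itself, typically in start(). A possible sketch of the readOffset() helper used above, assuming the offset map stores a Splunk time string under POSITION_FIELD:
// hypothetical readOffset(): ask Connect for the last committed source offset of this source partition
private String readOffset() {
    if (context == null) {
        return null;
    }
    Map<String, Object> offset = context.offsetStorageReader()
            .offset(Collections.singletonMap(FILENAME_FIELD, FILENAME));
    if (offset == null) {
        return null; // nothing committed yet; fall back to the fixed time window
    }
    Object position = offset.get(POSITION_FIELD);
    return position != null ? position.toString() : null;
}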

Spark: save Kafka InputDStream as a JSON file

I was just wondering whether there is a method in Spark that lets me save a JavaInputDStream as a JSON file, or generally as any file.
And if not, is there another way to save the content of a Kafka topic as a file in Spark?
Thank you very much!
Once you have mapped your JavaInputDStream to a stream, you can do the following:
stream.foreachRDD(rdd -> {
OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
rdd.mapToPair(new PairFunction<ConsumerRecord<String, String>, String, String>() {
@Override
public Tuple2<String, String> call(ConsumerRecord<String, String> record) {
return new Tuple2<>(record.key(), record.value());
}
}).foreachPartition(partition -> {
OffsetRange o = offsetRanges[TaskContext.get().partitionId()];
System.out.println(o.topic() + " " + o.partition() + " " + o.fromOffset() + " " + o.untilOffset());
if (partition.hasNext()) {
try (PrintWriter out = new PrintWriter("filename.txt")) {
while (partition.hasNext()) {
Tuple2<String, String> message = partition.next();
out.println(message);
}
} catch (Exception e) {
e.printStackTrace();
}
}
});
});
ssc.start();
ssc.awaitTermination();
Just don't forget that if you have multiple partitions inside your Kafka topic, you are going to write a file per partition following the approach above.
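If the goal is simply to dump the topic contents to files (and the record values are already JSON strings), a shorter sketch is to let Spark write the files itself; the output path is illustrative, and each micro-batch produces one directory with one part file per Kafka partition:
stream.foreachRDD((rdd, time) -> {
    rdd.map(ConsumerRecord::value) // keep only the message payload
       .saveAsTextFile("/tmp/topic-dump/" + time.milliseconds()); // one output directory per batch
});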

Spring Statemachine factory stays in memory

I have used Spring Statemachine in quite a complex scenario; I will explain my problem with the simplest part of it. Refer to the image below: this is my main state machine.
The state circled in red points to the following sub-machine.
So, as you can see, I have three actions: sendBasicTemplate, timeoutLogAction and processBasicTemplateReply. I will provide the related code segments and my configuration below.
What I have observed during this process is that the state machines created by the factory always stay in memory; there is some reference to them that I cannot track down.
Is it that the state machine doesn't stop, or is there anything I'm doing wrong? Here's my code.
Configuration class
@Configuration
@EnableStateMachineFactory
public class CambodiaStateMachine extends StateMachineConfigurerAdapter<String, String> {
@Override
public void configure(StateMachineModelConfigurer<String, String> model) throws Exception {
model
.withModel()
.factory(modelFactory());
}
@Override
public void configure(StateMachineConfigurationConfigurer<String, String> config) throws Exception {
config
.withConfiguration()
.machineId("cambodia")
.autoStartup(true)
.listener(listener());
}
@Bean
public StateMachineListener<String, String> listener() {
return new StateMachineListenerAdapter<String, String>() {
@Override
public void stateChanged(State<String, String> from, State<String, String> to) {
System.out.println("State change to " + to.getId());
}
};
}
@Bean
public StateMachineModelFactory<String, String> modelFactory() {
return new UmlStateMachineModelFactory("classpath:stm/model.uml");
}
}
Methods: 1. This is how events are fed to the machine and where new state machine instances are created. I take my events from a queue:
@RabbitListener(bindings = @QueueBinding(value = @Queue(value = "sims.events.mq", durable = "true"), exchange = @Exchange(type = ExchangeTypes.TOPIC, value = "sims.events.mq.xch", ignoreDeclarationExceptions = "true", durable = "true"), key = "events"))
public void process(GenericMessage<String> message) {
try {
String imei = (String) message.getHeaders().get("imei");
Subscriber subscriber = subscriberService.findSubscriber(imei);
// quickly create 'new' state machine
StateMachine<String, String> stateMachine = factory.getStateMachine();
stateMachine.addStateListener(new CompositeStateMachineListener<String, String>() {
@Override
public void stateContext(StateContext<String, String> arg0) {
String user = (String) arg0.getExtendedState().getVariables().get("imei");
if (user == null) {
return;
}
log.info(arg0.getStage().toString() + "**********" + stateMachine.getState());
try {
redisStateMachinePersister.persist(arg0.getStateMachine(), "testprefixSw:" + user);
} catch (Exception e) {
log.error(e.getMessage(), e);
}
}
});
// restore from persistent
String user = (String) message.getHeaders().get("imei");
log.info(user);
// attempt restoring only if key is exist
if (redisTemplate.hasKey("testprefixSw:" + user)) {
System.out.println("************************ prefix exists...restoring");
resetStateMachineFromStore(stateMachine, user);
} else {
stateMachine.start();
System.out.println("************************ No prefix");
}
log.info("Payload == > " + message.getPayload());
try {
stateMachine.getExtendedState().getVariables().put("imei", user);
stateMachine.getExtendedState().getVariables().put("fromState", stateMachine.getState().getId());
stateMachine.getExtendedState().getVariables().put("eventName", message.getPayload());
if(null!= message.getHeaders().get("templates"))
stateMachine.getExtendedState().getVariables().put("templates", message.getHeaders().get("templates"));
if(null!= message.getHeaders().get("ttl"))
stateMachine.getExtendedState().getVariables().put("ttl", message.getHeaders().get("ttl"));
} catch (Exception e) {
log.error(e.getMessage(), e);
}
// check if state is properly restored...
log.info("Current State " + stateMachine.getState().toString());
feedMachine(stateMachine, user, message);
log.info("handler exited");
} catch (Exception e) {
log.error(e.getMessage(), e);
}
// TODO: save persistent state
}
private void feedMachine(StateMachine<String, String> stateMachine, String user, GenericMessage<String> event)
throws Exception {
stateMachine.sendEvent(event);
System.out.println("persist machine --- > state :" + stateMachine.getState().toString());
redisStateMachinePersister.persist(stateMachine, "testprefixSw:" + user);
}
private StateMachine<String, String> resetStateMachineFromStore(StateMachine<String, String> stateMachine,
String user) throws Exception {
StateMachine<String, String> machine = redisStateMachinePersister.restore(stateMachine, "testprefixSw:" + user);
System.out.println("restore machine --- > state :" + machine.getState().toString());
return machine;
}
Actions
@Bean
public Action<String, String> sendBasicTemplate() {
// Action handler...
return new Action<String, String>() {
@Override
public void execute(StateContext<String, String> context) {
// MP: variables are the right way to do
String imeiNo = (String) context.getExtendedState().getVariables().get("imei");
String template = (String) context.getMessageHeader("template");
log.info("sending basic template " + template + " to " + imeiNo);
findTemplateNSend(context, template, imeiNo);
xbossBalanceCheck(context, imeiNo, "Direct Query");
setRiskyState(context, "testprefixSw:RISKY_StateBasic_WFT_Timeout" + imeiNo, 0);
}
};
}
@Bean
public Action<String, String> processBasicTemplateReply() {
// Action handler...
return new Action<String, String>() {
@Override
public void execute(StateContext<String, String> context) {
log.info("Result for basic template processing started");
log.info(context.getStateMachine().getState().getIds().toString());
String imeiNo = (String) context.getExtendedState().getVariables().get("imei");
saveDirectValues(context, imeiNo);
String fromState = (String) context.getExtendedState().getVariables().get("fromState");
String eventName = (String) context.getExtendedState().getVariables().get("eventName");
long trId = (Long) context.getMessageHeader("processId") != null? (Long) context.getMessageHeader("processId") : 0;
String key = "testprefixSw:RISKY_StateBasic_WFT_Timeout" + imeiNo;
log.info("*Going to delete if exists key ==>" + key);
if (clearRiskyStateIfSet(context, key)) {
log.info("------------------------------Jedis Exists");
sendSubscriberEventLog(imeiNo, fromState, context.getStateMachine().getState().getId(), trId, eventName, false, "Query Event Success");
}
// mark as success sent
context.getStateMachine().sendEvent("SEQUENCE_COMPLETE");
}
};
}
@Bean
public Action<String, String> timeoutLogAction() {
// Action handler...
return new Action<String, String>() {
@Override
public void execute(StateContext<String, String> context) {
// log.info("timeout log Action");
String imeiNo = (String) context.getStateMachine().getExtendedState().getVariables().get("imei");
// String imeiNo = (String)
// context.getExtendedState().getVariables().get("imei");
String fromState = (String) context.getExtendedState().getVariables().get("fromState");
String eventName = (String) context.getExtendedState().getVariables().get("eventName");
long trId = (Long) context.getMessageHeader("processId") != null ? (Long) context.getMessageHeader("processId") : 0;
String key = "testprefixSw:RISKY_StateBasic_WFT_Timeout" + imeiNo;
log.info("*Going to delete if exists key ==>" + key);
if (clearRiskyStateIfSet(context, key)) {
log.info("------------------------------Jedis Exists at timeout. Event Failed");
sendSubscriberEventLog(imeiNo, fromState, context.getStateMachine().getId(), trId, eventName, true, "Direct Query Failed due to Timeout");
sendAlert(imeiNo, EventPriority.NORMAL, "Direct Query Failed due to Timeout");
}
}
};
}
So, based on the above, is there anything I'm missing that keeps the created state machines from being garbage collected? Or is there any other explanation for why memory is consumed with each request and never released?
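One thing that stands out in process(): a machine is started or restored for every message but never stopped. Whether or not that is the whole story, stopping the machine once the event has been handled at least removes the most obvious reference to it; a minimal sketch:
// after the event has been handled and the state persisted to Redis
try {
    feedMachine(stateMachine, user, message);
} finally {
    stateMachine.stop(); // release the machine created for this message
}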
