Client hangs on sending messages to RabbitMQ with Java client

I've implemented a RabbitMQ publisher and consumer in a reactive manner with Java, but my publishing functionality hangs the channel. The queue itself works fine: declaring a queue and consuming from it both work, which I've verified with the admin management UI. When attempting to send more messages I no longer see logs like "queue declare success" or "delivering message to exchange...". By the way, I know I don't need declareQueue in deliver(), but I added it to verify that communication on this particular channel works.
My code is:
@Slf4j
@Component
public class RabbitConfigurator {

    private TasksQueueConfig cfg;
    private ReceiverOptions recOpts;
    private List<Address> addresses;
    private Utils.ExceptionFunction<ConnectionFactory, Connection> connSupplier;

    public RabbitConfigurator(TasksQueueConfig cfg) {
        this.cfg = cfg;
        addresses = cfg
            .getHosts()
            .stream()
            .map(Address::new)
            .collect(Collectors.toList());
        connSupplier = cf -> {
            LOG.info("initializing new RabbitMQ connection");
            return cf.newConnection(addresses, "dmTasksProc");
        };
    }

    @Bean
    public ConnectionFactory rabbitMQConnectionFactory() {
        ConnectionFactory cf = new ConnectionFactory();
        cf.setHost(cfg.getHosts().get(0));
        cf.setPort(5672);
        cf.setUsername(cfg.getUsername());
        cf.setPassword(cfg.getPassword());
        return cf;
    }

    @Bean
    public Sender sender(ConnectionFactory connFactory) {
        SenderOptions sendOpts = new SenderOptions()
            .connectionClosingTimeout(Duration.parse(cfg.getConnectionTimeout()))
            .connectionFactory(connFactory)
            .connectionSupplier(connSupplier)
            .connectionSubscriptionScheduler(Schedulers.elastic());
        return RabbitFlux.createSender(sendOpts);
    }

    @Bean
    public Receiver receiver(ConnectionFactory connFactory) {
        ReceiverOptions recOpts = new ReceiverOptions()
            .connectionClosingTimeout(Duration.parse(cfg.getConnectionTimeout()))
            .connectionFactory(connFactory)
            .connectionSupplier(connSupplier)
            .connectionSubscriptionScheduler(Schedulers.elastic());
        return RabbitFlux.createReceiver(recOpts);
    }

    @Bean
    public Flux<Delivery> deliveryFlux(Receiver receiver) {
        return receiver.consumeAutoAck(cfg.getName(), new ConsumeOptions().qos(cfg.getPrefetchCount()));
    }

    @Bean
    public AmqpAdmin rabbitAmqpAdmin(ConnectionFactory connFactory) {
        return new RabbitAdmin(new CachingConnectionFactory(connFactory));
    }
}
and the consumer/publisher:
@Slf4j
@Service
public class TasksQueue implements DisposableBean {

    private TasksQueueConfig cfg;
    private ObjectMapper mapper;
    private Flux<Delivery> deliveryFlux;
    private Receiver receiver;
    private Sender sender;
    private Disposable consumer;

    public TasksQueue(TasksQueueConfig cfg, AmqpAdmin amqpAdmin, ObjectMapper mapper, Flux<Delivery> deliveryFlux,
                      Receiver receiver, Sender sender) {
        this.cfg = cfg;
        this.mapper = mapper;
        this.deliveryFlux = deliveryFlux;
        this.receiver = receiver;
        this.sender = sender;
        amqpAdmin.declareQueue(new Queue(cfg.getName(), false, false, false));
        consumer = consume();
    }

    public Mono<Void> deliver(Flux<Task> tasks) {
        var pub = sender.sendWithPublishConfirms(
            tasks.map(task -> {
                try {
                    String exchange = "";
                    LOG.debug("delivering message to exchange='{}', routingKey='{}'", exchange, cfg.getName());
                    return new OutboundMessage(exchange, cfg.getName(), mapper.writeValueAsBytes(task));
                } catch(JsonProcessingException ex) {
                    throw Exceptions.propagate(ex);
                }
            }));

        return sender.declareQueue(QueueSpecification.queue(cfg.getName()))
            .flatMap(declareOk -> {
                LOG.info("queue declare success");
                return Mono.just(declareOk);
            })
            .thenMany(pub)
            .doOnError(JsonProcessingException.class, ex -> LOG.error("Cannot prepare queue message:", ex))
            .doOnError(ex -> LOG.error("Failed to send task to the queue:", ex))
            .map(res -> {
                if(res.isAck()) {
                    LOG.info("Message {} sent successfully", new String(res.getOutboundMessage().getBody()));
                    return res;
                } else {
                    LOG.info("todo");
                    return res;
                }
            })
            .then();
    }

    private Disposable consume() {
        return deliveryFlux
            .retryWhen(Retry.fixedDelay(10, Duration.ofSeconds(1)))
            .doOnError(err -> {
                LOG.error("tasks consumer error", err);
            })
            .subscribe(m -> {
                LOG.info("Received message {}", new String(m.getBody()));
            });
    }

    @Override
    public void destroy() throws Exception {
        LOG.info("Cleaning up tasks queue resources");
        consumer.dispose();
        receiver.close();
        sender.close();
    }
}
Five minutes after attempting to send a message I get this log:
r.r.ChannelCloseHandlers$SenderChannelCloseHandler:47: closing channel 1 by signal cancel
r.r.ChannelCloseHandlers$SenderChannelCloseHandler:53: Channel 1 didn't close normally: null
Big thanks in advance for any input!

Related

Spring Integration: handle a connection close event with an event listener and re-establish the connection while using dynamic TCP routing

I am using Spring Integration to create a flow for a request/response architecture and also to receive arbitrary data from the server. So far I have followed examples from the spring-integration GitHub repo and advice from @Gary Russell and @Artem Bilan.
Here is my gateway interface
@Component
@MessagingGateway(defaultRequestChannel = "toTcp.input")
public interface ToTCP {
    byte[] send(String data, @Header("host") String host, @Header("port") int port, @Header("irregularMessageChannelName") String channelName);
    byte[] send(String data, @Header("host") String host, @Header("port") int port);
}
Here is my TcpClientConfig
@Component
public class TcpClientConfig {

    @Bean
    public IntegrationFlow toTcp() {
        return f -> f.route(new TcpRouter());
    }
}
Here is my TcpRouter that extends AbstractMessageRouter
public class TcpRouter extends AbstractMessageRouter {

    private final Logger log = LoggerFactory.getLogger(TcpRouter.class);

    private final static int MAX_CACHED = 100; // When this is exceeded, we remove the LRU.

    private HashMap<String, Message<?>> connectionRegistery = new HashMap<>();

    private final LinkedHashMap<String, MessageChannel> subFlows =
            new LinkedHashMap<String, MessageChannel>(MAX_CACHED, .75f, true) {

                @Override
                protected boolean removeEldestEntry(Map.Entry<String, MessageChannel> eldest) {
                    if (size() > MAX_CACHED) {
                        removeSubFlow(eldest);
                        return true;
                    } else {
                        return false;
                    }
                }
            };

    @Autowired
    private IntegrationFlowContext flowContext;

    @Override
    protected Collection<MessageChannel> determineTargetChannels(Message<?> message) {
        MessageChannel channel;
        boolean hasThisConnectionIrregularChannel = message.getHeaders().containsKey("irregularMessageChannelName");
        if (hasThisConnectionIrregularChannel) {
            channel = this.subFlows.get(message.getHeaders().get("host", String.class) + message.getHeaders().get("port") + ".extended");
        } else {
            channel = this.subFlows.get(message.getHeaders().get("host", String.class) + message.getHeaders().get("port"));
        }
        if (channel == null) {
            channel = createNewSubflow(message);
        }
        return Collections.singletonList(channel);
    }

    private MessageChannel createNewSubflow(Message<?> message) {
        String host = (String) message.getHeaders().get("host");
        Integer port = (Integer) message.getHeaders().get("port");
        boolean hasThisConnectionIrregularChannel = message.getHeaders().containsKey("irregularMessageChannelName");
        Assert.state(host != null && port != null, "host and/or port header missing");
        String flowRegisterKey;
        if (hasThisConnectionIrregularChannel) {
            flowRegisterKey = host + port + ".extended";
        } else {
            flowRegisterKey = host + port;
        }
        TcpNetClientConnectionFactory cf = new TcpNetClientConnectionFactory(host, port);
        cf.setSoTimeout(0);
        cf.setSoKeepAlive(true);
        ByteArrayCrLfSerializer byteArrayCrLfSerializer = new ByteArrayCrLfSerializer();
        byteArrayCrLfSerializer.setMaxMessageSize(1048576);
        cf.setSerializer(byteArrayCrLfSerializer);
        cf.setDeserializer(byteArrayCrLfSerializer);
        TcpOutboundGateway tcpOutboundGateway;
        if (hasThisConnectionIrregularChannel) {
            log.info("TcpRouter # createNewSubflow extended TcpOutboundGateway will be created");
            String irregularMessageChannelName = (String) message.getHeaders().get("irregularMessageChannelName");
            DirectChannel directChannel = getBeanFactory().getBean(irregularMessageChannelName, DirectChannel.class);
            tcpOutboundGateway = new ExtendedTcpOutboundGateway(directChannel);
        } else {
            log.info("TcpRouter # createNewSubflow standard TcpOutboundGateway will be created");
            tcpOutboundGateway = new TcpOutboundGateway();
        }
        tcpOutboundGateway.setConnectionFactory(cf);
        tcpOutboundGateway.setAdviceChain(Arrays.asList(new Advice[] { tcpRetryAdvice() }));
        IntegrationFlow flow = f -> f.handle(tcpOutboundGateway);
        IntegrationFlowContext.IntegrationFlowRegistration flowRegistration =
                this.flowContext.registration(flow)
                        //.addBean(cf)
                        .addBean("client_connection_" + flowRegisterKey, cf)
                        .id(flowRegisterKey + ".flow")
                        .register();
        MessageChannel inputChannel = flowRegistration.getInputChannel();
        this.subFlows.put(flowRegisterKey, inputChannel);
        this.connectionRegistery.put("client_connection_" + flowRegisterKey, message);
        return inputChannel;
    }

    private void removeSubFlow(Map.Entry<String, MessageChannel> eldest) {
        String hostPort = eldest.getKey();
        this.flowContext.remove(hostPort + ".flow");
    }

    @Bean
    public RequestHandlerRetryAdvice tcpRetryAdvice() {
        SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy();
        retryPolicy.setMaxAttempts(3);
        ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
        backOffPolicy.setInitialInterval(100);
        backOffPolicy.setMaxInterval(1000);
        backOffPolicy.setMultiplier(2);
        RetryTemplate retryTemplate = new RetryTemplate();
        retryTemplate.setRetryPolicy(retryPolicy);
        retryTemplate.setBackOffPolicy(backOffPolicy);
        RequestHandlerRetryAdvice tcpRetryAdvice = new RequestHandlerRetryAdvice();
        tcpRetryAdvice.setRetryTemplate(retryTemplate);
        // This allows fail-controlling
        tcpRetryAdvice.setRecoveryCallback(new ErrorMessageSendingRecoverer(failMessageChannel()));
        return tcpRetryAdvice;
    }

    @Bean
    public MessageChannel failMessageChannel() {
        return new DirectChannel();
    }

    @ServiceActivator(inputChannel = "failMessageChannel")
    public void messageAggregation(String in) {
        log.error("TcpRouter # connection retry failed with message : " + in);
    }

    @Autowired
    private ToTCP toTCP;

    @EventListener
    public void listen(TcpConnectionCloseEvent event) {
        String connectionFactoryName = event.getConnectionFactoryName();
        boolean isConnectionRegistered = this.connectionRegistery.containsKey(connectionFactoryName);
        if (isConnectionRegistered) {
            Message<?> message = this.connectionRegistery.get(connectionFactoryName);
            String host = (String) message.getHeaders().get("host");
            Integer port = (Integer) message.getHeaders().get("port");
            boolean hasThisConnectionIrregularChannel = message.getHeaders().containsKey("irregularMessageChannelName");
            if (hasThisConnectionIrregularChannel) {
                log.info("TcpRouter # listen # registered tcp connection with arbitrary message channel closed for host {} and port {}, it will open again !!", host, port);
                String unsolicitedMessageChannelName = (String) message.getHeaders().get("irregularMessageChannelName");
                toTCP.send(message.getPayload().toString(), host, port, unsolicitedMessageChannelName);
            } else {
                log.info("TcpRouter # listen # registered tcp connection closed for host {} and port {}, it will open again !!", host, port);
                toTCP.send(message.getPayload().toString(), host, port);
            }
        } else {
            log.info("TcpRouter # listen # unregistered tcp connection closed, no action required.");
        }
    }
}
In case of any connection close event, I can handle it with the event listener. In the event listener I can tell from connectionFactoryName which connection it was, because it was registered via addBean("client_connection_" + flowRegisterKey, cf). So that part is solved.
After detecting which connection closed, I should open it again, either to continue receiving arbitrary data OR to have a connection ready to the TCP server for sending any request... But I am not sure about the right way to re-establish the connection when sending data.
Should I use
@Autowired
private ToTCP toTCP;
in the TcpRouter class to send the message again,
OR
should I send the message directly to the
@Override
protected Collection<MessageChannel> determineTargetChannels(Message<?> message)
method? I am confused about how they behave... Can you tell me which is the more convenient way for the EventListener to re-establish the connection?
Actually you are right, the reconnection request is the same as the one I sent initially.
Should I use determineTargetChannels in that case?
No; do exactly the same in the event listener as whatever calls ToTCP in the first place (send a new request and handle the reply).
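In code form, a minimal sketch of that advice, re-using the connectionRegistery cache and the toTCP gateway already shown above (an illustration of the idea, not a verified drop-in):
@EventListener
public void listen(TcpConnectionCloseEvent event) {
    // look up the original request message cached when the sub-flow was registered
    Message<?> original = this.connectionRegistery.get(event.getConnectionFactoryName());
    if (original != null) {
        // drive the same gateway entry point as the initial call; this routes through
        // TcpRouter again and re-opens the connection as a side effect
        toTCP.send(original.getPayload().toString(),
                original.getHeaders().get("host", String.class),
                original.getHeaders().get("port", Integer.class));
    }
}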

How to use AmazonSQS listener with two accounts

I have an application with two worker classes. I want them to pull from AWS SQS, but from two different accounts.
I am using @SQSListener to achieve this. I am having trouble setting the right AmazonSQS client for each queue. I tried a custom DestinationResolver, but again it cannot access the right AmazonSQS client bean.
I'm using AmazonSQSAsync; maybe this is part of the problem. With the custom destination resolver I am getting access denied for one of the queues.
My config code:
@Bean(destroyMethod = "shutdown")
@Primary
public AmazonSQSAsync amazonSQS() {
    AmazonSQSAsync amazonSQSAsyncClient = new AmazonSQSAsyncClient(new AWSCredentialsProvider() {
        public void refresh() {}
        public AWSCredentials getCredentials() {
            return new AWSCredentials() {
                public String getAWSSecretKey() { return secretKey; }
                public String getAWSAccessKeyId() { return accessKey; }
            };
        }
    });
    QueueBufferConfig config = new QueueBufferConfig();
    config.setMaxBatchOpenMs(maxBatchOpenMs);
    config.setMaxBatchSize(maxBatchSize);
    LOGGER.info("SQS Client Initialized Successfully");
    return new AmazonSQSBufferedAsyncClient(amazonSQSAsyncClient, config);
}

@Bean(destroyMethod = "shutdown")
@Qualifier("workerSQS")
public AmazonSQSAsync workerSQS() {
    final ClientConfiguration cc = new ClientConfiguration();
    cc.setConnectionTimeout(listenerConnectionTimeout);
    cc.setSocketTimeout(listenerSocketTimeout);
    cc.setMaxConnections(listenerMaxConnection);
    cc.setRequestTimeout(listenerRequestTimeout);
    cc.setUseReaper(true);
    //cc.setConnectionMaxIdleMillis();
    AWSCredentialsProvider awsCredentialsProvider = new AWSCredentialsProvider() {
        public void refresh() {}
        public AWSCredentials getCredentials() {
            return new AWSCredentials() {
                public String getAWSSecretKey() { return routingSecretKey; }
                public String getAWSAccessKeyId() { return routingAccessKey; }
            };
        }
    };
    AmazonSQSAsync amazonSQSAsyncClient = AmazonSQSAsyncClientBuilder.standard()
            .withCredentials(awsCredentialsProvider)
            .withRegion(Regions.US_EAST_1)
            .withClientConfiguration(cc)
            .build();
    // See https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-client-side-buffering-request-batching.html
    // for QueueBufferConfig configuration parameters
    QueueBufferConfig config = new QueueBufferConfig();
    config.setLongPoll(true);
    return new AmazonSQSBufferedAsyncClient(amazonSQSAsyncClient, config);
}

@Bean
public SimpleMessageListenerContainerFactory simpleMessageListenerContainerFactory() {
    SimpleMessageListenerContainerFactory msgListenerContainerFactory = new SimpleMessageListenerContainerFactory();
    msgListenerContainerFactory.setBackOffTime(listenerBackOffTime);
    msgListenerContainerFactory.setWaitTimeOut(listenerWaitTimeOut);
    msgListenerContainerFactory.setVisibilityTimeout(listenerVisibilityTimeOut);
    msgListenerContainerFactory.setMaxNumberOfMessages(listenerMaxMessagesPerPoll);
    msgListenerContainerFactory.setDestinationResolver(destinationResolver());
    return msgListenerContainerFactory;
}

@Bean
public CustomDestinationResolver destinationResolver() {
    return new CustomDestinationResolver();
}

@Component
public static class CustomDestinationResolver implements DestinationResolver {

    @Autowired
    private AmazonSQS amazonSQS;

    @Autowired
    @Qualifier("workerSQS")
    private AmazonSQSAsync amazonSQSAsync;

    @Override
    public String resolveDestination(String name) throws DestinationResolutionException {
        String queueName = name;
        if (queueName.startsWith("tl")) {
            try {
                GetQueueUrlResult getQueueUrlResult = amazonSQSAsync.getQueueUrl(new GetQueueUrlRequest(name));
                return getQueueUrlResult.getQueueUrl();
            } catch (QueueDoesNotExistException var4) {
                throw new DestinationResolutionException(var4.getMessage(), var4);
            }
        } else {
            try {
                GetQueueUrlResult getQueueUrlResult = amazonSQS.getQueueUrl(new GetQueueUrlRequest(name));
                return getQueueUrlResult.getQueueUrl();
            } catch (QueueDoesNotExistException var4) {
                throw new DestinationResolutionException(var4.getMessage(), var4);
            }
        }
    }
}
I was not able to do it with the SQS listener, so I tried with a JMS listener and it worked.
I simply created two JMS listener container factories and used them; each listener uses a different AWS account.
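For reference, a minimal sketch of that JMS approach, assuming the amazon-sqs-java-messaging-lib is on the classpath; the property names, region, and queue names below are placeholders, not taken from the original post:
import com.amazon.sqs.javamessaging.ProviderConfiguration;
import com.amazon.sqs.javamessaging.SQSConnectionFactory;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jms.annotation.EnableJms;
import org.springframework.jms.annotation.JmsListener;
import org.springframework.jms.config.DefaultJmsListenerContainerFactory;
import org.springframework.stereotype.Component;

@Configuration
@EnableJms
public class TwoAccountJmsConfig {

    // Build a JMS listener container factory backed by an SQS client for one account
    private DefaultJmsListenerContainerFactory factoryFor(String accessKey, String secretKey) {
        SQSConnectionFactory connectionFactory = new SQSConnectionFactory(
                new ProviderConfiguration(),
                AmazonSQSClientBuilder.standard()
                        .withRegion(Regions.US_EAST_1)
                        .withCredentials(new AWSStaticCredentialsProvider(
                                new BasicAWSCredentials(accessKey, secretKey)))
                        .build());
        DefaultJmsListenerContainerFactory factory = new DefaultJmsListenerContainerFactory();
        factory.setConnectionFactory(connectionFactory);
        return factory;
    }

    @Bean
    public DefaultJmsListenerContainerFactory accountAFactory(
            @Value("${accountA.accessKey}") String accessKey,
            @Value("${accountA.secretKey}") String secretKey) {
        return factoryFor(accessKey, secretKey);
    }

    @Bean
    public DefaultJmsListenerContainerFactory accountBFactory(
            @Value("${accountB.accessKey}") String accessKey,
            @Value("${accountB.secretKey}") String secretKey) {
        return factoryFor(accessKey, secretKey);
    }
}

@Component
class TwoAccountWorkers {

    // Each listener is bound to the container factory (and hence the account) it needs
    @JmsListener(destination = "queue-in-account-a", containerFactory = "accountAFactory")
    public void workerA(String message) {
        System.out.println("account A: " + message);
    }

    @JmsListener(destination = "queue-in-account-b", containerFactory = "accountBFactory")
    public void workerB(String message) {
        System.out.println("account B: " + message);
    }
}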

Manually NACK a message from AmqpInboundChannelAdapter

This is my current code:
@Bean
public IntegrationFlow someFlow() {
    return IntegrationFlows
            .from(someInboundAdapter())
            .transform(new JsonToObjectTransformer(SomeObject.class))
            .filter((SomeObject s) -> s.getId() != null && s.getId().isRealId(), f -> f.discardChannel(manualNackChannel()))
            .channel(amqpInputChannel())
            .get();
}

@ServiceActivator(inputChannel = "manualNackChannel")
public void manualNack(@Header(AmqpHeaders.CHANNEL) Channel channel, @Header(AmqpHeaders.DELIVERY_TAG) Long tag) throws IOException {
    channel.basicNack(tag, false, false);
}

@Bean
public AmqpInboundChannelAdapter someInboundAdapter() {
    AmqpInboundChannelAdapter adapter = new AmqpInboundChannelAdapter(someListenerContainer());
    adapter.setErrorChannel(manualNackChannel()); // NOT WORKING
    return adapter;
}

@Bean
public SimpleMessageListenerContainer someListenerContainer() {
    SimpleMessageListenerContainer listenerContainer = new SimpleMessageListenerContainer(commonConfig.connectionFactory());
    listenerContainer.setQueues(someQueue());
    listenerContainer.setConcurrentConsumers(4);
    listenerContainer.setMessageConverter(jackson2JsonConverter());
    listenerContainer.setAcknowledgeMode(AcknowledgeMode.MANUAL);
    listenerContainer.setConsumerTagStrategy(consumerTagStrategy());
    listenerContainer.setAfterReceivePostProcessors(new GUnzipPostProcessor());
    listenerContainer.setAdviceChain(commonConfig.retryInterceptor()); // retries 3 times, then RejectAndDontRequeueRecoverer
    return listenerContainer;
}
Here I use MANUAL acking, since I want to ACK/NACK the message only if it is processed successfully in the last part of the IntegrationFlow.
In case the message cannot be deserialized, the retryInterceptor is invoked, but after exhausting all the retries I need to be able to manually NACK the message. I expected to do it with the setErrorChannel method on the adapter, but I cannot get the AMQP channel headers in manualNack.
Is this the proper way to manually NACK a message from an AmqpInboundChannelAdapter?
UPDATE
I guess this is my current solution, but I don't know if it is good enough:
private ErrorMessageStrategy nackStrategy() {
    return (throwable, attributes) -> {
        Object inputMessage = attributes.getAttribute(ErrorMessageUtils.INPUT_MESSAGE_CONTEXT_KEY);
        return new ErrorMessage(throwable, ((Message) inputMessage).getHeaders());
    };
}

@Bean
public AmqpInboundChannelAdapter someInboundAdapter() {
    AmqpInboundChannelAdapter adapter = new AmqpInboundChannelAdapter(someListenerContainer());
    adapter.setRecoveryCallback(new ErrorMessageSendingRecoverer(manualNackChannel(), nackStrategy()));
    adapter.setRetryTemplate(commonConfig.retryTemplate());
    return adapter;
}
in case that message cannot be deserialized
Since the AMQP message cannot be deserialized, the Spring Message isn't created and therefore there is no AmqpHeaders.CHANNEL header.
I'm not sure, though, how that ErrorMessageSendingRecoverer can help you here, because deserialization really happens at the SimpleMessageListenerContainer level, a bit earlier than onMessage() in the AmqpInboundChannelAdapter.
Not sure yet how to help you, but maybe you can share some simple Spring Boot project for us to play with? Thanks
Here is the full working code for this example. You can test ACK/NACK on 3 REST endpoints:
http://localhost:8080/sendForAck -> sends SomeObject to queue proba, transforms it, forwards it to exchange probaEx and ACKs it after that
http://localhost:8080/sendForNack -> sends a malformed byte[] message which cannot be deserialized and will be NACK-ed
http://localhost:8080/sendForNack2 -> creates a malformed JSON message which will be NACK-ed with InvalidFormatException
@Controller
@EnableAutoConfiguration
@Configuration
public class SampleController {

    @Autowired
    public RabbitTemplate rabbitTemplate;

    @RequestMapping("/sendForAck")
    @ResponseBody
    String sendForAck() {
        SomeObject s = new SomeObject();
        s.setId(2);
        rabbitTemplate.convertAndSend("", "proba", s);
        return "Sent for ACK!";
    }

    @RequestMapping("/sendForNack")
    @ResponseBody
    String sendForNack() {
        rabbitTemplate.convertAndSend("", "proba", new byte[]{1, 2, 3});
        return "Sent for NACK!";
    }

    @RequestMapping("/sendForNack2")
    @ResponseBody
    String sendForNack2() {
        MessageProperties p = new MessageProperties();
        p.getHeaders().put("__TypeId__", "SampleController$SomeObject");
        p.setDeliveryMode(MessageDeliveryMode.PERSISTENT);
        p.setPriority(0);
        p.setContentEncoding("UTF-8");
        p.setContentType("application/json");
        rabbitTemplate.send("", "proba", new org.springframework.amqp.core.Message("{\"id\":\"abc\"}".getBytes(), p));
        return "Sent for NACK2!";
    }

    static class SomeObject {

        private Integer id;

        public Integer getId() { return id; }

        public void setId(Integer id) { this.id = id; }

        @Override
        public String toString() {
            return "SomeObject{" +
                    "id=" + id +
                    '}';
        }
    }

    @Bean
    public IntegrationFlow someFlow() {
        return IntegrationFlows
                .from(someInboundAdapter())
                .transform(new JsonToObjectTransformer(SomeObject.class))
                .filter((SomeObject s) -> s.getId() != null, f -> f.discardChannel(manualNackChannel()))
                .transform((SomeObject s) -> { s.setId(s.getId() * 2); return s; })
                .handle(amqpOutboundEndpoint())
                .get();
    }

    @Bean
    public MessageChannel manualNackChannel() {
        return new DirectChannel();
    }

    @Bean
    public MessageChannel manualAckChannel() {
        return new DirectChannel();
    }

    @ServiceActivator(inputChannel = "manualNackChannel")
    public void manualNack(@Header(AmqpHeaders.CHANNEL) Channel channel, @Header(AmqpHeaders.DELIVERY_TAG) Long tag, @Payload Object p) throws IOException {
        channel.basicNack(tag, false, false);
        System.out.println("NACKED " + p);
    }

    @ServiceActivator(inputChannel = "manualAckChannel")
    public void manualAck(@Header(AmqpHeaders.CHANNEL) Channel channel, @Header(AmqpHeaders.DELIVERY_TAG) Long tag, @Payload Object p) throws IOException {
        channel.basicAck(tag, false);
        System.out.println("ACKED " + p);
    }

    private ErrorMessageStrategy nackStrategy() {
        return (throwable, attributes) -> {
            Message inputMessage = (Message) attributes.getAttribute(ErrorMessageUtils.INPUT_MESSAGE_CONTEXT_KEY);
            return new ErrorMessage(throwable, inputMessage.getHeaders());
        };
    }

    @Bean
    public AmqpInboundChannelAdapter someInboundAdapter() {
        AmqpInboundChannelAdapter adapter = new AmqpInboundChannelAdapter(someListenerContainer());
        adapter.setRecoveryCallback(new ErrorMessageSendingRecoverer(manualNackChannel(), nackStrategy()));
        adapter.setRetryTemplate(retryTemplate());
        return adapter;
    }

    @Bean
    public RetryTemplate retryTemplate() {
        RetryTemplate template = new RetryTemplate();
        ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
        backOffPolicy.setInitialInterval(10);
        backOffPolicy.setMaxInterval(5000);
        backOffPolicy.setMultiplier(4);
        template.setBackOffPolicy(backOffPolicy);
        SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy();
        retryPolicy.setMaxAttempts(4);
        template.setRetryPolicy(retryPolicy);
        return template;
    }

    @Bean
    public AmqpOutboundEndpoint amqpOutboundEndpoint() {
        AmqpOutboundEndpoint outboundEndpoint = new AmqpOutboundEndpoint(ackTemplate());
        outboundEndpoint.setConfirmAckChannel(manualAckChannel());
        outboundEndpoint.setConfirmCorrelationExpressionString("#root");
        outboundEndpoint.setExchangeName("probaEx");
        return outboundEndpoint;
    }

    @Bean
    public MessageConverter jackson2JsonConverter() {
        return new Jackson2JsonMessageConverter();
    }

    @Bean
    public RabbitTemplate ackTemplate() {
        RabbitTemplate ackTemplate = new RabbitTemplate(connectionFactory());
        ackTemplate.setMessageConverter(jackson2JsonConverter());
        return ackTemplate;
    }

    @Bean
    public Queue someQueue() {
        return QueueBuilder.nonDurable("proba").build();
    }

    @Bean
    public Exchange someExchange() {
        return ExchangeBuilder.fanoutExchange("probaEx").build();
    }

    @Bean
    public ConnectionFactory connectionFactory() {
        CachingConnectionFactory factory = new CachingConnectionFactory();
        factory.setHost("10.10.121.137");
        factory.setPort(35672);
        factory.setUsername("root");
        factory.setPassword("123456");
        factory.setPublisherConfirms(true);
        return factory;
    }

    @Bean
    public SimpleMessageListenerContainer someListenerContainer() {
        SimpleMessageListenerContainer listenerContainer = new SimpleMessageListenerContainer(connectionFactory());
        listenerContainer.setQueues(someQueue());
        listenerContainer.setMessageConverter(jackson2JsonConverter());
        listenerContainer.setAcknowledgeMode(AcknowledgeMode.MANUAL);
        return listenerContainer;
    }

    public static void main(String[] args) throws Exception {
        SpringApplication.run(SampleController.class, args);
    }
}
Still, the question remains whether this private ErrorMessageStrategy nackStrategy() could be written in a better way.
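One modest tightening, offered only as a sketch and not a confirmed canonical answer: guard against the retry context carrying no input message, which, per the discussion above, can happen when the failure occurs before a Spring Message exists:
// Sketch: same strategy, but null-safe when no input message is available
private ErrorMessageStrategy nackStrategy() {
    return (throwable, attributes) -> {
        Object inputMessage = (attributes != null)
                ? attributes.getAttribute(ErrorMessageUtils.INPUT_MESSAGE_CONTEXT_KEY)
                : null;
        if (inputMessage instanceof Message) {
            // keep the AMQP headers (channel, delivery tag) for the manual NACK
            return new ErrorMessage(throwable, ((Message<?>) inputMessage).getHeaders());
        }
        return new ErrorMessage(throwable); // no headers to propagate
    };
}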

Spring AMQP - message re-queuing using the dead letter mechanism with TTL

It's like "Houston, we have a problem here": I need to schedule/delay a message for 5 minutes after the first attempt to process an event fails.
I have implemented a dead letter exchange for this scenario.
On failure, messages route to the DLX --> retry queue, then come back to the work queue after a TTL of 5 minutes for another attempt.
Here is the configuration I am using:
@Configuration
public class RabbitMQConfig {

    @Bean(name = "work")
    @Primary
    Queue workQueue() {
        return new Queue(WORK_QUEUE, true, false, false, null);
    }

    @Bean(name = "workExchange")
    @Primary
    TopicExchange workExchange() {
        return new TopicExchange(WORK_EXCHANGE, true, false);
    }

    @Bean
    Binding workBinding(Queue queue, TopicExchange exchange) {
        return BindingBuilder.bind(workQueue()).to(workExchange()).with("#");
    }

    @Bean(name = "retryExchange")
    FanoutExchange retryExchange() {
        return new FanoutExchange(RETRY_EXCHANGE, true, false);
    }

    @Bean(name = "retry")
    Queue retryQueue() {
        Map<String, Object> args = new HashMap<String, Object>();
        args.put("x-dead-letter-exchange", WORK_EXCHANGE);
        args.put("x-message-ttl", RETRY_DELAY); // delay of 5 min
        return new Queue(RETRY_QUEUE, true, false, false, args);
    }

    @Bean
    Binding retryBinding(Queue queue, FanoutExchange exchange) {
        return BindingBuilder.bind(retryQueue()).to(retryExchange());
    }

    @Bean
    public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory(ConnectionFactory connectionFactory) {
        SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
        factory.setConnectionFactory(connectionFactory);
        return factory;
    }

    @Bean
    Consumer receiver() {
        return new Consumer();
    }

    @Bean
    MessageListenerAdapter listenerAdapter(Consumer receiver) {
        return new MessageListenerAdapter(receiver, "receiveMessage");
    }
}
Producer.java:
@GetMapping(path = "/hello")
public String sayHello() {
    // Producer operation
    String messages[];
    messages = new String[] { " hello " };
    for (int i = 0; i < 5; i++) {
        String message = util.getMessage(messages) + i;
        rabbitTemplate.convertAndSend("WorkExchange", "", message);
        System.out.println(" Sent '" + message + "'");
    }
    return "hello";
}
Consumer.java:
public class Consumer {

    @RabbitListener(queues = "WorkQueue")
    public void receiveMessage(String message, Channel channel,
            @Header(AmqpHeaders.DELIVERY_TAG) Long tag) throws IOException, InterruptedException {
        try {
            System.out.println("message to be processed: " + message);
            doWorkTwo(message);
            channel.basicAck(tag, false);
        } catch (Exception e) {
            System.out.println("In the exception catch block");
            System.out.println("message in dead letter exchange: " + message);
            channel.basicPublish("RetryExchange", "", null, message.getBytes());
        }
    }

    private void doWorkTwo(String task) throws InterruptedException {
        int c = 0;
        int b = 5;
        int d = b / c; // division by zero: always throws to simulate a failure
    }
}
Is this the correct way to use a dead letter exchange for my scenario? Also, after waiting in the RETRY QUEUE for 5 minutes the first time, on the second attempt the message does not wait for 5 minutes in the RETRY QUEUE (I have set the TTL to 5 minutes) and moves to the WORK QUEUE immediately.
I am running this application by hitting the localhost:8080/hello URL.
Here is my updated configuration.
RabbitMQConfig.java:
@Configuration
@EnableRabbit
public class RabbitMQConfig {

    final static String WORK_QUEUE = "WorkQueue";
    final static String RETRY_QUEUE = "RetryQueue";
    final static String WORK_EXCHANGE = "WorkExchange"; // Dead Letter Exchange
    final static String RETRY_EXCHANGE = "RetryExchange";
    final static int RETRY_DELAY = 60000; // in ms (1 min)

    @Bean(name = "work")
    @Primary
    Queue workQueue() {
        Map<String, Object> args = new HashMap<String, Object>();
        args.put("x-dead-letter-exchange", RETRY_EXCHANGE);
        return new Queue(WORK_QUEUE, true, false, false, args);
    }

    @Bean(name = "workExchange")
    @Primary
    DirectExchange workExchange() {
        return new DirectExchange(WORK_EXCHANGE, true, false);
    }

    @Bean
    Binding workBinding(Queue queue, DirectExchange exchange) {
        return BindingBuilder.bind(workQueue()).to(workExchange()).with("");
    }

    @Bean(name = "retryExchange")
    DirectExchange retryExchange() {
        return new DirectExchange(RETRY_EXCHANGE, true, false);
    }

    // Messages will drop off RetryQueue into WorkExchange for re-processing
    // All messages in queue will expire at same rate
    @Bean(name = "retry")
    Queue retryQueue() {
        Map<String, Object> args = new HashMap<String, Object>();
        //args.put("x-dead-letter-exchange", WORK_EXCHANGE);
        //args.put("x-message-ttl", RETRY_DELAY);
        return new Queue(RETRY_QUEUE, true, false, false, null);
    }

    @Bean
    Binding retryBinding(Queue queue, DirectExchange exchange) {
        return BindingBuilder.bind(retryQueue()).to(retryExchange()).with("");
    }

    @Bean
    public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory(ConnectionFactory connectionFactory) {
        SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
        factory.setConnectionFactory(connectionFactory);
        factory.setDefaultRequeueRejected(false);
        /*factory.setAdviceChain(new Advice[] {
            org.springframework.amqp.rabbit.config.RetryInterceptorBuilder
                    .stateless()
                    .maxAttempts(2).recoverer(new RejectAndDontRequeueRecoverer())
                    .backOffOptions(1000, 2, 5000)
                    .build()
        });*/
        return factory;
    }

    @Bean
    Consumer receiver() {
        return new Consumer();
    }

    @Bean
    MessageListenerAdapter listenerAdapter(Consumer receiver) {
        return new MessageListenerAdapter(receiver, "receiveMessage");
    }
}
Consumer.java:
public class Consumer {

    @RabbitListener(queues = "WorkQueue")
    public void receiveMessage(String message, Channel channel,
            @Header(AmqpHeaders.DELIVERY_TAG) Long tag,
            @Header(required = false, name = "x-death") HashMap<String, String> xDeath)
            throws IOException, InterruptedException {
        doWorkTwo(message);
        channel.basicAck(tag, false);
    }

    private void doWorkTwo(String task) {
        int c = 0;
        int b = 5;
        if (c < b) {
            throw new AmqpRejectAndDontRequeueException(task);
        }
    }
}
If you reject the message so the broker routes it to a DLQ, you can examine the x-death header. In this scenario, I have a DLQ with a TTL of 5 seconds and the consumer of the message from the main queue rejects it; the broker routes it to the DLQ, then it expires and is routed back to the main queue - the x-death header shows the number of re-routing operations:
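As a sketch of what examining x-death can look like: per the RabbitMQ docs the header is a list of maps with a "count" entry in recent broker versions; the cut-off of 3 cycles below is an arbitrary illustration, not from the original post:
// Sketch: stop the retry loop after N dead-letter round trips using x-death
@RabbitListener(queues = "WorkQueue")
public void receiveMessage(String message, Channel channel,
        @Header(AmqpHeaders.DELIVERY_TAG) long tag,
        @Header(required = false, name = "x-death") List<Map<String, ?>> xDeath) throws IOException {
    long cycles = (xDeath == null || xDeath.isEmpty())
            ? 0
            : (Long) xDeath.get(0).get("count");
    if (cycles >= 3) {
        channel.basicAck(tag, false); // give up: ack (or route to a parking-lot queue)
        return;
    }
    // ... normal processing; throwing AmqpRejectAndDontRequeueException dead-letters again
}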

How to keep a STOMP connection open longer?

I'm currently writing a test echo client for a STOMP over WebSocket server in Java. However, I noticed that the connection is somewhat unpredictable: it is usually closed before the message is received, so the client produces this exception:
java.io.IOException: java.util.concurrent.ExecutionException: java.io.IOException: Unable to write the complete message as the WebSocket connection has been closed
at org.apache.tomcat.websocket.WsRemoteEndpointImplBase.startMessageBlock(WsRemoteEndpointImplBase.java:282) ~[tomcat-embed-websocket-8.0.30.jar:8.0.30]
at org.apache.tomcat.websocket.WsSession.sendCloseMessage(WsSession.java:584) [tomcat-embed-websocket-8.0.30.jar:8.0.30]
And here is my simple test client
@Slf4j
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(loader = SpringApplicationContextLoader.class, classes = Application.class)
@WebIntegrationTest(randomPort = true)
public class WebSocketConfigurationIT {

    @Value("${local.server.port}")
    private int port;

    private SockJsClient sockJsClient;

    @Before
    public void setUp() {
        final WebSocketTransport webSocketTransport = new WebSocketTransport(new StandardWebSocketClient());
        final RestTemplateXhrTransport restTemplateXhrTransport = new RestTemplateXhrTransport(new RestTemplate());
        sockJsClient = new SockJsClient(Lists.newArrayList(webSocketTransport, restTemplateXhrTransport));
    }

    @Test
    public void testEcho() throws Exception {
        CountDownLatch countDownLatch = new CountDownLatch(1);
        StompSessionHandler stompSessionHandler = new StompSessionHandlerAdapter() {};
        WebSocketStompClient webSocketStompClient = new WebSocketStompClient(sockJsClient);
        webSocketStompClient.setDefaultHeartbeat(new long[]{0, 0});
        webSocketStompClient.setMessageConverter(new MappingJackson2MessageConverter());
        ListenableFuture<StompSession> connect = webSocketStompClient.connect("ws://localhost:{port}/api/ws/media/socket", stompSessionHandler, port);
        StompSession session = connect.get();
        String message = UUID.randomUUID().toString();
        log.debug("sending {}", message);
        session.send("/echo/" + message, null);
        session.subscribe("/topic/echo/" + message, new StompFrameHandler() {

            @Override
            public Type getPayloadType(final StompHeaders headers) {
                return String.class;
            }

            @Override
            public void handleFrame(final StompHeaders headers, final Object payload) {
                log.debug("received {}", payload);
                assertEquals(message, payload);
                countDownLatch.countDown();
            }
        });
        // wait for the message to be echoed
        if (!countDownLatch.await(15, TimeUnit.SECONDS)) {
            fail("message not received");
        }
    }

    @Controller
    public static class EchoController {

        @MessageMapping("/echo/{message}")
        public String echo(SimpMessageHeaderAccessor simpMessageHeaderAccessor, @DestinationVariable("message") String message) {
            log.debug("header {}", simpMessageHeaderAccessor);
            log.debug("echoed {}", message);
            return message;
        }
    }
}
And the configuration class:
@Configuration
@EnableScheduling
@EnableWebSocketMessageBroker
public class WebSocketConfiguration extends AbstractWebSocketMessageBrokerConfigurer {

    @Override
    public void registerStompEndpoints(final StompEndpointRegistry registry) {
        registry.addEndpoint("/api/ws/media/socket").withSockJS();
    }

    @Override
    public void configureMessageBroker(final MessageBrokerRegistry registry) {
        super.configureMessageBroker(registry);
        //registry.setApplicationDestinationPrefixes("/");
        registry.enableSimpleBroker("/topic", "/queue");
    }
}
