How to use sshd mina/netty with projectreactor - java

I want to develop a reactive Java application which needs to communicate with some external services via SSH. As the reactive framework I am using Spring Boot WebFlux (Project Reactor), and sshd mina/netty for the SSH client. Basically, the application will open an SSH session and run some commands on the server. Which command to run next depends on the responses to the previous commands.
The question is: how do I integrate sshd mina into Spring Boot / Project Reactor (Mono/Flux)?
sshd mina offers the possibility of asynchronous responses, as shown in this test: https://github.com/apache/mina-sshd/blob/c876cce935f9278b0d50f02fd554eff1af949060/sshd-core/src/test/java/org/apache/sshd/client/ClientTest.java#L560
but I don't know how to integrate that with Mono/Flux.
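The general pattern I am attempting is to bridge the callback-style SshFuture API into a Mono via Mono.create; a minimal sketch of the idea (using the auth future purely as an illustration, not working code from my project):
```java
import org.apache.sshd.client.future.AuthFuture;
import reactor.core.publisher.Mono;

// adapt a callback-style SSHD future into a Mono that completes
// when the future does, or errors with the future's exception
static Mono<AuthFuture> toMono(AuthFuture future) {
    return Mono.create(sink -> future.addListener(f -> {
        if (f.isSuccess()) {
            sink.success(f);
        } else {
            sink.error(f.getException());
        }
    }));
}
```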
Until now, I have been able to get the response corresponding to the login, but not the following response after sending a command.
Here is my setup code.
The test SSH server is created via Docker:
docker run -d --rm -e SUDO_ACCESS=false -e PASSWORD_ACCESS=true -e USER_NAME=username -e USER_PASSWORD=password -p 2222:2222 lscr.io/linuxserver/openssh-server:latest
The Java project contains the following dependencies:
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-webflux</artifactId>
</dependency>
<dependency>
<groupId>org.apache.sshd</groupId>
<artifactId>sshd-netty</artifactId>
<version>2.8.0</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-test</artifactId>
<scope>test</scope>
</dependency>
The SSH client code, which I would like to be reactive, written
with the help of the MINA documentation (https://github.com/apache/mina-sshd)
and the only example of MINA async client usage I could find (https://github.com/apache/mina-sshd/blob/master/sshd-core/src/test/java/org/apache/sshd/client/ClientTest.java#L518):
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.time.Duration;
import org.apache.sshd.client.SshClient;
import org.apache.sshd.client.channel.ClientChannel;
import org.apache.sshd.client.session.ClientSession;
import org.apache.sshd.common.channel.StreamingChannel;
import org.apache.sshd.common.future.SshFutureListener;
import org.apache.sshd.common.io.IoInputStream;
import org.apache.sshd.common.io.IoOutputStream;
import org.apache.sshd.common.io.IoReadFuture;
import org.apache.sshd.common.io.IoWriteFuture;
import org.apache.sshd.common.util.buffer.Buffer;
import org.apache.sshd.common.util.buffer.ByteArrayBuffer;
import reactor.core.publisher.Mono;
import reactor.core.publisher.MonoSink;
public class SshDocker implements AutoCloseable {
private static final String hostname = "localhost";
private static final String username = "username";
private static final String password = "password";
private static final int port = 2222;
private static final Duration clientTimeout = Duration.ofSeconds(10);
private SshClient client;
private ClientSession session;
private ClientChannel channel;
public Mono<String> open() throws IOException {
client = SshClient.setUpDefaultClient();
client.start();
session = client.connect(username, hostname, port).verify(clientTimeout).getSession();
session.addPasswordIdentity(password);
session.auth().verify(clientTimeout);
channel = session.createShellChannel();
channel.setStreaming(StreamingChannel.Streaming.Async);
channel.open().verify(clientTimeout);
final Duration timeout = Duration.ofSeconds(10);
return readResponse(timeout);
}
@Override
public void close() throws Exception {
channel.close();
session.close();
client.close();
}
public Mono<String> execCommand(final String command, final Duration timeout) {
return runCommand(command, timeout).flatMap(v -> readResponse(timeout));
}
private Mono<Void> runCommand(final String command, final Duration timeout) {
final IoOutputStream requestStream = channel.getAsyncIn();
return Mono.create(
voidMonoSink -> {
final ReactiveMonoRequestListener reactiveMonoRequestListener =
new ReactiveMonoRequestListener(timeout, voidMonoSink);
try {
requestStream
.writeBuffer(new ByteArrayBuffer(command.getBytes()))
.addListener(reactiveMonoRequestListener);
} catch (final IOException e) {
throw new RuntimeException(e);
}
});
}
private Mono<String> readResponse(final Duration timeout) {
final IoInputStream responseStream = channel.getAsyncOut();
return Mono.create(
monoSink -> {
final ReactiveMonoResponseListener reactiveResponseListener =
new ReactiveMonoResponseListener(responseStream, timeout, monoSink);
responseStream.read(new ByteArrayBuffer()).addListener(reactiveResponseListener);
});
}
public static class ReactiveMonoResponseListener implements SshFutureListener<IoReadFuture> {
final IoInputStream responseStream;
final ByteArrayOutputStream result = new ByteArrayOutputStream();
private final Duration timeout;
private final MonoSink<String> handler;
public ReactiveMonoResponseListener(
final IoInputStream responseStream,
final Duration timeout,
final MonoSink<String> handler) {
this.responseStream = responseStream;
this.timeout = timeout;
this.handler = handler;
}
@Override
public void operationComplete(final IoReadFuture ioReadFuture) {
System.out.println("Operation Read Complete");
if (handler != null) {
try {
ioReadFuture.verify(timeout);
final Buffer buffer = ioReadFuture.getBuffer();
result.write(buffer.array(), buffer.rpos(), buffer.available());
buffer.rpos(buffer.rpos() + buffer.available());
buffer.compact();
if (!result.toString().endsWith("$ ")) { // read response until next prompt
responseStream.read(buffer).addListener(this);
} else {
System.out.println("response >>>>>>>>");
System.out.println(result);
System.out.println("<<<<<<<< response");
handler.success(result.toString());
}
} catch (final IOException e) {
handler.error(e);
}
}
}
}
public static class ReactiveMonoRequestListener implements SshFutureListener<IoWriteFuture> {
private final MonoSink<Void> handler;
private final Duration timeout;
public ReactiveMonoRequestListener(final Duration timeout, final MonoSink<Void> handler) {
this.handler = handler;
this.timeout = timeout;
}
@Override
public void operationComplete(final IoWriteFuture ioWriteFuture) {
System.out.println("Operation Write Complete");
if (handler != null) {
try {
ioWriteFuture.verify(timeout);
handler.success();
} catch (final IOException e) {
handler.error(e);
}
}
}
}
}
The test used to run the reactive SSH client:
import java.time.Duration;
import org.junit.jupiter.api.Test;
class SshDockerTest {
@Test
void run() throws Exception {
final SshDocker sshClient = new SshDocker();
sshClient
.open()
.flatMap(v -> sshClient.execCommand("ls\n", Duration.ofSeconds(3)))
.subscribe(System.out::println);
}
}
When running the test, besides the debug log, I obtain:
Operation Read Complete
response >>>>>>>>
Welcome to OpenSSH Server
65d098057769:~$
<<<<<<<< response
Operation Write Complete
but there is no sign of the response to the ls command.
If it is not possible to make sshd mina reactive this way, what would be an alternative reactive solution?
Thanks

I finally found a working solution. The previous problem was that runCommand returned a Mono<Void>: an empty Mono completes without emitting a value, so flatMap never fires and the subsequent tasks in the pipeline are not triggered.
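To see the effect in isolation, here is a minimal Reactor sketch (illustrative only, not part of the fix itself) showing that an empty Mono skips flatMap:
```java
import reactor.core.publisher.Mono;

public class EmptyMonoDemo {
    public static void main(String[] args) {
        // a Mono<Void> can only complete empty, so the flatMap below never runs
        Mono.<Void>empty()
                .flatMap(v -> Mono.just("never reached"))
                .switchIfEmpty(Mono.just("completed empty"))
                .subscribe(System.out::println); // prints "completed empty"
    }
}
```
Switching runCommand to a Mono<Boolean> that actually emits a value makes the downstream flatMap fire.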
Here are the new changes:
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.time.Duration;
import org.apache.sshd.client.SshClient;
import org.apache.sshd.client.channel.ClientChannel;
import org.apache.sshd.client.session.ClientSession;
import org.apache.sshd.common.channel.StreamingChannel;
import org.apache.sshd.common.future.SshFutureListener;
import org.apache.sshd.common.io.IoInputStream;
import org.apache.sshd.common.io.IoOutputStream;
import org.apache.sshd.common.io.IoReadFuture;
import org.apache.sshd.common.io.IoWriteFuture;
import org.apache.sshd.common.util.buffer.Buffer;
import org.apache.sshd.common.util.buffer.ByteArrayBuffer;
import reactor.core.publisher.Mono;
import reactor.core.publisher.MonoSink;
import reactor.core.scheduler.Schedulers;
// start a docker container
// docker run -d --rm -e SUDO_ACCESS=false -e PASSWORD_ACCESS=true -e USER_NAME=username -e
// USER_PASSWORD=password -p 2222:2222 lscr.io/linuxserver/openssh-server:latest
public class SshDocker {
private static final String hostname = "localhost";
private static final String username = "username";
private static final String password = "password";
private static final int port = 2222;
private static final Duration clientTimeout = Duration.ofSeconds(10);
private IoInputStream responseStream;
private IoOutputStream requestStream;
private SshClient client;
private ClientSession session;
private ClientChannel channel;
private Boolean openNotReactive() throws IOException {
client = SshClient.setUpDefaultClient();
client.start();
session = client.connect(username, hostname, port).verify(clientTimeout).getSession();
session.addPasswordIdentity(password);
session.auth().verify(clientTimeout);
channel = session.createShellChannel();
channel.setStreaming(StreamingChannel.Streaming.Async);
channel.open().verify(clientTimeout);
responseStream = channel.getAsyncOut();
requestStream = channel.getAsyncIn();
return true;
}
public Mono<String> open() {
final Duration timeout = Duration.ofSeconds(10);
final Mono<Boolean> open =
Mono.fromCallable(this::openNotReactive).subscribeOn(Schedulers.boundedElastic());
return open.flatMap(r -> readResponse(timeout));
}
public Mono<Boolean> close() {
return Mono.fromCallable(
() -> {
closeNotReactive();
return true;
})
.subscribeOn(Schedulers.boundedElastic());
}
private void closeNotReactive() throws Exception {
System.out.println("Closing");
channel.close();
session.close();
client.close();
}
public Mono<String> execCommand(final String command, final Duration timeout) {
return runCommand(command, timeout).flatMap(v -> readResponse(timeout)).log();
}
private Mono<Boolean> runCommand(final String command, final Duration timeout) {
final String cmd = String.format("%s\n", command.strip());
return Mono.create(
monoSink -> {
final ReactiveMonoRequestListener reactiveMonoRequestListener =
new ReactiveMonoRequestListener(timeout, monoSink);
try {
final IoWriteFuture writeFuture =
requestStream.writeBuffer(new ByteArrayBuffer(cmd.getBytes()));
writeFuture.addListener(reactiveMonoRequestListener);
monoSink.onDispose(() -> writeFuture.removeListener(reactiveMonoRequestListener));
} catch (final IOException e) {
throw new RuntimeException(e);
}
});
}
private Mono<String> readResponse(final Duration timeout) {
// https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#create-java.util.function.Consumer-
return Mono.create(
monoSink -> {
final ReactiveMonoResponseListener reactiveResponseListener =
new ReactiveMonoResponseListener(responseStream, timeout, monoSink);
final IoReadFuture readFuture = responseStream.read(new ByteArrayBuffer());
readFuture.addListener(reactiveResponseListener);
monoSink.onDispose(() -> readFuture.removeListener(reactiveResponseListener));
});
}
public static class ReactiveMonoResponseListener implements SshFutureListener<IoReadFuture> {
final IoInputStream responseStream;
final ByteArrayOutputStream result = new ByteArrayOutputStream();
private final Duration timeout;
private final MonoSink<String> handler;
public ReactiveMonoResponseListener(
final IoInputStream responseStream,
final Duration timeout,
final MonoSink<String> handler) {
this.responseStream = responseStream;
this.timeout = timeout;
this.handler = handler;
}
@Override
public void operationComplete(final IoReadFuture ioReadFuture) {
System.out.println("Operation Read Complete");
if (handler != null) {
try {
ioReadFuture.verify(timeout);
final Buffer buffer = ioReadFuture.getBuffer();
result.write(buffer.array(), buffer.rpos(), buffer.available());
buffer.rpos(buffer.rpos() + buffer.available());
buffer.compact();
if (!result.toString().endsWith("$ ")) { // read response until next prompt
responseStream.read(buffer).addListener(this);
} else {
System.out.println("response mono >>>>>>>>");
System.out.println(result);
System.out.println("<<<<<<<< response mono");
handler.success(result.toString());
}
} catch (final IOException e) {
handler.error(e);
}
}
}
}
public static class ReactiveMonoRequestListener implements SshFutureListener<IoWriteFuture> {
private final MonoSink<Boolean> handler;
private final Duration timeout;
public ReactiveMonoRequestListener(final Duration timeout, final MonoSink<Boolean> handler) {
this.handler = handler;
this.timeout = timeout;
}
@Override
public void operationComplete(final IoWriteFuture ioWriteFuture) {
System.out.println("Operation Write Complete");
if (handler != null) {
try {
ioWriteFuture.verify(timeout);
handler.success(true);
} catch (final IOException e) {
handler.error(e);
}
}
}
}
}
and the "test" to run the application:
import java.time.Duration;
import org.junit.jupiter.api.Test;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;
class SshDockerTest {
@Test
void run() {
final SshDocker sshClient = new SshDocker();
final Mono<Boolean> result = sshClient
.open()
.flatMap(r -> sshClient.execCommand("ls", Duration.ofSeconds(3)))
.flatMap(r -> sshClient.execCommand("pwd", Duration.ofSeconds(3)))
.flatMap(r -> sshClient.close());
StepVerifier.create(result).expectNext(true).verifyComplete();
}
}
Is it possible to use AutoCloseable with Reactor, so that I don't have to pay too much attention to manually calling the close method?
If you see any improvement, please let me know.
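One candidate seems to be Mono.usingWhen, which acquires a resource, uses it, and releases it asynchronously; a rough sketch against the SshDocker class above (an untested assumption, not verified code):
```java
import java.time.Duration;
import reactor.core.publisher.Mono;

public class SshDockerUsingWhen {
    public static void main(String[] args) {
        Mono<String> result = Mono.usingWhen(
                Mono.fromCallable(SshDocker::new),   // acquire the resource
                ssh -> ssh.open()                    // use it
                        .flatMap(r -> ssh.execCommand("ls", Duration.ofSeconds(3))),
                SshDocker::close);                   // release on complete, error or cancel
        System.out.println(result.block());
    }
}
```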

Related

How to resolve "org.eclipse.jetty.websocket.api.MessageTooLargeException" for Java WebSocketStompClient

When I'm running a Java WebSocketStompClient, I get the error below:
org.eclipse.jetty.websocket.api.MessageTooLargeException: Text message size [73728] exceeds maximum size [65536]
Sample code:
import org.apache.log4j.Logger;
import org.springframework.messaging.simp.stomp.StompFrameHandler;
import org.springframework.messaging.simp.stomp.StompHeaders;
import org.springframework.messaging.simp.stomp.StompSession;
import org.springframework.messaging.simp.stomp.StompSessionHandlerAdapter;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.web.socket.WebSocketHttpHeaders;
import org.springframework.web.socket.client.WebSocketClient;
import org.springframework.web.socket.client.standard.StandardWebSocketClient;
import org.springframework.web.socket.messaging.WebSocketStompClient;
import org.springframework.web.socket.sockjs.client.SockJsClient;
import org.springframework.web.socket.sockjs.client.Transport;
import org.springframework.web.socket.sockjs.client.WebSocketTransport;
import org.springframework.web.socket.sockjs.frame.Jackson2SockJsMessageCodec;
import java.lang.reflect.Type;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
public class HelloClient {
private static Logger logger = Logger.getLogger(HelloClient.class);
StompSession session;
private final static WebSocketHttpHeaders headers = new WebSocketHttpHeaders();
public ListenableFuture<StompSession> connect() {
Transport webSocketTransport = new WebSocketTransport(new StandardWebSocketClient());
List<Transport> transports = Collections.singletonList(webSocketTransport);
SockJsClient sockJsClient = new SockJsClient(transports);
sockJsClient.setMessageCodec(new Jackson2SockJsMessageCodec());
WebSocketStompClient stompClient = new WebSocketStompClient(sockJsClient);
long[] hb = stompClient.getDefaultHeartbeat();
boolean en = stompClient.isDefaultHeartbeatEnabled();
long timeout = stompClient.getReceiptTimeLimit();
String url = "https://www.test.com";
return stompClient.connect(url, headers, new MyHandler());
}
public void subscribeMsg(StompSession stompSession) throws ExecutionException, InterruptedException {
stompSession.subscribe("/topic/test", new StompFrameHandler() {
public Type getPayloadType(StompHeaders stompHeaders) {
return byte[].class;
}
public void handleFrame(StompHeaders stompHeaders, Object o) {
logger.info("Received message " + new String((byte[]) o));
String response = new String((byte[]) o);
}
});
}
private class MyHandler extends StompSessionHandlerAdapter {
public void afterConnected(StompSession stompSession, StompHeaders stompHeaders) {
logger.info("Now connected");
session = stompSession;
}
}
public boolean isConnected() {
try {
Thread.sleep(500);
return session != null && session.isConnected();
} catch (Exception e) {
logger.warn("Error happens when checking connection status, ", e);
return false;
}
}
public static void main(String[] args) throws Exception {
HelloClient helloClient = new HelloClient();
ListenableFuture<StompSession> f = helloClient.connect();
StompSession stompSession = f.get();
helloClient.subscribeMsg(stompSession);
while (true) {
if (!helloClient.isConnected()) {
logger.info("wss diconnected ");
logger.info("need re-create ");
}
}
}
}
How can I increase this limit for a Java STOMP websocket client? I found some unrelated answers ("How can I set max buffer size for web socket client (Jetty) in Java") which are not suitable for a STOMP websocket client.
I also tried stompClient.setInboundMessageSizeLimit(Integer.MAX_VALUE), which doesn't work.
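One avenue that might be worth checking (an assumption on my part, untested): the limit may be enforced by the underlying javax.websocket container rather than by the STOMP layer, in which case configuring the container before wrapping it could help:
```java
import javax.websocket.ContainerProvider;
import javax.websocket.WebSocketContainer;
import org.springframework.web.socket.client.standard.StandardWebSocketClient;

// raise the buffer limits on the JSR-356 container backing StandardWebSocketClient
WebSocketContainer container = ContainerProvider.getWebSocketContainer();
container.setDefaultMaxTextMessageBufferSize(1024 * 1024);
container.setDefaultMaxBinaryMessageBufferSize(1024 * 1024);
StandardWebSocketClient webSocketClient = new StandardWebSocketClient(container);
```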

Springboot jetty web socket client annotation doesn't work

I developed a websocket client with Java Spring Boot.
I used the Jetty websocket library, adding the line below to build.gradle:
compile group: 'org.eclipse.jetty.websocket', name: 'websocket-client', version: '9.4.12.v20180830'
I made a websocket event handler like below (SimpleEchoSocket.java).
package com.iimp.pom.socket;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketMessage;
import org.eclipse.jetty.websocket.api.annotations.WebSocket;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
@WebSocket(maxTextMessageSize = 64 * 1024)
public class SimpleEchoSocket{
private final CountDownLatch closeLatch;
@SuppressWarnings("unused")
private Session session;
public SimpleEchoSocket(){
this.closeLatch = new CountDownLatch(1);
}
public boolean awaitClose(int duration, TimeUnit unit) throws InterruptedException{
return this.closeLatch.await(duration,unit);
}
@OnWebSocketClose
public void onClose(int statusCode, String reason){
System.out.printf("Connection closed: %d - %s%n",statusCode,reason);
this.session = null;
this.closeLatch.countDown(); // trigger latch
}
@OnWebSocketConnect
public void onConnect(Session session){
System.out.printf("Got connect: %s%n",session);
this.session = session;
try{
// CommonGlobalVariable.webSocketSession = session;
Future<Void> fut;
fut = session.getRemote().sendStringByFuture("Hello");
fut.get(2,TimeUnit.SECONDS); // wait for send to complete.
}catch (Throwable t){
t.printStackTrace();
}
}
@OnWebSocketMessage
public void onMessage(String msg){
System.out.printf("Got msg: %s%n",msg);
}
}
Also, I made the connection part like below:
String destUri = "ws://"+body.get("host").toString()+"/va?api-key="+body.get("apiKey").toString()+"&plate=img";
WebSocketClient client = new WebSocketClient();
SimpleEchoSocket socket = new SimpleEchoSocket();
client.start();
URI echoUri = new URI(destUri);
ClientUpgradeRequest requestws = new ClientUpgradeRequest();
requestws.setSubProtocols("va-metadata");
client.connect(socket,echoUri,requestws);
System.out.printf("Connecting to : %s%n",echoUri);
As a result, I guess I successfully connected to the websocket server, because I found the corresponding logs on the server.
However, the code in the @OnWebSocketConnect-annotated method is not executed.
How can I get the code in SimpleEchoSocket.java to run?
I figured it out by referring to Joakim's comment.
I changed SimpleEchoSocket.java as below.
@OnWebSocketConnect
public void onConnect(Session session){
System.out.printf("Got connect: %s%n",session);
this.session = session;
try{
// CommonGlobalVariable.webSocketSession = session;
// Future<Void> fut;
// fut = session.getRemote().sendStringByFuture("Hello");
// fut.get(2,TimeUnit.SECONDS); // wait for send to complete.
}catch (Throwable t){
t.printStackTrace();
}
}
@OnWebSocketMessage
public void onMessageString(Session session, String msg){
System.out.println("getRemoteAddress1:"+session.getRemoteAddress());
String vaHost = session.getRemoteAddress().toString().replaceAll("/", "");
System.out.println();
System.out.printf("Got msg: %s%n",msg);
}
@OnWebSocketMessage
public void onMessageBuffer(Session session, byte[] byteArray, int offset, int length) throws IOException {
System.out.println("onMessageBuffer");
System.out.println("getRemoteAddress2:"+session.getRemoteAddress());
FileUtils.writeByteArrayToFile(new File("C:/files/ws/"+System.nanoTime()+".jpg"), byteArray);
}
Thank you, Joakim!

Netty Pipeline being executed out of order

I am trying to figure out why my pipeline is being executed out of order.
I took the HexDumpProxy example and was trying to turn it into an HTTP proxy where I can look at all the traffic. For some reason the code is being executed backwards and I can't figure out why.
My server listens on 8443 and takes in the HTTP content. I wanted to read the Host header and create a frontend handler to route the data to the server, but my frontend handler executes first despite being last in the pipeline. I am unsure why it runs first; I thought it would execute in the following order:
LoggingHandler
HttpRequestDecoder
HttpObjectAggregator
HttpProxyListener
HttpReEncoder
HTTPProxyFrontEnd
The goal is to remove the frontend handler from the pipeline and have HttpProxyListener add it back after reading the Host header, but if I remove the frontend handler no data is transferred. Using breakpoints, HTTPProxyFrontEnd is hit before HttpProxyListener. I am unsure why it is being executed so out of order.
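For what it's worth, channelActive is a lifecycle event that travels through every inbound handler as soon as the connection is established, before any channelRead is delivered; a minimal EmbeddedChannel sketch (illustrative only, not taken from my proxy code) reproduces the order I am seeing:
```java
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.embedded.EmbeddedChannel;

public class PipelineOrderDemo {
    static ChannelInboundHandlerAdapter named(String name) {
        return new ChannelInboundHandlerAdapter() {
            @Override
            public void channelActive(ChannelHandlerContext ctx) {
                System.out.println(name + ".channelActive"); // fires once, on connect
                ctx.fireChannelActive();
            }
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) {
                System.out.println(name + ".channelRead");   // fires per inbound message
                ctx.fireChannelRead(msg);
            }
        };
    }

    public static void main(String[] args) {
        // channelActive reaches BOTH handlers during construction,
        // before any data has been written into the pipeline
        EmbeddedChannel ch = new EmbeddedChannel(named("listener"), named("frontend"));
        ch.writeInbound(Unpooled.wrappedBuffer(new byte[]{1}));
        ch.finishAndReleaseAll();
    }
}
```
So a breakpoint in the last handler's channelActive being hit before an earlier handler's channelRead does not by itself mean the pipeline is reordered; the two callbacks belong to different events.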
Main
```
EventLoopGroup bossGroup = new NioEventLoopGroup(1);
EventLoopGroup workerGroup = new NioEventLoopGroup();
try {
ServerBootstrap b = new ServerBootstrap();
b.group(bossGroup, workerGroup)
.channel(NioServerSocketChannel.class)
.handler(new LoggingHandler(LogLevel.INFO))
.childHandler(new HttpProxyServerInitializer(REMOTE_HOST, REMOTE_PORT))
.childOption(ChannelOption.AUTO_READ, false)
.bind(LOCAL_PORT).sync().channel().closeFuture().sync();
} finally {
bossGroup.shutdownGracefully();
workerGroup.shutdownGracefully();
}
```
Pipeline
```
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.MessageToByteEncoder;
import io.netty.handler.codec.http.*;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.ssl.SslHandler;
import io.netty.handler.ssl.util.SelfSignedCertificate;
import javax.net.ssl.SSLEngine;
public class HttpProxyServerInitializer extends ChannelInitializer {
private final String remoteHost;
private final int remotePort;
public HttpProxyServerInitializer(String remoteHost, int remotePort) {
this.remoteHost = remoteHost;
this.remotePort = remotePort;
}
@Override
protected void initChannel(Channel ch) throws Exception {
ch.pipeline().addLast(
new LoggingHandler(LogLevel.INFO),
new HttpRequestDecoder(),
new HttpObjectAggregator(8192),
new HttpProxyListener(),
new HttpReEncoder(),
new HTTPProxyFrontEnd(remoteHost, remotePort));
}
}
```
Proxy Front end
```
import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.*;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.DecoderResult;
import io.netty.handler.codec.http.*;
import io.netty.handler.codec.http.cookie.ServerCookieDecoder;
import io.netty.handler.codec.http.cookie.ServerCookieEncoder;
import io.netty.util.CharsetUtil;
import java.net.SocketAddress;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static io.netty.handler.codec.http.HttpResponseStatus.BAD_REQUEST;
import static io.netty.handler.codec.http.HttpResponseStatus.OK;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
public class HTTPProxyFrontEnd extends ChannelInboundHandlerAdapter {
private final String remoteHost;
private final int remotePort;
private final StringBuilder buf = new StringBuilder();
private HttpRequest request;
// As we use inboundChannel.eventLoop() when building the Bootstrap this does not need to be volatile as
// the outboundChannel will use the same EventLoop (and therefore Thread) as the inboundChannel.
private Channel outboundChannel;
public HTTPProxyFrontEnd(String remoteHost, int remotePort) {
this.remoteHost = remoteHost;
this.remotePort = remotePort;
}
@Override
public void channelActive(ChannelHandlerContext ctx) {
System.out.println("HTTPFrontEnd");
final Channel inboundChannel = ctx.channel();
// Start the connection attempt.
Bootstrap b = new Bootstrap();
b.group(inboundChannel.eventLoop())
.channel(ctx.channel().getClass())
.handler(new HexDumpProxyBackendHandler(inboundChannel))
.option(ChannelOption.AUTO_READ, false);
ChannelFuture f = b.connect(remoteHost, remotePort);
SocketAddress test = ctx.channel().remoteAddress();
outboundChannel = f.channel();
f.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) {
if (future.isSuccess()) {
// connection complete start to read first data
inboundChannel.read();
} else {
// Close the connection if the connection attempt has failed.
inboundChannel.close();
}
}
});
}
@Override
public void channelRead(final ChannelHandlerContext ctx, Object msg) throws InterruptedException {
if (outboundChannel.isActive()) {
outboundChannel.writeAndFlush(msg).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) {
if (future.isSuccess()) {
// was able to flush out data, start to read the next chunk
ctx.channel().read();
} else {
future.channel().close();
}
}
});
}
}
private boolean writeResponse(HttpObject currentObj, ChannelHandlerContext ctx) {
// Decide whether to close the connection or not.
boolean keepAlive = HttpUtil.isKeepAlive(request);
// Build the response object.
FullHttpResponse response = new DefaultFullHttpResponse(
HTTP_1_1, currentObj.decoderResult().isSuccess()? OK : BAD_REQUEST,
Unpooled.copiedBuffer(buf.toString(), CharsetUtil.UTF_8));
response.headers().set(HttpHeaderNames.CONTENT_TYPE, "text/plain; charset=UTF-8");
if (keepAlive) {
// Add 'Content-Length' header only for a keep-alive connection.
response.headers().setInt(HttpHeaderNames.CONTENT_LENGTH, response.content().readableBytes());
// Add keep alive header as per:
// - http://www.w3.org/Protocols/HTTP/1.1/draft-ietf-http-v11-spec-01.html#Connection
response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
}
// Encode the cookie.
String cookieString = request.headers().get(HttpHeaderNames.COOKIE);
if (cookieString != null) {
Set<io.netty.handler.codec.http.cookie.Cookie> cookies = ServerCookieDecoder.STRICT.decode(cookieString);
if (!cookies.isEmpty()) {
// Reset the cookies if necessary.
for (io.netty.handler.codec.http.cookie.Cookie cookie: cookies) {
response.headers().add(HttpHeaderNames.SET_COOKIE, io.netty.handler.codec.http.cookie.ServerCookieEncoder.STRICT.encode(cookie));
}
}
} else {
// Browser sent no cookie. Add some.
response.headers().add(HttpHeaderNames.SET_COOKIE, io.netty.handler.codec.http.cookie.ServerCookieEncoder.STRICT.encode("key1", "value1"));
response.headers().add(HttpHeaderNames.SET_COOKIE, ServerCookieEncoder.STRICT.encode("key2", "value2"));
}
// Write the response.
//ctx.writeAndFlush(response);
return keepAlive;
}
private static void appendDecoderResult(StringBuilder buf, HttpObject o) {
DecoderResult result = o.decoderResult();
if (result.isSuccess()) {
return;
}
buf.append(".. WITH DECODER FAILURE: ");
buf.append(result.cause());
buf.append("\r\n");
}
@Override
public void channelInactive(ChannelHandlerContext ctx) {
if (outboundChannel != null) {
closeOnFlush(outboundChannel);
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
cause.printStackTrace();
closeOnFlush(ctx.channel());
}
/**
* Closes the specified channel after all queued write requests are flushed.
*/
static void closeOnFlush(Channel ch) {
if (ch.isActive()) {
ch.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE);
}
}
}
```

How to only allow a single connection (url/port) to read and write from a flink application

I read from a URL/port, perform some processing, and write back to the URL/port. The URL/port allows only a single connection (on which you need to read and write as needed).
Flink can read from and write to the URL/port, but it opens 2 connections.
I have used the basic connection to and from a URL/port through Flink:
// set up the streaming execution environment
val env = StreamExecutionEnvironment.getExecutionEnvironment
val data_stream = env.socketTextStream(url, port, socket_stream_deliminator, socket_connection_retries)
.map(x => printInput(x))
.writeToSocket(url, port, new SimpleStringSchema())
//.addSink(new SocketClientSink[String](url, port.toInt, new SimpleStringSchema))
// execute program
env.execute("Flink Streaming Scala API Skeleton")
The ideal solution, or rather the only solution for my case, is to read and write over the same connection and not create 2 separate connections.
How would I go about doing this?
As I said in the comment, you have to store your connection in some static variable, because your sources and sinks won't use the same connection otherwise.
You must also ensure that your source and sink run on the same JVM using the same classloader, otherwise you will still have more than one connection.
I built this wrapper class, which holds a raw socket connection and a Reader/Writer instance for that connection. Because your source will always stop before your sink (that's how Flink works), this class also reconnects if the connection was closed before.
package example;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.Socket;
public class SocketConnection implements Closeable {
private final String host;
private final int port;
private final Object lock;
private volatile Socket socket;
private volatile BufferedReader reader;
private volatile PrintStream writer;
public SocketConnection(String host, int port) {
this.host = host;
this.port = port;
this.lock = new Object();
this.socket = null;
this.reader = null;
this.writer = null;
}
private void connect() throws IOException {
this.socket = new Socket(this.host, this.port);
this.reader = new BufferedReader(new InputStreamReader(this.socket.getInputStream()));
this.writer = new PrintStream(this.socket.getOutputStream());
}
private void ensureConnected() throws IOException {
// only acquire lock if null
if (this.socket == null) {
synchronized (this.lock) {
// recheck if socket is still null
if (this.socket == null) {
connect();
}
}
}
}
public BufferedReader getReader() throws IOException {
ensureConnected();
return this.reader;
}
public PrintStream getWriter() throws IOException {
ensureConnected();
return this.writer;
}
@Override
public void close() throws IOException {
if (this.socket != null) {
synchronized (this.lock) {
if (this.socket != null) {
this.reader.close();
this.reader = null;
this.writer.close();
this.writer = null;
this.socket.close();
this.socket = null;
}
}
}
}
}
Your Main Class (or any other class) holds one instance of this class which is then accessed by both your source and your sink:
package example;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
public class Main {
public static final SocketConnection CONNECTION = new SocketConnection("your-host", 12345);
public static void main(String[] args) throws Exception {
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.addSource(new SocketTextStreamSource())
.addSink(new SocketTextStreamSink());
env.execute("Flink Streaming Scala API Skeleton");
}
}
Your SourceFunction could look more or less like this:
package example;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
public class SocketTextStreamSource implements SourceFunction<String> {
private volatile boolean running;
public SocketTextStreamSource() {
this.running = true;
}
@Override
public void run(SourceContext<String> context) throws Exception {
try (SocketConnection conn = Main.CONNECTION) {
String line;
while (this.running && (line = conn.getReader().readLine()) != null) {
context.collect(line);
}
}
}
@Override
public void cancel() {
this.running = false;
}
}
And your SinkFunction:
package example;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
public class SocketTextStreamSink extends RichSinkFunction<String> {
private transient SocketConnection connection;
@Override
public void open(Configuration parameters) throws Exception {
this.connection = Main.CONNECTION;
}
@Override
public void invoke(String value, Context context) throws Exception {
this.connection.getWriter().println(value);
this.connection.getWriter().flush();
}
@Override
public void close() throws Exception {
this.connection.close();
}
}
Note that I always use getReader() and getWriter() because the underlying Socket may have been closed in the meantime.

The netty server seems to be blocked when I add an ExecutionHandler?

THE SCENE:
I am writing an echo client and server. The data being transferred is a string:
The client encodes a string and sends it to the server.
The server receives the data, decodes the string, then encodes the received string and sends it back to the client.
The above process is repeated 100000 times. (Note: the connection is persistent.)
DIFFERENT CONDITIONS:
When I run ONE server and TWO clients at the same time, everything is OK: every client receives 100000 messages and terminates normally.
But when I add an ExecutionHandler on the server and then run ONE server and TWO clients at the same time, one client never terminates, and the network traffic is zero.
I can't locate the key point of this problem for now; can you give me some suggestions?
MY CODE:
String encoder, string decoder, client handler, server handler, client main, server main.
//Decoder=======================================================
import java.nio.charset.Charset;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.handler.codec.frame.FrameDecoder;
public class Dcd extends FrameDecoder {
public static final Charset cs = Charset.forName("utf8");
@Override
protected Object decode(ChannelHandlerContext ctx, Channel channel,
ChannelBuffer buffer) throws Exception {
if (buffer.readableBytes() < 4) {
return null;
}
int headlen = 4;
int length = buffer.getInt(0);
if (buffer.readableBytes() < length + headlen) {
return null;
}
String ret = buffer.toString(headlen, length, cs);
buffer.skipBytes(length + headlen);
return ret;
}
}
//Encoder =======================================================
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.handler.codec.oneone.OneToOneEncoder;
public class Ecd extends OneToOneEncoder {
@Override
protected Object encode(ChannelHandlerContext ctx, Channel channel,
Object msg) throws Exception {
if (!(msg instanceof String)) {
return msg;
}
byte[] data = ((String) msg).getBytes();
ChannelBuffer buf = ChannelBuffers.dynamicBuffer(data.length + 4, ctx
.getChannel().getConfig().getBufferFactory());
buf.writeInt(data.length);
buf.writeBytes(data);
return buf;
}
}
//Client handler =======================================================
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
/**
* Handler implementation for the echo client. It initiates the ping-pong
* traffic between the echo client and server by sending the first message to
* the server.
*/
public class EchoClientHandler extends SimpleChannelUpstreamHandler {
private static final Logger logger = Logger
.getLogger(EchoClientHandler.class.getName());
private final AtomicLong transferredBytes = new AtomicLong();
private final AtomicInteger counter = new AtomicInteger(0);
private final AtomicLong startTime = new AtomicLong(0);
private String dd;
/**
* Creates a client-side handler.
*/
public EchoClientHandler(String data) {
dd = data;
}
public long getTransferredBytes() {
return transferredBytes.get();
}
@Override
public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent e) {
// Send the first message. Server will not send anything here
// because the firstMessage's capacity is 0.
startTime.set(System.currentTimeMillis());
Channels.write(ctx.getChannel(), dd);
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
// Send back the received message to the remote peer.
transferredBytes.addAndGet(((String) e.getMessage()).length());
int i = counter.incrementAndGet();
int N = 100000;
if (i < N) {
e.getChannel().write(e.getMessage());
} else {
ctx.getChannel().close();
System.out.println(N * 1.0
/ (System.currentTimeMillis() - startTime.get()) * 1000);
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
// Close the connection when an exception is raised.
logger.log(Level.WARNING, "Unexpected exception from downstream.",
e.getCause());
e.getChannel().close();
}
}
//Client main =======================================================
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
/**
* Sends one message when a connection is open and echoes back any received data
* to the server. Simply put, the echo client initiates the ping-pong traffic
* between the echo client and server by sending the first message to the
* server.
*/
public class EchoClient {
private final String host;
private final int port;
public EchoClient(String host, int port) {
this.host = host;
this.port = port;
}
public void run() {
// Configure the client.
final ClientBootstrap bootstrap = new ClientBootstrap(
new NioClientSocketChannelFactory(
Executors.newCachedThreadPool(),
Executors.newCachedThreadPool()));
// Set up the pipeline factory.
bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
public ChannelPipeline getPipeline() throws Exception {
return Channels.pipeline(new Dcd(), new Ecd(),
new EchoClientHandler("abcdd"));
}
});
bootstrap.setOption("sendBufferSize", 1048576);
bootstrap.setOption("receiveBufferSize", 1048576);
bootstrap.setOption("tcpNoDelay", true);
bootstrap.setOption("writeBufferLowWaterMark", 32 * 1024);
bootstrap.setOption("writeBufferHighWaterMark", 64 * 1024);
List<ChannelFuture> list = new ArrayList<ChannelFuture>();
for (int i = 0; i < 1; i++) {
// Start the connection attempt.
ChannelFuture future = bootstrap.connect(new InetSocketAddress(
host, port));
// Wait until the connection is closed or the connection
// attempt
// fails.
list.add(future);
}
for (ChannelFuture f : list) {
f.getChannel().getCloseFuture().awaitUninterruptibly();
}
// Shut down thread pools to exit.
bootstrap.releaseExternalResources();
}
private static void testOne() {
final String host = "192.168.0.102";
final int port = 8000;
new EchoClient(host, port).run();
}
public static void main(String[] args) throws Exception {
testOne();
}
}
//server handler =======================================================
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
/**
* Handler implementation for the echo server.
*/
public class EchoServerHandler extends SimpleChannelUpstreamHandler {
private static final Logger logger = Logger
.getLogger(EchoServerHandler.class.getName());
private final AtomicLong transferredBytes = new AtomicLong();
public long getTransferredBytes() {
return transferredBytes.get();
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
// Send back the received message to the remote peer.
transferredBytes.addAndGet(((String) e.getMessage()).length());
Channels.write(ctx.getChannel(), e.getMessage());
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
// Close the connection when an exception is raised.
logger.log(Level.WARNING, "Unexpected exception from downstream.",
e.getCause());
e.getChannel().close();
}
}
//Server main =======================================================
import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.handler.execution.ExecutionHandler;
import org.jboss.netty.handler.execution.OrderedMemoryAwareThreadPoolExecutor;
/**
* Echoes back any received data from a client.
*/
public class EchoServer {
private final int port;
public EchoServer(int port) {
this.port = port;
}
public void run() {
// Configure the server.
ServerBootstrap bootstrap = new ServerBootstrap(
new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(),
Executors.newCachedThreadPool()));
System.out.println(Runtime.getRuntime().availableProcessors() * 2);
final ExecutionHandler executionHandler = new ExecutionHandler(
new OrderedMemoryAwareThreadPoolExecutor(16, 1048576, 1048576));
// Set up the pipeline factory.
bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
public ChannelPipeline getPipeline() throws Exception {
System.out.println("new pipe");
return Channels.pipeline(new Dcd(), new Ecd(),
executionHandler, new EchoServerHandler());
}
});
bootstrap.setOption("child.sendBufferSize", 1048576);
bootstrap.setOption("child.receiveBufferSize", 1048576);
bootstrap.setOption("child.tcpNoDelay", true);
bootstrap.setOption("child.writeBufferLowWaterMark", 32 * 1024);
bootstrap.setOption("child.writeBufferHighWaterMark", 64 * 1024);
// Bind and start to accept incoming connections.
bootstrap.bind(new InetSocketAddress(port));
}
public static void main(String[] args) throws Exception {
int port = 8000;
new EchoServer(port).run();
}
}
I have found the reason now; it was hard work, but full of pleasure.
When an ExecutionHandler is added, the message is wrapped into a Runnable task and executed in a ChildExecutor. The key point is here: a task may be added to the ChildExecutor just as the executor is about to exit, and it will then be ignored by the ChildExecutor.
I added three lines of code and some comments; the final code looks like below, and it works now. Should I mail the author?
private final class ChildExecutor implements Executor, Runnable {
private final Queue<Runnable> tasks = QueueFactory
.createQueue(Runnable.class);
private final AtomicBoolean isRunning = new AtomicBoolean();
public void execute(Runnable command) {
// TODO: What todo if the add return false ?
tasks.add(command);
if (!isRunning.get()) {
doUnorderedExecute(this);
} else {
}
}
public void run() {
// check if its already running by using CAS. If so just return
// here. So in the worst case the thread
// is executed and do nothing
boolean acquired = false;
if (isRunning.compareAndSet(false, true)) {
acquired = true;
try {
Thread thread = Thread.currentThread();
for (;;) {
final Runnable task = tasks.poll();
// if the task is null we should exit the loop
if (task == null) {
break;
}
boolean ran = false;
beforeExecute(thread, task);
try {
task.run();
ran = true;
onAfterExecute(task, null);
} catch (RuntimeException e) {
if (!ran) {
onAfterExecute(task, e);
}
throw e;
}
}
// NOTE (I added): between here and "isRunning.set(false)", some new tasks may be added.
} finally {
// set it back to not running
isRunning.set(false);
}
}
// NOTE (I added): do the remaining work.
if (acquired && !isRunning.get() && tasks.peek() != null) {
doUnorderedExecute(this);
}
}
}
This was a bug and will be fixed in 3.4.0.Alpha2.
See https://github.com/netty/netty/issues/234
