Low-Level HTTP/2 (H2) Client and Server Implementation in Java

I am looking for sample examples implementing all the features of HTTP/2 (client and server), such as STREAMS, FRAMES, PUSH_PROMISE and HPACK, with detailed configuration, using embedded Jetty (Jetty 10/Jetty 11).
It should be able to run on localhost, and if SSL certificates are needed for low-level HTTP/2 over TLS, a detailed way to implement that would also help.
The examples in the documentation are not very clear and are explained in parts. I tried a lot with them. Hopefully a successful answer to this will help a lot of amateurs who want to implement low-level H2 with embedded Jetty. Can anyone help me with a reference?

Code for reference for others.
H2Server.java
import java.net.InetSocketAddress;
import java.net.Socket;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.UnaryOperator;
import org.eclipse.jetty.http.HttpFields;
import org.eclipse.jetty.http.HttpMethod;
import org.eclipse.jetty.http.HttpStatus;
import org.eclipse.jetty.http.HttpURI;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.http.MetaData;
import org.eclipse.jetty.http2.ErrorCode;
import org.eclipse.jetty.http2.api.Session;
import org.eclipse.jetty.http2.api.Stream;
import org.eclipse.jetty.http2.api.server.ServerSessionListener;
import org.eclipse.jetty.http2.frames.DataFrame;
import org.eclipse.jetty.http2.frames.GoAwayFrame;
import org.eclipse.jetty.http2.frames.HeadersFrame;
import org.eclipse.jetty.http2.frames.PushPromiseFrame;
import org.eclipse.jetty.http2.frames.ResetFrame;
import org.eclipse.jetty.http2.frames.SettingsFrame;
import org.eclipse.jetty.http2.parser.Parser;
import org.eclipse.jetty.http2.server.RawHTTP2ServerConnectionFactory;
import org.eclipse.jetty.io.ByteBufferPool;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.SecureRequestCustomizer;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.util.BufferUtil;
import org.eclipse.jetty.util.Callback;
import org.eclipse.jetty.util.resource.Resource;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import static java.lang.System.Logger.Level.INFO;
import java.io.OutputStream;
#SuppressWarnings("unused") public class H2Server {
public static void main(String[] args) throws Exception {
// TODO Auto-generated method stub
// Create a Server instance.
QueuedThreadPool serverExecutor = new QueuedThreadPool();
serverExecutor.setName("server");
// server = new Server(serverExecutor);
Server server = new Server(serverExecutor);
// ServerSessionListener sessionListener = new ServerSessionListener.Adapter();
ServerSessionListener sessionListener = new ServerSessionListener.Adapter() {
@Override
public Map<Integer, Integer> onPreface(Session session) {
System.out.println("onPreface Called");
// Customize the settings, for example:
Map<Integer, Integer> settings = new HashMap<>();
// Tell the client that HTTP/2 push is disabled.
settings.put(SettingsFrame.ENABLE_PUSH, 0);
// Advertise support for the extended CONNECT protocol (RFC 8441); valid values are 0 or 1.
settings.put(SettingsFrame.ENABLE_CONNECT_PROTOCOL, 1);
return settings;
}
@Override
public void onAccept(Session session) {
System.out.println("onAccept Called");
InetSocketAddress remoteAddress = session.getRemoteAddress();
System.getLogger("http2").log(INFO, "Connection from {0}", remoteAddress);
}
@Override
public Stream.Listener onNewStream(Stream stream, HeadersFrame frame) {
System.out.println("onNewStream Called");
// This is the "new stream" event, so it's guaranteed to be a request.
MetaData.Request request = (MetaData.Request) frame.getMetaData();
if (frame.isEndStream()) {
respond(stream, request);
return null;
} else {
// Return a Stream.Listener to handle the request events,
// for example request content events or a request reset.
return new Stream.Listener.Adapter() {
@Override
public void onData(Stream stream, DataFrame frame, Callback callback) {
// Get the content buffer.
ByteBuffer buffer = frame.getData();
// Consume the buffer, here - as an example - just log it.
// System.getLogger("http2").log(INFO, "Consuming buffer {0}", buffer);
System.getLogger("http2").log(INFO, "Consuming buffer {0}", buffer);
System.out.println("Consuming buffer {0} " +StandardCharsets.UTF_8.decode(buffer).toString());
// Tell the implementation that the buffer has been consumed.
callback.succeeded();
// By returning from the method, implicitly tell the implementation
// to deliver to this method more DATA frames when they are available.
if (frame.isEndStream()) {
System.out.println("EndStream");
respond(stream, request);
}
}
};
}
}
private void respond(Stream stream, MetaData.Request request) {
// Prepare the response HEADERS frame.
System.out.println("respond Called");
// The response HTTP status and HTTP headers.
MetaData.Response response = new MetaData.Response(HttpVersion.HTTP_2, HttpStatus.OK_200,
HttpFields.EMPTY);
if (HttpMethod.GET.is(request.getMethod())) {
// The response content.
ByteBuffer resourceBytes = getResourceBytes(request);
System.out.println("Request==GET resourceBytes== "+ StandardCharsets.UTF_8.decode(resourceBytes).toString());
// Send the HEADERS frame with the response status and headers,
// and a DATA frame with the response content bytes.
stream.headers(new HeadersFrame(stream.getId(), response, null, false))
.thenCompose(s -> s.data(new DataFrame(s.getId(), resourceBytes, true)));
} else {
// Send the HEADERS frame with the response status and headers,
// followed by a small JSON DATA frame as the response content.
System.out.println("Request==POST response== " + response);
String content1 = "{\"greet\": \"Welcome!!!\"}";
ByteBuffer buffer1 = StandardCharsets.UTF_8.encode(content1);
//stream.headers(new HeadersFrame(stream.getId(), response, null, true));
stream.headers(new HeadersFrame(stream.getId(), response, null, false))
.thenCompose(s -> s.data(new DataFrame(s.getId(), buffer1, true)));
}
}
private ByteBuffer getResourceBytes(MetaData.Request request)
{
return ByteBuffer.allocate(1024);
}
};
// HTTP Configuration
HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setSecureScheme("https");
httpConfig.setSecurePort(8443);
httpConfig.setSendXPoweredBy(true);
httpConfig.setSendServerVersion(true);
// httpConfig.setRequestHeaderSize(16 * 1024);
// HTTPS Configuration
HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig);
httpsConfig.addCustomizer(new SecureRequestCustomizer());
// Create a ServerConnector with RawHTTP2ServerConnectionFactory.
RawHTTP2ServerConnectionFactory http2 = new RawHTTP2ServerConnectionFactory(httpConfig, sessionListener);
// Configure RawHTTP2ServerConnectionFactory, for example:
// Configure the max number of concurrent requests.
http2.setMaxConcurrentStreams(128);
// Enable support for CONNECT.
http2.setConnectProtocolEnabled(true);
// Create the ServerConnector.
ServerConnector connector = new ServerConnector(server, http2);
// connector.setPort(8080);
connector.setPort(8443);
connector.setHost("localhost");
connector.setAcceptQueueSize(128);
// Add the Connector to the Server
server.addConnector(connector);
// Start the Server so it starts accepting connections from clients.
server.start();
// new H2Server().testNoPrefaceBytes();
}
}
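Since the question also asks about SSL/TLS and PUSH_PROMISE, below are two hedged sketches that build on the server above. They follow the connector-chain pattern from the Jetty 10/11 embedded documentation, but they are sketches, not tested code: the keystore path and password are assumptions, and the jetty-alpn-java-server module is assumed to be on the classpath. The first sketch swaps the cleartext connector for a TLS + ALPN connector so clients can negotiate "h2":
// Sketch: TLS + ALPN + HTTP/2 connector. Extra imports needed:
// org.eclipse.jetty.util.ssl.SslContextFactory,
// org.eclipse.jetty.server.SslConnectionFactory,
// org.eclipse.jetty.alpn.server.ALPNServerConnectionFactory.
SslContextFactory.Server sslContextFactory = new SslContextFactory.Server();
// Assumed keystore for localhost; one can be generated, for example, with:
// keytool -genkeypair -keyalg RSA -dname "CN=localhost" -keystore keystore.p12 -storetype PKCS12 -storepass storepwd
sslContextFactory.setKeyStorePath("keystore.p12");
sslContextFactory.setKeyStorePassword("storepwd");
HttpConfiguration tlsHttpConfig = new HttpConfiguration();
tlsHttpConfig.addCustomizer(new SecureRequestCustomizer());
RawHTTP2ServerConnectionFactory h2 = new RawHTTP2ServerConnectionFactory(tlsHttpConfig, sessionListener);
// ALPN negotiates "h2" during the TLS handshake.
ALPNServerConnectionFactory alpn = new ALPNServerConnectionFactory();
alpn.setDefaultProtocol(h2.getProtocol());
// Connection chain: TLS -> ALPN -> HTTP/2.
SslConnectionFactory ssl = new SslConnectionFactory(sslContextFactory, alpn.getProtocol());
ServerConnector tlsConnector = new ServerConnector(server, ssl, alpn, h2);
tlsConnector.setPort(8443);
server.addConnector(tlsConnector);
The second sketch shows a server-initiated PUSH_PROMISE from inside respond(); the pushed path /style.css and its tiny body are made up for illustration. Note that the client code above sends ENABLE_PUSH = 0 in its onPreface, so isPushEnabled() would return false until that setting is removed and the client adds an onPush listener:
// Sketch: push a related resource on the stream that carried the request.
Session session = stream.getSession();
if (session.isPushEnabled()) {
    // Promise a GET for a related resource on the same authority.
    HttpURI pushedURI = HttpURI.build(request.getURI()).path("/style.css");
    MetaData.Request pushedRequest = new MetaData.Request("GET", pushedURI, HttpVersion.HTTP_2, HttpFields.EMPTY);
    PushPromiseFrame promiseFrame = new PushPromiseFrame(stream.getId(), 0, pushedRequest);
    // Send the PUSH_PROMISE frame, then the pushed "response" HEADERS and DATA.
    stream.push(promiseFrame, new Stream.Listener.Adapter())
        .thenCompose(pushedStream -> {
            MetaData.Response pushedResponse = new MetaData.Response(HttpVersion.HTTP_2, HttpStatus.OK_200, HttpFields.EMPTY);
            return pushedStream.headers(new HeadersFrame(pushedStream.getId(), pushedResponse, null, false))
                .thenCompose(s -> s.data(new DataFrame(s.getId(), StandardCharsets.UTF_8.encode("body{}"), true)));
        });
}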
H2Client.java
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import org.eclipse.jetty.http.HttpFields;
import org.eclipse.jetty.http.HttpHeader;
import org.eclipse.jetty.http.HttpURI;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.http.MetaData;
import org.eclipse.jetty.http2.api.Session;
import org.eclipse.jetty.http2.api.Stream;
import org.eclipse.jetty.http2.client.HTTP2Client;
import org.eclipse.jetty.http2.frames.DataFrame;
import org.eclipse.jetty.http2.frames.HeadersFrame;
import org.eclipse.jetty.http2.frames.SettingsFrame;
import org.eclipse.jetty.io.ClientConnector;
import org.eclipse.jetty.util.Callback;
import org.eclipse.jetty.util.Promise;
public class H2Client {
public static void main(String[] args) throws Exception {
HTTP2Client http2Client = new HTTP2Client();
http2Client.start();
ClientConnector connector = http2Client.getClientConnector();
// Address of the server's encrypted port.
SocketAddress serverAddress = new InetSocketAddress("localhost", 8443);
// SocketAddress serverAddress = new InetSocketAddress("http://www.google.com/", 8080);
// CompletableFuture<Session> sessionCF = http2Client.connect(connector.getSslContextFactory(), serverAddress, new Session.Listener.Adapter());
CompletableFuture<Session> sessionCF = http2Client.connect(serverAddress, new Session.Listener.Adapter()
{
@Override
public Map<Integer, Integer> onPreface(Session session)
{
System.out.println("onPreface Called");
Map<Integer, Integer> configuration = new HashMap<>();
// Disable push from the server.
configuration.put(SettingsFrame.ENABLE_PUSH, 0);
// Override HTTP2Client.initialStreamRecvWindow for this session.
configuration.put(SettingsFrame.INITIAL_WINDOW_SIZE, 1024 * 1024);
return configuration;
}
});
Session session = sessionCF.get();
/*
// Configure the request headers.
HttpFields requestHeaders = HttpFields.build()
.put(HttpHeader.USER_AGENT, "Jetty HTTP2Client {version}");
// The request metadata with method, URI and headers.
MetaData.Request request = new MetaData.Request("GET", HttpURI.from("http://localhost:61432"), HttpVersion.HTTP_2, requestHeaders);
// MetaData.Request request = new MetaData.Request("GET", HttpURI.from("https://www.google.com/"), HttpVersion.HTTP_2, requestHeaders);
// The HTTP/2 HEADERS frame, with endStream=true
// to signal that this request has no content.
HeadersFrame headersFrame = new HeadersFrame(request, null, true);
// Open a Stream by sending the HEADERS frame.
session.newStream(headersFrame,new Promise.Adapter<>(), new Stream.Listener.Adapter()
{
@Override
public void onHeaders(Stream stream, HeadersFrame frame)
{
System.out.println("onHeaders Called");
MetaData metaData = frame.getMetaData();
// Is this HEADERS frame the response or the trailers?
if (metaData.isResponse())
{
MetaData.Response response = (MetaData.Response)metaData;
System.out.println( "Received response {0}== "+ response);
}
else
{
System.out.println("Received trailers {0}== " + metaData.getFields());
}
}
@Override
public void onData(Stream stream, DataFrame frame, Callback callback)
{
System.out.println("onData Called");
// Get the content buffer.
ByteBuffer buffer = frame.getData();
// Consume the buffer, here - as an example - just log it.
System.out.println("Consuming buffer {0}" +buffer);
// Tell the implementation that the buffer has been consumed.
callback.succeeded();
// By returning from the method, implicitly tell the implementation
// to deliver to this method more DATA frames when they are available.
}
});
*/
// Configure the request headers.
HttpFields requestHeaders = HttpFields.build()
.put(HttpHeader.CONTENT_TYPE, "application/json");
// The request metadata with method, URI and headers.
MetaData.Request request = new MetaData.Request("POST", HttpURI.from("http://localhost:8443/"), HttpVersion.HTTP_2, requestHeaders);
// MetaData.Request request = new MetaData.Request("POST", HttpURI.from("0.0.0.0:63780"), HttpVersion.HTTP_2, requestHeaders);
// The HTTP/2 HEADERS frame, with endStream=false to
// signal that there will be more frames in this stream.
HeadersFrame headersFrame = new HeadersFrame(request, null, false);
// Open a Stream by sending the HEADERS frame.
// CompletableFuture<Stream> streamCF = session.newStream(headersFrame, new Stream.Listener.Adapter());
CompletableFuture<Stream> streamCF = session.newStream(headersFrame, new Stream.Listener.Adapter()
{
public void onHeaders(Stream stream, HeadersFrame frame)
{
System.out.println("onHeaders Called");
MetaData metaData = frame.getMetaData();
// Is this HEADERS frame the response or the trailers?
if (metaData.isResponse())
{
MetaData.Response response = (MetaData.Response)metaData;
System.out.println( "Received response {0}== "+ response);
}
else
{
System.out.println("Received trailers {0}== " + metaData.getFields());
}
}
@Override
public void onData(Stream stream, DataFrame frame, Callback callback)
{
System.out.println("onData Called");
// Get the content buffer.
ByteBuffer buffer = frame.getData();
// Consume the buffer, here - as an example - just log it.
System.out.println("Consuming buffer {0}" +StandardCharsets.UTF_8.decode(buffer).toString());
// Tell the implementation that the buffer has been consumed.
callback.succeeded();
// By returning from the method, implicitly tell the implementation
// to deliver to this method more DATA frames when they are available.
}
});
// Block to obtain the Stream.
// Alternatively you can use the CompletableFuture APIs to avoid blocking.
Stream stream = streamCF.get();
// The request content, in two chunks.
String content1 = "{\"greet\": \"hello world\"}";
ByteBuffer buffer1 = StandardCharsets.UTF_8.encode(content1);
String content2 = "{\"user\": \"jetty\"}";
ByteBuffer buffer2 = StandardCharsets.UTF_8.encode(content2);
// Send the first DATA frame on the stream, with endStream=false
// to signal that there are more frames in this stream.
CompletableFuture<Stream> dataCF1 = stream.data(new DataFrame(stream.getId(), buffer1, false));
// Only when the first chunk has been sent we can send the second,
// with endStream=true to signal that there are no more frames.
dataCF1.thenCompose(s -> s.data(new DataFrame(s.getId(), buffer2, true)));
System.out.println("EOF");
}
}
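And here is a hedged sketch of the client side over TLS, assuming the server exposes the TLS + ALPN connector sketched after H2Server.java with a self-signed certificate on localhost:8443. It uses the connect(SslContextFactory, ...) overload already hinted at in the commented-out line above; the extra import needed is org.eclipse.jetty.util.ssl.SslContextFactory:
// Sketch: configure TLS before starting the client.
HTTP2Client tlsClient = new HTTP2Client();
SslContextFactory.Client sslContextFactory = new SslContextFactory.Client();
// Trust-all is acceptable only for local testing with a self-signed certificate.
sslContextFactory.setTrustAll(true);
tlsClient.getClientConnector().setSslContextFactory(sslContextFactory);
tlsClient.start();
// The TLS variant of connect(); ALPN negotiates "h2" automatically.
SocketAddress secureAddress = new InetSocketAddress("localhost", 8443);
CompletableFuture<Session> tlsSessionCF = tlsClient.connect(sslContextFactory, secureAddress, new Session.Listener.Adapter());
Session tlsSession = tlsSessionCF.get();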

Related

Trying to run a simple example of Java NIO SSL to load the contents of https://www.amazon.com but getting 400 Bad Request

Trying everything but it does not work :(
The complete code and example can be found here: https://examples.javacodegeeks.com/core-java/nio/java-nio-ssl-example/
Also you can download the full source (it is only 3 classes) by clicking here: https://examples.javacodegeeks.com/wp-content/uploads/2015/12/NioSSLExample.zip
Thanks for any help!
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;
import java.util.Iterator;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLSession;
public class NioSSLExample
{
public static void main(String[] args) throws Exception
{
InetSocketAddress address = new InetSocketAddress("www.amazon.com", 443);
Selector selector = Selector.open();
SocketChannel channel = SocketChannel.open();
channel.connect(address);
channel.configureBlocking(false);
int ops = SelectionKey.OP_CONNECT | SelectionKey.OP_READ;
SelectionKey key = channel.register(selector, ops);
// create the worker threads
final Executor ioWorker = Executors.newSingleThreadExecutor();
final Executor taskWorkers = Executors.newFixedThreadPool(2);
// create the SSLEngine
final SSLEngine engine = SSLContext.getDefault().createSSLEngine();
engine.setUseClientMode(true);
engine.beginHandshake();
final int ioBufferSize = 32 * 1024;
final NioSSLProvider ssl = new NioSSLProvider(key, engine, ioBufferSize, ioWorker, taskWorkers)
{
@Override
public void onFailure(Exception ex)
{
System.out.println("handshake failure");
ex.printStackTrace();
}
@Override
public void onSuccess()
{
System.out.println("handshake success");
SSLSession session = engine.getSession();
try
{
System.out.println("local principal: " + session.getLocalPrincipal());
System.out.println("remote principal: " + session.getPeerPrincipal());
System.out.println("cipher: " + session.getCipherSuite());
}
catch (Exception exc)
{
exc.printStackTrace();
}
//HTTP request
StringBuilder http = new StringBuilder();
http.append("GET / HTTP/1.0\r\n");
http.append("Connection: close\r\n");
http.append("\r\n");
byte[] data = http.toString().getBytes();
ByteBuffer send = ByteBuffer.wrap(data);
this.sendAsync(send);
}
@Override
public void onInput(ByteBuffer decrypted)
{
// HTTP response
byte[] dst = new byte[decrypted.remaining()];
decrypted.get(dst);
String response = new String(dst);
System.out.print(response);
System.out.flush();
}
@Override
public void onClosed()
{
System.out.println("ssl session closed");
}
};
// NIO selector
while (true)
{
key.selector().select();
Iterator keys = key.selector().selectedKeys().iterator();
while (keys.hasNext())
{
keys.next();
keys.remove();
ssl.processInput();
}
}
}
}
http.append("GET / HTTP/1.0\r\n");
http.append("Connection: close\r\n");
http.append("\r\n");
While this is in theory a correct HTTP/1.0 request, in practice most systems today require that a Host header is included. While this header is mandatory only with HTTP/1.1, it is needed whenever an IP address hosts multiple domains:
http.append("GET / HTTP/1.0\r\n");
http.append("Host: www.amazon.com\r\n");
http.append("\r\n");
Also note that the Connection: close is unnecessary since it is implicit with HTTP/1.0 (but not with HTTP/1.1).
Apart from that, HTTP is far more complex than this simple request, and even this one had its problems, as you saw. If you need to implement it yourself, please study the standards instead of making assumptions about how servers react or looking only at a few examples.

Twitter Java project : failed to establish connection properly

I am trying to consume Twitter streams with the help of a Java Kafka application.
I have created a Twitter developer account and a Twitter application and generated all the 4 keys required.
Please find the code below:
package com.github.simpleanand.kafkabeginner.tutorial2;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.http.HttpHost;
import org.apache.http.conn.params.ConnRoutePNames;
import org.apache.http.impl.client.DefaultHttpClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
import com.twitter.hbc.ClientBuilder;
import com.twitter.hbc.core.Client;
import com.twitter.hbc.core.Constants;
import com.twitter.hbc.core.Hosts;
import com.twitter.hbc.core.HttpHosts;
import com.twitter.hbc.core.endpoint.StatusesFilterEndpoint;
import com.twitter.hbc.core.processor.StringDelimitedProcessor;
import com.twitter.hbc.httpclient.auth.Authentication;
import com.twitter.hbc.httpclient.auth.OAuth1;
public class TwitterProducer {
private Logger logger = LoggerFactory.getLogger(TwitterProducer.class);
private String consumerKey = "<consumer-key>";
private String consumerSecret = "<consumerSecret>";
private String token = "<token value>";
private String secret = "<secret>";
public TwitterProducer() {
}
public static void main(String[] args) {
new TwitterProducer().run();
}
public void run() {
logger.info("inside run........");
// create a twitter client
/**
* Set up your blocking queues: Be sure to size these properly based on
* expected TPS of your stream
*/
BlockingQueue<String> msgQueue = new LinkedBlockingQueue<String>(100000);
Client client = createTwitterClient(msgQueue);
client.connect();
// create a kafka producer
// loop to send tweets to kafka
// on a different thread, or multiple different threads....
while (!client.isDone()) {
String msg = null;
try {
msg = msgQueue.poll(5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
client.stop();
}
if (null != msg) {
logger.info("Msg -> " + msg);
}
}
logger.info("end of application........");
}
public Client createTwitterClient(BlockingQueue<String> msgQueue) {
HttpHost proxy = new HttpHost("<company proxy value>", 8080);
DefaultHttpClient httpClient = new DefaultHttpClient();
httpClient.getParams().setParameter(ConnRoutePNames.DEFAULT_PROXY, proxy);
/**
* Declare the host you want to connect to, the endpoint, and
* authentication (basic auth or oauth)
*/
Hosts hosebirdHosts = new HttpHosts(Constants.STREAM_HOST);
StatusesFilterEndpoint hosebirdEndpoint = new StatusesFilterEndpoint();
// Optional: set up some followings and track terms
// List<Long> followings = Lists.newArrayList(1234L, 566788L);
List<String> terms = Lists.newArrayList("bitcoin");
// hosebirdEndpoint.followings(followings);
hosebirdEndpoint.trackTerms(terms);
// These secrets should be read from a config file
Authentication hosebirdAuth = new OAuth1(consumerKey, consumerSecret, token, secret);
hosebirdAuth.setupConnection(httpClient);
// Creating a client:
ClientBuilder builder = new ClientBuilder()
.name("Hosebird-Client-01") // optional: mainly for the logs
.hosts(hosebirdHosts)
.authentication(hosebirdAuth)
.endpoint(hosebirdEndpoint)
.processor(new StringDelimitedProcessor(msgQueue));
Client hosebirdClient = builder.build();
// Attempts to establish a connection.
return hosebirdClient;
}
}
I'm getting the following error:
[hosebird-client-io-thread-0] INFO com.twitter.hbc.httpclient.ClientBase - Hosebird-Client-01 Establishing a connection
[hosebird-client-io-thread-0] WARN com.twitter.hbc.httpclient.ClientBase - Hosebird-Client-01 Unknown host - stream.twitter.com
[hosebird-client-io-thread-0] WARN com.twitter.hbc.httpclient.ClientBase - Hosebird-Client-01 failed to establish connection properly
[hosebird-client-io-thread-0] INFO com.twitter.hbc.httpclient.ClientBase - Hosebird-Client-01 Done processing, preparing to close connection
Please advise on how to solve this error.
It seems there is an issue with how the proxy object is being set. Is that correct?

How do you read and print a chunked HTTP response using java.net.http as chunks arrive?

Java 11 introduces a new package, java.net.http, for making HTTP requests. For general usage, it's pretty straightforward.
My question is: how do I use java.net.http to handle chunked responses as each chunk is received by the client?
java.net.http contains a reactive BodySubscriber which appears to be what I want, but I can't find an example of how it's used.
http_get_demo.py
Below is a Python implementation that prints chunks as they arrive; I'd like to do the same thing with java.net.http:
import argparse
import requests
def main(url: str):
with requests.get(url, stream=True) as r:
for c in r.iter_content(chunk_size=1):
print(c.decode("UTF-8"), end="")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Read from a URL and print as text as chunks arrive")
parser.add_argument('url', type=str, help="A URL to read from")
args = parser.parse_args()
main(args.url)
HttpGetDemo.java
Just for completeness, here's a simple example of making a blocking request using java.net.http:
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpResponse;
import java.net.http.HttpRequest;
public class HttpGetDemo {
public static void main(String[] args) throws Exception {
var request = HttpRequest.newBuilder()
.uri(URI.create(args[0]))
.build();
var bodyHandler = HttpResponse.BodyHandlers
.ofString();
var client = HttpClient.newHttpClient();
var response = client.send(request, bodyHandler);
System.out.println(response.body());
}
}
HttpAsyncGetDemo.java
And here's an example making a non-blocking/async request:
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpResponse;
import java.net.http.HttpRequest;
/**
* ReadChunked
*/
public class HttpAsyncGetDemo {
public static void main(String[] args) throws Exception {
var request = HttpRequest.newBuilder()
.uri(URI.create(args[0]))
.build();
var bodyHandler = HttpResponse.BodyHandlers
.ofString();
var client = HttpClient.newHttpClient();
client.sendAsync(request, bodyHandler)
.thenApply(HttpResponse::body)
.thenAccept(System.out::println)
.join();
}
}
The Python code does not ensure that the response body data is made available one HTTP chunk at a time. It just provides small amounts of data to the application, thus reducing the amount of memory consumed at the application level (it could be buffered lower in the stack). The Java 11 HTTP Client supports streaming through one of the streaming body handlers in HttpResponse.BodyHandlers: ofInputStream, ofByteArrayConsumer, ofLines, etc.
Or write your own handler / subscriber as demonstrated:
https://www.youtube.com/watch?v=qiaC0QMLz5Y
You can print ByteBuffers as they come, but there's no guarantee that a ByteBuffer corresponds to a chunk. Chunks are handled by the stack. One ByteBuffer slice will be pushed for every chunk - but if there isn’t enough space remaining in the buffer, then a partial chunk will be pushed. All the consumer sees is a stream of ByteBuffers that contain the data.
So what you can do is print those ByteBuffers as they come, but you have no guarantee that they correspond exactly one chunk each as was sent by the server.
Note: If the body of your request is text based, then you can use
BodyHandlers.fromLineSubscriber(Subscriber<? super String> subscriber) with a custom Subscriber<String> that will print each line as it comes.
The BodyHandlers.fromLineSubscriber does the hard work of decoding bytes into chars using the charset indicated in the response headers, buffering bytes if needed until they can be decoded (a ByteBuffer might end in the middle of an encoding sequence if the text contains chars encoded over multiple bytes), and splitting them at line boundaries. The Subscriber::onNext method will be invoked once for each line in the text. See https://download.java.net/java/early_access/jdk11/docs/api/java.net.http/java/net/http/HttpResponse.BodyHandlers.html#fromLineSubscriber(java.util.concurrent.Flow.Subscriber) for more info.
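For example, here is a minimal sketch using BodyHandlers.ofLines(), one of the streaming handlers mentioned above; it assumes the response body is line-oriented text, and the class name HttpGetLinesDemo is just for this sketch:
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
public class HttpGetLinesDemo {
    public static void main(String[] args) throws Exception {
        var client = HttpClient.newHttpClient();
        var request = HttpRequest.newBuilder()
                .uri(URI.create(args[0]))
                .build();
        // ofLines() exposes the decoded body as a lazy Stream<String>,
        // so lines are printed as data arrives rather than after the
        // whole body has been buffered.
        var response = client.send(request, HttpResponse.BodyHandlers.ofLines());
        response.body().forEach(System.out::println);
    }
}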
Thanks to @pavel and @chegar999 for their partial answers. They led me to my solution.
Overview
The solution I came up with is below. Basically, the solution is to use a custom java.net.http.HttpResponse.BodySubscriber. A BodySubscriber contains reactive methods (onSubscribe, onNext, onError, and onComplete) and a getBody method that basically returns a Java CompletableFuture that will eventually produce the body of the HTTP response. Once you have your BodySubscriber in hand you can use it like:
HttpClient client = HttpClient.newHttpClient();
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(uri))
.build();
return client.sendAsync(request, responseInfo -> new StringSubscriber())
.whenComplete((r, t) -> System.out.println("--- Status code " + r.statusCode()))
.thenApply(HttpResponse::body);
Note the line:
client.sendAsync(request, responseInfo -> new StringSubscriber())
That's where we register our custom BodySubscriber; in this case, my custom class is named StringSubscriber.
CustomSubscriber.java
This is a complete working example. Using Java 11, you can run it without compiling it. Just paste it into a file named CustomSubscriber.java, then run the command java CustomSubscriber.java <some url>. It prints the contents of each chunk as it arrives. It also collects them and returns them as the body when the response has completed.
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.net.http.HttpResponse.BodyHandlers;
import java.net.http.HttpResponse.BodySubscriber;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Flow;
import java.util.stream.Collectors;
import java.util.List;
public class CustomSubscriber {
public static void main(String[] args) {
CustomSubscriber cs = new CustomSubscriber();
String body = cs.get(args[0]).join();
System.out.println("--- Response body:\n: ..." + body + "...");
}
public CompletableFuture<String> get(String uri) {
HttpClient client = HttpClient.newHttpClient();
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(uri))
.build();
return client.sendAsync(request, responseInfo -> new StringSubscriber())
.whenComplete((r, t) -> System.out.println("--- Status code " + r.statusCode()))
.thenApply(HttpResponse::body);
}
static class StringSubscriber implements BodySubscriber<String> {
final CompletableFuture<String> bodyCF = new CompletableFuture<>();
Flow.Subscription subscription;
List<ByteBuffer> responseData = new CopyOnWriteArrayList<>();
@Override
public CompletionStage<String> getBody() {
return bodyCF;
}
@Override
public void onSubscribe(Flow.Subscription subscription) {
this.subscription = subscription;
subscription.request(1); // Request first item
}
@Override
public void onNext(List<ByteBuffer> buffers) {
System.out.println("-- onNext " + buffers);
try {
System.out.println("\tBuffer Content:\n" + asString(buffers));
}
catch (Exception e) {
System.out.println("\tUnable to print buffer content");
}
buffers.forEach(ByteBuffer::rewind); // Rewind after reading
responseData.addAll(buffers);
subscription.request(1); // Request next item
}
@Override
public void onError(Throwable throwable) {
bodyCF.completeExceptionally(throwable);
}
@Override
public void onComplete() {
bodyCF.complete(asString(responseData));
}
private String asString(List<ByteBuffer> buffers) {
return new String(toBytes(buffers), StandardCharsets.UTF_8);
}
private byte[] toBytes(List<ByteBuffer> buffers) {
int size = buffers.stream()
.mapToInt(ByteBuffer::remaining)
.sum();
byte[] bs = new byte[size];
int offset = 0;
for (ByteBuffer buffer : buffers) {
int remaining = buffer.remaining();
buffer.get(bs, offset, remaining);
offset += remaining;
}
return bs;
}
}
}
Trying it out
To test this solution, you'll need a server that sends a response that uses Transfer-encoding: chunked and sends it slow enough to watch the chunks arrive. I've created one at https://github.com/hohonuuli/demo-chunk-server but you can spin it up using Docker like so:
docker run -p 8080:8080 hohonuuli/demo-chunk-server
Then run the CustomSubscriber.java code using java CustomSubscriber.java http://localhost:8080/chunk/10
There is now a new Java library that addresses this kind of requirement,
RxSON: https://github.com/rxson/rxson
It uses JsonPath with RxJava to read JSON chunks streamed from the response as soon as they arrive, and parse them into Java objects.
Example:
String serviceURL = "https://think.cs.vt.edu/corgis/datasets/json/airlines/airlines.json";
HttpRequest req = HttpRequest.newBuilder(URI.create(serviceURL)).GET().build();
RxSON rxson = new RxSON.Builder().build();
String jsonPath = "$[*].Airport.Name";
Flowable<String> airportStream = rxson.create(String.class, req, jsonPath);
airportStream
.doOnNext(it -> System.out.println("Received new item: " + it))
//Just for test
.toList()
.blockingGet();

Azure Web Job Upload using java client

History for context:
I am trying to run a web job from an HTTP client. The file is a ZIP file and contains a Java class and a bat file to run that Java class. This runs okay when I do it from POSTMAN. But when I use an HTTP client, I always get the following error: "'---i-NPsGbTVUpaP0CeJxMQVrHoDHvaxo3' is not recognized as an internal or external command" - Please help – Jagaran yesterday
@Jagaran if it only happens from some clients, it is likely unrelated. Please ask a new question – David Ebbo 21 hours ago
No matter which HTTP client I use in Java, it is the same. It works in cURL or when loading from the web console. My sample code is below – Jagaran 2 hours ago
No matter which HTTP client I use in Java, it is the same. It works in cURL or when loading from the web console.
Do you have any sample Java-based HTTP client with which I can publish an Azure Web Job? I have tried all the Java REST clients.
Maybe I am doing something wrong. The error I get in the Azure console is "'---i-NPsGbTVUpaP0CeJxMQVrHoDHvaxo3' is not recognized as an internal or external command, [08/25/2017 09:30:22 > e7f683: ERR ] operable program or batch file."
I feel Content-Type = application/zip is not being set correctly when using Java. Please help us.
Sample Code:
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import org.apache.http.entity.ContentType;
import com.mashape.unirest.http.HttpResponse;
import com.mashape.unirest.http.Unirest;
/**
* @author jagaran.das
*
*/
public class AIPHTTPClient {
/**
* @param args
* @throws IOException
*/
@SuppressWarnings({ "unused", "rawtypes" })
public static void main(String[] args) throws IOException {
try {
URI uri = new AIPHTTPClient().getURI();
HttpResponse<InputStream> jsonResponse = Unirest.put("https://<URL>/api/triggeredwebjobs/TestJOb")
.basicAuth("$AzureWebJobTestBRMS", "XXXXX")
.header("content-disposition","attachement; filename=acvbgth.bat")
.field("file", new FileInputStream(new File(uri))
,ContentType.create("content-type: application/zip"),"AzureWebJob.zip").asBinary();
System.out.println(jsonResponse.getStatusText());
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
public InputStream readZip() {
ZipFile zipFile = null;
ZipEntry zipEntry = zipFile.getEntry("run.bat");
InputStream stream = null;
/* try {
zipFile = new ZipFile("/Users/jagaran.das/Documents/work/AIP/AzureWebJob.zip");
java.util.Enumeration<? extends ZipEntry> entries = zipFile.entries();
while(entries.hasMoreElements()){
ZipEntry entry = entries.nextElement();
stream = zipFile.getInputStream(entry);
}
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} */
try {
stream = zipFile.getInputStream(zipEntry);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return stream;
}
public URI getURI() throws MalformedURLException {
File file = new File("/Users/jagaran.das/Documents/work/AIP/azure-poc/AzureWebJob.zip");
URI fileUri = file.toURI();
System.out.println("URI:" + fileUri);
URL fileUrl = file.toURI().toURL();
System.out.println("URL:" + fileUrl);
URL fileUrlWithoutSpecialCharacterHandling = file.toURL();
System.out.println("URL (no special character handling):" + fileUrlWithoutSpecialCharacterHandling);
return fileUri;
}
}
I've been a little too harsh in my answer before really trying stuff out. Apologies. I've now tried out your snippet, and it looks like you're hitting an issue with Unirest - probably this one.
My advice would be to just move to Apache's HTTP library.
Here's a working sample:
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.client.HttpClient;
import org.apache.http.client.entity.EntityBuilder;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;
import java.io.File;
public class App
{
public static void main( String[] args )
{
File sourceZipFile = new File("webjob.zip");
String kuduApiUrl = "https://yoursitename.scm.azurewebsites.net/api/zip/site/wwwroot/app_data/jobs/triggered/job988/";
HttpEntity httpEntity = EntityBuilder.create()
.setFile(sourceZipFile)
.build();
CredentialsProvider provider = new BasicCredentialsProvider();
UsernamePasswordCredentials credentials = new UsernamePasswordCredentials(
"$yoursitename", "SiteLevelPasSw0rD"
);
provider.setCredentials(AuthScope.ANY, credentials);
HttpClient client = HttpClientBuilder.create()
.setDefaultCredentialsProvider(provider)
.build();
HttpPut putRequest = new HttpPut(kuduApiUrl);
putRequest.setEntity(httpEntity);
// Kudu's Zip API expects application/zip
putRequest.setHeader("Content-type", "application/zip");
try {
HttpResponse response = client.execute(putRequest);
int statusCode = response.getStatusLine().getStatusCode();
HttpEntity entity = response.getEntity();
String resBody = EntityUtils.toString(entity, "UTF-8");
System.out.println(statusCode);
System.out.println(resBody);
}
catch (Exception e) {
e.printStackTrace();
}
}
}
That's sending Content-Type: application/zip and the raw zip contents in the body (no multipart horse manure). I've probably over-engineered the sample.. but it is what it is.
The upload is successful and the WebJob published:
Glad that you have solved the issue; I will also provide a workaround for your reference.
To deploy a WebJob to Azure, in addition to using the REST API, you can also use FTP. Of course, the premise is that you need to know the directory WebJobs are uploaded to, via Kudu.
I offer you the snippet of code below using the FTP4J library:
import java.io.File;
import it.sauronsoftware.ftp4j.FTPClient;
public class UploadFileByFTP {
private static String hostName = <your host name>;
private static String userName = <user name>;
private static String password = <password>;
public static void main(String[] args) {
try {
// create client
FTPClient client = new FTPClient();
// connect host
client.connect(hostName);
// log in
client.login(userName, password);
// print address
System.out.println(client);
// change directory
client.changeDirectory("/site/wwwroot/App_Data/jobs/continuous");
// current directory
String dir = client.currentDirectory();
System.out.println(dir);
File file = new File("D:/test.zip");
client.upload(file);
} catch (Exception e) {
e.printStackTrace();
}
}
}
You can follow this tutorial to configure your parameters.

HTTP Request Object

Is there an object within the standard Java SE that can accept an HTTP request from a socket? I have found how to create and send one, however I have not found a way to retrieve an HTTP request object from a socket. I can create one myself, but I would rather rely on a heavily tested object.
This seems like something that would be readily available given the structure of JSP.
There is a small HTTP server in the Java 6 SDK (not sure if it will be in the JRE or in non-Sun JVMs).
From http://www.java2s.com/Code/Java/JDK-6/LightweightHTTPServer.htm :
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Executors;
import com.sun.net.httpserver.Headers;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
public class HttpServerDemo {
public static void main(String[] args) throws IOException {
InetSocketAddress addr = new InetSocketAddress(8080);
HttpServer server = HttpServer.create(addr, 0);
server.createContext("/", new MyHandler());
server.setExecutor(Executors.newCachedThreadPool());
server.start();
System.out.println("Server is listening on port 8080" );
}
}
class MyHandler implements HttpHandler {
public void handle(HttpExchange exchange) throws IOException {
String requestMethod = exchange.getRequestMethod();
if (requestMethod.equalsIgnoreCase("GET")) {
Headers responseHeaders = exchange.getResponseHeaders();
responseHeaders.set("Content-Type", "text/plain");
exchange.sendResponseHeaders(200, 0);
OutputStream responseBody = exchange.getResponseBody();
Headers requestHeaders = exchange.getRequestHeaders();
Set<String> keySet = requestHeaders.keySet();
Iterator<String> iter = keySet.iterator();
while (iter.hasNext()) {
String key = iter.next();
List values = requestHeaders.get(key);
String s = key + " = " + values.toString() + "\n";
responseBody.write(s.getBytes());
}
responseBody.close();
}
}
}
Yeah, you make a new HTTP Request object from what you accept on the socket. What you do after that is up to you, but it should probably involve an HTTP Response.
import java.io.*;
import java.net.*;
import java.util.*;
public final class WebServer {
public static void main(String args[]) throws Exception {
int PORT = 8080;
ServerSocket listenSocket = new ServerSocket(PORT);
while(true) {
HttpRequest request = new HttpRequest(listenSocket.accept());
Thread thread = new Thread(request);
thread.start();
}
}
}
From: http://www.devhood.com/tutorials/tutorial_details.aspx?tutorial_id=396
There's some more work to be done in the tutorial, but it does look nice.
It looks like you are looking for a Servlet. A servlet is an API that lets you receive and respond to an HTTP request.
Your servlet gets deployed in a container, which is basically the actual web server that takes care of all the protocol complexities. (The most popular containers are Tomcat and Jetty.) A minimal example is sketched below.
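For illustration, here is a minimal servlet sketch, assuming the javax.servlet API that Tomcat and Jetty provided at the time; the class name HelloServlet is made up, and the URL mapping (web.xml or, in Servlet 3.0+, @WebServlet) is not shown:
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
public class HelloServlet extends HttpServlet {
    @Override
    protected void doGet(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException {
        // The container has already accepted the socket and parsed the
        // request line, headers and body into HttpServletRequest.
        response.setContentType("text/plain");
        response.getWriter().println("Hello, you requested " + request.getRequestURI());
    }
}
The container accepts the socket, parses the raw HTTP request, and hands you the HttpServletRequest/HttpServletResponse pair, so you never deal with the wire protocol yourself.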
