SocketException: Too many open files in AWS Lambda - java

This Java method is part of the executed code within an AWS Lambda function:
// Shared, reused RestTemplate. The original built a new RestTemplate (and a
// new underlying connection factory) on EVERY invocation; in a long-lived
// Lambda container the accumulated connections exhaust file descriptors and
// produce the "Too many open files" SocketException described above.
private static final RestTemplate REST_TEMPLATE = createRestTemplate();

// Upper bound on rate-limit retries so the do/while loop cannot spin forever
// when LocationIQ keeps answering 429.
private static final int MAX_ATTEMPTS = 8;

// Builds the single RestTemplate with the logging interceptor attached.
private static RestTemplate createRestTemplate() {
    RestTemplate restTemplate = new RestTemplate();
    List<ClientHttpRequestInterceptor> interceptors = new ArrayList<>();
    interceptors.add(new LoggingRequestInterceptor());
    restTemplate.setInterceptors(interceptors);
    return restTemplate;
}

/**
 * Reverse-geocodes the given coordinates via the LocationIQ API and returns
 * the formatted address produced by {@code getAddress(obj)}.
 *
 * <p>Coordinates are truncated first (see {@code truncate}). Per-second rate
 * limits are retried with exponential backoff; every other error aborts the
 * loop. If all attempts fail, {@code obj} stays null, so {@code getAddress}
 * must tolerate a null argument.
 *
 * @param lat    latitude
 * @param lng    longitude
 * @param cached whether the caller resolved this from a cache (forwarded to
 *               the caller-metrics submission only)
 * @return the address string, as produced by {@code getAddress}
 */
private static String getLocationIQAddress(double lat, double lng, boolean cached) {
    lat = truncate(lat);
    lng = truncate(lng);
    MultiValueMap<String, String> headers = new LinkedMultiValueMap<>();
    HttpEntity<?> request = new HttpEntity<Object>(headers);
    ResponseEntity<LocationIQResponse> response = null;
    LocationIQResponse obj = null;
    try {
        URIBuilder uriBuilder = new URIBuilder(LOCATIONIQ_URL);
        uriBuilder.addParameter("key", LOCATIONIQ_TOKEN);
        uriBuilder.addParameter("lat", String.valueOf(lat));
        uriBuilder.addParameter("lon", String.valueOf(lng));
        uriBuilder.addParameter("format", "json");
        uriBuilder.addParameter("accept-language", "es");
        long delay = 100;
        boolean retry = true;
        int attempts = 0;
        do {
            attempts++;
            try {
                long start = System.currentTimeMillis();
                response = REST_TEMPLATE.exchange(uriBuilder.build(), HttpMethod.GET, request,
                        LocationIQResponse.class);
                CloudWatchSubmit.sendMetric(LOCATION_IQ, ELAPSED, (double) (System.currentTimeMillis() - start));
                obj = response.getBody();
            } catch (HttpMessageNotReadableException e) {
                // The response arrived but could not be parsed into
                // LocationIQResponse; dump the raw body for diagnosis. Not
                // retried: the same payload would fail again.
                LOG.log(Level.SEVERE, "Problem input: {0},{1}", new Object[] { lat, lng });
                HttpInputMessage msg = e.getHttpInputMessage();
                if (msg == null) {
                    LOG.severe("Null response body");
                } else {
                    LOG.severe("Not null response body");
                    // try-with-resources guarantees the Scanner (and the
                    // underlying stream/descriptor) is released.
                    try (Scanner scanner = new Scanner(msg.getBody())) {
                        LOG.log(Level.SEVERE, "HTTP Body: {0}", scanner.useDelimiter("\\A").next());
                    }
                }
                LOG.log(Level.SEVERE, "cause");
                LOG.log(Level.SEVERE, e.getMostSpecificCause().getMessage());
                LOG.log(Level.SEVERE, "exception");
                LOG.log(Level.SEVERE, e.getMessage());
                retry = false;
            } catch (HttpServerErrorException e) {
                LOG.log(Level.SEVERE, e.getMessage() + " / " + e.getResponseBodyAsString());
                CloudWatchSubmit.sendMetric(LOCATION_IQ, SERVER_ERROR, 1);
                retry = false;
            } catch (ResourceAccessException e) {
                // Low-level I/O failure (timeouts, connection reset, ...).
                LOG.log(Level.SEVERE, e.getMessage());
                CloudWatchSubmit.sendMetric(LOCATION_IQ, SERVER_ERROR, 1);
                retry = false;
            } catch (HttpClientErrorException e) {
                String err = e.getResponseBodyAsString();
                if (HttpStatus.TOO_MANY_REQUESTS.equals(e.getStatusCode())) {
                    if (err.contains(RATE_LIMITED_SECOND)) {
                        // Per-second rate limit: back off exponentially and retry.
                        CloudWatchSubmit.sendMetric(LOCATION_IQ, RETRIES, 1);
                        try {
                            Thread.sleep(delay);
                            delay *= 2;
                        } catch (InterruptedException e2) {
                            Thread.currentThread().interrupt();
                        }
                    } else {
                        // Daily/monthly quota exhausted: retrying cannot help.
                        LOG.log(Level.SEVERE, err);
                        CloudWatchSubmit.sendMetric(LOCATION_IQ, err, 1);
                        retry = false;
                    }
                } else {
                    // Fixed: the original logged getResponseBodyAsString() twice.
                    LOG.log(Level.SEVERE, e.getMessage() + " / " + err);
                    CloudWatchSubmit.sendMetric(LOCATION_IQ, CLIENT_ERROR, 1);
                    retry = false;
                }
            } finally {
                CloudWatchSubmit.sendMetric(LOCATION_IQ, INVOCATIONS, 1);
            }
        } while (obj == null && retry && attempts < MAX_ATTEMPTS);
        // Record which caller (and caller's caller) requested the lookup.
        StackTraceElement elem = Thread.currentThread().getStackTrace()[2];
        StackTraceElement elem2 = Thread.currentThread().getStackTrace()[3];
        CloudWatchSubmit.sendMetric(elem.getClassName(), elem.getMethodName(), elem2.getClassName(),
                elem2.getMethodName(), cached);
    } catch (Exception e) {
        LOG.log(Level.SEVERE, "Big Problem input: {0},{1}", new Object[] { lat, lng });
        LOG.log(Level.SEVERE, e.getMessage(), e);
    }
    return getAddress(obj);
}
After several invocations of the corresponding Lambda, we occasionally get errors like:
Caused by: java.io.FileNotFoundException: /var/task/com/amazonaws/services/dynamodbv2/document/Table.class (Too many open files)
On another lambda using this same method we get the same Exception, but regarding log4j classes so, we are pretty sure it has to do with the referenced Java method, as it's opening HTTP connections (which correlate to file descriptors in Linux) via Spring's RestTemplate.
I've already read several posts and blogs regarding this issue in environments where you can actually tweak the OS limits but, as we are inside an AWS Lambda, the scenario is a bit different and I'd like recommendations on how to deal with this issue. I'm trying to reproduce the problem outside Lambda and haven't been able to do so. The idea was to get the same exception and then check how each suggested change affects it, either correcting the issue or making it worse. But, as we haven't been able to reproduce it locally, we'll have to do trial and error in the AWS Lambda execution environment.

Related

Unit Testing on Java's ListenableFuture and the addListener callback

I have a module that has a method which writes data to a Kinesis stream. We call it the Feedback stream.
I want to run a verifying unit test on my analytics service's functions that are invoked on the ListenableFuture's result. I have tried setting up mocks but am getting a ClassCastException; I think I may be setting up the mocks incorrectly.
method i want to unit test:
/**
 * Serializes the record metadata and outcome as a {@code FeedbackRecord} and
 * writes it asynchronously to the Kinesis feedback stream. Success or failure
 * is reported to the analytics service from a listener that runs on the
 * producer's callback thread ({@code MoreExecutors.directExecutor()}).
 *
 * @param kinesisRecord source record whose version/notificationId/destination
 *                      are propagated into the feedback record
 * @param response      processing outcome; its {@code name()} is recorded
 */
public void writeData(KinesisRecord kinesisRecord, Response response) {
    FeedbackRecord feedbackRecord = new FeedbackRecord(kinesisRecord.getVersion(),
            kinesisRecord.getNotificationId(), response.name(), kinesisRecord.getDestination());
    Gson g = new Gson();
    byte[] dataBytes = g.toJson(feedbackRecord).getBytes(StandardCharsets.UTF_8);
    ListenableFuture<UserRecordResult> future = kinesisProducer.addUserRecord(streamName,
            kinesisRecord.getNotificationId(), ByteBuffer.wrap(dataBytes));
    // NOTE: the listener is a plain Runnable, not a ListenableFutureCallback —
    // tests that stub addListener must treat the first argument accordingly.
    future.addListener(() -> {
        try {
            UserRecordResult userRecordResult = future.get();
            analyticsService.trackSuccess(AnalyticsConstants.FeedbackProducer.WRITE_SUCCESS,
                    AnalyticsConstants.FeedbackProducer.WRITEDATA_LOCATION + " "
                            + userRecordResult.getSequenceNumber());
            log.debug("Write to Kinesis Feedback successful, record sequence number: "
                    + userRecordResult.getSequenceNumber());
        } catch (InterruptedException e) {
            // Fixed: restore the interrupt flag instead of swallowing it, and
            // log the exception through the logger instead of printStackTrace().
            Thread.currentThread().interrupt();
            analyticsService.trackError(AnalyticsConstants.FeedbackProducer.WRITE_ERROR,
                    AnalyticsConstants.FeedbackProducer.WRITEDATA_LOCATION);
            log.error("Feedback Kinesis Write failed: " + e, e);
        } catch (ExecutionException e) {
            analyticsService.trackError(AnalyticsConstants.FeedbackProducer.WRITE_ERROR,
                    AnalyticsConstants.FeedbackProducer.WRITEDATA_LOCATION);
            log.error("Feedback Kinesis Write failed: " + e, e);
        }
    }, MoreExecutors.directExecutor());
}
unit test attempt:
#Test
public void addUserRecordFutureTest() throws ExecutionException, InterruptedException {
ListenableFuture<UserRecordResult> responseFuture = mock(ListenableFuture.class);
Attempt attempt = new Attempt(1, 2, "AttemptError", "5xx", true);
List<Attempt> attempts = new ArrayList<>();
attempts.add(attempt);
UserRecordResult mockUserRecordResult = mock(UserRecordResult.class);
when(kinesisProducer.addUserRecord(any(), any(), any())).thenReturn(responseFuture);
doAnswer(invocation -> {
ListenableFutureCallback listenableFutureCallback = invocation.getArgument(0);
listenableFutureCallback.onSuccess(mockUserRecordResult);
// do asserts here
return null;
}).when(responseFuture).addListener(any(), any());
feedbackProducer.writeData(mockRecord, FeedbackProducer.Response.invalid_channel);
}
STACK TRACE:

How can I catch a runtime exception from a EJB?

I have a very curious situation.
I'm trying to execute an EJB's method and return the result with JAX-RS
// Executes the EJB query and maps persistence exceptions to JAX-RS responses.
// NOTE(review): pseudocode — the "..." placeholders stand for the real
// arguments shown in the full call further down.
public Service readSingle(...) {
    try {
        // Fixed: the query result was discarded although the method declares
        // a Service return type.
        return service.query(...);
    } catch (final NoResultException nre) {
        // No matching entity -> 404
        throw new NotFoundException(...);
    } catch (final NonUniqueResultException nure) {
        // More than one match -> 400 (fixed typo: "BadRequstException")
        throw new BadRequestException(...);
    }
}
The query method requires some values and a BiFuction and a Function.
The actual call looks like this.
try {
    // Optional matrix parameters: nulls mean "not supplied"; the query
    // decides which filters apply.
    return serviceService.<Service>query(
            id,
            ofNullable(matrixParameters.getFirst("onid"))
                    .map(Integer::parseInt).orElse(null),
            ofNullable(matrixParameters.getFirst("tsid"))
                    .map(Integer::parseInt).orElse(null),
            ofNullable(matrixParameters.getFirst("sid"))
                    .map(Integer::parseInt).orElse(null),
            ofNullable(matrixParameters.getFirst("number"))
                    .map(Integer::parseInt).orElse(null),
            ofNullable(matrixParameters.getFirst("programId"))
                    .orElse(null),
            operatorId,
            (builder, root) -> emptyList(),
            TypedQuery::getSingleResult);
} catch (final NoResultException nre) {
    // Fixed typo in the user-facing message: "idnetified" -> "identified".
    throw new NotFoundException(
            "no entity identified by " + serviceIdSegment.getPath()
            + " with " + matrixParameters.toString());
} catch (final NonUniqueResultException nure) {
    throw new BadRequestException("multiple entities identified");
}
Ok I passed TypedQuery::getSingleResult and I expect NonUniqueResultException should be caught when it has to be thrown.
But Payara keeps responding with 500 and the log shows that the NonUniqueResultException is never caught by the code.
I disabled my ExceptionMappers and the results are the same.
Ok. I figured it out. I had to do this.
try {
    // execute EJB
} catch (final EJBTransactionRolledbackException ejbtre) {
    // Walk the getCausedByException() chain down to the leaf cause.
    // Fixed: the original drove the loop with a catch(ClassCastException),
    // using exceptions for ordinary control flow; an instanceof test does the
    // same walk explicitly.
    Exception leaf = ejbtre;
    while (leaf instanceof EJBException
            && ((EJBException) leaf).getCausedByException() != null) {
        leaf = ((EJBException) leaf).getCausedByException();
    }
    logger.severe("causedByException: " + leaf);
    if (leaf instanceof NoResultException) {
        // Fixed typo in the user-facing message: "idnetified" -> "identified".
        throw new NotFoundException(
                "no entity identified by " + serviceIdSegment.getPath()
                + " with " + matrixParameters.toString());
    } else if (leaf instanceof NonUniqueResultException) {
        throw new BadRequestException(
                "multiple entities identified by "
                + serviceIdSegment.getPath()
                + " with " + matrixParameters.toString());
    }
    // Unrecognized root cause: surface as 500 with the original exception.
    throw new InternalServerErrorException(ejbtre);
}
This is far nastier than I expected. The EJB method's design is not good.
Is there any way to do this more simply?
Let me introduce one of my utility class I used to justify myself.
/**
 * Utility for unwrapping the {@code getCausedByException()} chain of an
 * {@link EJBException}.
 */
public final class EJBExceptions {

    private static final Logger logger = getLogger(EJBExceptions.class.getName());

    /**
     * Returns the chain of causedBy exceptions, outermost first. The walk
     * stops at the first null cause, or right after the first cause that is
     * not itself an {@code EJBException}.
     */
    public static Stream<Exception> causedByExceptions(EJBException ejbe) {
        final Stream.Builder<Exception> chain = Stream.builder();
        EJBException current = ejbe;
        while (current != null) {
            final Exception cause = current.getCausedByException();
            if (cause == null) {
                break;
            }
            chain.add(cause);
            // Descend only while the cause is itself an EJBException.
            current = cause instanceof EJBException ? (EJBException) cause : null;
        }
        return chain.build();
    }

    /** Returns the deepest causedBy exception in the chain, if any. */
    public static Optional<Exception> lastCausedByException(final EJBException ejbe) {
        return causedByExceptions(ejbe).reduce((first, second) -> second);
    }

    /** Non-instantiable utility class. */
    private EJBExceptions() {
    }
}

code without log messages

From time to time I come across a class where a bigger part of code are logging calls.
e.g.
public init(Config config) {
logger.info("Configuring ...");
if (config.hasInitInterval()) {
initInterval = config.getInitInterval();
logger.info("Set initInterval to " + initInterval);
}
...
try {
logger.info("Updating access points " + config.getAccessPoints());
updateAccessPoints(config.getAccessPoints())
} catch (Throwable e) {
logger.warn("Init failed due to ", e);
}
...
if (logger.isDebugEnabled ()) {
for(int i = 0; i < config.getModifiers().size(); i++) {
try {
isValidModifier(config.getModifiers().get(i));
} catch (Exception e) {
throw new IllegalArgumentException ("Wrong modifier: " config.getModifiers().get(i));
}
}
}
}
When a class is not formatted well plus contains comments, it's hard to read the code.
I used proxy pattern to partially improve it but it's suitable only to log something before or after a call of method.
What are the best practices to separate functionality from logging?

Connection close in Java Servlet and SSE

I try to implement Server-Sent-Event in my Webapp with Java Serlvet on server.
Is it possible to check in Servlet that connection is closed by client? The loop while(true) in Servlet is infinite even if client browser is closed.
Client code
function startLogSSE(lastEventId, level) {
var eventSource = new EventSource("log-sse?last-event-id=" + lastEventId + "&level=" + level);
eventSource.onmessage = function (event) {
document.getElementById('log').innerHTML = event.data + "\n" + document.getElementById('log').innerHTML;
};
}
Server code
public class LogSSEServlet extends HttpServlet {
private static final Logger logger = LoggerFactory.getLogger(LogSSEServlet.class);
#Override
protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
response.setContentType("text/event-stream");
response.setCharacterEncoding("UTF-8");
PrintWriter writer = response.getWriter();
// get logger purgerDB appender
PurgerDBAppender appender = LogUtils.getPurgerDBAppender();
if (appender == null) {
writer.write("data: [ERROR] Appender 'purgerDB' isn't found for logger 'com.bp3'\n\n");
writer.close();
return;
}
int eventId = 0;
// get last-event-id
String lastEventId = request.getHeader("last-event-id");
if (lastEventId == null) {
// try to get lastEventId from parameter
lastEventId = request.getParameter("last-event-id");
}
if (lastEventId != null) {
try {
eventId = Integer.parseInt(lastEventId);
} catch (NumberFormatException e) {
logger.error("Failed to parse last-event-id: " + lastEventId);
}
}
String minLevel = request.getParameter("level");
if (minLevel == null) {
minLevel = "TRACE";
}
// get logs from purgerDB logger appender
LogServices logServices = new LogServices();
try {
logServices.open();
} catch (SQLException e) {
throw new ServletException(e);
}
try {
while (true) {
List<LogMessage> messages = logServices.getLastMessages(Level.toLevel(minLevel), eventId, 0);
if (messages.size() > 0) {
writer.write("id: " + messages.get(0).getEventId() + "\n");
writer.write("data: " + LogUtils.formatLog(messages) + "\n");
writer.flush();
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
break;
}
}
} catch (SQLException e) {
throw new ServletException(e);
} finally {
logServices.closeQuietly();
}
}
}
Is it possible to check in Servlet that connection is closed by client?
Eventually an exception will be thrown: either an IOException: connection reset if you are streaming directly to the socket, or an OutOfMemoryError if the container is streaming to memory, which it does when you aren't using a fixed-length or chunked transfer mode.
The loop while(true) in Servlet is infinite even if client browser is closed.
No it isn't.
One way to check, within the Servlet, that the connection is closed is to use the writer.checkError() method. I tested this fix on Chrome and it works. Your code would be:
boolean error=false;
while (!error) {
//...
writer.write("data: " + /*...*/ "\n");
//writer.flush();
error = writer.checkError(); //internally calls writer.flush()
}
Details:
The PrintWriter's API says:
Methods in this class never throw I/O exceptions, although some of its
constructors may. The client may inquire as to whether any errors have
occurred by invoking checkError().
and the checkError() says:
Flushes the stream if it's not closed and checks its error state

Java Google Drive SDK SocketTimeoutException on File.insert.execute

I have been using the Google Drive SDK in both Java and Objective-c for months now and suddenly today (Jul 10, 2013) inserting new files using the java api (google-api-client-1.15.0-rc.jar & version 1.14) is failing to execute on FileInsert.execute(). Here is the static helper method I've been using (basically a copy from the DrEdit tutorials):
/**
 * Creates a new file in Google Drive, optionally attaching content and a
 * parent folder.
 *
 * <p>NOTE(review): the SocketTimeoutException reported below suggests the
 * Drive client is using default HTTP timeouts — consider configuring
 * connect/read timeouts via an HttpRequestInitializer on the Drive builder.
 *
 * @param driveService authorized Drive client
 * @param title        file title
 * @param description  file description
 * @param parentId     parent folder id; ignored when null/empty
 * @param mimeType     MIME type of the file (and of the content, if any)
 * @param content      local file to upload; metadata-only insert when null/empty
 * @return the created remote File, or null if the insert failed
 */
public static File insertFile(Drive driveService, String title, String description,
        String parentId, String mimeType, java.io.File content) {
    System.out.println("<GDrive insertFile> start");
    File body = new File();
    body.setTitle(title);
    body.setDescription(description);
    body.setMimeType(mimeType);
    if (parentId != null && parentId.length() > 0) {
        body.setParents(Arrays.asList(new ParentReference().setId(parentId)));
    }
    System.out.println("<GDrive insertFile> before content attache");
    try {
        // Fixed: one try/catch instead of three scattered ones that swallowed
        // the IOException with printStackTrace() and then let the caller
        // dereference a null return (the NullPointerException in the trace).
        Insert fileInsert;
        if (content != null && content.length() > 0) {
            FileContent mediaContent = new FileContent(mimeType, content);
            fileInsert = driveService.files().insert(body, mediaContent);
        } else {
            fileInsert = driveService.files().insert(body);
        }
        System.out.println("<GDrive insertFile> before execute");
        File file = fileInsert.execute();
        System.out.println("<GDrive insertFile> file posted " + file.getId());
        return file;
    } catch (IOException e) {
        // Fixed typo: "occured" -> "occurred".
        System.out.println("An error occurred: " + e);
        return null;
    }
}
Obviously System.out was used to locate the issue, it hangs up on "before execute". I haven't been able to find any posts about Google server issues or changes in how the api is to be used. Here is the error message that is logged when the app fails any time after 20 - 180 seconds:
An error occured: java.net.SocketTimeoutException: Read timed out
Exception in thread "main" java.lang.NullPointerException
Thanks.

Categories

Resources