Flink savepoint is declined - java

I'm trying to take a savepoint on a job for which I have implemented a custom parallelizable socket source. The source looks something like this:
@Override
public void run(SourceContext<String> sourceContext) throws Exception {
    int idx = getRuntimeContext().getIndexOfThisSubtask();
    String[] hosts = (config.hostsStr).split(":");
    String[] portStrArr = (config.portsStr).split(":");
    int[] ports = new int[portStrArr.length];
    for (int i = 0; i < portStrArr.length; i++) {
        ports[i] = Integer.parseInt(portStrArr[i]);
    }
    Socket s = new Socket(hosts[idx], ports[idx]);
    BufferedReader in = new BufferedReader(new InputStreamReader(s.getInputStream()));
    //ois = new ObjectInputStream(s.getInputStream());
    while (running) {
        String str = in.readLine();
        sourceContext.collect(str);
    }
    sourceContext.close();
}

@Override
public void cancel() {
    running = false;
}
The exception on the cluster looks something like this:
flink-1.1.3/bin//flink cancel -s hdfs://flink-master:19000/flink-checkpoints a18499a80099045eb5120ecacdabd421
Retrieving JobManager.
Using address flink-master/10.0.0.16:6123 to connect to JobManager.
Cancelling job a18499a80099045eb5120ecacdabd421 with savepoint to hdfs://flink-master:19000/flink-checkpoints.
java.lang.Exception: Canceling the job with ID a18499a80099045eb5120ecacdabd421 failed.
at org.apache.flink.client.CliFrontend.cancel(CliFrontend.java:637)
at org.apache.flink.client.CliFrontend.parseParameters(CliFrontend.java:1092)
at org.apache.flink.client.CliFrontend$2.call(CliFrontend.java:1133)
at org.apache.flink.client.CliFrontend$2.call(CliFrontend.java:1130)
at org.apache.flink.runtime.security.HadoopSecurityContext$1.run(HadoopSecurityContext.java:43)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
at org.apache.flink.runtime.security.HadoopSecurityContext.runSecured(HadoopSecurityContext.java:40)
at org.apache.flink.client.CliFrontend.main(CliFrontend.java:1130)
Caused by: java.lang.Exception: Failed to trigger savepoint.
at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1$$anon$6.apply(JobManager.scala:639)
at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1$$anon$6.apply(JobManager.scala:629)
at org.apache.flink.runtime.concurrent.impl.FlinkFuture$5.onComplete(FlinkFuture.java:272)
at akka.dispatch.OnComplete.internal(Future.scala:247)
at akka.dispatch.OnComplete.internal(Future.scala:245)
at akka.dispatch.japi$CallbackBridge.apply(Future.scala:175)
at akka.dispatch.japi$CallbackBridge.apply(Future.scala:172)
at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)
at akka.dispatch.BatchingExecutor$AbstractBatch.processBatch(BatchingExecutor.scala:55)
at akka.dispatch.BatchingExecutor$BlockableBatch$$anonfun$run$1.apply$mcV$sp(BatchingExecutor.scala:91)
at akka.dispatch.BatchingExecutor$BlockableBatch$$anonfun$run$1.apply(BatchingExecutor.scala:91)
at akka.dispatch.BatchingExecutor$BlockableBatch$$anonfun$run$1.apply(BatchingExecutor.scala:91)
at scala.concurrent.BlockContext$.withBlockContext(BlockContext.scala:72)
at akka.dispatch.BatchingExecutor$BlockableBatch.run(BatchingExecutor.scala:90)
at akka.dispatch.TaskInvocation.run(AbstractDispatcher.scala:40)
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:397)
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
Caused by: java.io.EOFException: Premature EOF: no length prefix available
at org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed(PBHelper.java:2282)
at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.createBlockOutputStream(DFSOutputStream.java:1347)
at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.nextBlockOutputStream(DFSOutputStream.java:1266)
at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:449)
Suppressed: java.lang.IllegalArgumentException: Self-suppression not permitted
at java.lang.Throwable.addSuppressed(Throwable.java:1043)
at java.io.FilterOutputStream.close(FilterOutputStream.java:159)
at org.apache.flink.runtime.checkpoint.savepoint.SavepointStore.storeSavepointToHandle(SavepointStore.java:207)
at org.apache.flink.runtime.checkpoint.savepoint.SavepointStore.storeSavepointToHandle(SavepointStore.java:150)
at org.apache.flink.runtime.checkpoint.PendingCheckpoint.finalizeCheckpointExternalized(PendingCheckpoint.java:281)
at org.apache.flink.runtime.checkpoint.CheckpointCoordinator.completePendingCheckpoint(CheckpointCoordinator.java:888)
at org.apache.flink.runtime.checkpoint.CheckpointCoordinator.receiveAcknowledgeMessage(CheckpointCoordinator.java:813)
at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$org$apache$flink$runtime$jobmanager$JobManager$$handleCheckpointMessage$1.apply$mcV$sp(JobManager.scala:1462)
at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$org$apache$flink$runtime$jobmanager$JobManager$$handleCheckpointMessage$1.apply(JobManager.scala:1461)
at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$org$apache$flink$runtime$jobmanager$JobManager$$handleCheckpointMessage$1.apply(JobManager.scala:1461)
at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
at akka.dispatch.TaskInvocation.run(AbstractDispatcher.scala:40)
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:397)
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
[CIRCULAR REFERENCE:java.io.EOFException: Premature EOF: no length prefix available]
On my local machine the savepoint is declined with the following exception:
Cancelling job 4c99e0220c8c4683d1287269073b5c2c with savepoint to savepoints/.
java.lang.Exception: Canceling the job with ID 4c99e0220c8c4683d1287269073b5c2c failed.
at org.apache.flink.client.CliFrontend.cancel(CliFrontend.java:637)
at org.apache.flink.client.CliFrontend.parseParameters(CliFrontend.java:1092)
at org.apache.flink.client.CliFrontend$2.call(CliFrontend.java:1133)
at org.apache.flink.client.CliFrontend$2.call(CliFrontend.java:1130)
at org.apache.flink.runtime.security.HadoopSecurityContext$1.run(HadoopSecurityContext.java:43)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1657)
at org.apache.flink.runtime.security.HadoopSecurityContext.runSecured(HadoopSecurityContext.java:40)
at org.apache.flink.client.CliFrontend.main(CliFrontend.java:1130)
Caused by: java.lang.Exception: Failed to trigger savepoint.
at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1$$anon$6.apply(JobManager.scala:639)
at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$handleMessage$1$$anon$6.apply(JobManager.scala:629)
at org.apache.flink.runtime.concurrent.impl.FlinkFuture$5.onComplete(FlinkFuture.java:272)
at akka.dispatch.OnComplete.internal(Future.scala:247)
at akka.dispatch.OnComplete.internal(Future.scala:245)
at akka.dispatch.japi$CallbackBridge.apply(Future.scala:175)
at akka.dispatch.japi$CallbackBridge.apply(Future.scala:172)
at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)
at akka.dispatch.BatchingExecutor$AbstractBatch.processBatch(BatchingExecutor.scala:55)
at akka.dispatch.BatchingExecutor$BlockableBatch$$anonfun$run$1.apply$mcV$sp(BatchingExecutor.scala:91)
at akka.dispatch.BatchingExecutor$BlockableBatch$$anonfun$run$1.apply(BatchingExecutor.scala:91)
at akka.dispatch.BatchingExecutor$BlockableBatch$$anonfun$run$1.apply(BatchingExecutor.scala:91)
at scala.concurrent.BlockContext$.withBlockContext(BlockContext.scala:72)
at akka.dispatch.BatchingExecutor$BlockableBatch.run(BatchingExecutor.scala:90)
at akka.dispatch.TaskInvocation.run(AbstractDispatcher.scala:40)
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:397)
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
Caused by: java.lang.Exception: Checkpoint was declined (tasks not ready)
at org.apache.flink.runtime.checkpoint.PendingCheckpoint.abortDeclined(PendingCheckpoint.java:510)
at org.apache.flink.runtime.checkpoint.CheckpointCoordinator.receiveDeclineMessage(CheckpointCoordinator.java:735)
at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$org$apache$flink$runtime$jobmanager$JobManager$$handleCheckpointMessage$2.apply$mcV$sp(JobManager.scala:1491)
at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$org$apache$flink$runtime$jobmanager$JobManager$$handleCheckpointMessage$2.apply(JobManager.scala:1490)
at org.apache.flink.runtime.jobmanager.JobManager$$anonfun$org$apache$flink$runtime$jobmanager$JobManager$$handleCheckpointMessage$2.apply(JobManager.scala:1490)
at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
... 6 more
Is it because my source cannot be stopped properly, so that the checkpoint never happens? On the cluster the command does say it succeeded and returns the location of the savepoint, but there is no file at that path.

Given the source function excerpt, it almost looks good to me. What you should do is emit elements under the checkpoint lock. Otherwise you might run into problems when an element is emitted at the same time as a checkpoint is triggered. The lock returned by SourceContext#getCheckpointLock makes sure that these two operations don't happen concurrently.
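Concretely, the emit loop from the excerpt could be adapted roughly like this (a sketch reusing the question's running flag and reader in; the null check is an assumption about how the remote end signals end-of-stream):

while (running) {
    String str = in.readLine();
    if (str == null) {
        break; // remote side closed the socket (assumed termination signal)
    }
    // Emit under the checkpoint lock so collecting an element and taking
    // a checkpoint cannot interleave.
    synchronized (sourceContext.getCheckpointLock()) {
        sourceContext.collect(str);
    }
}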
The first error looks a bit as if you have a problem on the HDFS side. Could you check whether the HDFS logs contain anything suspicious? Maybe the data nodes ran out of disk space.
The second exception indicates that something went wrong while taking the checkpoint. The JobManager logs should contain a log statement saying why the checkpoint failed. It should have the format: Discarding checkpoint CHECKPOINT_ID because of checkpoint decline from task EXECUTION_ID : REASON.

Related

ClassCastException occurs when Flink DataStream sends a message to a remote stateful function

The DataStream job:
public static final FunctionType DEVICE = new FunctionType("com.github.f1xman.era.anomalydetection.device", "DeviceFunction");

public static void main(String[] args) throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StatefulFunctionsConfig statefunConfig = StatefulFunctionsConfig.fromEnvironment(env);
    statefunConfig.setFactoryType(MessageFactoryType.WITH_KRYO_PAYLOADS);

    DataStreamSource<String> names = env.addSource(new NamesSourceFunction());
    DataStream<RoutableMessage> namesIngress = names.map(name -> RoutableMessageBuilder.builder()
            .withTargetAddress(DEVICE, name)
            .withMessageBody(name)
            .build());

    StatefulFunctionDataStreamBuilder.builder("example")
            .withDataStreamAsIngress(namesIngress)
            .withRequestReplyRemoteFunction(
                    requestReplyFunctionBuilder(DEVICE, URI.create("http://localhost:8080/statefun"))
            )
            .withConfiguration(statefunConfig)
            .build(env);

    env.execute("Flink Streaming Java API Skeleton");
}
When a String value is passed to .withMessageBody(...), the following exception occurs:
Exception in thread "main" org.apache.flink.runtime.client.JobExecutionException: Job execution failed.
at org.apache.flink.runtime.jobmaster.JobResult.toJobExecutionResult(JobResult.java:144)
at org.apache.flink.runtime.minicluster.MiniClusterJobClient.lambda$getJobExecutionResult$3(MiniClusterJobClient.java:137)
at java.util.concurrent.CompletableFuture.uniApply(CompletableFuture.java:616)
at java.util.concurrent.CompletableFuture$UniApply.tryFire(CompletableFuture.java:591)
at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
at org.apache.flink.runtime.rpc.akka.AkkaInvocationHandler.lambda$invokeRpc$1(AkkaInvocationHandler.java:258)
at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
at org.apache.flink.util.concurrent.FutureUtils.doForward(FutureUtils.java:1389)
at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.lambda$null$1(ClassLoadingUtils.java:93)
at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:68)
at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.lambda$guardCompletionWithContextClassLoader$2(ClassLoadingUtils.java:92)
at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:774)
at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:750)
at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:488)
at java.util.concurrent.CompletableFuture.complete(CompletableFuture.java:1975)
at org.apache.flink.runtime.concurrent.akka.AkkaFutureUtils$1.onComplete(AkkaFutureUtils.java:47)
at akka.dispatch.OnComplete.internal(Future.scala:300)
at akka.dispatch.OnComplete.internal(Future.scala:297)
at akka.dispatch.japi$CallbackBridge.apply(Future.scala:224)
at akka.dispatch.japi$CallbackBridge.apply(Future.scala:221)
at scala.concurrent.impl.CallbackRunnable.run$$$capture(Promise.scala:60)
at scala.concurrent.impl.CallbackRunnable.run(Promise.scala)
at org.apache.flink.runtime.concurrent.akka.AkkaFutureUtils$DirectExecutionContext.execute(AkkaFutureUtils.java:65)
at scala.concurrent.impl.CallbackRunnable.executeWithValue(Promise.scala:68)
at scala.concurrent.impl.Promise$DefaultPromise.$anonfun$tryComplete$1(Promise.scala:284)
at scala.concurrent.impl.Promise$DefaultPromise.$anonfun$tryComplete$1$adapted(Promise.scala:284)
at scala.concurrent.impl.Promise$DefaultPromise.tryComplete(Promise.scala:284)
at akka.pattern.PromiseActorRef.$bang(AskSupport.scala:621)
at akka.pattern.PipeToSupport$PipeableFuture$$anonfun$pipeTo$1.applyOrElse(PipeToSupport.scala:24)
at akka.pattern.PipeToSupport$PipeableFuture$$anonfun$pipeTo$1.applyOrElse(PipeToSupport.scala:23)
at scala.concurrent.Future.$anonfun$andThen$1(Future.scala:532)
at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:29)
at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:29)
at scala.concurrent.impl.CallbackRunnable.run$$$capture(Promise.scala:60)
at scala.concurrent.impl.CallbackRunnable.run(Promise.scala)
at akka.dispatch.BatchingExecutor$AbstractBatch.processBatch(BatchingExecutor.scala:63)
at akka.dispatch.BatchingExecutor$BlockableBatch.$anonfun$run$1(BatchingExecutor.scala:100)
at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:12)
at scala.concurrent.BlockContext$.withBlockContext(BlockContext.scala:81)
at akka.dispatch.BatchingExecutor$BlockableBatch.run(BatchingExecutor.scala:100)
at akka.dispatch.TaskInvocation.run(AbstractDispatcher.scala:49)
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(ForkJoinExecutorConfigurator.scala:48)
at java.util.concurrent.ForkJoinTask.doExec$$$capture(ForkJoinTask.java:289)
at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java)
at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:175)
Caused by: org.apache.flink.runtime.JobException: Recovery is suppressed by NoRestartBackoffTimeStrategy
at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.handleFailure(ExecutionFailureHandler.java:138)
at org.apache.flink.runtime.executiongraph.failover.flip1.ExecutionFailureHandler.getFailureHandlingResult(ExecutionFailureHandler.java:82)
at org.apache.flink.runtime.scheduler.DefaultScheduler.handleTaskFailure(DefaultScheduler.java:252)
at org.apache.flink.runtime.scheduler.DefaultScheduler.maybeHandleTaskFailure(DefaultScheduler.java:242)
at org.apache.flink.runtime.scheduler.DefaultScheduler.updateTaskExecutionStateInternal(DefaultScheduler.java:233)
at org.apache.flink.runtime.scheduler.SchedulerBase.updateTaskExecutionState(SchedulerBase.java:684)
at org.apache.flink.runtime.scheduler.SchedulerNG.updateTaskExecutionState(SchedulerNG.java:79)
at org.apache.flink.runtime.jobmaster.JobMaster.updateTaskExecutionState(JobMaster.java:444)
at sun.reflect.GeneratedMethodAccessor14.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.lambda$handleRpcInvocation$1(AkkaRpcActor.java:316)
at org.apache.flink.runtime.concurrent.akka.ClassLoadingUtils.runWithContextClassLoader(ClassLoadingUtils.java:83)
at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcInvocation(AkkaRpcActor.java:314)
at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleRpcMessage(AkkaRpcActor.java:217)
at org.apache.flink.runtime.rpc.akka.FencedAkkaRpcActor.handleRpcMessage(FencedAkkaRpcActor.java:78)
at org.apache.flink.runtime.rpc.akka.AkkaRpcActor.handleMessage(AkkaRpcActor.java:163)
at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:24)
at akka.japi.pf.UnitCaseStatement.apply(CaseStatements.scala:20)
at scala.PartialFunction.applyOrElse(PartialFunction.scala:123)
at scala.PartialFunction.applyOrElse$(PartialFunction.scala:122)
at akka.japi.pf.UnitCaseStatement.applyOrElse(CaseStatements.scala:20)
at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:171)
at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:172)
at scala.PartialFunction$OrElse.applyOrElse(PartialFunction.scala:172)
at akka.actor.Actor.aroundReceive(Actor.scala:537)
at akka.actor.Actor.aroundReceive$(Actor.scala:535)
at akka.actor.AbstractActor.aroundReceive(AbstractActor.scala:220)
at akka.actor.ActorCell.receiveMessage$$$capture(ActorCell.scala:580)
at akka.actor.ActorCell.receiveMessage(ActorCell.scala)
at akka.actor.ActorCell.invoke(ActorCell.scala:548)
at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:270)
at akka.dispatch.Mailbox.run(Mailbox.scala:231)
at akka.dispatch.Mailbox.exec(Mailbox.scala:243)
... 5 more
Caused by: org.apache.flink.statefun.flink.core.functions.StatefulFunctionInvocationException: An error occurred when attempting to invoke function FunctionType(com.github.f1xman.era.anomalydetection.device, DeviceFunction).
at org.apache.flink.statefun.flink.core.functions.StatefulFunction.receive(StatefulFunction.java:50)
at org.apache.flink.statefun.flink.core.functions.ReusableContext.apply(ReusableContext.java:74)
at org.apache.flink.statefun.flink.core.functions.LocalFunctionGroup.processNextEnvelope(LocalFunctionGroup.java:60)
at org.apache.flink.statefun.flink.core.functions.Reductions.processEnvelopes(Reductions.java:164)
at org.apache.flink.statefun.flink.core.functions.Reductions.apply(Reductions.java:149)
at org.apache.flink.statefun.flink.core.functions.FunctionGroupOperator.processElement(FunctionGroupOperator.java:90)
at org.apache.flink.streaming.runtime.tasks.CopyingChainingOutput.pushToOperator(CopyingChainingOutput.java:82)
at org.apache.flink.streaming.runtime.tasks.CopyingChainingOutput.collect(CopyingChainingOutput.java:57)
at org.apache.flink.streaming.runtime.tasks.CopyingChainingOutput.collect(CopyingChainingOutput.java:29)
at org.apache.flink.streaming.api.operators.CountingOutput.collect(CountingOutput.java:56)
at org.apache.flink.streaming.api.operators.CountingOutput.collect(CountingOutput.java:29)
at org.apache.flink.statefun.flink.core.feedback.FeedbackUnionOperator.sendDownstream(FeedbackUnionOperator.java:180)
at org.apache.flink.statefun.flink.core.feedback.FeedbackUnionOperator.processElement(FeedbackUnionOperator.java:86)
at org.apache.flink.streaming.runtime.tasks.OneInputStreamTask$StreamTaskNetworkOutput.emitRecord(OneInputStreamTask.java:233)
at org.apache.flink.streaming.runtime.io.AbstractStreamTaskNetworkInput.processElement(AbstractStreamTaskNetworkInput.java:134)
at org.apache.flink.streaming.runtime.io.AbstractStreamTaskNetworkInput.emitNext(AbstractStreamTaskNetworkInput.java:105)
at org.apache.flink.streaming.runtime.io.StreamOneInputProcessor.processInput(StreamOneInputProcessor.java:65)
at org.apache.flink.streaming.runtime.tasks.StreamTask.processInput(StreamTask.java:496)
at org.apache.flink.streaming.runtime.tasks.mailbox.MailboxProcessor.runMailboxLoop(MailboxProcessor.java:203)
at org.apache.flink.streaming.runtime.tasks.StreamTask.runMailboxLoop(StreamTask.java:809)
at org.apache.flink.streaming.runtime.tasks.StreamTask.invoke(StreamTask.java:761)
at org.apache.flink.runtime.taskmanager.Task.runWithSystemExitMonitoring(Task.java:958)
at org.apache.flink.runtime.taskmanager.Task.restoreAndInvoke(Task.java:937)
at org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:766)
at org.apache.flink.runtime.taskmanager.Task.run(Task.java:575)
at java.lang.Thread.run(Thread.java:750)
Caused by: java.lang.ClassCastException: java.lang.String cannot be cast to org.apache.flink.statefun.sdk.reqreply.generated.TypedValue
at org.apache.flink.statefun.flink.core.reqreply.RequestReplyFunction.invoke(RequestReplyFunction.java:118)
at org.apache.flink.statefun.flink.core.functions.StatefulFunction.receive(StatefulFunction.java:48)
... 25 more
Sending a String value to an embedded function works well, though. The workaround I've found is to wrap the value in a TypedValue:
.withMessageBody(TypedValue.newBuilder()
        .setValue(ByteString.copyFrom(name, StandardCharsets.UTF_8))
        .setHasValue(true)
        .setTypename("example/Name")
        .build()
)
This approach requires the receiver function to unwrap the TypedValue and deserialize the ByteString (see the sketch below). It looks too low-level for this kind of API. I believe this is the wrong way to use the Stateful Functions SDK for Flink DataStream integration. What is the correct way to implement interoperability between a Flink DataStream and remote Stateful Functions?
The job is inspired by the official examples.
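For reference, the unwrapping on the receiving side would look roughly like the sketch below. This only illustrates the workaround above, not the intended API usage; it assumes the remote function is handed the raw TypedValue and that the payload was written as UTF-8 bytes, as in the wrapping code:

import org.apache.flink.statefun.sdk.reqreply.generated.TypedValue;

final class TypedValueUnwrap {
    // Hypothetical helper illustrating the unwrap step described above:
    // getValue() returns the protobuf ByteString payload, which was
    // written as UTF-8, so it is read back the same way.
    static String unwrap(TypedValue typedValue) {
        return typedValue.getValue().toStringUtf8();
    }
}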

Google Cloud Storage Not Always Downloading File

I'm trying to create a Spring Boot application that uses the GCP Pub/Sub binder with Spring Cloud Stream to listen for the event GCP posts to a topic when a file is loaded into a specific bucket. The Spring Boot application then needs to take the file referenced in the Pub/Sub message, download it to the temp directory, process it, and delete it from local storage. It successfully processed 371 files dumped into the directory, but 137 files all threw the same error. The following is the method throwing the exception, followed by the exception itself:
private Optional<List<SingleImport>> load(StorageObjectMessage storageObjectMessage, Storage storage) throws JAXBException {
    ImportLoader loader = new ImportLoader();
    FileConverter converter = new FileConverter();
    Blob blob = storage.get(blobId);
    Optional<List<SingleImport>> result = Optional.empty();
    log.info(String.format("%s blob with id of %s", blob == null ? "Could not find" : "Found", blobId.getName()));
    if (blob != null) {
        String systemTemporaryDirectory = String.format("%s%s.tmp", System.getProperty("java.io.tmpdir"), storageObjectMessage.getMd5Hash());
        log.info(String.format("Downloading file for processing to temp directory at %s", systemTemporaryDirectory));
        blob.downloadTo(Paths.get(systemTemporaryDirectory));
        File tempFile = new File(systemTemporaryDirectory);
        Import importModel = loader.load(tempFile);
        List<SingleImport> singleImport = converter.convert(importModel);
        result = Optional.of(singleImport);
    }
    return result;
}
2022-03-17 10:43:43.380 INFO 39483 --- [sub-subscriber1] c.h.mro2go.sap.service.FileService : Found blob with id of update/inbound/cat_upd_20220104124110-00028.xml
2022-03-17 10:43:43.380 INFO 39483 --- [sub-subscriber1] c.h.mro2go.sap.service.FileService : Downloading file for processing to temp directory at /var/folders/dk/4k5hcmcj1zv43kx0pgpys8j80000gq/T/jrrpW/f1JEx0uFQDq2xsVQ==.tmp
com.google.cloud.storage.StorageException: /var/folders/dk/4k5hcmcj1zv43kx0pgpys8j80000gq/T/jrrpW/f1JEx0uFQDq2xsVQ==.tmp
at com.google.cloud.storage.Blob.downloadTo(Blob.java:237)
at com.google.cloud.storage.Blob.downloadTo(Blob.java:274)
at com.hdsupply.mro2go.sap.service.FileService.load(FileService.java:90)
at com.hdsupply.mro2go.sap.service.FileService.retrieveNewFile(FileService.java:42)
at com.hdsupply.mro2go.sap.message.SapEventConsumer.processFile(SapEventConsumer.java:56)
at com.hdsupply.mro2go.sap.message.SapEventConsumer.accept(SapEventConsumer.java:35)
at com.hdsupply.mro2go.sap.message.SapEventConsumer.accept(SapEventConsumer.java:1)
at org.springframework.cloud.function.context.catalog.SimpleFunctionRegistry$FunctionInvocationWrapper.invokeConsumer(SimpleFunctionRegistry.java:975)
at org.springframework.cloud.function.context.catalog.SimpleFunctionRegistry$FunctionInvocationWrapper.doApply(SimpleFunctionRegistry.java:704)
at org.springframework.cloud.function.context.catalog.SimpleFunctionRegistry$FunctionInvocationWrapper.apply(SimpleFunctionRegistry.java:550)
at org.springframework.cloud.stream.function.PartitionAwareFunctionWrapper.apply(PartitionAwareFunctionWrapper.java:84)
at org.springframework.cloud.stream.function.FunctionConfiguration$FunctionWrapper.apply(FunctionConfiguration.java:749)
at org.springframework.cloud.stream.function.FunctionConfiguration$FunctionToDestinationBinder$1.handleMessageInternal(FunctionConfiguration.java:581)
at org.springframework.integration.handler.AbstractMessageHandler.handleMessage(AbstractMessageHandler.java:56)
at org.springframework.integration.dispatcher.AbstractDispatcher.tryOptimizedDispatch(AbstractDispatcher.java:115)
at org.springframework.integration.dispatcher.UnicastingDispatcher.doDispatch(UnicastingDispatcher.java:133)
at org.springframework.integration.dispatcher.UnicastingDispatcher.dispatch(UnicastingDispatcher.java:106)
at org.springframework.integration.channel.AbstractSubscribableChannel.doSend(AbstractSubscribableChannel.java:72)
at org.springframework.integration.channel.AbstractMessageChannel.send(AbstractMessageChannel.java:317)
at org.springframework.integration.channel.AbstractMessageChannel.send(AbstractMessageChannel.java:272)
at org.springframework.messaging.core.GenericMessagingTemplate.doSend(GenericMessagingTemplate.java:187)
at org.springframework.messaging.core.GenericMessagingTemplate.doSend(GenericMessagingTemplate.java:166)
at org.springframework.messaging.core.GenericMessagingTemplate.doSend(GenericMessagingTemplate.java:47)
at org.springframework.messaging.core.AbstractMessageSendingTemplate.send(AbstractMessageSendingTemplate.java:109)
at org.springframework.integration.endpoint.MessageProducerSupport.sendMessage(MessageProducerSupport.java:208)
at com.google.cloud.spring.pubsub.integration.inbound.PubSubInboundChannelAdapter.consumeMessage(PubSubInboundChannelAdapter.java:144)
at com.google.cloud.spring.pubsub.core.subscriber.PubSubSubscriberTemplate.lambda$subscribeAndConvert$1(PubSubSubscriberTemplate.java:173)
at com.google.cloud.pubsub.v1.MessageDispatcher$4.run(MessageDispatcher.java:396)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
at java.base/java.lang.Thread.run(Thread.java:833)
Caused by: java.nio.file.NoSuchFileException: /var/folders/dk/4k5hcmcj1zv43kx0pgpys8j80000gq/T/jrrpW/f1JEx0uFQDq2xsVQ==.tmp
at java.base/sun.nio.fs.UnixException.translateToIOException(UnixException.java:92)
at java.base/sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:106)
at java.base/sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:111)
at java.base/sun.nio.fs.UnixFileSystemProvider.newByteChannel(UnixFileSystemProvider.java:218)
at java.base/java.nio.file.spi.FileSystemProvider.newOutputStream(FileSystemProvider.java:484)
at java.base/java.nio.file.Files.newOutputStream(Files.java:228)
at com.google.cloud.storage.Blob.downloadTo(Blob.java:234)
... 33 more
What might be causing this to happen at all, let alone only some of the time?
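One detail worth noting in the failing path above: the base64-encoded MD5 hash (jrrpW/f1JEx0uFQDq2xsVQ==) contains a '/', so the constructed path points into a jrrpW subdirectory that does not exist, which may explain why only some files fail. A minimal sketch of a hash-independent alternative, assuming only that a unique local download target is needed:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

final class TempFileSketch {
    // Hypothetical alternative to deriving the file name from the MD5 hash:
    // let the JDK pick a unique, always-valid path under java.io.tmpdir,
    // so path separators in the hash can never leak into the file name.
    static Path newDownloadTarget() throws IOException {
        return Files.createTempFile("gcs-import-", ".tmp");
    }
}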

Couchbase + Scala + Java SDK : IndexOutOfBounds

I am relatively new to Scala and to Couchbase, but I need to learn both fast. Recently, while trying to run a sample application using the Couchbase Java SDK through Scala, I ran into the following problem:
[cb-core-3-2] WARN com.couchbase.client.core.CouchbaseCore - Exception while Handling Request Events RequestEvent{request=null}
java.lang.IndexOutOfBoundsException: Index: 1854, Size: 0
at java.util.ArrayList.rangeCheck(ArrayList.java:653)
at java.util.ArrayList.get(ArrayList.java:429)
at com.couchbase.client.core.config.DefaultCouchbaseBucketConfig.nodeIndexForMaster(DefaultCouchbaseBucketConfig.java:135)
at com.couchbase.client.core.node.locate.KeyValueLocator.calculateNodeId(KeyValueLocator.java:165)
at com.couchbase.client.core.node.locate.KeyValueLocator.locateForCouchbaseBucket(KeyValueLocator.java:124)
at com.couchbase.client.core.node.locate.KeyValueLocator.locateAndDispatch(KeyValueLocator.java:84)
at com.couchbase.client.core.RequestHandler.dispatchRequest(RequestHandler.java:219)
at com.couchbase.client.core.RequestHandler.onEvent(RequestHandler.java:176)
at com.couchbase.client.core.RequestHandler.onEvent(RequestHandler.java:71)
at com.couchbase.client.deps.com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:129)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at com.couchbase.client.deps.io.netty.util.concurrent.DefaultThreadFactory$DefaultRunnableDecorator.run(DefaultThreadFactory.java:137)
at java.lang.Thread.run(Thread.java:745)
[error] (run-main-0) java.lang.RuntimeException: java.util.concurrent.TimeoutException
java.lang.RuntimeException: java.util.concurrent.TimeoutException
at com.couchbase.client.java.util.Blocking.blockForSingle(Blocking.java:71)
at com.couchbase.client.java.CouchbaseBucket.upsert(CouchbaseBucket.java:354)
at com.couchbase.client.java.CouchbaseBucket.upsert(CouchbaseBucket.java:349)
at App$.main(Application.scala:28)
at App.main(Application.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
Caused by: java.util.concurrent.TimeoutException
at com.couchbase.client.java.util.Blocking.blockForSingle(Blocking.java:71)
at com.couchbase.client.java.CouchbaseBucket.upsert(CouchbaseBucket.java:354)
at com.couchbase.client.java.CouchbaseBucket.upsert(CouchbaseBucket.java:349)
at App$.main(Application.scala:28)
at App.main(Application.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
[trace] Stack trace suppressed: run last compile:run for the full output.
[cb-core-3-1] WARN com.couchbase.client.core.CouchbaseCore - Exception while Handling Response Events null
java.lang.InterruptedException
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.reportInterruptAfterWait(AbstractQueuedSynchronizer.java:2014)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2048)
at com.couchbase.client.deps.com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:45)
at com.couchbase.client.deps.com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
at com.couchbase.client.deps.com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:124)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at com.couchbase.client.deps.io.netty.util.concurrent.DefaultThreadFactory$DefaultRunnableDecorator.run(DefaultThreadFactory.java:137)
at java.lang.Thread.run(Thread.java:745)
[cb-core-3-2] WARN com.couchbase.client.core.CouchbaseCore - Exception while Handling Request Events RequestEvent{request=null}
java.lang.InterruptedException
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.reportInterruptAfterWait(AbstractQueuedSynchronizer.java:2014)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2048)
at com.couchbase.client.deps.com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:45)
at com.couchbase.client.deps.com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
at com.couchbase.client.deps.com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:124)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at com.couchbase.client.deps.io.netty.util.concurrent.DefaultThreadFactory$DefaultRunnableDecorator.run(DefaultThreadFactory.java:137)
at java.lang.Thread.run(Thread.java:745)
java.lang.RuntimeException: Nonzero exit code: 1
at scala.sys.package$.error(package.scala:27)
And this is the code that generated the error:
import com.couchbase.client.java._
import com.couchbase.client.core.time._
import com.couchbase.client.java.document._
import com.couchbase.client.java.document.json._
import com.couchbase.client.java.query._
import com.couchbase.client.java.env.DefaultCouchbaseEnvironment

object App {
  def main(args: Array[String]): Unit = {
    // Initialize the connection (connects to localhost)
    val env = DefaultCouchbaseEnvironment.builder()
      .connectTimeout(5000)
      .bootstrapCarrierEnabled(false)
      .build()
    val cluster = CouchbaseCluster.create(env, "127.0.0.1")
    // Opens the "default" bucket
    val bucket = cluster.openBucket("default")

    // Create a JSON document
    val user: JsonObject = JsonObject.create()
      .put("firstname", "Walter")
      .put("lastname", "White")
      .put("job", "chemistry teacher")
      .put("age", 50)
    val stored: JsonDocument = bucket.upsert(JsonDocument.create("walter", user))

    // Load the document and print its content and metadata
    println(bucket.get("walter"))

    // Close the bucket and disconnect from the cluster
    bucket.close()
    cluster.disconnect()
  }
}
EDIT:
I am a little new to this, but here is what I managed to get out of the debugger:
this = {DefaultCouchbaseBucketConfig#2385} "DefaultCouchbaseBucketConfig{name='testBucket', locator=VBUCKET, uri='/pools/default/buckets/testBucket?bucket_uuid=54c1356c57dea1d640837c678f87d5e4', streamingUri='/pools/default/bucketsStreaming/testBucket?bucket_uuid=54c1356c57dea1d640837c678f87d5e4', nodeInfo=[NodeInfo{, hostname=localhost/127.0.0.1, configPort=0, directServices={CONFIG=8091, QUERY=8093, VIEW=8092, BINARY=11210}, sslServices={CONFIG=18091, QUERY=18093, VIEW=18092, BINARY=11207}}], partitionInfo=PartitionInfo{numberOfReplicas=1, partitionHosts=[localhost], partitions=[], tainted=false}, tainted=false, rev=23}"
partitionInfo = {CouchbasePartitionInfo#2391} "PartitionInfo{numberOfReplicas=1, partitionHosts=[localhost], partitions=[], tainted=false}"
numberOfReplicas = 1
partitionHosts = {String[1]#2428}
partitions = {ArrayList#2390} size = 0
forwardPartitions = null
tainted = false
partitionHosts = {ArrayList#2396} size = 1
0 = {DefaultNodeInfo#2425} "NodeInfo{, hostname=localhost/127.0.0.1, configPort=8091, directServices={CONFIG=8091, BINARY=11210, VIEW=8092}, sslServices={}}"
nodesWithPrimaryPartitions = {HashSet#2397} size = 0
tainted = false
rev = 23
name = "testBucket"
value = {char[10]#2422}
hash = 1241531676
password = ""
value = {char[0]#2421}
hash = 0
locator = {BucketNodeLocator#2400} "VBUCKET"
name = "VBUCKET"
ordinal = 0
uri = "/pools/default/buckets/testBucket?bucket_uuid=54c1356c57dea1d640837c678f87d5e4"
value = {char[78]#2411}
hash = 0
streamingUri = "/pools/default/bucketsStreaming/testBucket?bucket_uuid=54c1356c57dea1d640837c678f87d5e4"
value = {char[87]#2420}
hash = 0
nodeInfo = {ArrayList#2403} size = 1
0 = {DefaultNodeInfo#2408} "NodeInfo{, hostname=localhost/127.0.0.1, configPort=0, directServices={CONFIG=8091, QUERY=8093, VIEW=8092, BINARY=11210}, sslServices={CONFIG=18091, QUERY=18093, VIEW=18092, BINARY=11207}}"
enabledServices = 15
partition = 7620
useFastForward = false
EDIT 2:
I had a look at the Couchbase console log and I am constantly getting the following:
Service 'memcached' exited with status 1. Restarting. Messages: Failed to open library "/Users/luishreis/Downloads/couchbase-server-enterprise_4/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/stdin_term_handler.so": dlopen(/Users/luishreis/Downloads/couchbase-server-enterprise_4/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/stdin_term_handler.dylib, 6): image not found
Unable to load extension /Users/luishreis/Downloads/couchbase-server-enterprise_4/Couchbase Server.app/Contents/Resources/couchbase-core/lib/memcached/stdin_term_handler.so using the config
Any help on the matter would be appreciated.
Many thanks.

Javassist throws javassist.CannotCompileException when instrumenting the setString method of org.h2.jdbc.JdbcPreparedStatement

I'm trying to instrument JDBC methods while a server is running. I have tried instrumenting the setString and setInt methods and the executeQuery method while a simple MySQL query runs, as given in the JDBC examples. It works totally fine when I instrument the setString method by injecting the following line:
private void injectSetVariableMethods(CtMethod method) {
    if (isInEnum(method.getName().toUpperCase(), SetMethods.class)) {
        try {
            method.insertAt(1, true,
                    "javaagent.JDBCPublisher.fillArrayList(String.valueOf($2), " +
                    "Thread.currentThread().getStackTrace()[1].getMethodName().toUpperCase());"
            );
        } catch (CannotCompileException e) {
            e.printStackTrace();
        }
    }
}
But now, when I run it against the server, which uses H2, it throws the following exception:
javassist.CannotCompileException: by javassist.bytecode.BadBytecode: setString (ILjava/lang/String;)V in org.h2.jdbc.JdbcPreparedStatement: failed to resolve types
at javassist.CtBehavior.insertAt(CtBehavior.java:1210)
at javaagent.JDBCTransformer.injectSetVariableMethods(JDBCClassTransformer.java:212)
at javaagent.JDBCTransformer.transform(JDBCClassTransformer.java:99)
at sun.instrument.TransformerManager.transform(TransformerManager.java:188)
at sun.instrument.InstrumentationImpl.transform(InstrumentationImpl.java:424)
at java.lang.ClassLoader.defineClass1(Native Method)
at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
at org.eclipse.osgi.internal.baseadaptor.DefaultClassLoader.defineClass(DefaultClassLoader.java:188)
at org.eclipse.osgi.baseadaptor.loader.ClasspathManager.defineClassHoldingLock(ClasspathManager.java:638)
at org.eclipse.osgi.baseadaptor.loader.ClasspathManager.defineClass(ClasspathManager.java:613)
at org.eclipse.osgi.baseadaptor.loader.ClasspathManager.findClassImpl(ClasspathManager.java:574)
at org.eclipse.osgi.baseadaptor.loader.ClasspathManager.findLocalClassImpl(ClasspathManager.java:492)
at org.eclipse.osgi.baseadaptor.loader.ClasspathManager.findLocalClass(ClasspathManager.java:465)
at org.eclipse.osgi.internal.baseadaptor.DefaultClassLoader.findLocalClass(DefaultClassLoader.java:216)
at org.eclipse.osgi.internal.loader.BundleLoader.findLocalClass(BundleLoader.java:395)
at org.eclipse.osgi.internal.loader.BundleLoader.findClassInternal(BundleLoader.java:464)
at org.eclipse.osgi.internal.loader.BundleLoader.findClass(BundleLoader.java:421)
at org.eclipse.osgi.internal.loader.BundleLoader.findClass(BundleLoader.java:412)
at org.eclipse.osgi.internal.baseadaptor.DefaultClassLoader.loadClass(DefaultClassLoader.java:107)
at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
at org.h2.jdbc.JdbcConnection.prepareStatement(JdbcConnection.java:234)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.tomcat.jdbc.pool.ProxyConnection.invoke(ProxyConnection.java:126)
at org.apache.tomcat.jdbc.pool.JdbcInterceptor.invoke(JdbcInterceptor.java:109)
at org.carbon.ndatasource.rdbms.ConnectionRollbackOnReturnInterceptor.invoke(ConnectionRollbackOnReturnInterceptor.java:51)
at org.apache.tomcat.jdbc.pool.JdbcInterceptor.invoke(JdbcInterceptor.java:109)
at org.apache.tomcat.jdbc.pool.interceptor.AbstractCreateStatementInterceptor.invoke(AbstractCreateStatementInterceptor.java:67)
at org.apache.tomcat.jdbc.pool.JdbcInterceptor.invoke(JdbcInterceptor.java:109)
at org.apache.tomcat.jdbc.pool.interceptor.ConnectionState.invoke(ConnectionState.java:153)
at org.apache.tomcat.jdbc.pool.JdbcInterceptor.invoke(JdbcInterceptor.java:109)
at org.apache.tomcat.jdbc.pool.TrapException.invoke(TrapException.java:41)
at org.apache.tomcat.jdbc.pool.JdbcInterceptor.invoke(JdbcInterceptor.java:109)
at org.apache.tomcat.jdbc.pool.DisposableConnectionFacade.invoke(DisposableConnectionFacade.java:80)
at com.sun.proxy.$Proxy18.prepareStatement(Unknown Source)
at org.carbon.user.core.claim.dao.ClaimDAO.getDialectCount(ClaimDAO.java:160)
at org.carbon.user.core.common.DefaultRealm.populateProfileAndClaimMaps(DefaultRealm.java:429)
at org.carbon.user.core.common.DefaultRealm.init(DefaultRealm.java:105)
at org.carbon.user.core.common.DefaultRealmService.initializeRealm(DefaultRealmService.java:230)
at org.wso2.carbon.user.core.common.DefaultRealmService.<init>(DefaultRealmService.java:96)
at org.wso2.carbon.user.core.common.DefaultRealmService.<init>(DefaultRealmService.java:109)
at org.carbon.user.core.internal.Activator.startDeploy(Activator.java:68)
at org.wso2.carbon.user.core.internal.BundleCheckActivator.start(BundleCheckActivator.java:61)
at org.eclipse.osgi.framework.internal.core.BundleContextImpl$1.run(BundleContextImpl.java:711)
at java.security.AccessController.doPrivileged(Native Method)
at org.eclipse.osgi.framework.internal.core.BundleContextImpl.startActivator(BundleContextImpl.java:702)
at org.eclipse.osgi.framework.internal.core.BundleContextImpl.start(BundleContextImpl.java:683)
at org.eclipse.osgi.framework.internal.core.BundleHost.startWorker(BundleHost.java:381)
at org.eclipse.osgi.framework.internal.core.AbstractBundle.resume(AbstractBundle.java:390)
at org.eclipse.osgi.framework.internal.core.Framework.resumeBundle(Framework.java:1176)
at org.eclipse.osgi.framework.internal.core.StartLevelManager.resumeBundles(StartLevelManager.java:559)
at org.eclipse.osgi.framework.internal.core.StartLevelManager.resumeBundles(StartLevelManager.java:544)
at org.eclipse.osgi.framework.internal.core.StartLevelManager.incFWSL(StartLevelManager.java:457)
at org.eclipse.osgi.framework.internal.core.StartLevelManager.doSetStartLevel(StartLevelManager.java:243)
at org.eclipse.osgi.framework.internal.core.StartLevelManager.dispatchEvent(StartLevelManager.java:438)
at org.eclipse.osgi.framework.internal.core.StartLevelManager.dispatchEvent(StartLevelManager.java:1)
at org.eclipse.osgi.framework.eventmgr.EventManager.dispatchEvent(EventManager.java:230)
at org.eclipse.osgi.framework.eventmgr.EventManager$EventThread.run(EventManager.java:340)
Caused by: javassist.bytecode.BadBytecode: setString (ILjava/lang/String;)V in org.h2.jdbc.JdbcPreparedStatement: failed to resolve types
at javassist.bytecode.stackmap.MapMaker.make(MapMaker.java:111)
at javassist.bytecode.MethodInfo.rebuildStackMap(MethodInfo.java:423)
at javassist.bytecode.MethodInfo.rebuildStackMapIf6(MethodInfo.java:405)
at javassist.CtBehavior.insertAt(CtBehavior.java:1200)
... 59 more
Caused by: javassist.bytecode.BadBytecode: failed to resolve types
at javassist.bytecode.stackmap.MapMaker.make(MapMaker.java:169)
at javassist.bytecode.stackmap.MapMaker.make(MapMaker.java:108)
... 62 more
Caused by: javassist.NotFoundException: org.h2.value.ValueNull
at javassist.ClassPool.get(ClassPool.java:450)
at javassist.bytecode.stackmap.TypeData$TypeVar.fixTypes2(TypeData.java:345)
at javassist.bytecode.stackmap.TypeData$TypeVar.fixTypes(TypeData.java:330)
at javassist.bytecode.stackmap.TypeData$TypeVar.dfs(TypeData.java:274)
at javassist.bytecode.stackmap.MapMaker.fixTypes(MapMaker.java:394)
at javassist.bytecode.stackmap.MapMaker.make(MapMaker.java:167)
... 63 more
What my fillArrayList method does is pass those values into an ArrayList, checking the method name and wrapping values set with setString in single quotes (''). It does seem to instrument the method at some point, because I am getting the rewritten queries with '?' replaced by the respective values (strings quoted and ints as-is). But once the server has started, it throws another set of exceptions, which also involve H2:
[2015-10-13 17:18:56,600] ERROR {org.carbon.registry.core.jdbc.dao.JDBCLogsDAO} - Failed to get logs. General error: "java.lang.IndexOutOfBoundsException: Index: 1, Size: 1" [50000-140]
org.h2.jdbc.JdbcSQLException: General error: "java.lang.IndexOutOfBoundsException: Index: 1, Size: 1" [50000-140]
at org.h2.message.DbException.getJdbcSQLException(DbException.java:327)
at org.h2.message.DbException.get(DbException.java:156)
at org.h2.message.DbException.convert(DbException.java:279)
at org.h2.message.DbException.toSQLException(DbException.java:252)
at org.h2.message.TraceObject.logAndConvert(TraceObject.java:386)
at org.h2.jdbc.JdbcPreparedStatement.executeQuery(JdbcPreparedStatement.java:104)
at org.carbon.registry.core.jdbc.dao.JDBCLogsDAO.internalGetLogs(JDBCLogsDAO.java:427)
at org.carbon.registry.core.jdbc.dao.JDBCLogsDAO.getLogList(JDBCLogsDAO.java:317)
at org.carbon.registry.core.jdbc.EmbeddedRegistry.getLogs(EmbeddedRegistry.java:2332)
at org.carbon.registry.core.caching.CacheBackedRegistry.getLogs(CacheBackedRegistry.java:402)
at org.carbon.registry.core.session.UserRegistry.getLogsInternal(UserRegistry.java:1806)
at org.carbon.registry.core.session.UserRegistry.access$3600(UserRegistry.java:60)
at org.carbon.registry.core.session.UserRegistry$37.run(UserRegistry.java:1777)
at org.carbon.registry.core.session.UserRegistry$37.run(UserRegistry.java:1774)
at java.security.AccessController.doPrivileged(Native Method)
at org.carbon.registry.core.session.UserRegistry.getLogs(UserRegistry.java:1774)
at org.carbon.registry.indexing.ResourceSubmitter.submitResource(ResourceSubmitter.java:119)
at org.carbon.registry.indexing.ResourceSubmitter.run(ResourceSubmitter.java:76)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:304)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:178)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.IndexOutOfBoundsException: Index: 1, Size: 1
at java.util.ArrayList.rangeCheck(ArrayList.java:635)
at java.util.ArrayList.get(ArrayList.java:411)
at javaagent.JDBCPublisher.getArrayList(JDBCAgentPublisher.java:151)
at javaagent.JDBCPublisher.modifyOriginalQuery(JDBCAgentPublisher.java:351)
at org.h2.jdbc.JdbcPreparedStatement.executeQuery(JdbcPreparedStatement.java:84)
... 19 more
[2015-10-13 17:18:56,601] WARN {org.carbon.registry.indexing.ResourceSubmitter} - An error occurred while submitting resources for indexing
org.carbon.registry.core.exceptions.RegistryException: Failed to get logs. General error: "java.lang.IndexOutOfBoundsException: Index: 1, Size: 1" [50000-140]
at org.carbon.registry.core.jdbc.dao.JDBCLogsDAO.internalGetLogs(JDBCLogsDAO.java:465)
at org.carbon.registry.core.jdbc.dao.JDBCLogsDAO.getLogList(JDBCLogsDAO.java:317)
at org.carbon.registry.core.jdbc.EmbeddedRegistry.getLogs(EmbeddedRegistry.java:2332)
at org.carbon.registry.core.caching.CacheBackedRegistry.getLogs(CacheBackedRegistry.java:402)
at org.carbon.registry.core.session.UserRegistry.getLogsInternal(UserRegistry.java:1806)
at org.carbon.registry.core.session.UserRegistry.access$3600(UserRegistry.java:60)
at org.wso2.carbon.registry.core.session.UserRegistry$37.run(UserRegistry.java:1777)
at org.carbon.registry.core.session.UserRegistry$37.run(UserRegistry.java:1774)
at java.security.AccessController.doPrivileged(Native Method)
at org.carbon.registry.core.session.UserRegistry.getLogs(UserRegistry.java:1774)
at org.carbon.registry.indexing.ResourceSubmitter.submitResource(ResourceSubmitter.java:119)
at org.carbon.registry.indexing.ResourceSubmitter.run(ResourceSubmitter.java:76)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:304)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:178)
at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
Caused by: org.h2.jdbc.JdbcSQLException: General error: "java.lang.IndexOutOfBoundsException: Index: 1, Size: 1" [50000-140]
at org.h2.message.DbException.getJdbcSQLException(DbException.java:327)
at org.h2.message.DbException.get(DbException.java:156)
at org.h2.message.DbException.convert(DbException.java:279)
at org.h2.message.DbException.toSQLException(DbException.java:252)
at org.h2.message.TraceObject.logAndConvert(TraceObject.java:386)
at org.h2.jdbc.JdbcPreparedStatement.executeQuery(JdbcPreparedStatement.java:104)
at org.wso2.carbon.registry.core.jdbc.dao.JDBCLogsDAO.internalGetLogs(JDBCLogsDAO.java:427)
... 18 more
Caused by: java.lang.IndexOutOfBoundsException: Index: 1, Size: 1
at java.util.ArrayList.rangeCheck(ArrayList.java:635)
at java.util.ArrayList.get(ArrayList.java:411)
at javaagent.JDBCPublisher.getArrayList(JDBCAgentPublisher.java:151)
at javaagent.JDBCPublisher.modifyOriginalQuery(JDBCAgentPublisher.java:351)
at org.h2.jdbc.JdbcPreparedStatement.executeQuery(JdbcPreparedStatement.java:84)
... 19 more
It repeatedly throws a bunch of IndexOutOfBoundsExceptions for a properly assigned SQL query. What could be causing this issue, and what should I do to correct it?
Find below a stripped-down example which prints the values of each invocation of the method org.h2.jdbc.JdbcPreparedStatement.setString(int, String).
The following directory structure and content is assumed:
./instrumented/
h2-1.4.186.jar
javassist-3.7.ga.jar
SetStringDemo.java
Execution of the example (the ; classpath separator is for Windows; use : on Unix-like systems):
javac -cp javassist-3.7.ga.jar;h2-1.4.186.jar SetStringDemo.java
java -cp .;instrumented/;javassist-3.7.ga.jar;h2-1.4.186.jar SetStringDemo
Output:
instrument class org.h2.jdbc.JdbcPreparedStatement
create test database and insert some rows
idx: 2 value: 'name 0'
idx: 2 value: 'name 1'
idx: 2 value: 'name 2'
idx: 2 value: 'name 3'
idx: 2 value: 'name 4'
So the problem is most probably in the way you instrument the class.
Code used for the example:
import java.nio.file.Files;
import java.nio.file.Paths;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import javassist.ClassPool;
import javassist.CtClass;
import javassist.CtMethod;

public class SetStringDemo {
    // exception handling left out for the PoC
    public static void main(String[] args) throws Exception {
        if (Files.deleteIfExists(
                Paths.get("instrumented/org/h2/jdbc/JdbcPreparedStatement.class")
        )) {
            System.out.println("previously instrumented class removed");
        }

        System.out.println("instrument class org.h2.jdbc.JdbcPreparedStatement");
        ClassPool pool = ClassPool.getDefault();
        CtClass clazz = pool.get("org.h2.jdbc.JdbcPreparedStatement");
        CtMethod method = clazz.getDeclaredMethod("setString");
        method.insertAt(1,
                "System.out.println(\"idx: \" + $1 + \" value: '\" + $2 + \"'\");"
        );
        clazz.writeFile("instrumented/");

        System.out.println("create test database and insert some rows");
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:", "sa", "")) {
            String createTable = "CREATE TABLE TEST_TABLE(ID INT, NAME VARCHAR(1024))";
            conn.createStatement().executeUpdate(createTable);
            String insertSql = "INSERT INTO TEST_TABLE VALUES(?, ?)";
            try (PreparedStatement insertStmnt = conn.prepareStatement(insertSql)) {
                for (int i = 0; i < 5; i++) {
                    insertStmnt.setInt(1, i);
                    insertStmnt.setString(2, "name " + i);
                    insertStmnt.executeUpdate();
                }
            }
        }
    }
}

java.lang.RuntimeException: java.net.SocketTimeoutException: Read timed out

I am developing a project using Eclipse Java EE, Maven, and GeoTools. This is the part of the code that I am going to talk about:
Map connectionParameters = new HashMap();
// Connection parameters
connectionParameters.put(WFSDataStoreFactory.URL.key, getCapabilities);
connectionParameters.put(WFSDataStoreFactory.PROTOCOL.key, false);
connectionParameters.put(WFSDataStoreFactory.LENIENT.key, true);
connectionParameters.put(WFSDataStoreFactory.MAXFEATURES.key, 30);
connectionParameters.put(WFSDataStoreFactory.TIMEOUT.key, 600000);

try { // The WFSDataStoreFactory dsf is already created before
    WFSDataStore dataStore = dsf.createDataStore(connectionParameters);
    // We get the source and then the features from it
    SimpleFeatureSource source = dataStore.getFeatureSource("gmgml_AREAOBRA");
    FeatureCollection<SimpleFeatureType, SimpleFeature> fc = source.getFeatures();
    // We try to go one by one and print to see if it really exists
    while (fc.features().hasNext()) {
        SimpleFeature sf = fc.features().next();
        System.out.println(sf.getAttribute("IDOBRA")); // It crashes
    }
}
The thing is, I have read every post about the following error it gives me after crashing:
> SEVERE: Failed to execute request http://mapa20.ewise.es/WFS_EGIOS_SITUATIONROOM/service.svc/get?TYPENAME=gmgml%3AAREAOBRA&REQUEST=GetFeature&OUTPUTFORMAT=text%2Fxml%3B+subtype%3Dgml%2F3.1.1&VERSION=1.1.0&SERVICE=WFS
java.lang.RuntimeException: java.net.SocketTimeoutException: Read timed out
at org.geotools.data.store.ContentFeatureCollection.features(ContentFeatureCollection.java:176)
at org.geotools.data.store.ContentFeatureCollection.features(ContentFeatureCollection.java:58)
at com.sitep.imi.acefat.server.daemon.InsertarBBDDDaemon.dataAccess(InsertarBBDDDaemon.java:229)
at com.sitep.imi.acefat.server.daemon.InsertarBBDDDaemon.insertData(InsertarBBDDDaemon.java:98)
at com.sitep.imi.acefat.App.main(App.java:17)
Caused by: java.net.SocketTimeoutException: Read timed out
at java.net.SocketInputStream.socketRead0(Native Method)
at java.net.SocketInputStream.socketRead(SocketInputStream.java:116)
at java.net.SocketInputStream.read(SocketInputStream.java:170)
at java.net.SocketInputStream.read(SocketInputStream.java:141)
at java.io.BufferedInputStream.fill(BufferedInputStream.java:246)
at java.io.BufferedInputStream.read1(BufferedInputStream.java:286)
at java.io.BufferedInputStream.read(BufferedInputStream.java:345)
at sun.net.www.http.HttpClient.parseHTTPHeader(HttpClient.java:704)
at sun.net.www.http.HttpClient.parseHTTP(HttpClient.java:647)
at sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1535)
at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1440)
at org.geotools.data.ows.SimpleHttpClient$SimpleHTTPResponse.<init>(SimpleHttpClient.java:171)
at org.geotools.data.ows.SimpleHttpClient.get(SimpleHttpClient.java:102)
at org.geotools.data.ows.AbstractOpenWebService.internalIssueRequest(AbstractOpenWebService.java:426)
at org.geotools.data.wfs.internal.WFSClient.internalIssueRequest(WFSClient.java:286)
at org.geotools.data.wfs.internal.WFSClient.issueRequest(WFSClient.java:326)
at org.geotools.data.wfs.WFSFeatureSource.getReaderInternal(WFSFeatureSource.java:256)
at org.geotools.data.store.ContentFeatureSource.getReader(ContentFeatureSource.java:634)
at org.geotools.data.store.ContentFeatureCollection.features(ContentFeatureCollection.java:173)
But I cannot find a specific answer for my problem.
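One thing that stands out in the loop above: every call to fc.features() opens a new FeatureIterator, so hasNext() and next() never operate on the same iterator, and each call can trigger a fresh WFS request. A minimal sketch of the usual GeoTools pattern, obtaining the iterator once and closing it (assumes the same fc as in the question, plus an import of org.geotools.feature.FeatureIterator):

// Sketch: iterate the collection with a single iterator and close it when done.
FeatureIterator<SimpleFeature> features = fc.features();
try {
    while (features.hasNext()) {
        SimpleFeature sf = features.next();
        System.out.println(sf.getAttribute("IDOBRA"));
    }
} finally {
    features.close(); // WFS-backed iterators hold network resources
}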
