QUARTZ Job Scheduler - JobListener issue [JAVA]

I just set up QUARTZ to use with our enterprise applications. The following code snippets are just examples and not taken from the real web applications.
My Trigger/Scheduler class looks like this:
import javax.annotation.PostConstruct;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.SchedulerFactory;
import org.quartz.impl.StdSchedulerFactory;
import org.quartz.impl.matchers.GroupMatcher;
public class TriggerXML {
@PostConstruct
public static void main(String[] args) throws SchedulerException {
SchedulerFactory factory = new StdSchedulerFactory();
Scheduler scheduler = factory.getScheduler();
scheduler.getListenerManager().addJobListener(new HelloJobListener(),
GroupMatcher.jobGroupEquals("fg_jobgroup_01"));
scheduler.start();
}
}
My Listener class looks like this:
import org.apache.log4j.Logger;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.quartz.JobListener;
public class HelloJobListener implements JobListener {
public static final String LISTENER_NAME = "HELLO JOB LISTENER";
private static final Logger log = Logger.getLogger(HelloJobListener.class);
@Override
public String getName() {
return LISTENER_NAME;
}
@Override
public void jobToBeExecuted(JobExecutionContext context) {
String jobName = context.getJobDetail().getKey().toString();
log.info("###############################################");
log.info("JOB IS STARTING");
log.info("Job: " + jobName);
log.info("###############################################");
}
@Override
public void jobExecutionVetoed(JobExecutionContext context) {
log.info("###############################################");
log.info("JOB EXECUTION VETOED");
log.info("###############################################");
}
@Override
public void jobWasExecuted(JobExecutionContext context,
JobExecutionException jobException) {
String jobName = context.getJobDetail().getKey().toString();
log.info("###############################################");
log.info("JOB WAS EXECUTED");
log.info("Job: " + jobName);
log.info("###############################################");
if (!jobException.getMessage().equals("")) {
log.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
log.info("Exception thrown by: " + jobName);
log.info("Exception: " + jobException.getMessage());
log.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
}
}
}
The quartz.properties config is the following:
# Basic config
org.quartz.scheduler.instanceName = DBClusteredScheduler
org.quartz.scheduler.instanceId = AUTO
org.quartz.scheduler.skipUpdateCheck = true
org.quartz.scheduler.jobFactory.class = org.quartz.simpl.SimpleJobFactory
# Thread Pool config
org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.threadCount = 3
# DataSource config
org.quartz.dataSource.quartzDataSource.driver = com.mysql.jdbc.Driver
org.quartz.dataSource.quartzDataSource.URL = jdbc:mysql://localhost:3306/quartz
org.quartz.dataSource.quartzDataSource.user = <user>
org.quartz.dataSource.quartzDataSource.password = <password>
org.quartz.dataSource.quartzDataSource.maxConnections = 8
# Database config for MySQL JDBC connection
org.quartz.jobStore.dataSource = quartzDataSource
org.quartz.jobStore.tablePrefix = QRTZ_
org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
org.quartz.jobStore.isClustered = true
# Load configuration for each trigger
org.quartz.plugin.jobInitializer.class = org.quartz.plugins.xml.XMLSchedulingDataProcessorPlugin
org.quartz.plugin.jobInitializer.fileNames = quartz-config.xml
And finally, my quartz-config.xml:
<?xml version="1.0" encoding="UTF-8"?>
<job-scheduling-data
xmlns="http://www.quartz-scheduler.org/xml/JobSchedulingData"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.quartz-scheduler.org/xml/JobSchedulingData
http://www.quartz-scheduler.org/xml/job_scheduling_data_2_0.xsd"
version="1.8">
<!-- JOB 1 CONFIGURATION -->
<schedule>
<job>
<name>job01</name>
<group>fg_jobgroup_01</group>
<description></description>
<job-class>Job01</job-class>
</job>
<trigger>
<cron>
<name>Job01_TRIGGER</name>
<group>PROCESS_LEAD_TRIGGER_GROUP</group>
<job-name>job01</job-name>
<job-group>fg_jobgroup_01</job-group>
<cron-expression>0/20 * * * * ?</cron-expression>
</cron>
</trigger>
</schedule>
<!-- JOB 2 CONFIGURATION -->
<schedule>
<job>
<name>job02</name>
<group>fg_jobgroup_01</group>
<description></description>
<job-class>Job02</job-class>
</job>
<trigger>
<cron>
<name>Job02_TRIGGER</name>
<group>PROCESS_LEAD_TRIGGER_GROUP</group>
<job-name>job02</job-name>
<job-group>fg_jobgroup_01</job-group>
<cron-expression>15 0/2 * * * ?</cron-expression>
</cron>
</trigger>
</schedule>
</job-scheduling-data>
When I run the program (TriggerXML.java), it gives me the following console output:
2015-01-05 15:04:40,224 INFO (org.quartz.xml.XMLSchedulingDataProcessor.java:471).processFile - Parsing XML file: quartz-config.xml with systemId: quartz-config.xml
2015-01-05 15:04:40,443 INFO (org.quartz.xml.XMLSchedulingDataProcessor.java:996).scheduleJobs - Adding 2 jobs, 2 triggers.
2015-01-05 15:04:40,447 INFO (org.quartz.xml.XMLSchedulingDataProcessor.java:1032).scheduleJobs - Replacing job: fg_jobgroup_01.job01
2015-01-05 15:04:40,505 INFO (org.quartz.xml.XMLSchedulingDataProcessor.java:1032).scheduleJobs - Replacing job: fg_jobgroup_01.job02
2015-01-05 15:04:40,737 INFO (org.quartz.core.QuartzScheduler.java:575).start - Scheduler DBClusteredScheduler_$_US-HB-PC-0011420499079608 started.
2015-01-05 15:04:40,856 INFO (com.freightgate.quartz.listener.HelloJobListener.java:21).jobToBeExecuted - ###############################################
2015-01-05 15:04:40,857 INFO (com.freightgate.quartz.listener.HelloJobListener.java:22).jobToBeExecuted - JOB IS STARTING
2015-01-05 15:04:40,857 INFO (com.freightgate.quartz.listener.HelloJobListener.java:23).jobToBeExecuted - Job: fg_jobgroup_01.job01
2015-01-05 15:04:40,857 INFO (com.freightgate.quartz.listener.HelloJobListener.java:24).jobToBeExecuted - ###############################################
2015-01-05 15:04:40,857 INFO (com.freightgate.quartz.jobs.Job01.java:16).execute - #################################
2015-01-05 15:04:40,857 INFO (com.freightgate.quartz.jobs.Job01.java:17).execute - ############ TEST 01 ############
2015-01-05 15:04:40,857 INFO (com.freightgate.quartz.jobs.Job01.java:18).execute - #################################
2015-01-05 15:04:40,858 INFO (com.freightgate.quartz.listener.HelloJobListener.java:41).jobWasExecuted - ###############################################
2015-01-05 15:04:40,858 INFO (com.freightgate.quartz.listener.HelloJobListener.java:42).jobWasExecuted - JOB WAS EXECUTED
2015-01-05 15:04:40,858 INFO (com.freightgate.quartz.listener.HelloJobListener.java:43).jobWasExecuted - Job: fg_jobgroup_01.job01
2015-01-05 15:04:40,858 INFO (com.freightgate.quartz.listener.HelloJobListener.java:44).jobWasExecuted - ###############################################
2015-01-05 15:04:40,859 ERROR (org.quartz.core.ErrorLogger.java:2425).schedulerError - Unable to notify JobListener(s) of Job that was executed: (error will be ignored). trigger= PROCESS_LEAD_TRIGGER_GROUP.Job01_TRIGGER job= fg_jobgroup_01.job01
org.quartz.SchedulerException: JobListener 'HELLO JOB LISTENER' threw exception: null [See nested exception: java.lang.NullPointerException]
at org.quartz.core.QuartzScheduler.notifyJobListenersWasExecuted(QuartzScheduler.java:1987)
at org.quartz.core.JobRunShell.notifyJobListenersComplete(JobRunShell.java:340)
at org.quartz.core.JobRunShell.run(JobRunShell.java:224)
at org.quartz.simpl.SimpleThreadPool$WorkerThread.run(SimpleThreadPool.java:573)
Caused by: java.lang.NullPointerException
at com.freightgate.quartz.listener.HelloJobListener.jobWasExecuted(HelloJobListener.java:46)
at org.quartz.core.QuartzScheduler.notifyJobListenersWasExecuted(QuartzScheduler.java:1985)
What I don't get is why the output I set within the listener class is written to the log, but it still throws that exception. I did a lot of Google searching; this doesn't seem to be well documented. Also, I haven't found out how to set up a listener in the XML config.
Any help is much appreciated!
Best regards!

You are obviously getting a NullPointerException, and even though I don't see the line numbers, this is likely the cause:
if (!jobException.getMessage().equals("")) {
log.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
log.info("Exception thrown by: " + jobName);
log.info("Exception: " + jobException.getMessage());
log.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
}
If the job executed successfully, the JobExecutionException will be null, so you need to check it like this:
if (jobException != null) {
// Job failed
}
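Putting that together, a corrected version of the callback, as a minimal sketch based on the listener you posted, could look like this:

@Override
public void jobWasExecuted(JobExecutionContext context,
        JobExecutionException jobException) {
    String jobName = context.getJobDetail().getKey().toString();
    log.info("JOB WAS EXECUTED");
    log.info("Job: " + jobName);
    // jobException is null when the job completed without throwing,
    // so guard on the reference before calling getMessage() on it
    if (jobException != null) {
        log.info("Exception thrown by: " + jobName);
        log.info("Exception: " + jobException.getMessage());
    }
}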

Related

Quartz Scheduler NOT STARTED

I am creating a Spring Boot application that is using Quartz. This is my quartz.yaml file:
org:
  quartz:
    dataSource:
      mySql:
        maxIdleTime: '60'
        idleConnectionValidationSeconds: '50'
        password: test2
        user: test2
        URL: jdbc:mysql://localhost:3306/schedules?useSSL=false
        driver: com.mysql.jdbc.Driver
        maxConnections: '10'
        validationQuery: select 0 from dual
    plugin:
      jobHistory:
        class: org.quartz.plugins.history.LoggingJobHistoryPlugin
        jobToBeFiredMessage: 'Job [{1}.{0}] to be fired by trigger [{4}.{3}], re-fire: {7}'
        jobFailedMessage: 'Job [{1}.{0}] execution failed with exception: {8}'
        jobWasVetoedMessage: 'Job [{1}.{0}] was vetoed. It was to be fired by trigger [{4}.{3}] at: {2, date, dd-MM-yyyy HH:mm:ss.SSS}'
        jobSuccessMessage: 'Job [{1}.{0}] execution complete and reports: {8}'
      triggerHistory:
        class: org.quartz.plugins.history.LoggingTriggerHistoryPlugin
        triggerFiredMessage: 'Trigger [{1}.{0}] fired job [{6}.{5}] scheduled at: {2, date, dd-MM-yyyy HH:mm:ss.SSS}, next scheduled at: {3, date, dd-MM-yyyy HH:mm:ss.SSS}'
        triggerCompleteMessage: 'Trigger [{1}.{0}] completed firing job [{6}.{5}] with resulting trigger instruction code: {9}. Next scheduled at: {3, date, dd-MM-yyyy HH:mm:ss.SSS}'
        triggerMisfiredMessage: 'Trigger [{1}.{0}] misfired job [{6}.{5}]. Should have fired at: {3, date, dd-MM-yyyy HH:mm:ss.SSS}'
    jobStore:
      maxMisfiresToHandleAtATime: '10'
      dataSource: mySql
      isClustered: 'false'
      class: org.quartz.impl.jdbcjobstore.JobStoreTX
      useProperties: 'true'
      misfireThreshold: '60000'
      driverDelegateClass: org.quartz.impl.jdbcjobstore.StdJDBCDelegate
      tablePrefix: QRTZ_
    threadPool:
      threadPriority: '5'
      class: org.quartz.simpl.SimpleThreadPool
      threadCount: '4'
    scheduler:
      instanceId: AUTO
      instanceName: SampleJobScheduler
      idleWaitTime: '10000'
I am trying to use a SQL database, but it uses RAM instead. This is the error I am getting:
Scheduler class: 'org.quartz.core.QuartzScheduler' - running
locally. NOT STARTED. Currently in standby mode. Number of jobs
executed: 0 Using thread pool 'org.quartz.simpl.SimpleThreadPool' -
with 10 threads. Using job-store 'org.quartz.simpl.RAMJobStore' -
which does not support persistence. and is not clustered.
This is my configuration:
@Configuration
public class Config {
@Value("${library.file-path.quartz}")
Resource quartsPath;
@Autowired private ApplicationContext applicationContext;
@Bean
public SchedulerFactoryBean scheduler(JobFactory factory) throws IOException {
SchedulerFactoryBean schedulerFactory = new SchedulerFactoryBean();
schedulerFactory.setQuartzProperties(quartzProperties());
schedulerFactory.setJobFactory(factory);
return schedulerFactory;
}
@Bean
public SpringBeanJobFactory springBeanJobFactory() {
AutoWiringSpringBeanJobFactory jobFactory = new AutoWiringSpringBeanJobFactory();
jobFactory.setApplicationContext(applicationContext);
return jobFactory;
}
public Properties quartzProperties() throws IOException {
PropertiesFactoryBean propertiesFactoryBean = new PropertiesFactoryBean();
propertiesFactoryBean.setLocation(quartsPath);
propertiesFactoryBean.afterPropertiesSet();
return propertiesFactoryBean.getObject();
}
}
I got it working by setting the DataSource in the SchedulerFactoryBean.
dataSource is the DataSource used by the application, injected by Spring in this configuration class:
@Bean
public SchedulerFactoryBean schedulerFactoryBean() throws IOException {
SchedulerFactoryBean factory = new SchedulerFactoryBean();
factory.setJobFactory(springBeanJobFactory());
factory.setQuartzProperties(quartzProperties());
factory.setDataSource(dataSource);
return factory;
}
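For reference, a minimal sketch of how that dataSource could be made available in the configuration class (the field injection shown here is an assumption; constructor injection works just as well):

@Autowired
private DataSource dataSource; // the application's javax.sql.DataSource, auto-configured by Spring Boot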

Spring Batch with multi-step Spring Cloud Task (PartitionHandler) for Remote Partition

Latest update (with an image to hopefully simplify the problem; thanks for feedback from @Mahmoud)
Related issue reports for reference (after this original post was created, it seems someone filed issues against Spring Cloud for a similar problem, so updates are posted there too):
https://github.com/spring-cloud/spring-cloud-task/issues/793 relates to approach #1
https://github.com/spring-cloud/spring-cloud-task/issues/792 relates to approach #2
I also found a workaround for that issue and updated the GitHub issue; I will update this post once the workaround is confirmed good by the developers:
https://github.com/spring-cloud/spring-cloud-task/issues/793#issuecomment-894617929
I am developing an application involving a multi-step Spring Batch job but hit a roadblock. I tried researching the docs and made different attempts, with no success, so I thought I'd check whether the community can shed some light.
Spring Batch job 1 (receives job parameters with the setting for step 1 / setting for step 2)
Step 1 -> remote partition (partition handler (cpu/memory for step 1 + grid) + partitioner) with the setting from step 1 (job configuration or step configuration)
Step 2 -> remote partition (partition handler (cpu/memory for step 2 + grid) + partitioner) with the setting from step 2 (job configuration or step configuration, different from step 1)
The reason is that we want different steps to have different k8s settings (like cpu/memory/grid).
Attempts:
1. Create two partition handlers (partitionHandlerReader + partitionHandlerProcessor) and their corresponding launchers (LauncherReader + LauncherProcessor).
The complete project can be found at
https://github.com/danilko/spring-batch-remote-k8s-paritition-example/tree/attempt_1_two_partitionhandlers
The main configuration, simplified into one class, is at
https://github.com/danilko/spring-batch-remote-k8s-paritition-example/blob/attempt_1_two_partitionhandlers/src/main/java/com/example/batchprocessing/BatchConfiguration.java
2. Use one PartitionHandler + one TaskLauncher, but with @StepScope for late binding, to change dynamically based on the step and job setup.
The complete project can be found at
https://github.com/danilko/spring-batch-remote-k8s-paritition-example/tree/attempt_2_partitionhandler_with_stepscope
The main configuration, simplified into one class, is at
https://github.com/danilko/spring-batch-remote-k8s-paritition-example/blob/attempt_2_partitionhandler_with_stepscope/src/main/java/com/example/batchprocessing/BatchConfiguration.java
Both attempts produce the following result (full trace in the repos above):
When the job is triggered, it errors out (it seems to pass initial startup, but fails during execution).
The error below only occurs when there are multiple PartitionHandlers, or when that bean is at @StepScope or @JobScope:
java.lang.NullPointerException: null
at org.springframework.cloud.task.batch.partition.DeployerPartitionHandler.launchWorker(DeployerPartitionHandler.java:347) ~[spring-cloud-task-batch-2.3.1-SNAPSHOT.jar!/:2.3.1-SNAPSHOT]
at org.springframework.cloud.task.batch.partition.DeployerPartitionHandler.launchWorkers(DeployerPartitionHandler.java:313) ~[spring-cloud-task-batch-2.3.1-SNAPSHOT.jar!/:2.3.1-SNAPSHOT]
at org.springframework.cloud.task.batch.partition.DeployerPartitionHandler.handle(DeployerPartitionHandler.java:302) ~[spring-cloud-task-batch-2.3.1-SNAPSHOT.jar!/:2.3.1-SNAPSHOT]
Full Log
. ____ _ __ _ _
/\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
\\/ ___)| |_)| | | | | || (_| | ) ) ) )
' |____| .__|_| |_|_| |_\__, | / / / /
=========|_|==============|___/=/_/_/_/
:: Spring Boot :: (v2.4.6)
2021-08-06 11:24:29.242 INFO 90294 --- [ main] c.e.b.BatchProcessingApplication : Starting BatchProcessingApplication v0.0.1-SNAPSHOT using Java 11.0.7 on localhost.localdomain with PID 90294 (/home/danilko/IdeaProjects/partition/target/batchprocessing-0.0.1-SNAPSHOT.jar started by danilko in /home/danilko/IdeaProjects/partition)
2021-08-06 11:24:29.244 INFO 90294 --- [ main] c.e.b.BatchProcessingApplication : The following profiles are active: controller
2021-08-06 11:24:29.790 INFO 90294 --- [ main] faultConfiguringBeanFactoryPostProcessor : No bean named 'errorChannel' has been explicitly defined. Therefore, a default PublishSubscribeChannel will be created.
2021-08-06 11:24:29.794 INFO 90294 --- [ main] faultConfiguringBeanFactoryPostProcessor : No bean named 'taskScheduler' has been explicitly defined. Therefore, a default ThreadPoolTaskScheduler will be created.
2021-08-06 11:24:29.797 INFO 90294 --- [ main] faultConfiguringBeanFactoryPostProcessor : No bean named 'integrationHeaderChannelRegistry' has been explicitly defined. Therefore, a default DefaultHeaderChannelRegistry will be created.
2021-08-06 11:24:29.833 INFO 90294 --- [ main] trationDelegate$BeanPostProcessorChecker : Bean 'org.springframework.integration.config.IntegrationManagementConfiguration' of type [org.springframework.integration.config.IntegrationManagementConfiguration] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying)
2021-08-06 11:24:29.947 INFO 90294 --- [ main] trationDelegate$BeanPostProcessorChecker : Bean 'integrationChannelResolver' of type [org.springframework.integration.support.channel.BeanFactoryChannelResolver] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying)
2021-08-06 11:24:29.947 INFO 90294 --- [ main] trationDelegate$BeanPostProcessorChecker : Bean 'integrationDisposableAutoCreatedBeans' of type [org.springframework.integration.config.annotation.Disposables] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying)
2021-08-06 11:24:29.959 INFO 90294 --- [ main] trationDelegate$BeanPostProcessorChecker : Bean 'org.springframework.cloud.task.batch.configuration.TaskBatchAutoConfiguration' of type [org.springframework.cloud.task.batch.configuration.TaskBatchAutoConfiguration$$EnhancerBySpringCGLIB$$83e6c2be] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying)
2021-08-06 11:24:29.968 INFO 90294 --- [ main] trationDelegate$BeanPostProcessorChecker : Bean 'org.springframework.cloud.task.batch.listener.BatchEventAutoConfiguration' of type [org.springframework.cloud.task.batch.listener.BatchEventAutoConfiguration$$EnhancerBySpringCGLIB$$cc3cccc1] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying)
2021-08-06 11:24:30.093 INFO 90294 --- [ main] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Starting...
2021-08-06 11:24:30.160 INFO 90294 --- [ main] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Start completed.
2021-08-06 11:24:30.724 INFO 90294 --- [ main] o.s.b.c.r.s.JobRepositoryFactoryBean : No database type set, using meta data indicating: MYSQL
2021-08-06 11:24:30.736 INFO 90294 --- [ main] o.s.b.c.l.support.SimpleJobLauncher : No TaskExecutor has been set, defaulting to synchronous executor.
2021-08-06 11:24:30.897 INFO 90294 --- [ main] o.s.i.endpoint.EventDrivenConsumer : Adding {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel
2021-08-06 11:24:30.897 INFO 90294 --- [ main] o.s.i.channel.PublishSubscribeChannel : Channel 'application.errorChannel' has 1 subscriber(s).
2021-08-06 11:24:30.897 INFO 90294 --- [ main] o.s.i.endpoint.EventDrivenConsumer : started bean '_org.springframework.integration.errorLogger'
2021-08-06 11:24:30.974 INFO 90294 --- [ main] c.e.b.BatchProcessingApplication : Started BatchProcessingApplication in 2.024 seconds (JVM running for 2.366)
2021-08-06 11:24:30.975 INFO 90294 --- [ main] o.s.b.a.b.JobLauncherApplicationRunner : Running default command line with: []
2021-08-06 11:24:31.010 INFO 90294 --- [ main] o.s.b.c.l.support.SimpleJobLauncher : Job: [SimpleJob: [name=partitionedJob-1538890488]] launched with the following parameters: [{}]
Set readerGridSize == 1
2021-08-06 11:24:31.020 INFO 90294 --- [ main] o.s.c.t.b.l.TaskBatchExecutionListener : The job execution id 22 was run within the task execution 54
2021-08-06 11:24:31.046 INFO 90294 --- [ main] o.s.batch.core.job.SimpleStepHandler : Executing step: [partitionReaderStep]
2021-08-06 11:24:31.101 ERROR 90294 --- [ main] o.s.batch.core.step.AbstractStep : Encountered an error executing step partitionReaderStep in job partitionedJob-1538890488
java.lang.NullPointerException: null
at org.springframework.cloud.task.batch.partition.DeployerPartitionHandler.launchWorker(DeployerPartitionHandler.java:347) ~[spring-cloud-task-batch-2.3.1-SNAPSHOT.jar!/:2.3.1-SNAPSHOT]
at org.springframework.cloud.task.batch.partition.DeployerPartitionHandler.launchWorkers(DeployerPartitionHandler.java:313) ~[spring-cloud-task-batch-2.3.1-SNAPSHOT.jar!/:2.3.1-SNAPSHOT]
at org.springframework.cloud.task.batch.partition.DeployerPartitionHandler.handle(DeployerPartitionHandler.java:302) ~[spring-cloud-task-batch-2.3.1-SNAPSHOT.jar!/:2.3.1-SNAPSHOT]
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:na]
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[na:na]
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:na]
at java.base/java.lang.reflect.Method.invoke(Method.java:566) ~[na:na]
at org.springframework.aop.support.AopUtils.invokeJoinpointUsingReflection(AopUtils.java:344) ~[spring-aop-5.3.7.jar!/:5.3.7]
at org.springframework.aop.framework.ReflectiveMethodInvocation.invokeJoinpoint(ReflectiveMethodInvocation.java:198) ~[spring-aop-5.3.7.jar!/:5.3.7]
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) ~[spring-aop-5.3.7.jar!/:5.3.7]
at org.springframework.aop.support.DelegatingIntroductionInterceptor.doProceed(DelegatingIntroductionInterceptor.java:137) ~[spring-aop-5.3.7.jar!/:5.3.7]
at org.springframework.aop.support.DelegatingIntroductionInterceptor.invoke(DelegatingIntroductionInterceptor.java:124) ~[spring-aop-5.3.7.jar!/:5.3.7]
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) ~[spring-aop-5.3.7.jar!/:5.3.7]
at org.springframework.aop.framework.JdkDynamicAopProxy.invoke(JdkDynamicAopProxy.java:215) ~[spring-aop-5.3.7.jar!/:5.3.7]
at com.sun.proxy.$Proxy65.handle(Unknown Source) ~[na:na]
at org.springframework.batch.core.partition.support.PartitionStep.doExecute(PartitionStep.java:106) ~[spring-batch-core-4.3.3.jar!/:4.3.3]
at org.springframework.batch.core.step.AbstractStep.execute(AbstractStep.java:208) ~[spring-batch-core-4.3.3.jar!/:4.3.3]
at org.springframework.batch.core.job.SimpleStepHandler.handleStep(SimpleStepHandler.java:152) ~[spring-batch-core-4.3.3.jar!/:4.3.3]
at org.springframework.batch.core.job.AbstractJob.handleStep(AbstractJob.java:413) ~[spring-batch-core-4.3.3.jar!/:4.3.3]
at org.springframework.batch.core.job.SimpleJob.doExecute(SimpleJob.java:136) ~[spring-batch-core-4.3.3.jar!/:4.3.3]
at org.springframework.batch.core.job.AbstractJob.execute(AbstractJob.java:320) ~[spring-batch-core-4.3.3.jar!/:4.3.3]
at org.springframework.batch.core.launch.support.SimpleJobLauncher$1.run(SimpleJobLauncher.java:149) ~[spring-batch-core-4.3.3.jar!/:4.3.3]
at org.springframework.core.task.SyncTaskExecutor.execute(SyncTaskExecutor.java:50) ~[spring-core-5.3.7.jar!/:5.3.7]
at org.springframework.batch.core.launch.support.SimpleJobLauncher.run(SimpleJobLauncher.java:140) ~[spring-batch-core-4.3.3.jar!/:4.3.3]
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:na]
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[na:na]
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:na]
at java.base/java.lang.reflect.Method.invoke(Method.java:566) ~[na:na]
at org.springframework.aop.support.AopUtils.invokeJoinpointUsingReflection(AopUtils.java:344) ~[spring-aop-5.3.7.jar!/:5.3.7]
at org.springframework.aop.framework.ReflectiveMethodInvocation.invokeJoinpoint(ReflectiveMethodInvocation.java:198) ~[spring-aop-5.3.7.jar!/:5.3.7]
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) ~[spring-aop-5.3.7.jar!/:5.3.7]
at org.springframework.batch.core.configuration.annotation.SimpleBatchConfiguration$PassthruAdvice.invoke(SimpleBatchConfiguration.java:128) ~[spring-batch-core-4.3.3.jar!/:4.3.3]
at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) ~[spring-aop-5.3.7.jar!/:5.3.7]
at org.springframework.aop.framework.JdkDynamicAopProxy.invoke(JdkDynamicAopProxy.java:215) ~[spring-aop-5.3.7.jar!/:5.3.7]
at com.sun.proxy.$Proxy51.run(Unknown Source) ~[na:na]
at org.springframework.boot.autoconfigure.batch.JobLauncherApplicationRunner.execute(JobLauncherApplicationRunner.java:199) ~[spring-boot-autoconfigure-2.4.6.jar!/:2.4.6]
at org.springframework.boot.autoconfigure.batch.JobLauncherApplicationRunner.executeLocalJobs(JobLauncherApplicationRunner.java:173) ~[spring-boot-autoconfigure-2.4.6.jar!/:2.4.6]
at org.springframework.boot.autoconfigure.batch.JobLauncherApplicationRunner.launchJobFromProperties(JobLauncherApplicationRunner.java:160) ~[spring-boot-autoconfigure-2.4.6.jar!/:2.4.6]
at org.springframework.boot.autoconfigure.batch.JobLauncherApplicationRunner.run(JobLauncherApplicationRunner.java:155) ~[spring-boot-autoconfigure-2.4.6.jar!/:2.4.6]
at org.springframework.boot.autoconfigure.batch.JobLauncherApplicationRunner.run(JobLauncherApplicationRunner.java:150) ~[spring-boot-autoconfigure-2.4.6.jar!/:2.4.6]
at org.springframework.boot.SpringApplication.callRunner(SpringApplication.java:799) ~[spring-boot-2.4.6.jar!/:2.4.6]
at org.springframework.boot.SpringApplication.callRunners(SpringApplication.java:789) ~[spring-boot-2.4.6.jar!/:2.4.6]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:346) ~[spring-boot-2.4.6.jar!/:2.4.6]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:1329) ~[spring-boot-2.4.6.jar!/:2.4.6]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:1318) ~[spring-boot-2.4.6.jar!/:2.4.6]
at com.example.batchprocessing.BatchProcessingApplication.main(BatchProcessingApplication.java:10) ~[classes!/:0.0.1-SNAPSHOT]
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:na]
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[na:na]
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:na]
at java.base/java.lang.reflect.Method.invoke(Method.java:566) ~[na:na]
at org.springframework.boot.loader.MainMethodRunner.run(MainMethodRunner.java:49) ~[batchprocessing-0.0.1-SNAPSHOT.jar:0.0.1-SNAPSHOT]
at org.springframework.boot.loader.Launcher.launch(Launcher.java:108) ~[batchprocessing-0.0.1-SNAPSHOT.jar:0.0.1-SNAPSHOT]
at org.springframework.boot.loader.Launcher.launch(Launcher.java:58) ~[batchprocessing-0.0.1-SNAPSHOT.jar:0.0.1-SNAPSHOT]
at org.springframework.boot.loader.JarLauncher.main(JarLauncher.java:88) ~[batchprocessing-0.0.1-SNAPSHOT.jar:0.0.1-SNAPSHOT]
Study/Reference:
Most tutorials I found online only involve one partition step.
https://dataflow.spring.io/docs/feature-guides/batch/partitioning/
Thanks for any info/help in advance.
Is the above setup even possible?
Yes, nothing prevents you from having two partitioned steps in a single Spring Batch job.
Is it possible to use JobScope/StepScope to pass info to the partition handler?
Yes, it is possible for the partition handler to be declared as a job/step scoped bean if it needs the late-binding feature to be configured.
Updated on 08/14/2021 by @DanilKo
The original answer is correct at a high level. However, to actually make the partition handler step-scoped, a code modification is required.
Below is the analysis plus my proposed workaround/fix (maybe eventually the code maintainers will have a better way to make it work, but so far the fix below is working for me).
The issue continues to be discussed at:
https://github.com/spring-cloud/spring-cloud-task/issues/793 (multiple partition handler discussion)
https://github.com/spring-cloud/spring-cloud-task/issues/792
(which this fix is based on, using a step-scoped partition handler to configure different worker steps + resources + max workers)
Root cause analysis (hypothesis)
The problem is that DeployerPartitionHandler utilizes the @BeforeTask annotation to force the task to pass in a TaskExecution object as part of task setup.
But as this partitionHandler is now at @StepScope (instead of directly at @Bean level with @EnableTask), or there are two partitionHandlers, that setup is no longer triggered, as @EnableTask seems unable to locate a single partition handler during creation.
https://github.com/spring-cloud/spring-cloud-task/blob/main/spring-cloud-task-batch/src/main/java/org/springframework/cloud/task/batch/partition/DeployerPartitionHandler.java#L269
As a result, the created DeployerPartitionHandler hits a null taskExecution when trying to launch (as it is never set up):
https://github.com/spring-cloud/spring-cloud-task/blob/main/spring-cloud-task-batch/src/main/java/org/springframework/cloud/task/batch/partition/DeployerPartitionHandler.java#L347
Workaround resolution
Below is essentially a workaround that uses the current job execution id to retrieve the associated task execution id. From there, it gets that task execution and passes it to the deployer handler to fulfill its need for a taskExecution reference. It seems to work, but it is still not clear whether there are other side effects (so far none were found during testing).
Full code can be found at https://github.com/danilko/spring-batch-remote-k8s-paritition-example/tree/attempt_2_partitionhandler_with_stepscope_workaround_resolution
In the partitionHandler method
@Bean
@StepScope
public PartitionHandler partitionHandler(TaskLauncher taskLauncher,
JobExplorer jobExplorer,
@Value("#{stepExecution}") StepExecution stepExecution) throws Exception {
...
// After the declaration of partitionhandler
DeployerPartitionHandler partitionHandler =
new DeployerPartitionHandler(taskLauncher, jobExplorer, resource,
stepExecution.getJobExecution().getExecutionContext().getString(step + "WorkerStep")
, taskRepository);
// Issue https://github.com/spring-cloud/spring-cloud-task/issues/793
// Perform the setting of the execution, as this partition handler is now not created at task level, so @BeforeTask is no longer valid
// The problem is that DeployerPartitionHandler utilizes the @BeforeTask annotation to force the task to pass in a TaskExecution object as part of task setup
// But as this partitionHandler is now at @StepScope (instead of directly at @Bean level with @EnableTask), that setup is no longer triggered
// As a result, the created DeployerPartitionHandler faced a null
// Below is essentially a workaround to use the current job execution id to retrieve the associated task execution id
// From there, get that task execution and pass it to the deployer handler to fulfill its need for a taskExecution reference
// It seems to work, but it is still not clear if there are other side effects (so far none found during testing)
long executionId = taskExplorer.getTaskExecutionIdByJobExecutionId(stepExecution.getJobExecutionId());
System.out.println("Current execution job to task execution id " + executionId);
TaskExecution taskExecution = taskExplorer.getTaskExecution(taskExplorer.getTaskExecutionIdByJobExecutionId(stepExecution.getJobExecutionId()));
System.out.println("Current execution job to task execution is not null: " + (taskExecution != null));
partitionHandler.beforeTask(taskExecution);
...
// rest of code continue
(Note it utilizes the stepExecution context to find out the currently triggering step name and therefore assigns a different worker step.
The worker step name in this case comes from the pre-defined job execution context, but it could also come from a job parameter or another place.)
That job context is populated by a job listener.
The job is configured with the job listener:
#Bean(name = "partitionedJob")
#Profile("!worker")
public Job partitionedJob()throws Exception {
Random random = new Random();
return jobBuilderFactory.get("partitionedJob" + random.nextInt())
.start(partitionReaderStep())
.listener(jobExecutionListener())
.next(partitionProcessorStep())
.build();
}
In the job listener, it is populated:
@Bean
public JobExecutionListener jobExecutionListener() {
JobExecutionListener listener = new JobExecutionListener(){
@Override
public void beforeJob(JobExecution jobExecution)
{
jobExecution.getExecutionContext().putString("readerCPURequest", "1");
jobExecution.getExecutionContext().putString("readerCPULimit", "2");
jobExecution.getExecutionContext().putString("readerWorkerGridSize", "1");
// For now using same image for reader/processor, but if it work, can split them
jobExecution.getExecutionContext().putString("readerWorkerImage", "worker:latest");
jobExecution.getExecutionContext().putString("readerWorkerStep", "workerStepReader");
jobExecution.getExecutionContext().putString("processorCPURequest", "3");
jobExecution.getExecutionContext().putString("processorCPULimit", "4");
jobExecution.getExecutionContext().putString("processorWorkerGridSize", "2");
// For now using same image for reader/processor, but if it work, will split them
jobExecution.getExecutionContext().putString("processorWorkerImage", "worker:latest");
jobExecution.getExecutionContext().putString("processorWorkerStep", "workerStepProcessor");
System.out.println("Set readerGridSize == " + jobExecution.getExecutionContext().getString("readerGridSize", "IT IS NULL WHICH IS INCORRECT"));
}
@Override
public void afterJob(JobExecution jobExecution) {
}
};
return listener;
}
Full code (can also be found on my GitHub after the workaround fix is applied): https://github.com/danilko/spring-batch-remote-k8s-paritition-example/blob/main/src/main/java/com/example/batchprocessing/BatchConfiguration.java
package com.example.batchprocessing;
import io.fabric8.kubernetes.api.model.DeletionPropagation;
import io.fabric8.kubernetes.api.model.batch.JobList;
import io.fabric8.kubernetes.api.model.batch.JobSpec;
import io.fabric8.kubernetes.api.model.batch.JobStatus;
import io.fabric8.kubernetes.client.KubernetesClient;
import org.springframework.batch.core.*;
import org.springframework.batch.core.configuration.JobRegistry;
import org.springframework.batch.core.configuration.annotation.EnableBatchProcessing;
import org.springframework.batch.core.configuration.annotation.JobBuilderFactory;
import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
import org.springframework.batch.core.configuration.annotation.StepScope;
import org.springframework.batch.core.explore.JobExplorer;
import org.springframework.batch.core.partition.PartitionHandler;
import org.springframework.batch.core.partition.support.Partitioner;
import org.springframework.batch.core.repository.JobRepository;
import org.springframework.batch.core.scope.context.ChunkContext;
import org.springframework.batch.core.step.tasklet.Tasklet;
import org.springframework.batch.item.ExecutionContext;
import org.springframework.batch.repeat.RepeatStatus;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.cloud.deployer.resource.docker.DockerResource;
import org.springframework.cloud.deployer.resource.support.DelegatingResourceLoader;
import org.springframework.cloud.deployer.spi.kubernetes.*;
import org.springframework.cloud.deployer.spi.task.TaskLauncher;
import org.springframework.cloud.task.batch.partition.*;
import org.springframework.cloud.task.configuration.EnableTask;
import org.springframework.cloud.task.repository.TaskExecution;
import org.springframework.cloud.task.repository.TaskExplorer;
import org.springframework.cloud.task.repository.TaskRepository;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Profile;
import org.springframework.core.env.Environment;
import org.springframework.core.env.SystemEnvironmentPropertySource;
import org.springframework.core.io.Resource;
import org.springframework.core.task.TaskExecutor;
import org.springframework.core.task.TaskRejectedException;
import org.springframework.util.StringUtils;
import java.util.*;
@Configuration
@EnableBatchProcessing
@EnableTask
public class BatchConfiguration {
private static int BACK_OFF_LIMIT = 6;
// Set the kubernetes job name
private String taskName_prefix="partitionedbatchjob";
@Autowired
public JobBuilderFactory jobBuilderFactory;
@Autowired
public StepBuilderFactory stepBuilderFactory;
@Autowired
public JobExplorer jobExplorer;
@Autowired
public JobRepository jobRepository;
@Autowired
public TaskExecutor taskExecutor;
@Autowired
public TaskRepository taskRepository;
@Autowired
public TaskExplorer taskExplorer;
@Autowired
private ConfigurableApplicationContext context;
@Autowired
private DelegatingResourceLoader resourceLoader;
@Autowired
private Environment environment;
@Bean
@StepScope
public Partitioner partitioner(@Value("#{stepExecution}") StepExecution stepExecution) {
return new Partitioner() {
@Override
public Map<String, ExecutionContext> partition(int gridSize) {
Map<String, ExecutionContext> partitions = new HashMap<>(gridSize);
int targetGridSize = 0;
String step = "";
if(stepExecution.getStepName().equalsIgnoreCase("partitionReaderStep"))
{
step = "reader";
}
else
{
step = "processor";
}
targetGridSize = Integer.parseInt(stepExecution.getJobExecution().getExecutionContext().getString(step + "WorkerGridSize"));
for (int i = 0; i < targetGridSize; i++) {
ExecutionContext context1 = new ExecutionContext();
context1.put("partitionNumber", i);
partitions.put("partition" + i, context1);
}
return partitions;
}
};
}
@Bean
public KubernetesClient kuberentesClient()
{
KubernetesDeployerProperties kubernetesDeployerProperties = new KubernetesDeployerProperties();
return KubernetesClientFactory.getKubernetesClient(kubernetesDeployerProperties);
}
@Bean
@StepScope
public TaskLauncher taskLauncher(@Value("#{stepExecution}") StepExecution stepExecution)
{
KubernetesDeployerProperties kubernetesDeployerProperties = new KubernetesDeployerProperties();
kubernetesDeployerProperties.setNamespace("default");
kubernetesDeployerProperties.setCreateJob(true);
// Database setup to reference configmap for database info
List<KubernetesDeployerProperties.ConfigMapKeyRef> configMapKeyRefList = new ArrayList<KubernetesDeployerProperties.ConfigMapKeyRef>();
KubernetesDeployerProperties.ConfigMapKeyRef configMapKeyRef = new KubernetesDeployerProperties.ConfigMapKeyRef();
configMapKeyRef.setConfigMapName("mariadb");
configMapKeyRef.setDataKey("SPRING_DATASOURCE_URL");
configMapKeyRef.setEnvVarName("SPRING_DATASOURCE_URL");
configMapKeyRefList.add(configMapKeyRef);
configMapKeyRef = new KubernetesDeployerProperties.ConfigMapKeyRef();
configMapKeyRef.setConfigMapName("mariadb");
configMapKeyRef.setDataKey("SPRING_DATASOURCE_USERNAME");
configMapKeyRef.setEnvVarName("SPRING_DATASOURCE_USERNAME");
configMapKeyRefList.add(configMapKeyRef);
configMapKeyRef = new KubernetesDeployerProperties.ConfigMapKeyRef();
configMapKeyRef.setConfigMapName("mariadb");
configMapKeyRef.setDataKey("SPRING_DATASOURCE_PASSWORD");
configMapKeyRef.setEnvVarName("SPRING_DATASOURCE_PASSWORD");
configMapKeyRefList.add(configMapKeyRef);
configMapKeyRef = new KubernetesDeployerProperties.ConfigMapKeyRef();
configMapKeyRef.setConfigMapName("mariadb");
configMapKeyRef.setDataKey("SPRING_DATASOURCE_DRIVERCLASSNAME");
configMapKeyRef.setEnvVarName("SPRING_DATASOURCE_DRIVERCLASSNAME");
configMapKeyRefList.add(configMapKeyRef);
configMapKeyRef = new KubernetesDeployerProperties.ConfigMapKeyRef();
configMapKeyRef.setConfigMapName("mariadb");
configMapKeyRef.setDataKey("SPRING_PROFILES_ACTIVE");
configMapKeyRef.setEnvVarName("SPRING_PROFILES_ACTIVE");
configMapKeyRefList.add(configMapKeyRef);
kubernetesDeployerProperties.setConfigMapKeyRefs(configMapKeyRefList);
// Set request resource
KubernetesDeployerProperties.RequestsResources request = new KubernetesDeployerProperties.RequestsResources();
KubernetesDeployerProperties.LimitsResources limit = new KubernetesDeployerProperties.LimitsResources();
String step = "";
if(stepExecution.getStepName().equalsIgnoreCase("partitionReaderStep"))
{
step="reader";
}
else
{
step="processor";
}
request.setCpu(stepExecution.getJobExecution().getExecutionContext().getString(step + "CPURequest"));
request.setMemory("2000Mi");
limit.setCpu(stepExecution.getJobExecution().getExecutionContext().getString(step +"CPULimit"));
limit.setMemory("3000Mi");
kubernetesDeployerProperties.setRequests(request);
kubernetesDeployerProperties.setLimits(limit);
// as build on local image, so need to use local
kubernetesDeployerProperties.setImagePullPolicy(ImagePullPolicy.IfNotPresent);
// Set task launcher properties to not repeat and not restart
KubernetesTaskLauncherProperties kubernetesTaskLauncherProperties = new KubernetesTaskLauncherProperties();
// https://kubernetes.io/docs/concepts/workloads/controllers/job/
// Set to never to create new pod on restart
kubernetesTaskLauncherProperties.setBackoffLimit(BACK_OFF_LIMIT);
kubernetesTaskLauncherProperties.setRestartPolicy(RestartPolicy.Never);
KubernetesTaskLauncher kubernetesTaskLauncher = new KubernetesTaskLauncher(kubernetesDeployerProperties,
kubernetesTaskLauncherProperties, kuberentesClient());
return kubernetesTaskLauncher;
}
#Bean(name = "partitionedJob")
#Profile("!worker")
public Job partitionedJob()throws Exception {
Random random = new Random();
return jobBuilderFactory.get("partitionedJob" + random.nextInt())
.start(partitionReaderStep())
.listener(jobExecutionListener())
.next(partitionProcessorStep())
.build();
}
#Bean(name = "partitionReaderStep")
public Step partitionReaderStep() throws Exception {
return stepBuilderFactory.get("partitionReaderStep")
.partitioner(workerStepReader().getName(), partitioner( null))
.step(workerStepReader())
.partitionHandler(partitionHandler(
taskLauncher( null),
jobExplorer, null))
.build();
}
#Bean(name = "partitionProcessorStep")
public Step partitionProcessorStep() throws Exception {
return stepBuilderFactory.get("partitionProcessorStep")
.partitioner(workerStepProcessor().getName(), partitioner( null))
.step(workerStepProcessor())
.partitionHandler(partitionHandler(
taskLauncher( null),
jobExplorer, null))
.build();
}
@Bean
@StepScope
public PartitionHandler partitionHandler(TaskLauncher taskLauncher,
JobExplorer jobExplorer,
@Value("#{stepExecution}") StepExecution stepExecution) throws Exception {
String step ="processor";
if(stepExecution.getStepName().equalsIgnoreCase("partitionReaderStep")) {
step = "reader";
}
// Use local build image
DockerResource resource = new DockerResource(stepExecution.getJobExecution().getExecutionContext().getString(step + "WorkerImage"));
DeployerPartitionHandler partitionHandler =
new DeployerPartitionHandler(taskLauncher, jobExplorer, resource,
stepExecution.getJobExecution().getExecutionContext().getString(step + "WorkerStep")
, taskRepository);
// Issue https://github.com/spring-cloud/spring-cloud-task/issues/793
// Perform the setting of the execution, as this partition handler is now not created at task level, so @BeforeTask is no longer valid
// The problem is that DeployerPartitionHandler utilizes the @BeforeTask annotation to force the task to pass in a TaskExecution object as part of task setup
// But as this partitionHandler is now at @StepScope (instead of directly at @Bean level with @EnableTask), that setup is no longer triggered
// As a result, the created DeployerPartitionHandler faced a null
// Below is essentially a workaround to use the current job execution id to retrieve the associated task execution id
// From there, get that task execution and pass it to the deployer handler to fulfill its need for a taskExecution reference
// It seems to work, but it is still not clear if there are other side effects (so far none found during testing)
long executionId = taskExplorer.getTaskExecutionIdByJobExecutionId(stepExecution.getJobExecutionId());
System.out.println("Current execution job to task execution id " + executionId);
TaskExecution taskExecution = taskExplorer.getTaskExecution(taskExplorer.getTaskExecutionIdByJobExecutionId(stepExecution.getJobExecutionId()));
System.out.println("Current execution job to task execution is not null: " + (taskExecution != null));
partitionHandler.beforeTask(taskExecution);
List<String> commandLineArgs = new ArrayList<>(3);
commandLineArgs.add("--spring.profiles.active=worker");
commandLineArgs.add("--spring.cloud.task.initialize.enable=false");
commandLineArgs.add("--spring.batch.initializer.enabled=false");
partitionHandler
.setCommandLineArgsProvider(new PassThroughCommandLineArgsProvider(commandLineArgs));
partitionHandler.setEnvironmentVariablesProvider(new NoOpEnvironmentVariablesProvider());
partitionHandler.setMaxWorkers(Integer.parseInt(stepExecution.getJobExecution().getExecutionContext().getString(step + "WorkerGridSize")));
partitionHandler.setApplicationName(taskName_prefix + step);
return partitionHandler;
}
@Bean
public JobExecutionListener jobExecutionListener() {
JobExecutionListener listener = new JobExecutionListener(){
@Override
public void beforeJob(JobExecution jobExecution)
{
jobExecution.getExecutionContext().putString("readerCPURequest", "1");
jobExecution.getExecutionContext().putString("readerCPULimit", "2");
jobExecution.getExecutionContext().putString("readerWorkerGridSize", "1");
// For now using same image for reader/processor, but if it work, can split them
jobExecution.getExecutionContext().putString("readerWorkerImage", "worker:latest");
jobExecution.getExecutionContext().putString("readerWorkerStep", "workerStepReader");
jobExecution.getExecutionContext().putString("processorCPURequest", "3");
jobExecution.getExecutionContext().putString("processorCPULimit", "4");
jobExecution.getExecutionContext().putString("processorWorkerGridSize", "2");
// For now using same image for reader/processor, but if it work, will split them
jobExecution.getExecutionContext().putString("processorWorkerImage", "worker:latest");
jobExecution.getExecutionContext().putString("processorWorkerStep", "workerStepProcessor");
System.out.println("Set readerGridSize == " + jobExecution.getExecutionContext().getString("readerGridSize", "IT IS NULL WHICH IS INCORRECT"));
}
@Override
public void afterJob(JobExecution jobExecution) {
}
};
return listener;
}
@Bean
@Profile("worker")
public DeployerStepExecutionHandler stepExecutionHandler(JobExplorer jobExplorer) {
return new DeployerStepExecutionHandler(this.context, jobExplorer, this.jobRepository);
}
#Bean(name = "workerStepReader")
public Step workerStepReader() {
return this.stepBuilderFactory.get("workerStepReader")
.tasklet(workerTaskletReader(null))
.build();
}
#Bean(name = "workerStepProcessor")
public Step workerStepProcessor() {
return this.stepBuilderFactory.get("workerStepProcessor")
.tasklet(workerTaskletProcessor(null))
.build();
}
@Bean
@StepScope
public Tasklet workerTaskletReader(
final @Value("#{stepExecution}") StepExecution stepExecution) {
return new Tasklet() {
@Override
public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) throws Exception {
Integer partitionNumber = stepExecution.getExecutionContext().getInt("partitionNumber");
System.out.println("This workerTaskletReader ran partition: " + partitionNumber);
return RepeatStatus.FINISHED;
}
};
}
@Bean
@StepScope
public Tasklet workerTaskletProcessor(
final @Value("#{stepExecution}") StepExecution stepExecution) {
return new Tasklet() {
@Override
public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) throws Exception {
Integer partitionNumber = stepExecution.getExecutionContext().getInt("partitionNumber");
System.out.println("This workerTaskletProcessor ran partition: " + partitionNumber);
return RepeatStatus.FINISHED;
}
};
}
}

Executing a sample Flink Kafka consumer

I'm trying to create a simple Flink Kafka consumer
public class ReadFromKafka {
public static void main(String[] args) throws Exception {
// create execution environment
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
Properties properties = new Properties();
properties.setProperty("bootstrap.servers", "localhost:9092");
properties.setProperty("group.id", "flink_consumer");
DataStream<String> stream = env
.addSource(new FlinkKafkaConsumer09<>("test", new SimpleStringSchema(), properties));
stream.map(new MapFunction<String, String>() {
private static final long serialVersionUID = -6867736771747690202L;
@Override
public String map(String value) throws Exception {
return "Stream Value: " + value;
}
}).print();
env.execute();
}
}
It is giving me this error:
INFO org.apache.kafka.common.utils.AppInfoParser - Kafka version: 2.3.0
16:47:28,448 INFO org.apache.kafka.common.utils.AppInfoParser - Kafka commitId: fc1aaa116b661c8a
16:47:28,448 INFO org.apache.kafka.common.utils.AppInfoParser - Kafka startTimeMs: 1563029248441
16:47:28,451 INFO org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer09 - Trying to get partitions for topic test
16:47:28,775 INFO org.apache.kafka.clients.Metadata - [Consumer clientId=consumer-1, groupId=flink_consumer] Cluster ID: 4rz71KZCS_CSasZMrFBNKw
16:47:29,858 INFO org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer09 - Got 1 partitions from these topics: [test]
16:47:29,859 INFO org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase - Consumer is going to read the following topics (with number of partitions):
Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/flink/api/java/operators/Keys
at org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.addSource(StreamExecutionEnvironment.java:994)
at org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.addSource(StreamExecutionEnvironment.java:955)
at myflink.ReadFromKafka.main(ReadFromKafka.java:43)
Caused by: java.lang.ClassNotFoundException: org.apache.flink.api.java.operators.Keys
at java.base/jdk.internal.loader.BuiltinClassLoader.loadClass(BuiltinClassLoader.java:583)
at java.base/jdk.internal.loader.ClassLoaders$AppClassLoader.loadClass(ClassLoaders.java:178)
at java.base/java.lang.ClassLoader.loadClass(ClassLoader.java:521)
... 3 more
Process finished with exit code 1
According to your stack trace, Java could not find a class:
Caused by: java.lang.ClassNotFoundException: org.apache.flink.api.java.operators.Keys
This class is in the flink-java_2.11 jar file, which you might have missed in your dependencies.
https://www.javadoc.io/doc/org.apache.flink/flink-java_2.11/0.10.2
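If you build with Maven, a dependency along these lines should pull that class in; this is a sketch, and the exact artifact id and version are assumptions (match them to the Flink version used by your other dependencies, noting that very old Flink releases used a Scala-suffixed artifact id such as flink-java_2.11):

<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-java</artifactId>
    <version>1.8.1</version>
</dependency>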

Quartz scheduler with JDBC: job not saved in database

I have used the JDBC job store technique for saving the jobs.
My quartz.properties file is here:
#==========================================
# Configure Main Scheduler Properties
#===========================================
org.quartz.scheduler.instanceName = ScheduleReport
# it has a method scheduler.scheduleJob(jobDetail, simpleTrigger);
org.quartz.scheduler.instanceId = AUTO
#============================================
# Configure ThreadPool
#============================================
org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.threadCount = 25
org.quartz.threadPool.threadPriority = 5
#============================================
# Configure JobStore
#=============================================
org.quartz.jobStore.misfireThreshold = 60000
org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.oracle.OracleDelegate
org.quartz.jobStore.useProperties = true
org.quartz.jobStore.tablePrefix = QRTZ_
org.quartz.jobStore.isClustered = true
org.quartz.jobStore.clusterCheckinInterval = 20000
org.quartz.jobStore.dataSource = qzDS
org.quartz.dataSource.qzDS.driver = oracle.jdbc.driver.OracleDriver
org.quartz.dataSource.qzDS.URL = jdbc:oracle:thin:@localhost:1521:abc
org.quartz.dataSource.qzDS.user = system
org.quartz.dataSource.qzDS.password = abc
org.quartz.dataSource.qzDS.maxConnections = 30
Java file implementing the job:
public class ScheduleReport implements Job {
public void execute(JobExecutionContext context) throws JobExecutionException {
JobDataMap dataMap = context.getJobDetail().getJobDataMap();
exportReportAsType();
}
}
Another Java file that schedules the job:
JobDetail jobweek = JobBuilder.newJob(ScheduleReport.class)
.withIdentity(jobname+"_"+jobid, "week").build();
jobweek.getJobDataMap().put("reportid", reportid);
jobweek.getJobDataMap().put("jobid", jobid);
jobweek.getJobDataMap().put("reportname", reportname);
jobweek.getJobDataMap().put("dateTime", dateTime);
jobweek.getJobDataMap().put("dateTimeType", sdf1.parse(dateTime));
jobweek.getJobDataMap().put("schedulerService",
schedulerService);
CronTrigger trigger3 = TriggerBuilder
.newTrigger()
.withIdentity(jobname+"_"+jobid, "week")
.startAt(sdf1.parse(dateTime))
.withSchedule(CronScheduleBuilder.cronSchedule(weekSch))
.endAt(sdf1.parse(enddateTime)).build();
// scheduler.start();
scheduler.scheduleJob(jobweek, trigger3);
This is my configuration, but I'm not able to store jobs in the DB. When I look at the tables in the DB, only QRTZ_LOCKS & QRTZ_SCHEDULER_STATE contain data; the other tables are blank.

Why does the ProcessExecutionEngine always send null input parameters to the web service?

I'm trying to execute a web service based on its OWL-S description file using owls-api-3.1.
The web service is a simple JAX-WS service deployed using Grizzly; the OWL-S file is generated using the WSDL2OWLS class found in the code examples (downloaded and extracted from the src jar). The code is hosted in this GitHub repository.
(The web service is well tested using soapUI.)
The web service definition
@WebService(serviceName = "Hello", targetNamespace = HelloService.WSDL_FILE)
public class HelloService {
public static final String ROUTE = "/hello";
public static final String OWLS_FILE = Bootstrap.OWLS_DIR + "/hello.owl";
public static final String WSDL_FILE = "HTTP://127.0.0.1/hello?wsdl";
/**
* This is a sample web service operation
*
* @param name
* @return
*/
@WebMethod(operationName = "hello")
public String hello(@WebParam(name = "name") String name) {
return "Hello " + name;
}
}
The web service deployment
HttpServer httpServer = new HttpServer();
NetworkListener networkListener = new NetworkListener("grizzly", "0.0.0.0", 8080);
httpServer.addListener(networkListener);
httpServer.getServerConfiguration().addHttpHandler(new CLStaticHttpHandler(Bootstrap.class.getClassLoader(), "static/"), "/");
httpServer.getServerConfiguration().addHttpHandler(new JaxwsHandler(new HelloService()), HelloService.ROUTE);
httpServer.start();
Thread.sleep(2 * 1000); // The services are up and running
System.out.println(" --- OWLS client --- ");
new HelloServiceOWLSClient().start();
Thread.currentThread().join();
The OWLS client
public class HelloServiceOWLSClient {
private static final Logger LOG = Logger.getLogger(HelloServiceOWLSClient.class.getName());
public void start() {
try {
OWLKnowledgeBase kb = OWLFactory.createKB();
Service service = kb.readService(URI.create(HelloService.OWLS_FILE));
Process process = service.getProcess();
ProcessExecutionEngine executionEngine = OWLSFactory.createExecutionEngine();
ValueMap<Input, OWLValue> inputs = new ValueMap<>();
inputs.setValue(process.getInput("name"), kb.createDataValue("tarrsalah"));
LOG.log(Level.INFO, inputs.debugString());
ValueMap<Output, OWLValue> outputs = executionEngine.execute(process, inputs, kb);
LOG.log(Level.INFO, outputs.debugString());
} catch (IOException | ExecutionException ex) {
LOG.log(Level.SEVERE, ex.toString());
} finally {
}
}
}
The complete stack trace
May 25, 2014 1:34:33 AM org.glassfish.grizzly.http.server.NetworkListener start
INFO: Started listener bound to [0.0.0.0:8080]
May 25, 2014 1:34:34 AM org.glassfish.grizzly.http.server.HttpServer start
INFO: [HttpServer] Started.
--- OWLS client ---
INFO [org.tarrsalah.owls.examples.Bootstrap.main()] (Vocabulary.java:118) - Loading ontology http://www.daml.org/services/owl-s/1.2/Service.owl# ...
INFO [org.tarrsalah.owls.examples.Bootstrap.main()] (Vocabulary.java:118) - Loading ontology http://www.daml.org/services/owl-s/1.2/Profile.owl# ...
INFO [org.tarrsalah.owls.examples.Bootstrap.main()] (Vocabulary.java:118) - Loading ontology http://www.daml.org/services/owl-s/1.2/ActorDefault.owl# ...
INFO [org.tarrsalah.owls.examples.Bootstrap.main()] (Vocabulary.java:118) - Loading ontology http://www.daml.org/services/owl-s/1.2/ServiceParameter.owl# ...
INFO [org.tarrsalah.owls.examples.Bootstrap.main()] (Vocabulary.java:118) - Loading ontology http://www.daml.org/services/owl-s/1.2/ServiceCategory.owl# ...
INFO [org.tarrsalah.owls.examples.Bootstrap.main()] (Vocabulary.java:118) - Loading ontology http://www.daml.org/services/owl-s/1.2/Process.owl# ...
INFO [org.tarrsalah.owls.examples.Bootstrap.main()] (Vocabulary.java:118) - Loading ontology http://www.daml.org/services/owl-s/1.2/generic/ObjectList.owl# ...
INFO [org.tarrsalah.owls.examples.Bootstrap.main()] (Vocabulary.java:118) - Loading ontology http://www.daml.org/services/owl-s/1.2/generic/Expression.owl# ...
INFO [org.tarrsalah.owls.examples.Bootstrap.main()] (Vocabulary.java:118) - Loading ontology http://www.daml.org/services/owl-s/1.2/Grounding.owl# ...
INFO [org.tarrsalah.owls.examples.Bootstrap.main()] (Vocabulary.java:118) - Loading ontology http://on.cs.unibas.ch/owl-s/1.2/MoreGroundings.owl# ...
INFO [org.tarrsalah.owls.examples.Bootstrap.main()] (Vocabulary.java:118) - Loading ontology http://on.cs.unibas.ch/owl-s/1.2/FLAService.owl# ...
May 25, 2014 1:34:38 AM org.tarrsalah.owls.examples.HelloServiceOWLSClient start
INFO: (name = tarrsalah)
May 25, 2014 1:34:39 AM org.tarrsalah.owls.examples.HelloServiceOWLSClient start
INFO: (return = Hello null)
-----
In the last line, I expected Hello tarrsalah instead of Hello null.
The complete generated OWL-S file:
<?xml version="1.0"?>
<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:grounding="http://www.daml.org/services/owl-s/1.2/Grounding.owl#"
xmlns="http://www.example.org/service.owl"
xmlns:owl="http://www.w3.org/2002/07/owl#"
xmlns:list="http://www.daml.org/services/owl-s/1.2/generic/ObjectList.owl#"
xmlns:expr="http://www.daml.org/services/owl-s/1.2/generic/Expression.owl#"
xmlns:swrl="http://www.w3.org/2003/11/swrl#"
xmlns:service="http://www.daml.org/services/owl-s/1.2/Service.owl#"
xmlns:profile="http://www.daml.org/services/owl-s/1.2/Profile.owl#"
xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:process="http://www.daml.org/services/owl-s/1.2/Process.owl#"
xmlns:xsd="http://www.w3.org/2001/XMLSchema#"
xml:base="http://www.example.org/service.owl">
<owl:Ontology rdf:about="">
<owl:imports rdf:resource="http://www.daml.org/services/owl-s/1.2/Grounding.owl"/>
<owl:imports rdf:resource="http://www.daml.org/services/owl-s/1.2/Profile.owl"/>
</owl:Ontology>
<service:Service rdf:ID="helloService">
<service:supports>
<grounding:WsdlGrounding rdf:ID="helloGrounding"/>
</service:supports>
<service:describedBy>
<process:AtomicProcess rdf:ID="helloProcess"/>
</service:describedBy>
<service:presents>
<profile:Profile rdf:ID="helloProfile"/>
</service:presents>
</service:Service>
<profile:Profile rdf:about="#helloProfile">
<profile:hasOutput>
<process:Output rdf:ID="return">
<process:parameterType rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI"
>http://www.w3.org/2001/XMLSchema#string</process:parameterType>
<rdfs:label>return</rdfs:label>
</process:Output>
</profile:hasOutput>
<profile:hasInput>
<process:Input rdf:ID="name">
<process:parameterType rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI"
>http://www.w3.org/2001/XMLSchema#string</process:parameterType>
<rdfs:label>name</rdfs:label>
</process:Input>
</profile:hasInput>
<profile:textDescription>Auto generated from HTTP://127.0.0.1/hello?wsdl</profile:textDescription>
<profile:serviceName>hello</profile:serviceName>
<service:presentedBy rdf:resource="#helloService"/>
</profile:Profile>
<process:AtomicProcess rdf:about="#helloProcess">
<process:hasOutput rdf:resource="#return"/>
<process:hasInput rdf:resource="#name"/>
<service:describes rdf:resource="#helloService"/>
<rdfs:label>helloProcess</rdfs:label>
</process:AtomicProcess>
<grounding:WsdlGrounding rdf:about="#helloGrounding">
<grounding:hasAtomicProcessGrounding>
<grounding:WsdlAtomicProcessGrounding rdf:ID="helloAtomicProcessGrounding"/>
</grounding:hasAtomicProcessGrounding>
<service:supportedBy rdf:resource="#helloService"/>
</grounding:WsdlGrounding>
<grounding:WsdlAtomicProcessGrounding rdf:about="#helloAtomicProcessGrounding">
<grounding:wsdlOutput>
<grounding:WsdlOutputMessageMap>
<grounding:wsdlMessagePart rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI"
>HTTP://127.0.0.1/hello?wsdl#return</grounding:wsdlMessagePart>
<grounding:owlsParameter rdf:resource="#return"/>
</grounding:WsdlOutputMessageMap>
</grounding:wsdlOutput>
<grounding:wsdlInput>
<grounding:WsdlInputMessageMap>
<grounding:wsdlMessagePart rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI"
>HTTP://127.0.0.1/hello?wsdl#name</grounding:wsdlMessagePart>
<grounding:owlsParameter rdf:resource="#name"/>
</grounding:WsdlInputMessageMap>
</grounding:wsdlInput>
<grounding:wsdlOutputMessage rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI"
>http://127.0.0.1/hello?wsdl#helloResponse</grounding:wsdlOutputMessage>
<grounding:wsdlInputMessage rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI"
>http://127.0.0.1/hello?wsdl#hello</grounding:wsdlInputMessage>
<grounding:wsdlDocument rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI"
>HTTP://127.0.0.1/hello?wsdl</grounding:wsdlDocument>
<grounding:wsdlOperation>
<grounding:WsdlOperationRef>
<grounding:operation rdf:datatype="http://www.w3.org/2001/XMLSchema#anyURI"
>HTTP://127.0.0.1/hello?wsdl#hello</grounding:operation>
</grounding:WsdlOperationRef>
</grounding:wsdlOperation>
<grounding:owlsProcess rdf:resource="#helloProcess"/>
</grounding:WsdlAtomicProcessGrounding>
</rdf:RDF>
It seems that Axis 1.4 uses RPC SOAP binding to call the web service. Setting the SOAPBinding to Style.RPC in the web service declaration solves the problem.
@WebService(serviceName = "Hello", targetNamespace = "http://127.0.0.1/hello")
@SOAPBinding(style = Style.RPC)
public class HelloService {
public static final String ROUTE = "/hello";
public static final String OWLS_FILE = Bootstrap.OWLS_DIR + "/hello.owl";
public static final String WSDL_FILE = "http://127.0.0.1/hello?wsdl";
/**
* This is a sample web service operation
*
* @param name
* @return
*/
@WebMethod(operationName = "hello")
@WebResult(name="greeting")
public String hello(@WebParam(name = "name") String name) {
return "Hello " + name;
}
}
