Support timeout job #1923
base: master
Changes from all commits
@@ -24,19 +24,24 @@
import org.apache.shardingsphere.elasticjob.executor.context.ExecutorContext;
import org.apache.shardingsphere.elasticjob.executor.item.JobItemExecutor;
import org.apache.shardingsphere.elasticjob.executor.item.JobItemExecutorFactory;
import org.apache.shardingsphere.elasticjob.infra.concurrent.BlockUtils;
import org.apache.shardingsphere.elasticjob.infra.env.IpUtils;
import org.apache.shardingsphere.elasticjob.infra.exception.ExceptionUtils;
import org.apache.shardingsphere.elasticjob.infra.exception.JobExecutionEnvironmentException;
import org.apache.shardingsphere.elasticjob.infra.listener.ShardingContexts;
import org.apache.shardingsphere.elasticjob.tracing.event.JobExecutionEvent;
import org.apache.shardingsphere.elasticjob.tracing.event.JobExecutionEvent.ExecutionSource;
import org.apache.shardingsphere.elasticjob.tracing.event.JobStatusTraceEvent.State;

import java.util.Collection;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

/**
 * ElasticJob executor.
@@ -136,27 +141,52 @@ private void execute(final JobConfiguration jobConfig, final ShardingContexts sh

    private void process(final JobConfiguration jobConfig, final ShardingContexts shardingContexts, final ExecutionSource executionSource) {
        Collection<Integer> items = shardingContexts.getShardingItemParameters().keySet();
        if (1 == items.size()) {
            int item = shardingContexts.getShardingItemParameters().keySet().iterator().next();
            JobExecutionEvent jobExecutionEvent = new JobExecutionEvent(IpUtils.getHostName(), IpUtils.getIp(), shardingContexts.getTaskId(), jobConfig.getJobName(), executionSource, item);
            process(jobConfig, shardingContexts, item, jobExecutionEvent);
            return;
        }
        Queue<Future<Boolean>> futures = new LinkedList<>();
        CountDownLatch latch = new CountDownLatch(items.size());
        for (int each : items) {
            JobExecutionEvent jobExecutionEvent = new JobExecutionEvent(IpUtils.getHostName(), IpUtils.getIp(), shardingContexts.getTaskId(), jobConfig.getJobName(), executionSource, each);
            ExecutorService executorService = executorContext.get(ExecutorService.class);
            if (executorService.isShutdown()) {
                return;
            }
-           executorService.submit(() -> {
+           Future<Boolean> future = executorService.submit(() -> {
                try {
                    process(jobConfig, shardingContexts, each, jobExecutionEvent);
                } finally {
                    latch.countDown();
                }
                return true;
            });
            futures.offer(future);
        }

        int sumWaitTime = 0;
        int maxRunTime = jobConfig.getMaxRunTimeSeconds() > 0 ? jobConfig.getMaxRunTimeSeconds() * 1000 : 0;
        while (!futures.isEmpty()) {
            //waiting process Done
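The hunk is truncated at the start of the waiting loop, so the timeout handling itself is not visible above. For orientation only, here is a minimal, self-contained sketch of one way such a bounded wait over the queued futures could look; the class and method names (FutureWaitSketch, awaitCompletion) and the CHECK_INTERVAL_MILLIS constant are illustrative assumptions, not code from this PR:

import java.util.Queue;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public final class FutureWaitSketch {

    // Illustrative constant; the PR itself hardcodes 100 ms (see the review comment below).
    private static final long CHECK_INTERVAL_MILLIS = 100L;

    /**
     * Poll the queued futures until all have finished, cancelling any that are
     * still running once the time budget is used up. A non-positive budget means no limit.
     */
    public static void awaitCompletion(final Queue<Future<Boolean>> futures, final long maxRunTimeMillis) throws InterruptedException {
        long waitedMillis = 0L;
        while (!futures.isEmpty()) {
            Future<Boolean> head = futures.peek();
            if (head.isDone()) {
                futures.poll();
                try {
                    head.get();
                } catch (final CancellationException | ExecutionException ignored) {
                    // Mirrors the PR's stated intent: ignore process exceptions and cancellations for futures.
                }
                continue;
            }
            if (maxRunTimeMillis > 0 && waitedMillis >= maxRunTimeMillis) {
                // Time budget exhausted: interrupt the shard tasks that are still running.
                for (Future<Boolean> each : futures) {
                    each.cancel(true);
                }
                futures.clear();
                return;
            }
            Thread.sleep(CHECK_INTERVAL_MILLIS);
            waitedMillis += CHECK_INTERVAL_MILLIS;
        }
    }
}

Note that the actual PR also counts down a CountDownLatch inside each submitted task; whether the latch or the future queue drives the final wait is not visible in the truncated hunk.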
Suggested change to the '//waiting process Done' comment:

-            //waiting process Done
+            // Waiting for process to complete

Copilot AI · Sep 6, 2025
The magic number 100 (milliseconds) is hardcoded and used in multiple places. Consider extracting it as a named constant like TIMEOUT_CHECK_INTERVAL_MILLIS = 100 to improve maintainability.
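If that refactoring were adopted, it could look roughly like the following; the constant name is the reviewer's proposal, and the sleep call is an assumption about how the literal 100 is used in the waiting loop:

    private static final long TIMEOUT_CHECK_INTERVAL_MILLIS = 100L;

    // ...then, wherever the literal 100 appears while polling the futures:
    Thread.sleep(TIMEOUT_CHECK_INTERVAL_MILLIS);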
Copilot AI · Sep 6, 2025

The comments have grammatical errors. They should read 'ignore process exceptions and cancellations for futures' and 'TODO: Consider tracking job failure status and how to handle overall job failure loops'.
Suggested change:

-    // ignore process Exception and Canceled for future
-    // TODO Consider increasing the status of job failure, and how to handle the overall loop of job failure
+    // ignore process exceptions and cancellations for futures
+    // TODO: Consider tracking job failure status and how to handle overall job failure loops
A further review comment on the timeout documentation:

The comment has grammatical errors; it should read 'If the job runs longer than this, interrupting will be enabled.' Also, the documentation is inconsistent: it says '0 means no timeout' but the default value is -1.
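One way the wording and the default could be reconciled is sketched below, under the assumption that the setting is a JobConfiguration field named maxRunTimeSeconds with a default of -1 (as the comment states); this is a suggested rewording, not the PR's actual javadoc:

    /**
     * Max run time in seconds for a single job execution.
     *
     * <p>If the job runs longer than this, interruption of the remaining shard tasks will be enabled.
     * Any value less than or equal to 0 (the default is -1) disables the timeout.</p>
     */
    private final int maxRunTimeSeconds;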