 package com.dtolabs.rundeck.plugin.resources.ec2;
 
 import com.amazonaws.services.ec2.AmazonEC2;
+import com.amazonaws.services.ec2.AmazonEC2Client;
 import com.amazonaws.services.ec2.model.*;
 import com.dtolabs.rundeck.core.common.INodeEntry;
 import com.dtolabs.rundeck.core.common.NodeEntryImpl;
@@ -33,6 +34,7 @@
 import org.slf4j.LoggerFactory;
 
 import java.util.*;
+import java.util.concurrent.*;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
@@ -69,28 +71,67 @@ class InstanceToNodeMapper {
      * Perform the query and return the set of instances
      *
      */
-    public NodeSetImpl performQuery() {
+    public NodeSetImpl performQuery(boolean queryNodeInstancesInParallel) {
         final NodeSetImpl nodeSet = new NodeSetImpl();
 
         Set<Instance> instances = new HashSet<>();
 
         DescribeInstancesRequest request = new DescribeInstancesRequest().withFilters(buildFilters()).withMaxResults(maxResults);
 
         if(getEndpoint() != null) {
-            for (String endpoint : determineEndpoints()) {
-                AmazonEC2 ec2 = ec2Supplier.getEC2ForEndpoint(endpoint);
-                zones = ec2.describeAvailabilityZones();
-
-                final Set<Instance> newInstances = addExtraMappingAttribute(ec2, query(ec2, request));
-
-                if (newInstances != null && !newInstances.isEmpty()) {
-                    instances.addAll(newInstances);
+            ExecutorService executor = null;
+            Collection<Future<Set<Instance>>> futures = new LinkedList<Future<Set<Instance>>>();
+            Set<Callable<Set<Instance>>> tasks = new HashSet<>();
+            List<String> endpoints = determineEndpoints();
+            for (String endpoint : endpoints) {
+                if(queryNodeInstancesInParallel) {
+                    if(executor == null){
+                        logger.info("Creating thread pool for {} regions", endpoints.size());
+                        executor = Executors.newFixedThreadPool(endpoints.size());
+                    }
+                    tasks.add(new Callable<Set<Instance>>() {
+                        @Override
+                        public Set<Instance> call() throws Exception {
+                            return getInstancesByRegion(endpoint);
+                        }
+                    });
+                } else {
+                    instances.addAll(getInstancesByRegion(endpoint));
+                }
+            }
+            if(queryNodeInstancesInParallel) {
+                try {
+                    logger.info("Querying {} regions in parallel", endpoints.size());
+                    futures = executor.invokeAll(tasks);
+                } catch (InterruptedException e) {
+                    throw new RuntimeException(e);
+                } finally {
+                    try {
+                        for (Future<Set<Instance>> future : futures) {
+                            if (future != null) {
+                                instances.addAll(future.get());
+                            }
+                        }
+                        logger.info("Finished querying {} regions in parallel", endpoints.size());
+                        executor.shutdown();
+                    } catch (InterruptedException e) {
+                        throw new RuntimeException(e);
+                    } catch (ExecutionException e) {
+                        throw new RuntimeException(e);
+                    }
                 }
-
                 try {
-                    Thread.sleep(100);
-                } catch (InterruptedException ex) {
+                    // Wait for 90 seconds for all tasks to finish
+                    logger.info("Waiting for {} seconds for all tasks to finish", 90);
+                    executor.awaitTermination(90, TimeUnit.SECONDS);
+                } catch (InterruptedException ignored) {
+                    // Restore the thread's interrupted status
+                    logger.warn("Thread interrupted while waiting for tasks to finish", ignored);
                     Thread.currentThread().interrupt();
+                } finally {
+                    // Force shutdown if not already done
+                    logger.warn("Forcing shutdown of thread pool");
+                    executor.shutdownNow();
                 }
             }
         }
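For reference, here is a minimal self-contained sketch of the fan-out pattern this hunk introduces: one Callable per region endpoint, a fixed pool sized to the endpoint list, invokeAll to run the tasks, and a bounded awaitTermination with shutdownNow as a backstop. The ParallelRegionQuerySketch class, its fetchRegion helper, and the String results are placeholders for getInstancesByRegion and the EC2 Instance sets; unlike the patch, which rethrows failures as RuntimeException, this sketch logs a failed region and keeps the results from the others.

```java
import java.util.*;
import java.util.concurrent.*;

public class ParallelRegionQuerySketch {

    // Placeholder for getInstancesByRegion(endpoint); returns dummy data here.
    static Set<String> fetchRegion(String endpoint) {
        return Collections.singleton("instance-from-" + endpoint);
    }

    public static void main(String[] args) throws InterruptedException {
        List<String> endpoints = Arrays.asList(
                "ec2.us-east-1.amazonaws.com", "ec2.eu-west-1.amazonaws.com");
        ExecutorService executor = Executors.newFixedThreadPool(endpoints.size());
        Set<String> instances = new HashSet<>();
        try {
            // One task per region endpoint; invokeAll blocks until every task completes.
            List<Callable<Set<String>>> tasks = new ArrayList<>();
            for (String endpoint : endpoints) {
                tasks.add(() -> fetchRegion(endpoint));
            }
            for (Future<Set<String>> future : executor.invokeAll(tasks)) {
                try {
                    instances.addAll(future.get());
                } catch (ExecutionException e) {
                    // One failing region does not discard results from the others.
                    System.err.println("Region query failed: " + e.getCause());
                }
            }
        } finally {
            executor.shutdown();
            // Bounded wait mirrors the 90-second cap in the patch, then force shutdown.
            if (!executor.awaitTermination(90, TimeUnit.SECONDS)) {
                executor.shutdownNow();
            }
        }
        System.out.println(instances);
    }
}
```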
@@ -137,6 +178,21 @@ private List<String> determineEndpoints() {
         return endpoints;
     }
 
+    private Set<Instance> getInstancesByRegion(String endpoint) {
+        Set<Instance> allInstances = new HashSet<>();
+        AmazonEC2 ec2 = ec2Supplier.getEC2ForEndpoint(endpoint);
+        zones = ec2.describeAvailabilityZones();
+        final ArrayList<Filter> filters = buildFilters();
+
+        final Set<Instance> newInstances = addExtraMappingAttribute(ec2, query(ec2, new DescribeInstancesRequest().withFilters(filters).withMaxResults(maxResults)));
+
+        if (newInstances != null && !newInstances.isEmpty()) {
+            allInstances.addAll(newInstances);
+        }
+
+        return allInstances;
+    }
+
     private Set<Instance> query(final AmazonEC2 ec2, final DescribeInstancesRequest request) {
         //create "running" filter
         final Set<Instance> instances = new HashSet<>();
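The new getInstancesByRegion helper delegates the actual DescribeInstances call to the existing query() method above. For comparison, here is a rough standalone sketch of a single per-region query against the v1 AWS SDK, paging with NextToken; the region name, filter, and page size are illustrative, and the client is built directly rather than through the plugin's ec2Supplier.

```java
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;
import com.amazonaws.services.ec2.model.*;

import java.util.HashSet;
import java.util.Set;

public class DescribeInstancesSketch {

    // Rough standalone equivalent of one per-region query, following the
    // NextToken until every matching reservation has been read.
    static Set<Instance> describeRunningInstances(String region) {
        AmazonEC2 ec2 = AmazonEC2ClientBuilder.standard().withRegion(region).build();
        DescribeInstancesRequest request = new DescribeInstancesRequest()
                .withFilters(new Filter("instance-state-name").withValues("running"))
                .withMaxResults(100);
        Set<Instance> instances = new HashSet<>();
        String token = null;
        do {
            request.setNextToken(token);
            DescribeInstancesResult result = ec2.describeInstances(request);
            for (Reservation reservation : result.getReservations()) {
                instances.addAll(reservation.getInstances());
            }
            token = result.getNextToken();
        } while (token != null);
        return instances;
    }
}
```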