 import stroom.index.shared.LuceneIndexDoc;
 import stroom.node.api.NodeInfo;
 import stroom.security.api.SecurityContext;
+import stroom.util.NullSafe;
 import stroom.util.logging.LambdaLogger;
 import stroom.util.logging.LambdaLoggerFactory;
+import stroom.util.logging.LogUtil;
 import stroom.util.shared.ResultPage;

 import jakarta.inject.Inject;

 import java.io.UncheckedIOException;
 import java.util.ArrayList;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.ReentrantLock;
@@ -78,6 +81,7 @@ private ActiveShards create(final IndexShardKey indexShardKey) {
             throw new IndexException("Unable to find index with UUID: " + indexShardKey.getIndexUuid());
         }

+        LOGGER.debug("Creating ActiveShards for node: {}, indexShardKey: {}", nodeInfo, indexShardKey);
         return new ActiveShards(
                 nodeInfo,
                 indexShardWriterCache,
@@ -89,9 +93,24 @@ private ActiveShards create(final IndexShardKey indexShardKey) {
         });
     }

+
+    // --------------------------------------------------------------------------------
+
+
     public static class ActiveShards {

-        private static final int MAX_ATTEMPTS = 10000;
+        private static final LambdaLogger LOGGER = LambdaLoggerFactory.getLogger(ActiveShards.class);
+
+        // All shards are CLOSED on boot, but if this cache is cleared or items age off
+        // then we may have shards in other states.
+        private static final EnumSet<IndexShardStatus> REQUIRED_SHARD_STATES = EnumSet.of(
+                IndexShardStatus.NEW,
+                IndexShardStatus.OPEN,
+                IndexShardStatus.OPENING,
+                IndexShardStatus.CLOSED,
+                IndexShardStatus.CLOSING);
+
+        private static final int MAX_ATTEMPTS = 10_000;

         private final NodeInfo nodeInfo;
         private final IndexShardWriterCache indexShardWriterCache;
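The new REQUIRED_SHARD_STATES constant widens shard reuse from CLOSED only to every non-deleted, non-corrupt lifecycle state. As an aside, EnumSet suits this kind of state filter because it stores its members as a bit mask, so contains() is a constant-time bit test. A minimal standalone sketch of the pattern (the Status enum below is hypothetical, not Stroom's IndexShardStatus):

import java.util.EnumSet;

public class EnumSetFilterDemo {

    // Hypothetical shard lifecycle states, for illustration only.
    enum Status { NEW, OPENING, OPEN, CLOSING, CLOSED, DELETED, CORRUPT }

    // EnumSet.of builds a bit-mask-backed set, so contains() is O(1).
    private static final EnumSet<Status> USABLE = EnumSet.of(
            Status.NEW, Status.OPENING, Status.OPEN, Status.CLOSING, Status.CLOSED);

    public static void main(String[] args) {
        for (Status status : Status.values()) {
            System.out.println(status + " -> usable: " + USABLE.contains(status));
        }
    }
}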
@@ -130,6 +149,7 @@ public void addDocument(final IndexDocument document) {

             // Attempt under lock if we failed to add.
             if (!success) {
+                LOGGER.debug("Trying again under lock");
                 // If we failed then try under lock to make sure we get a new writer.
                 addDocumentUnderLock(document);
             }
@@ -220,13 +240,16 @@ private boolean addDocument(final IndexDocument document,

         private List<IndexShard> getIndexShards() {
             List<IndexShard> indexShards = this.indexShards;
+
             if (indexShards.size() < shardsPerPartition) {
                 indexShards = ensureShards();
             }
             return indexShards;
         }

         private synchronized List<IndexShard> ensureShards() {
+            LOGGER.debug(() -> LogUtil.message(
+                    "ensureShards, indexShards size before: {}", NullSafe.size(indexShards)));
             List<IndexShard> list = indexShards;
             if (list.size() < shardsPerPartition) {
                 list = new ArrayList<>(list);
@@ -235,10 +258,13 @@ private synchronized List<IndexShard> ensureShards() {
                 }
             }
             indexShards = list;
+            LOGGER.debug(() -> LogUtil.message(
+                    "ensureShards, indexShards size after: {}", NullSafe.size(indexShards)));
             return list;
         }

         private synchronized void addActiveShard(final IndexShardKey indexShardKey) {
+            LOGGER.debug("Adding shard for key {}", indexShardKey);
             final IndexShard indexShard = createNewShard(indexShardKey);
             final List<IndexShard> list = new ArrayList<>(indexShards);
             list.add(indexShard);
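Worth noting here is the concurrency shape of getIndexShards()/ensureShards(): an unlocked fast-path read of the current list, then a synchronized slow path that re-checks the condition and replaces the list wholesale instead of mutating it in place. A minimal sketch of that check-then-lock-then-recheck, copy-on-write pattern (class and field names are hypothetical; the sketch uses a volatile field for safe publication, whereas the real field's declaration is not visible in this diff):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class CopyOnWriteEnsureDemo {

    private static final int MIN_SIZE = 3;

    // Volatile publication so the unlocked fast path sees the latest snapshot.
    private volatile List<String> items = Collections.emptyList();

    public List<String> getItems() {
        List<String> snapshot = items;          // one unlocked read
        if (snapshot.size() < MIN_SIZE) {
            snapshot = ensureItems();           // slow path takes the lock
        }
        return snapshot;
    }

    private synchronized List<String> ensureItems() {
        List<String> list = items;
        if (list.size() < MIN_SIZE) {           // re-check under the lock
            list = new ArrayList<>(list);       // copy, never mutate the shared list
            while (list.size() < MIN_SIZE) {
                list.add("item-" + list.size());
            }
            items = list;                       // publish the new snapshot
        }
        return list;
    }

    public static void main(String[] args) {
        System.out.println(new CopyOnWriteEnsureDemo().getItems());
    }
}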
@@ -263,21 +289,34 @@ private List<IndexShard> getExistingShards(final IndexShardKey indexShardKey) {

             final List<IndexShard> indexShards = new ArrayList<>();
             final ResultPage<IndexShard> indexShardResultPage = indexShardDao.find(criteria);
+            LOGGER.debug(() -> LogUtil.message(
+                    "getExistingShards(), found {} un-filtered shards, maxDocsPerShard: {}",
+                    NullSafe.getOrElse(indexShardResultPage, ResultPage::size, 0),
+                    maxDocsPerShard));
             for (final IndexShard indexShard : indexShardResultPage.getValues()) {
                 // Look for non deleted, non-full, non-corrupt index shards.
-                if (IndexShardStatus.CLOSED.equals(indexShard.getStatus()) &&
-                        indexShard.getDocumentCount() < maxDocsPerShard) {
+                final IndexShardStatus status = indexShard.getStatus();
+                if (status != null
+                        && REQUIRED_SHARD_STATES.contains(status)
+                        && indexShard.getDocumentCount() < maxDocsPerShard) {
                     indexShards.add(indexShard);
+                } else {
+                    LOGGER.debug(() -> LogUtil.message("Ignoring shard {} with status: {}, docCount: {}",
+                            indexShard.getId(), status, indexShard.getDocumentCount()));
                 }
             }
+            LOGGER.debug(() -> LogUtil.message(
+                    "getExistingShards(), indexShards size: {}", NullSafe.size(indexShards)));
             return indexShards;
         }

         /**
          * Creates a new index shard writer for the specified key and opens a writer for it.
          */
         private IndexShard createNewShard(final IndexShardKey indexShardKey) {
-            return indexShardCreator.createIndexShard(indexShardKey, nodeInfo.getThisNodeName());
+            final String thisNodeName = nodeInfo.getThisNodeName();
+            LOGGER.debug("Creating shard for key {} on {}", indexShardKey, thisNodeName);
+            return indexShardCreator.createIndexShard(indexShardKey, thisNodeName);
         }
     }
 }
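Throughout the diff, the heavier debug statements are wrapped in a lambda, LOGGER.debug(() -> LogUtil.message(...)), so that message formatting only runs when debug logging is enabled. A rough sketch of the idea using a plain java.util.function.Supplier (the real LambdaLogger/LogUtil behaviour is assumed here, not shown in this diff):

import java.util.function.Supplier;

public class LazyLoggingDemo {

    private static final boolean DEBUG_ENABLED = false;

    // Simplified stand-in for a supplier-based debug(): the supplier is only
    // invoked when debug logging is actually enabled.
    static void debug(Supplier<String> messageSupplier) {
        if (DEBUG_ENABLED) {
            System.out.println(messageSupplier.get());
        }
    }

    public static void main(String[] args) {
        int shardCount = 5;
        // With DEBUG_ENABLED false, the String.format below never executes.
        debug(() -> String.format("ensureShards, indexShards size before: %d", shardCount));
    }
}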