
Commit b32581a

gh-4610 Fix expected.yml and CS
1 parent 1fc7bc9 commit b32581a

6 files changed (+72 -52 lines)

stroom-config/stroom-config-app/src/test/resources/stroom/config/app/expected.yaml (+2)

@@ -607,6 +607,8 @@ appConfig:
     refreshAfterWrite: null
   statisticsMode: "INTERNAL"
   referenceData:
+    autoPurgeEnabled: true
+    compactAfterPurgeEnabled: false
     effectiveStreamCache:
       expireAfterAccess: null
       expireAfterWrite: "PT10M"
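Note: the two keys added here correspond to the autoPurgeEnabled/compactAfterPurgeEnabled properties documented in ReferenceDataConfig.java below. expected.yaml looks to be the test fixture asserting the serialised default config, so any newly added property has to be reflected here for that test to pass.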

stroom-lmdb/src/main/java/stroom/lmdb/LmdbEnv.java (+15 -13)

@@ -607,29 +607,31 @@ private void compact(final long currSizeOnDisk) {
     tempEnvDir = Files.createTempDirectory(localDir, "lmdb-env-clone-");
     LOGGER.info("Starting compacting copy of LMDB env '{}' from {} to {}, current size on disk: {}",
             name, localDir, tempEnvDir, ByteSize.ofBytes(currSizeOnDisk));
-    DurationTimer timer = DurationTimer.start();
-    // Copy the whole env, compacting as it goes
-    env.copy(tempEnvDir.toFile(), CopyFlags.MDB_CP_COMPACT);
-    // Close the source env and its dbis
-    close();
-    // Delete the source
-    deleteEnvFiles(localDir);
-    // Move temp env back to source
-    moveEnvFiles(tempEnvDir, localDir);
-    // Open it back up
-    reOpenEnv();
+    final Path finalTempEnvDir = tempEnvDir;
+    final Duration compactDuration = DurationTimer.measure(() -> {
+        // Copy the whole env, compacting as it goes
+        env.copy(finalTempEnvDir.toFile(), CopyFlags.MDB_CP_COMPACT);
+        // Close the source env and its dbis
+        close();
+        // Delete the source
+        deleteEnvFiles(localDir);
+        // Move temp env back to source
+        moveEnvFiles(finalTempEnvDir, localDir);
+        // Open it back up
+        reOpenEnv();
+    });
 
     long newSizeOnDisk = getSizeOnDisk();
     double pct = ((1 - (((double) newSizeOnDisk) / currSizeOnDisk))) * 100;
-    DecimalFormat decimalFormat = new DecimalFormat("0.0");
+    final DecimalFormat decimalFormat = new DecimalFormat("0.0");
     // 0% bad, 100% good
     LOGGER.info(
             "Completed compacting copy of LMDB env '{}' from {} to {} in {}, " +
                     "size on disk: {} => {} (compaction {}%)",
             name,
             localDir,
             tempEnvDir,
-            timer,
+            compactDuration,
             ByteSize.ofBytes(currSizeOnDisk),
             ByteSize.ofBytes(newSizeOnDisk),
             decimalFormat.format(pct));
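Note on the pattern: DurationTimer.measure(...) scopes the timed work in a lambda and hands back the elapsed time, where the old code started a timer and logged the timer object itself. A minimal sketch of the new usage, assuming only the measure signature visible in this diff (doTimedWork is an illustrative placeholder):

    import java.time.Duration;

    // Runs the supplied block and returns the elapsed time as a Duration.
    final Duration elapsed = DurationTimer.measure(() -> {
        doTimedWork();  // placeholder for the work being timed
    });
    // java.time.Duration renders in ISO-8601 form when logged, e.g. PT2.5S
    LOGGER.info("Work completed in {}", elapsed);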

stroom-lmdb/src/test/java/stroom/lmdb/TestLmdbEnv.java (+12 -11)

@@ -116,20 +116,21 @@ void testSingleThreaded(@TempDir Path dbDir) {
 
     // We have no semaphore protection for max readers so make sure LMDB throws and
     // doesn't seg fault.
-    Assertions.assertThatThrownBy(() -> {
-        lmdbEnv.doWithReadTxn(readTxn1 -> {
-            lmdbEnv.doWithReadTxn(readTxn2 -> {
-                lmdbEnv.doWithReadTxn(readTxn3 -> {
-                    lmdbEnv.doWithReadTxn(readTxn4 -> {
-                        lmdbEnv.doWithReadTxn(readTxn5 -> {
-                            assertThat(db.get("bar"))
-                                    .hasValue("BAR");
+    Assertions.assertThatThrownBy(
+            () -> {
+                lmdbEnv.doWithReadTxn(readTxn1 -> {
+                    lmdbEnv.doWithReadTxn(readTxn2 -> {
+                        lmdbEnv.doWithReadTxn(readTxn3 -> {
+                            lmdbEnv.doWithReadTxn(readTxn4 -> {
+                                lmdbEnv.doWithReadTxn(readTxn5 -> {
+                                    assertThat(db.get("bar"))
+                                            .hasValue("BAR");
+                                });
+                            });
                         });
                     });
                 });
-            });
-        });
-    })
+            })
             .isInstanceOf(RuntimeException.class)
             .hasMessageContaining("maxreaders reached");
 }
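For context on the "maxreaders reached" assertion: LMDB caps the number of concurrent read transactions per environment, and each nested doWithReadTxn above holds a reader slot open, so stacking enough of them trips the cap. A hedged sketch of where that cap is configured, assuming the underlying lmdbjava builder API that LmdbEnv wraps (path and sizes are illustrative, not the values TestLmdbEnv uses):

    import org.lmdbjava.Env;

    import java.io.File;
    import java.nio.ByteBuffer;

    // Build an env whose reader table has only a few slots; once they are all
    // held by open read txns, the next read txn fails with MDB_READERS_FULL
    // ("maxreaders reached"). The target directory must already exist.
    final Env<ByteBuffer> env = Env.create()
            .setMapSize(10_485_760)  // 10 MiB map size (illustrative)
            .setMaxDbs(1)
            .setMaxReaders(4)        // illustrative reader cap
            .open(new File("/tmp/lmdb-demo"));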

stroom-pipeline/src/main/java/stroom/pipeline/refdata/ReferenceDataConfig.java (+24 -19)

@@ -81,27 +81,30 @@ public ReferenceDataConfig(@JsonProperty("maxPutsBeforeCommit") final int maxPut
     }
 
     @Min(0)
-    @JsonPropertyDescription("The maximum number of puts into the store (in a single load) before the " +
-            "transaction is committed. There is only one write transaction available at a time so reducing " +
-            "this value allows multiple loads to potentially each load a chunk at a time. However, load times " +
-            "increase rapidly with values below around 2,000. For maximum performance of a single load set this " +
-            "value to 0 to only commit at the very end of the load.")
+    @JsonPropertyDescription(
+            "The maximum number of puts into the store (in a single load) before the " +
+            "transaction is committed. There is only one write transaction available at a time so reducing " +
+            "this value allows multiple loads to potentially each load a chunk at a time. However, load times " +
+            "increase rapidly with values below around 2,000. For maximum performance of a single load set this " +
+            "value to 0 to only commit at the very end of the load.")
     public int getMaxPutsBeforeCommit() {
         return maxPutsBeforeCommit;
     }
 
     @Min(0)
-    @JsonPropertyDescription("The maximum number of entries in one reference stream to purge before the " +
-            "transaction is committed. A value high enough to purge all entries in one transaction is " +
-            "preferable but for large reference streams this may result in errors due to the transaction " +
-            "being too large.")
+    @JsonPropertyDescription(
+            "The maximum number of entries in one reference stream to purge before the " +
+            "transaction is committed. A value high enough to purge all entries in one transaction is " +
+            "preferable but for large reference streams this may result in errors due to the transaction " +
+            "being too large.")
     public int getMaxPurgeDeletesBeforeCommit() {
         return maxPurgeDeletesBeforeCommit;
     }
 
-    @JsonPropertyDescription("The time to retain reference data for in the off heap store. The time is taken " +
-            "from the time that the reference stream was last accessed, e.g. a lookup was made against it. " +
-            "In ISO-8601 duration format, e.g. 'P1DT12H'. Used by job '" + RefDataPurge.JOB_NAME + "'.")
+    @JsonPropertyDescription(
+            "The time to retain reference data for in the off heap store. The time is taken " +
+            "from the time that the reference stream was last accessed, e.g. a lookup was made against it. " +
+            "In ISO-8601 duration format, e.g. 'P1DT12H'. Used by job '" + RefDataPurge.JOB_NAME + "'.")
     public StroomDuration getPurgeAge() {
         return purgeAge;
     }

@@ -111,19 +114,21 @@ public boolean isAutoPurgeEnabled() {
         return autoPurgeEnabled;
     }
 
-    @JsonPropertyDescription("If true a compaction process will be run after a successful purge to free " +
-            "up disk space. If compaction is not run, space will be freed up inside the store for " +
-            "future loads of that feed, but disk space will not be freed up. For compaction to " +
-            "work, property lmdb.readerBlockedByWriter must also be set to true.")
+    @JsonPropertyDescription(
+            "If true a compaction process will be run after a successful purge to free " +
+            "up disk space. If compaction is not run, space will be freed up inside the store for " +
+            "future loads of that feed, but disk space will not be freed up. For compaction to " +
+            "work, property lmdb.readerBlockedByWriter must also be set to true.")
     public boolean isCompactAfterPurgeEnabled() {
        return compactAfterPurgeEnabled;
    }
 
     @Min(2)
     @RequiresRestart(RestartScope.SYSTEM)
-    @JsonPropertyDescription("The number of lock stripes used for preventing multiple pipeline processes " +
-            "from loading the same reference stream at the same time. Values should be a power of 2. " +
-            "Lower values will mean it is more likely for two different streams from blocking one another.")
+    @JsonPropertyDescription(
+            "The number of lock stripes used for preventing multiple pipeline processes " +
+            "from loading the same reference stream at the same time. Values should be a power of 2. " +
+            "Lower values will mean it is more likely for two different streams from blocking one another.")
     public int getLoadingLockStripes() {
         return loadingLockStripes;
     }
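Aside on the purgeAge format: the description quotes ISO-8601 durations such as 'P1DT12H'. StroomDuration is stroom's own duration type, but the quoted format is the standard java.time one, so a JDK-only illustration:

    import java.time.Duration;

    // 'P1DT12H' = 1 day + 12 hours, as cited in the property description above
    final Duration purgeAge = Duration.parse("P1DT12H");
    System.out.println(purgeAge.toHours());  // prints 36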

stroom-pipeline/src/main/java/stroom/pipeline/refdata/ReferenceDataResource.java (+6 -5)

@@ -46,8 +46,9 @@ public interface ReferenceDataResource extends RestResource {
     @Path(ENTRIES_SUB_PATH)
     @Operation(
             summary = "List entries from the reference data store on the node called.",
-            description = "This is primarily intended for small scale debugging in non-production environments. If " +
-                    "no limit is set a default limit is applied else the results will be limited to limit entries.",
+            description = "This is primarily intended for small scale debugging in non-production " +
+                    "environments. If no limit is set a default limit is applied else the results " +
+                    "will be limited to limit entries.",
             operationId = "getReferenceStoreEntries")
     List<RefStoreEntry> entries(@QueryParam("limit") final Integer limit,
                                 @QueryParam("refStreamId") final Long refStreamId,

@@ -57,9 +58,9 @@ List<RefStoreEntry> entries(@QueryParam("limit") final Integer limit,
     @Path(REF_STREAM_INFO_SUB_PATH)
     @Operation(
             summary = "List processing info entries for all ref streams",
-            description = "This is primarily intended for small scale debugging in non-production environments. If " +
-                    "no limit is set a default limit is applied else the results will be limited to limit entries. " +
-                    "Performed on this node only.",
+            description = "This is primarily intended for small scale debugging in non-production " +
+                    "environments. If no limit is set a default limit is applied else the results " +
+                    "will be limited to limit entries. Performed on this node only.",
             operationId = "getReferenceStreamProcessingInfoEntries")
     List<ProcessingInfoResponse> refStreamInfo(@QueryParam("limit") final Integer limit,
                                                @QueryParam("refStreamId") final Long refStreamId,

stroom-pipeline/src/main/java/stroom/pipeline/refdata/store/offheapstore/OffHeapStoreInfo.java (+13 -4)

@@ -137,10 +137,19 @@ public boolean equals(final Object o) {
             return false;
         }
         final OffHeapStoreInfo that = (OffHeapStoreInfo) o;
-        return sizeOnDisk == that.sizeOnDisk && sizeInUse == that.sizeInUse && osBytesFree == that.osBytesFree && osBytesTotal == that.osBytesTotal && keyValueEntries == that.keyValueEntries && rangeValueEntries == that.rangeValueEntries && streamCount == that.streamCount && distinctValuesCount == that.distinctValuesCount && infoSnapshotEpochMs == that.infoSnapshotEpochMs && Objects.equals(
-                storeName,
-                that.storeName) && Objects.equals(nodeName, that.nodeName) && Objects.equals(feedName,
-                that.feedName) && Objects.equals(localDir, that.localDir);
+        return sizeOnDisk == that.sizeOnDisk
+                && sizeInUse == that.sizeInUse
+                && osBytesFree == that.osBytesFree
+                && osBytesTotal == that.osBytesTotal
+                && keyValueEntries == that.keyValueEntries
+                && rangeValueEntries == that.rangeValueEntries
+                && streamCount == that.streamCount
+                && distinctValuesCount == that.distinctValuesCount
+                && infoSnapshotEpochMs == that.infoSnapshotEpochMs
+                && Objects.equals(storeName, that.storeName)
+                && Objects.equals(nodeName, that.nodeName)
+                && Objects.equals(feedName, that.feedName)
+                && Objects.equals(localDir, that.localDir);
     }
 
     @Override
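Worth noting alongside the reflowed equals: hashCode (outside this hunk) has to cover the same fields to keep the equals/hashCode contract intact. A sketch of the conventional shape, assuming an Objects.hash implementation (the actual method is not shown in this diff):

    @Override
    public int hashCode() {
        // Must hash exactly the fields compared in equals() above.
        return Objects.hash(sizeOnDisk, sizeInUse, osBytesFree, osBytesTotal,
                keyValueEntries, rangeValueEntries, streamCount,
                distinctValuesCount, infoSnapshotEpochMs,
                storeName, nodeName, feedName, localDir);
    }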
