Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions .github/actions/sbt/upload_logs/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,16 @@ runs:
pigz -r log/* || true
fi

- name: Move potential additional debugging artifacts to log directory
shell: bash
run: |
mkdir -p log
# When Java crashes, it sometimes saves an error report such as hs_err_pid*.log to the working directory.
mv *.log log/ || true
# Possible locations of core dumps (/ is apparently a default, but who knows)
mv core* log/ || true
mv /core* log/ || true

# Certain characters are disallowed in artifact filenames in GHA, so we need to sanitize them
- name: Sanitize filenames
# Runs in nix to have access to `rename`
Expand Down
2 changes: 1 addition & 1 deletion LATEST_RELEASE
Original file line number Diff line number Diff line change
@@ -1 +1 @@
0.4.0
0.4.1
2 changes: 1 addition & 1 deletion VERSION
Original file line number Diff line number Diff line change
@@ -1 +1 @@
0.4.1
0.4.2
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,6 @@ class AppUpgradeIntegrationTest
spliceProcs.stopBundledSplice("sv2-node")
startAllSync(sv2Backend, sv2ScanBackend, sv2ValidatorBackend)
spliceProcs.stopBundledSplice("sv3-node")
// No scan for sv3
startAllSync(sv3Backend, sv3ScanBackend, sv3ValidatorBackend)
}

Expand Down Expand Up @@ -435,7 +434,7 @@ class AppUpgradeIntegrationTest

// SV4 can join after the upgrade.
clue("SV4 can join after upgrade") {
startAllSync(sv4Backend, sv4ValidatorBackend)
startAllSync(sv4Backend, sv4ScanBackend, sv4ValidatorBackend)
}

clue("Splitwell works") {
Expand Down Expand Up @@ -657,7 +656,8 @@ object AppUpgradeIntegrationTest {
"SV1_URL" -> "http://127.0.0.1:5114",
"SV1_SCAN_URL" -> "http://127.0.0.1:5012",
"SV2_SCAN_URL" -> "http://127.0.0.1:5112",
"SV3_SCAN_URL" -> "http://127.0.0.1:5112",
"SV3_SCAN_URL" -> "http://127.0.0.1:5212",
"SV4_SCAN_URL" -> "http://127.0.0.1:5312",
).!
if (result != 0) {
throw new RuntimeException(s"Command $cmd returned: $result")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1072,7 +1072,7 @@ class DecentralizedSynchronizerMigrationIntegrationTest

val backfilledUpdates =
sv1ScanLocalBackend.appState.store.updateHistory
.getUpdates(None, includeImportUpdates = true, PageLimit.tryCreate(1000))
.getAllUpdates(None, PageLimit.tryCreate(1000))
.futureValue
backfilledUpdates.collect {
case TreeUpdateWithMigrationId(tree, migrationId)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -284,7 +284,7 @@ class ScanHistoryBackfillingIntegrationTest

sv2ScanBackend.appState.store.updateHistory
.getBackfillingState()
.futureValue should be(BackfillingState.InProgress)
.futureValue should be(BackfillingState.InProgress(false, false))
sv2ScanBackend.getBackfillingStatus().complete shouldBe false
assertThrowsAndLogsCommandFailures(
readUpdateHistoryFromScan(sv2ScanBackend),
Expand Down Expand Up @@ -500,7 +500,7 @@ class ScanHistoryBackfillingIntegrationTest
private def allUpdatesFromScanBackend(scanBackend: ScanAppBackendReference) = {
// Need to use the store directly, as the HTTP endpoint refuses to return data unless it's completely backfilled
scanBackend.appState.store.updateHistory
.getUpdates(None, includeImportUpdates = true, PageLimit.tryCreate(1000))
.getAllUpdates(None, PageLimit.tryCreate(1000))
.futureValue
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ import com.digitalasset.canton.topology.transaction.VettedPackage
import com.digitalasset.daml.lf.data.Ref.PackageId
import monocle.macros.syntax.lens.*
import org.lfdecentralizedtrust.splice.integration.plugins.TokenStandardCliSanityCheckPlugin
import org.lfdecentralizedtrust.splice.sv.config.SvOnboardingConfig.InitialPackageConfig
import org.slf4j.event.Level

import scala.math.Ordering.Implicits.*
Expand All @@ -53,7 +54,7 @@ class SvTimeBasedRewardCouponIntegrationTest
config
.focus(_.svApps)
.modify(_.map { case (name, svConfig) =>
// sv4 gives part of its reward to alice
// sv4 gives part of its reward to aliceValidator
val newConfig = if (name.unwrap == "sv4") {
val aliceParticipant =
ConfigTransforms
Expand Down Expand Up @@ -82,13 +83,41 @@ class SvTimeBasedRewardCouponIntegrationTest
_.withPausedTrigger[ReceiveFaucetCouponTrigger]
)(config)
)
.addConfigTransforms((_, config) =>
updateAutomationConfig(ConfigurableApp.Sv)(
// needs to be disabled until alice has vetted the latest packages (to be checked at the beginning of the test)
_.withPausedTrigger[ReceiveSvRewardCouponTrigger]
)(config)
)
.withTrafficTopupsDisabled

private val feesUpperBoundCC = walletUsdToAmulet(smallAmount)

"SVs" should {

"receive and claim SvRewardCoupons" in { implicit env =>
// ensure alice has vetted the latest packages
val expectedVettedPackages = ReceiveSvRewardCouponTrigger.svLatestVettedPackages(
InitialPackageConfig.defaultInitialPackageConfig.toPackageConfig
)
eventually() {
val vettedByAlice =
aliceValidatorBackend.participantClientWithAdminToken.topology.vetted_packages
.list()
.flatMap(
_.item.packages.map(_.packageId)
)
forAll(expectedVettedPackages) { expectedPackage =>
vettedByAlice should contain(expectedPackage)
}
}
// now that we know that alice has vetted the latest packages, we can resume the trigger for the rest of the test
Seq(sv1Backend, sv2Backend, sv3Backend, sv4Backend).foreach(
_.dsoAutomation
.trigger[ReceiveSvRewardCouponTrigger]
.resume()
)

val openRounds = eventually() {
val openRounds = sv1ScanBackend
.getOpenAndIssuingMiningRounds()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ trait UpdateHistoryTestUtil extends TestCommon {
updateHistoryFromParticipant(ledgerBegin, updateHistory.updateStreamParty, participant)

val recordedUpdates = updateHistory
.getUpdates(
.getAllUpdates(
Some(
(
0L,
Expand All @@ -104,7 +104,6 @@ trait UpdateHistoryTestUtil extends TestCommon {
actualUpdates.head.update.recordTime.addMicros(-1L),
)
),
includeImportUpdates = true,
PageLimit.tryCreate(actualUpdates.size),
)
.futureValue
Expand Down Expand Up @@ -139,9 +138,8 @@ trait UpdateHistoryTestUtil extends TestCommon {
scanClient: ScanAppClientReference,
): Assertion = {
val historyFromStore = scanBackend.appState.store.updateHistory
.getUpdates(
.getAllUpdates(
None,
includeImportUpdates = true,
PageLimit.tryCreate(1000),
)
.futureValue
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
-- Only SVs that joined the network at any point in the initial migration have all import updates
-- This statement was already part of the migration script V036__backfilling_import_updates.sql.
-- It needs to be executed again because Scala code for backfilling import updates
-- was reverted and re-applied between these two migrations.
-- Re-running is harmless: the statement is idempotent (it only ever sets the
-- flag to true for rows that already satisfy the condition).
update update_history_backfilling as bf
set import_updates_complete = true
where
-- only histories whose regular backfilling has already finished
bf.complete = true and
-- the history joined on its earliest recorded migration, i.e. it was present
-- from the start of known history and thus received all import updates directly.
-- NOTE(review): if a history has no rows in update_history_transactions, the
-- subquery yields NULL and the comparison is false, so the row is skipped.
bf.joining_migration_id = (
select min(migration_id)
from update_history_transactions as tx
where bf.history_id = tx.history_id
);
Original file line number Diff line number Diff line change
Expand Up @@ -285,15 +285,20 @@ object HistoryBackfilling {
* None if the given migration id is the beginning of known history.
* @param recordTimeRange All domains that produced history items in the given migration id,
* along with the record time of the newest and oldest history item associated with each domain.
* @param lastImportUpdateId The id of the last import update (where import updates are sorted by update id)
* for the given migration id, if any.
* @param complete True if the backfilling for the given migration id is complete,
* i.e., the history knows the first item for each domain in the given migration id.
* We need this to decide when the backfilling is complete, because it might be difficult to
* identify the first item of a migration otherwise.
* @param importUpdatesComplete True if the import updates for the given migration id are complete.
*/
final case class SourceMigrationInfo(
previousMigrationId: Option[Long], // None if this migration id is the beginning of known history
recordTimeRange: Map[SynchronizerId, DomainRecordTimeRange], // oldest/newest record time per domain that produced items in this migration
lastImportUpdateId: Option[String], // id of the last import update (sorted by update id) for this migration, if any
complete: Boolean, // true once the first item of each domain in this migration is known, i.e. backfilling is done
importUpdatesComplete: Boolean, // true once the import updates for this migration are complete
)

/** Information about the point at which backfilling is currently inserting data.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,39 @@ class HistoryMetrics(metricsFactory: LabeledMetricsFactory)(implicit
)(metricsContext)
}

/** Metrics tracking the progress of import-update backfilling.
  * Mirrors the structure of the sibling metric objects in this class
  * (prefix + per-metric name, shared `metricsContext`).
  */
object ImportUpdatesBackfilling {
  private val importUpdatesBackfillingPrefix: MetricName = prefix :+ "import-updates-backfilling"

  // Highest migration id whose import updates have been backfilled;
  // starts at -1 until the first import update is processed.
  val latestMigrationId: Gauge[Long] =
    metricsFactory.gauge(
      MetricInfo(
        // Fix: the gauge holds a migration id, but it was registered under the
        // metric name "latest-record-time" (copy-paste from a record-time gauge),
        // contradicting both the summary and the Scala identifier.
        name = importUpdatesBackfillingPrefix :+ "latest-migration-id",
        summary = "The migration id of the latest backfilled import update",
        Traffic,
      ),
      initial = -1L,
    )(metricsContext)

  // Running total of contracts written by import-update backfilling.
  val contractCount: Counter =
    metricsFactory.counter(
      MetricInfo(
        name = importUpdatesBackfillingPrefix :+ "contract-count",
        summary = "The number of contracts that have been backfilled",
        Traffic,
      )
    )(metricsContext)

  // 1 once import-update backfilling has finished, 0 while still in progress.
  val completed: Gauge[Int] =
    metricsFactory.gauge(
      MetricInfo(
        name = importUpdatesBackfillingPrefix :+ "completed",
        summary = "Whether it was completed (1) or not (0)",
        Debug,
      ),
      initial = 0,
    )(metricsContext)
}

object UpdateHistory {
private val updateHistoryPrefix: MetricName = prefix :+ "updates"

Expand Down
Loading
Loading