From a910e4c519317e7c1941760da5d9ff99e89bc153 Mon Sep 17 00:00:00 2001 From: DA Automation Date: Fri, 6 Jun 2025 07:56:52 +0000 Subject: [PATCH 1/4] [ci] Added v2/updates/ Signed-off-by: DA Automation --- .../splice/console/ScanAppReference.scala | 27 ++- .../UpdateHistorySanityCheckPlugin.scala | 10 +- ...canHistoryBackfillingIntegrationTest.scala | 43 +++- apps/scan/src/main/openapi/scan.yaml | 174 +++++++++++++- .../client/commands/HttpScanAppClient.scala | 36 ++- .../scan/admin/http/HttpScanHandler.scala | 220 ++++++++++++------ .../scan/admin/http/ScanHttpEncodings.scala | 24 +- .../app_dev/scan_api/scan_bulk_data_api.rst | 21 +- docs/src/release_notes.rst | 3 + 9 files changed, 447 insertions(+), 111 deletions(-) diff --git a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/console/ScanAppReference.scala b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/console/ScanAppReference.scala index 069f3d1735..54b124a35a 100644 --- a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/console/ScanAppReference.scala +++ b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/console/ScanAppReference.scala @@ -25,7 +25,11 @@ import org.lfdecentralizedtrust.splice.codegen.java.splice.ans.AnsRules import org.lfdecentralizedtrust.splice.config.NetworkAppClientConfig import org.lfdecentralizedtrust.splice.environment.SpliceConsoleEnvironment import org.lfdecentralizedtrust.splice.http.v0.definitions -import org.lfdecentralizedtrust.splice.http.v0.definitions.GetDsoInfoResponse +import org.lfdecentralizedtrust.splice.http.v0.definitions.{ + GetDsoInfoResponse, + UpdateHistoryItem, + UpdateHistoryItemV2, +} import org.lfdecentralizedtrust.splice.scan.{ScanApp, ScanAppBootstrap} import org.lfdecentralizedtrust.splice.scan.automation.ScanAutomationService import org.lfdecentralizedtrust.splice.scan.admin.api.client.commands.HttpScanAppClient @@ -457,24 +461,39 @@ abstract class ScanAppReference( count: Int, after: Option[(Long, String)], lossless: Boolean, - ) = { + ): Seq[UpdateHistoryItem] = { consoleEnvironment.run { httpCommand( HttpScanAppClient.GetUpdateHistoryV0(count, after, lossless) ) } } + + @deprecated(message = "Use getUpdateHistory instead", since = "0.4.2") + def getUpdateHistoryV1( + count: Int, + after: Option[(Long, String)], + encoding: definitions.DamlValueEncoding, + ): Seq[UpdateHistoryItem] = { + consoleEnvironment.run { + httpCommand( + HttpScanAppClient.GetUpdateHistoryV1(count, after, encoding) + ) + } + } + def getUpdateHistory( count: Int, after: Option[(Long, String)], encoding: definitions.DamlValueEncoding, - ) = { + ): Seq[UpdateHistoryItemV2] = { consoleEnvironment.run { httpCommand( - HttpScanAppClient.GetUpdateHistory(count, after, encoding) + HttpScanAppClient.GetUpdateHistoryV2(count, after, encoding) ) } } + def getUpdate(updateId: String, encoding: definitions.DamlValueEncoding) = { consoleEnvironment.run { httpCommand( diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/plugins/UpdateHistorySanityCheckPlugin.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/plugins/UpdateHistorySanityCheckPlugin.scala index 6bc9216a49..6c35ab7d1a 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/plugins/UpdateHistorySanityCheckPlugin.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/plugins/UpdateHistorySanityCheckPlugin.scala @@ -7,8 +7,8 @@ import org.lfdecentralizedtrust.splice.config.SpliceConfig import 
org.lfdecentralizedtrust.splice.console.ScanAppBackendReference import org.lfdecentralizedtrust.splice.environment.SpliceEnvironment import org.lfdecentralizedtrust.splice.http.v0.definitions.DamlValueEncoding.members.CompactJson -import org.lfdecentralizedtrust.splice.http.v0.definitions.{AcsResponse, UpdateHistoryItem} -import org.lfdecentralizedtrust.splice.http.v0.definitions.UpdateHistoryItem.members +import org.lfdecentralizedtrust.splice.http.v0.definitions.{AcsResponse, UpdateHistoryItemV2} +import org.lfdecentralizedtrust.splice.http.v0.definitions.UpdateHistoryItemV2.members import org.lfdecentralizedtrust.splice.http.v0.definitions.UpdateHistoryReassignment.Event.members as reassignmentMembers import org.lfdecentralizedtrust.splice.integration.tests.SpliceTests.SpliceTestConsoleEnvironment import org.lfdecentralizedtrust.splice.scan.automation.AcsSnapshotTrigger @@ -90,13 +90,13 @@ class UpdateHistorySanityCheckPlugin( private def paginateHistory( scan: ScanAppBackendReference, after: Option[(Long, String)], - acc: Chain[UpdateHistoryItem], - ): Chain[UpdateHistoryItem] = { + acc: Chain[UpdateHistoryItemV2], + ): Chain[UpdateHistoryItemV2] = { val result = scan.getUpdateHistory(10, after, encoding = CompactJson) val newAcc = acc ++ Chain.fromSeq(result) result.lastOption match { case None => acc // done - case Some(members.UpdateHistoryTransaction(last)) => + case Some(members.UpdateHistoryTransactionV2(last)) => paginateHistory( scan, Some((last.migrationId, last.recordTime)), diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala index a9038abf64..47b6fdae2e 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala @@ -2,19 +2,13 @@ package org.lfdecentralizedtrust.splice.integration.tests import com.daml.ledger.javaapi.data.TransactionTree import org.lfdecentralizedtrust.splice.config.ConfigTransforms -import org.lfdecentralizedtrust.splice.config.ConfigTransforms.{ - ConfigurableApp, - updateAutomationConfig, -} +import org.lfdecentralizedtrust.splice.config.ConfigTransforms.{ConfigurableApp, updateAutomationConfig} import org.lfdecentralizedtrust.splice.console.ScanAppBackendReference import org.lfdecentralizedtrust.splice.environment.ledger.api.TransactionTreeUpdate import org.lfdecentralizedtrust.splice.http.v0.definitions import org.lfdecentralizedtrust.splice.http.v0.definitions.DamlValueEncoding.members.CompactJson import org.lfdecentralizedtrust.splice.integration.EnvironmentDefinition -import org.lfdecentralizedtrust.splice.integration.tests.SpliceTests.{ - IntegrationTest, - SpliceTestConsoleEnvironment, -} +import org.lfdecentralizedtrust.splice.integration.tests.SpliceTests.{IntegrationTest, SpliceTestConsoleEnvironment} import org.lfdecentralizedtrust.splice.scan.admin.http.ProtobufJsonScanHttpEncodings import org.lfdecentralizedtrust.splice.scan.automation.ScanHistoryBackfillingTrigger import org.lfdecentralizedtrust.splice.store.{PageLimit, TreeUpdateWithMigrationId} @@ -27,12 +21,14 @@ import scala.math.BigDecimal.javaBigDecimal2bigDecimal import com.digitalasset.canton.{HasActorSystem, HasExecutionContext} import 
org.lfdecentralizedtrust.splice.automation.TxLogBackfillingTrigger import org.lfdecentralizedtrust.splice.http.v0.definitions.TransactionHistoryRequest.SortOrder +import org.lfdecentralizedtrust.splice.http.v0.definitions.UpdateHistoryItemV2 import org.lfdecentralizedtrust.splice.scan.store.TxLogEntry import org.lfdecentralizedtrust.splice.store.MultiDomainAcsStore.TxLogBackfillingState import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingState import org.scalactic.source.Position import scala.annotation.nowarn +import scala.collection.immutable.SortedMap import scala.jdk.CollectionConverters.* import scala.jdk.OptionConverters.* @@ -377,6 +373,24 @@ class ScanHistoryBackfillingIntegrationTest } clue("Compare scan histories with each other using the v1 HTTP endpoint") { + // The v1 endpoint is deprecated, but we still have users using it + @nowarn("cat=deprecation") + val sv1HttpUpdates = + sv1ScanBackend.getUpdateHistoryV1(1000, None, encoding = CompactJson) + + @nowarn("cat=deprecation") + val sv2HttpUpdates = + sv2ScanBackend.getUpdateHistoryV1(1000, None, encoding = CompactJson) + + // Compare common prefix, as there might be concurrent activity + val commonLength = sv1HttpUpdates.length min sv2HttpUpdates.length + commonLength should be > 10 + val sv1Items = sv1HttpUpdates.take(commonLength) + val sv2Items = sv2HttpUpdates.take(commonLength) + sv1Items should contain theSameElementsInOrderAs sv2Items + } + + clue("Compare scan histories with each other using the v2 HTTP endpoint") { val sv1HttpUpdates = readUpdateHistoryFromScan(sv1ScanBackend) val sv2HttpUpdates = @@ -387,6 +401,19 @@ class ScanHistoryBackfillingIntegrationTest commonLength should be > 10 val sv1Items = sv1HttpUpdates.take(commonLength) val sv2Items = sv2HttpUpdates.take(commonLength) + def collectEventsById(items: Seq[UpdateHistoryItemV2]) = items.collect { + case definitions.UpdateHistoryItemV2.members.UpdateHistoryTransactionV2(http) => + http.eventsById + } + val eventsByIdSv1 = collectEventsById(sv1Items) + val eventsByIdSv2 = collectEventsById(sv2Items) + + def assertOrderedEventsById(eventsByIdSeq: Seq[SortedMap[String, definitions.TreeEvent]]) = + forAll(eventsByIdSeq) { eventsById => + eventsById.keys.toSeq should contain theSameElementsInOrderAs SortedMap.from(eventsById).keys.toSeq + } + assertOrderedEventsById(eventsByIdSv1) + assertOrderedEventsById(eventsByIdSv2) sv1Items should contain theSameElementsInOrderAs sv2Items } diff --git a/apps/scan/src/main/openapi/scan.yaml b/apps/scan/src/main/openapi/scan.yaml index b7233568d7..6d2ff01fbc 100644 --- a/apps/scan/src/main/openapi/scan.yaml +++ b/apps/scan/src/main/openapi/scan.yaml @@ -267,10 +267,79 @@ paths: application/json: schema: "$ref": "#/components/schemas/GetOpenAndIssuingMiningRoundsResponse" + /v2/updates: + post: + tags: [external, scan] + x-jvm-package: scan + operationId: "getUpdateHistoryV2" + description: | + Returns the update history in ascending order, paged, from ledger begin or optionally starting after a record time. + Compared to `/v1/updates`, the `/v2/updates` removes the `offset` field in responses, + which was hardcoded to 1 in `/v1/updates` for compatibility, and is now removed. + `/v2/updates` sorts events lexicographically in `events_by_id` by `ID` for convenience, which should not be confused with the + order of events in the transaction, for this you should rely on the order of `root_event_ids` and `child_event_ids`. + Updates are ordered lexicographically by `(migration id, record time)`. 
+ For a given migration id, each update has a unique record time. + The record time ranges of different migrations may overlap, i.e., + it is not guaranteed that the maximum record time of one migration is smaller than the minimum record time of the next migration, + and there may be two updates with the same record time but different migration ids. + requestBody: + required: true + content: + application/json: + schema: + "$ref": "#/components/schemas/UpdateHistoryRequestV2" + responses: + "200": + description: ok + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateHistoryResponseV2" + "400": + $ref: "../../../../common/src/main/openapi/common-external.yaml#/components/responses/400" + "500": + $ref: "../../../../common/src/main/openapi/common-external.yaml#/components/responses/500" + + /v2/updates/{update_id}: + get: + tags: [external, scan] + x-jvm-package: scan + operationId: "getUpdateByIdV2" + description: | + Returns the update with the given update_id. + Compared to `/v1/updates/{update_id}`, the `/v2/updates/{update_id}` removes the `offset` field in responses, + which was hardcoded to 1 in `/v1/updates/{update_id}` for compatibility, and is now removed. + `/v2/updates/{update_id}` sorts events lexicographically in `events_by_id` by `ID` for convenience, which should not be confused with the + order of events in the transaction, for this you should rely on the order of `root_event_ids` and `child_event_ids`. + parameters: + - name: "update_id" + in: "path" + required: true + schema: + type: string + - name: "daml_value_encoding" + in: "query" + schema: + $ref: "#/components/schemas/DamlValueEncoding" + responses: + "200": + description: ok + content: + application/json: + schema: + $ref: "#/components/schemas/UpdateHistoryItemV2" + "400": + $ref: "../../../../common/src/main/openapi/common-external.yaml#/components/responses/400" + "404": + $ref: "../../../../common/src/main/openapi/common-external.yaml#/components/responses/404" + "500": + $ref: "../../../../common/src/main/openapi/common-external.yaml#/components/responses/500" /v1/updates: post: - tags: [external, scan] + deprecated: true + tags: [deprecated] x-jvm-package: scan operationId: "getUpdateHistoryV1" description: | @@ -283,6 +352,7 @@ paths: The record time ranges of different migrations may overlap, i.e., it is not guaranteed that the maximum record time of one migration is smaller than the minimum record time of the next migration, and there may be two updates with the same record time but different migration ids. + The order of items in events_by_id is not defined. requestBody: required: true content: @@ -303,13 +373,15 @@ paths: /v1/updates/{update_id}: get: - tags: [external, scan] + deprecated: true + tags: [ deprecated ] x-jvm-package: scan operationId: "getUpdateByIdV1" description: | Returns the update with the given update_id. Unlike /v0/updates/{update_id}, this endpoint returns responses that are consistent across different scan instances. Event ids returned by this endpoint are not comparable to event ids returned by /v0/updates. + The order of items in events_by_id is not defined. parameters: - name: "update_id" in: "path" @@ -1387,7 +1459,7 @@ paths: x-jvm-package: scan operationId: "getUpdateHistory" description: | - **Deprecated**, use /v1/updates instead. + **Deprecated**, use /v2/updates instead. Returns the update history in ascending order, paged, from ledger begin or optionally starting after a record time. 
requestBody: required: true @@ -1414,7 +1486,7 @@ paths: x-jvm-package: scan operationId: "getUpdateById" description: | - **Deprecated**, use /v1/updates/{update_id} instead. + **Deprecated**, use /v2/updates/{update_id} instead. parameters: - name: "update_id" in: "path" @@ -2035,6 +2107,33 @@ components: format: int32 daml_value_encoding: $ref: "#/components/schemas/DamlValueEncoding" + UpdateHistoryRequestV2: + type: object + required: + - page_size + properties: + after: + $ref: "#/components/schemas/UpdateHistoryRequestAfter" + description: | + The transactions returned will either have a higher migration id or + the same migration id and a record_time greater than the migration id and record time + specified. + page_size: + description: | + The maximum number of transactions returned for this request. + type: integer + format: int32 + daml_value_encoding: + $ref: "#/components/schemas/DamlValueEncoding" + UpdateHistoryResponseV2: + type: object + required: + - transactions + properties: + transactions: + type: array + items: + $ref: "#/components/schemas/UpdateHistoryItemV2" UpdateHistoryResponse: type: object required: @@ -2044,6 +2143,13 @@ components: type: array items: $ref: "#/components/schemas/UpdateHistoryItem" + UpdateHistoryItemV2: + type: object + description: | + An individual item in the update history. May be a transaction or a contract reassignment. + oneOf: + - $ref: "#/components/schemas/UpdateHistoryTransactionV2" + - $ref: "#/components/schemas/UpdateHistoryReassignment" UpdateHistoryItem: type: object description: | @@ -2228,6 +2334,66 @@ components: type: object additionalProperties: $ref: "#/components/schemas/TreeEvent" + UpdateHistoryTransactionV2: + type: object + required: + - update_id + - migration_id + - workflow_id + - command_id + - record_time + - synchronizer_id + - effective_at + - root_event_ids + - events_by_id + properties: + update_id: + description: | + The id of the update. This is not comparable to other updates; it's + meant for correlating with server logs. + type: string + migration_id: + description: | + The migration id of the synchronizer. + type: integer + format: int64 + workflow_id: + description: | + This transaction's Daml workflow ID; a workflow ID can be associated + with multiple transactions. If empty, no workflow ID was set. + type: string + record_time: + description: | + The time at which the transaction was sequenced, with microsecond + resolution, using ISO-8601 representation. + type: string + synchronizer_id: + description: | + The id of the synchronizer through which this transaction was sequenced. + type: string + effective_at: + description: | + Ledger effective time, using ISO-8601 representation. This is the time + returned by `getTime` for all Daml executed as part of this transaction, + both by the submitting participant and all confirming participants. + type: string + root_event_ids: + description: | + Roots of the transaction tree. These are guaranteed to occur as keys + of the `events_by_id` object. + type: array + items: + type: string + events_by_id: + x-scala-map-type: "scala.collection.immutable.SortedMap" + description: | + Changes to the ledger that were caused by this transaction, keyed by ID and sorted by ID for display consistency. + Values are nodes of the transaction tree. + Within a transaction, IDs may be referenced by `root_event_ids` or + `child_event_ids` in `ExercisedEvent` herein, which are sorted in the order as they occurred in the transaction. 
+ type: object + additionalProperties: + $ref: "#/components/schemas/TreeEvent" TreeEvent: type: object description: | diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/api/client/commands/HttpScanAppClient.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/api/client/commands/HttpScanAppClient.scala index 7a1b68d719..6f17d20259 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/api/client/commands/HttpScanAppClient.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/api/client/commands/HttpScanAppClient.scala @@ -1258,7 +1258,41 @@ object HttpScanAppClient { } } - case class GetUpdateHistory( + case class GetUpdateHistoryV2( + count: Int, + after: Option[(Long, String)], + damlValueEncoding: definitions.DamlValueEncoding, + ) extends InternalBaseCommand[http.GetUpdateHistoryV2Response, Seq[ + definitions.UpdateHistoryItemV2 + ]] { + override def submitRequest( + client: http.ScanClient, + headers: List[HttpHeader], + ): EitherT[Future, Either[ + Throwable, + HttpResponse, + ], http.GetUpdateHistoryV2Response] = { + client.getUpdateHistoryV2( + // the request is the same as for V1 + definitions.UpdateHistoryRequestV2( + after = after.map { case (migrationId, recordTime) => + definitions.UpdateHistoryRequestAfter(migrationId, recordTime) + }, + pageSize = count, + damlValueEncoding = Some(damlValueEncoding), + ), + headers, + ) + } + + override def handleOk()(implicit decoder: TemplateJsonDecoder) = { + case http.GetUpdateHistoryV2Response.OK(response) => + Right(response.transactions) + } + } + + @deprecated(message = "Use GetUpdateHistory instead", since = "0.4.2") + case class GetUpdateHistoryV1( count: Int, after: Option[(Long, String)], damlValueEncoding: definitions.DamlValueEncoding, diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala index fa4d11237e..5360bbf406 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala @@ -33,10 +33,13 @@ import org.lfdecentralizedtrust.splice.environment.{ import org.lfdecentralizedtrust.splice.http.v0.definitions.{ AcsRequest, BatchListVotesByVoteRequestsRequest, + DamlValueEncoding, HoldingsStateRequest, HoldingsSummaryRequest, ListVoteResultsRequest, MaybeCachedContractWithState, + UpdateHistoryItemV2, + UpdateHistoryRequestV2, } import org.lfdecentralizedtrust.splice.http.v0.scan.ScanResource import org.lfdecentralizedtrust.splice.http.v0.{definitions, scan as v0} @@ -87,6 +90,8 @@ import org.lfdecentralizedtrust.splice.scan.config.BftSequencerConfig import org.lfdecentralizedtrust.splice.store.MultiDomainAcsStore.TxLogBackfillingState import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingState +import scala.collection.immutable.SortedMap + class HttpScanHandler( svParty: PartyId, svUserName: String, @@ -713,97 +718,158 @@ class HttpScanHandler( extracted: TraceContext, ): Future[Vector[definitions.UpdateHistoryItem]] = { implicit val tc: TraceContext = extracted - withSpan(s"$workflowId.getUpdateHistory") { _ => _ => - val updateHistory = store.updateHistory - val afterO = after.map { after => - val afterRecordTime = parseTimestamp(after.afterRecordTime) - ( - after.afterMigrationId, - afterRecordTime, - ) - } - 
updateHistory - .getBackfillingState() - .flatMap { - case BackfillingState.NotInitialized => - throw Status.UNAVAILABLE - .withDescription( - "This scan instance has not yet loaded its updates history. Wait a short time and retry." - ) - .asRuntimeException() - case BackfillingState.InProgress(_, _) => - throw Status.UNAVAILABLE - .withDescription( - "This scan instance has not yet replicated all data. This process can take an extended period of time to complete. " + - "Wait until replication is complete, or connect to a different scan instance." - ) - .asRuntimeException() - case BackfillingState.Complete => - for { - txs <- - if (includeImportUpdates) - updateHistory.getAllUpdates( - afterO, - PageLimit.tryCreate(pageSize), - ) - else - updateHistory.getUpdatesWithoutImportUpdates( - afterO, - PageLimit.tryCreate(pageSize), - ) - } yield txs - .map( - ScanHttpEncodings.encodeUpdate( - _, - encoding = encoding, - version = if (consistentResponses) ScanHttpEncodings.V1 else ScanHttpEncodings.V0, + val updateHistory = store.updateHistory + val afterO = after.map { after => + val afterRecordTime = parseTimestamp(after.afterRecordTime) + ( + after.afterMigrationId, + afterRecordTime, + ) + } + updateHistory + .getBackfillingState() + .flatMap { + case BackfillingState.NotInitialized => + throw Status.UNAVAILABLE + .withDescription( + "This scan instance has not yet loaded its updates history. Wait a short time and retry." + ) + .asRuntimeException() + case BackfillingState.InProgress(_, _) => + throw Status.UNAVAILABLE + .withDescription( + "This scan instance has not yet replicated all data. This process can take an extended period of time to complete. " + + "Wait until replication is complete, or connect to a different scan instance." + ) + .asRuntimeException() + case BackfillingState.Complete => + for { + txs <- + if (includeImportUpdates) + updateHistory.getAllUpdates( + afterO, + PageLimit.tryCreate(pageSize), ) + else + updateHistory.getUpdatesWithoutImportUpdates( + afterO, + PageLimit.tryCreate(pageSize), + ) + } yield txs + .map( + ScanHttpEncodings.encodeUpdate( + _, + encoding = encoding, + version = if (consistentResponses) ScanHttpEncodings.V1 else ScanHttpEncodings.V0, ) - .toVector - } - } + ) + .toVector + } } override def getUpdateHistory(respond: v0.ScanResource.GetUpdateHistoryResponse.type)( request: definitions.UpdateHistoryRequest )(extracted: TraceContext): Future[v0.ScanResource.GetUpdateHistoryResponse] = { - val encoding = - if (request.lossless.contains(true)) { - definitions.DamlValueEncoding.ProtobufJson - } else { - definitions.DamlValueEncoding.CompactJson - } - getUpdateHistory( - after = request.after, - pageSize = request.pageSize, - encoding = encoding, - consistentResponses = false, - // Originally this endpoint included import updates. This is changed in the V1 endpoint. - // Almost all clients will want to filter them out to prevent duplicate contracts - // (once from the actual create event and once from the import update). - // Also, all import updates have a record time of 0 and thus don't work with pagination by record time. - // In this v0 version, we keep `includeImportUpdates = true` to maintain backward compatibility. 
- includeImportUpdates = true, - extracted, - ).map( - definitions.UpdateHistoryResponse(_) - ) + implicit val tc: TraceContext = extracted + withSpan(s"$workflowId.getUpdateHistoryV0") { _ => _ => + val encoding = + if (request.lossless.contains(true)) { + definitions.DamlValueEncoding.ProtobufJson + } else { + definitions.DamlValueEncoding.CompactJson + } + getUpdateHistory( + after = request.after, + pageSize = request.pageSize, + encoding = encoding, + consistentResponses = false, + // Originally this endpoint included import updates. This is changed in the V1 endpoint. + // Almost all clients will want to filter them out to prevent duplicate contracts + // (once from the actual create event and once from the import update). + // Also, all import updates have a record time of 0 and thus don't work with pagination by record time. + // In this v0 version, we keep `includeImportUpdates = true` to maintain backward compatibility. + includeImportUpdates = true, + extracted, + ).map( + definitions.UpdateHistoryResponse(_) + ) + } } override def getUpdateHistoryV1(respond: v0.ScanResource.GetUpdateHistoryV1Response.type)( request: definitions.UpdateHistoryRequestV1 - )(extracted: TraceContext): Future[v0.ScanResource.GetUpdateHistoryV1Response] = - getUpdateHistory( - after = request.after, - pageSize = request.pageSize, - encoding = request.damlValueEncoding.getOrElse(definitions.DamlValueEncoding.CompactJson), + )(extracted: TraceContext): Future[v0.ScanResource.GetUpdateHistoryV1Response] = { + implicit val tc: TraceContext = extracted + withSpan(s"$workflowId.getUpdateHistoryV1") { _ => _ => + getUpdateHistory( + after = request.after, + pageSize = request.pageSize, + encoding = request.damlValueEncoding.getOrElse(definitions.DamlValueEncoding.CompactJson), + consistentResponses = true, + includeImportUpdates = false, + extracted, + ) + .map( + definitions.UpdateHistoryResponse(_) + ) + } + } + + override def getUpdateHistoryV2(respond: ScanResource.GetUpdateHistoryV2Response.type)( + request: UpdateHistoryRequestV2 + )(extracted: TraceContext): Future[ScanResource.GetUpdateHistoryV2Response] = { + implicit val tc: TraceContext = extracted + withSpan(s"$workflowId.getUpdateHistoryV2") { _ => _ => + getUpdateHistory( + after = request.after, + pageSize = request.pageSize, + encoding = request.damlValueEncoding.getOrElse(definitions.DamlValueEncoding.CompactJson), + consistentResponses = true, + includeImportUpdates = false, + extracted, + ) + .map(items => definitions.UpdateHistoryResponseV2(items.map(toUpdateV2))) + } + } + + private def toUpdateV2(update: definitions.UpdateHistoryItem): definitions.UpdateHistoryItemV2 = + update match { + case definitions.UpdateHistoryItem.members.UpdateHistoryReassignment(r) => + UpdateHistoryItemV2( + definitions.UpdateHistoryItemV2.members.UpdateHistoryReassignment(r) + ) + case definitions.UpdateHistoryItem.members.UpdateHistoryTransaction(t) => + UpdateHistoryItemV2( + definitions.UpdateHistoryTransactionV2( + updateId = t.updateId, + migrationId = t.migrationId, + workflowId = t.workflowId, + recordTime = t.recordTime, + synchronizerId = t.synchronizerId, + effectiveAt = t.effectiveAt, + rootEventIds = t.rootEventIds, + eventsById = SortedMap.from(t.eventsById), + ) + ) + } + + override def getUpdateByIdV2(respond: ScanResource.GetUpdateByIdV2Response.type)( + updateId: String, + damlValueEncoding: Option[DamlValueEncoding], + )(extracted: TraceContext): Future[ScanResource.GetUpdateByIdV2Response] = { + getUpdateById( + updateId = updateId, + 
encoding = damlValueEncoding.getOrElse(definitions.DamlValueEncoding.members.CompactJson), consistentResponses = true, - includeImportUpdates = false, extracted, ) - .map( - definitions.UpdateHistoryResponse(_) - ) + .map { + case Left(error) => + ScanResource.GetUpdateByIdV2Response.NotFound(error) + case Right(update) => + ScanResource.GetUpdateByIdV2Response.OK(toUpdateV2(update)) + } + } override def listActivity( respond: v0.ScanResource.ListActivityResponse.type diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/ScanHttpEncodings.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/ScanHttpEncodings.scala index 4e1e43e98c..2a38df8d9d 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/ScanHttpEncodings.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/ScanHttpEncodings.scala @@ -185,13 +185,35 @@ sealed trait ScanHttpEncodings { ) } + def httpToLapiUpdate(http: httpApi.UpdateHistoryItemV2): TreeUpdateWithMigrationId = http match { + case httpApi.UpdateHistoryItemV2.members.UpdateHistoryTransactionV2(httpTransaction) => + httpToLapiTransaction(httpTransaction) + case httpApi.UpdateHistoryItemV2.members.UpdateHistoryReassignment(httpReassignment) => + httpToLapiReassignment(httpReassignment) + } + def httpToLapiUpdate(http: httpApi.UpdateHistoryItem): TreeUpdateWithMigrationId = http match { case httpApi.UpdateHistoryItem.members.UpdateHistoryTransaction(httpTransaction) => httpToLapiTransaction(httpTransaction) case httpApi.UpdateHistoryItem.members.UpdateHistoryReassignment(httpReassignment) => httpToLapiReassignment(httpReassignment) } - + private def httpToLapiTransaction( + httpV2: httpApi.UpdateHistoryTransactionV2 + ): TreeUpdateWithMigrationId = { + val http = httpApi.UpdateHistoryTransaction( + updateId = httpV2.updateId, + migrationId = httpV2.migrationId, + workflowId = httpV2.workflowId, + recordTime = httpV2.recordTime, + synchronizerId = httpV2.synchronizerId, + effectiveAt = httpV2.effectiveAt, + offset = LegacyOffset.Api.fromLong(1L), // not used in v2 + rootEventIds = httpV2.rootEventIds, + eventsById = httpV2.eventsById, + ) + httpToLapiTransaction(http) + } private def httpToLapiTransaction( http: httpApi.UpdateHistoryTransaction ): TreeUpdateWithMigrationId = { diff --git a/docs/src/app_dev/scan_api/scan_bulk_data_api.rst b/docs/src/app_dev/scan_api/scan_bulk_data_api.rst index 01373f04f1..923b03daa5 100644 --- a/docs/src/app_dev/scan_api/scan_bulk_data_api.rst +++ b/docs/src/app_dev/scan_api/scan_bulk_data_api.rst @@ -63,9 +63,9 @@ The below table provides a quick overview of the endpoints that the Scan Bulk Da * - Endpoint - Description - * - `POST /v1/updates `_ + * - `POST /v2/updates `_ - Returns the update history - * - `GET /v0/updates/\{update_id\} `_ + * - `GET /v2/updates/\{update_id\} `_ - Returns the update with the given update_id * - `GET /v0/state/acs/snapshot-timestamp `_ - Returns the timestamp of the most recent snapshot @@ -77,7 +77,7 @@ If you would rather read the yaml Open API specification file directly, this can Example URLs for accessing the Scan Bulk Data API are: -- |gsf_scan_url|/api/scan/v1/updates +- |gsf_scan_url|/api/scan/v2/updates - |gsf_scan_url|/api/scan/v0/state/acs/snapshot-timestamp Please note the `api/scan` prefix in the URLs, which is the base path for the Scan API. 
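For orientation, below is a minimal, illustrative sketch of posting a ``/v2/updates`` request from JVM code. It is not taken from the Splice codebase: the base URL is a placeholder, the endpoint path and the required ``page_size`` field come from the ``UpdateHistoryRequestV2`` definition added in this patch, and the optional ``after`` and ``daml_value_encoding`` fields are omitted.

.. code-block:: scala

    import java.net.URI
    import java.net.http.{HttpClient, HttpRequest, HttpResponse}

    object UpdatesV2RequestSketch {
      def main(args: Array[String]): Unit = {
        // Placeholder base URL; substitute the Scan instance you are querying.
        val scanApiBase = "https://scan.example.com/api/scan"

        // Minimal body: `page_size` is the only required field of UpdateHistoryRequestV2.
        // Pagination via the optional `after` object is omitted in this sketch.
        val requestBody = """{"page_size": 100}"""

        val request = HttpRequest
          .newBuilder(URI.create(s"$scanApiBase/v2/updates"))
          .header("Content-Type", "application/json")
          .POST(HttpRequest.BodyPublishers.ofString(requestBody))
          .build()

        val response = HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString())

        // A 200 response carries a `transactions` array of UpdateHistoryItemV2 values.
        println(s"HTTP ${response.statusCode()}")
        println(response.body())
      }
    }

Any HTTP client works equally well; the JDK client is used here only to keep the sketch dependency-free.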
@@ -93,17 +93,17 @@ An update can be one of two things: They will begin to appear in the update stream as the global synchronizer introduces rolling upgrades later in 2025 or early 2026; for this reason we'll omit further details for now, you can safely ignore reassignments and only handle transactions. -`/v1/updates `_ +`/v2/updates `_ provides a JSON encoded version of the recorded update history. Once you have an ``update_id`` for a specific update, you can retrieve the details by using -`/v0/updates/\{update_id\} `_. +`/v2/updates/\{update_id\} `_. -.. _v1_updates: +.. _v2_updates: -POST /v1/updates +POST /v2/updates ^^^^^^^^^^^^^^^^ Post a paged update history request to get all updates up to ``page_size``. -Please see `POST /v1/updates `_ for more details. +Please see `POST /v2/updates `_ for more details. Requesting all updates """""""""""""""""""""" @@ -193,7 +193,6 @@ An example list of transactions response for the beginning of the network is sho "record_time": "2024-09-20T13:31:28.405180Z", "synchronizer_id": "global-domain::122084177677350389dd0710d6516f700a33fe348c5f2702dffef6d36e1dedcbfc17", "effective_at": "2024-09-20T13:31:29.552807Z", - "offset": "000000000000000001", "root_event_ids": [ "1220e04f50c4b00024dd3a225611ad96441abd854e461c144b872c0eedac1dc784c7:0", "1220e04f50c4b00024dd3a225611ad96441abd854e461c144b872c0eedac1dc784c7:1" @@ -483,7 +482,7 @@ POST /v0/state/acs/force .. note:: This is a **development environment only** endpoint, and is unavailable in production environments. During testing, the :ref:`last snapshot timestamp ` can be inconveniently old. -A production app must be able to deal with this by using :ref:`v1_updates`, but an app's ability to deal with data in the snapshot is important too. +A production app must be able to deal with this by using :ref:`v2_updates`, but an app's ability to deal with data in the snapshot is important too. Therefore, on properly-configured testing Scans, `/v0/state/acs/force `_ will cause Scan to immediately snapshot the ACS, returning the new snapshot time in the ``record_time`` property. But most environments will return an error, as this endpoint is disabled. @@ -601,7 +600,7 @@ If the ``exercised_event`` is consuming, the contract is removed from the ``acti .. note:: To build up an ACS snapshot for any ``record_time``, first get a periodic snapshot using the `/v0/state/acs `_ endpoint, store the ACS in a dictionary keyed by ``contract_id`` - and then process the updates from the timestamp of that snapshot via the ``/v1/updates``, adding + and then process the updates from the timestamp of that snapshot via the ``/v2/updates``, adding ``created_event``\ s to the dictionary under its ``contract_id`` key and remove the contract from the dictionary by ``contract_id`` if ``exercised_event``\ s are consuming. diff --git a/docs/src/release_notes.rst b/docs/src/release_notes.rst index d5e6b49a19..57302e4ab1 100644 --- a/docs/src/release_notes.rst +++ b/docs/src/release_notes.rst @@ -15,6 +15,9 @@ Upcoming - Fix a typo in the `splice-participant` Helm chart that caused the participant container to be named `participant-1` instead of `participant`. - Java 21 replaces Java 17 in all Docker images and as the base JDK for building Splice apps. + - `/v2/updates` endpoints are now available on the Scan app, `/v1/updates` endpoints are deprecated. + The `/v2/updates` endpoints no longer return the `offset` field in responses, + and `events_by_id` are now lexicographically ordered by ID for conveniently viewing JSON results. 
0.4.1 ----- From 84d6a5e8eefd10abc0fd36bbec5004ec7b915085 Mon Sep 17 00:00:00 2001 From: Raymond Roestenburg <98821776+ray-roestenburg-da@users.noreply.github.com> Date: Wed, 11 Jun 2025 00:18:55 +0200 Subject: [PATCH 2/4] Update apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala Co-authored-by: Stephen Compall Signed-off-by: Raymond Roestenburg <98821776+ray-roestenburg-da@users.noreply.github.com> --- .../tests/ScanHistoryBackfillingIntegrationTest.scala | 9 --------- 1 file changed, 9 deletions(-) diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala index 47b6fdae2e..7e30ab37c4 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala @@ -405,15 +405,6 @@ class ScanHistoryBackfillingIntegrationTest case definitions.UpdateHistoryItemV2.members.UpdateHistoryTransactionV2(http) => http.eventsById } - val eventsByIdSv1 = collectEventsById(sv1Items) - val eventsByIdSv2 = collectEventsById(sv2Items) - - def assertOrderedEventsById(eventsByIdSeq: Seq[SortedMap[String, definitions.TreeEvent]]) = - forAll(eventsByIdSeq) { eventsById => - eventsById.keys.toSeq should contain theSameElementsInOrderAs SortedMap.from(eventsById).keys.toSeq - } - assertOrderedEventsById(eventsByIdSv1) - assertOrderedEventsById(eventsByIdSv2) sv1Items should contain theSameElementsInOrderAs sv2Items } From 114526406feab8ad093a870d20e1f05385340242 Mon Sep 17 00:00:00 2001 From: Raymond Roestenburg <98821776+ray-roestenburg-da@users.noreply.github.com> Date: Wed, 11 Jun 2025 00:19:17 +0200 Subject: [PATCH 3/4] Update apps/scan/src/main/openapi/scan.yaml Co-authored-by: Stephen Compall Signed-off-by: Raymond Roestenburg <98821776+ray-roestenburg-da@users.noreply.github.com> --- apps/scan/src/main/openapi/scan.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/scan/src/main/openapi/scan.yaml b/apps/scan/src/main/openapi/scan.yaml index 6d2ff01fbc..07631d8525 100644 --- a/apps/scan/src/main/openapi/scan.yaml +++ b/apps/scan/src/main/openapi/scan.yaml @@ -2387,7 +2387,7 @@ components: events_by_id: x-scala-map-type: "scala.collection.immutable.SortedMap" description: | - Changes to the ledger that were caused by this transaction, keyed by ID and sorted by ID for display consistency. + Changes to the ledger that were caused by this transaction, keyed by ID and sorted lexicographically by ID for display consistency. Values are nodes of the transaction tree. Within a transaction, IDs may be referenced by `root_event_ids` or `child_event_ids` in `ExercisedEvent` herein, which are sorted in the order as they occurred in the transaction. From 5de7411acdd69a54ed582cb7abe036d8d614397f Mon Sep 17 00:00:00 2001 From: DA Automation Date: Wed, 11 Jun 2025 00:22:13 +0200 Subject: [PATCH 4/4] [ci] Fix formatting. 
Signed-off-by: DA Automation --- ...canHistoryBackfillingIntegrationTest.scala | 16 +-- .../client/commands/HttpScanAppClient.scala | 20 +-- .../scan/admin/http/HttpScanHandler.scala | 127 ++++++++++-------- 3 files changed, 88 insertions(+), 75 deletions(-) diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala index 7e30ab37c4..4f44bc9e91 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala @@ -2,13 +2,19 @@ package org.lfdecentralizedtrust.splice.integration.tests import com.daml.ledger.javaapi.data.TransactionTree import org.lfdecentralizedtrust.splice.config.ConfigTransforms -import org.lfdecentralizedtrust.splice.config.ConfigTransforms.{ConfigurableApp, updateAutomationConfig} +import org.lfdecentralizedtrust.splice.config.ConfigTransforms.{ + ConfigurableApp, + updateAutomationConfig, +} import org.lfdecentralizedtrust.splice.console.ScanAppBackendReference import org.lfdecentralizedtrust.splice.environment.ledger.api.TransactionTreeUpdate import org.lfdecentralizedtrust.splice.http.v0.definitions import org.lfdecentralizedtrust.splice.http.v0.definitions.DamlValueEncoding.members.CompactJson import org.lfdecentralizedtrust.splice.integration.EnvironmentDefinition -import org.lfdecentralizedtrust.splice.integration.tests.SpliceTests.{IntegrationTest, SpliceTestConsoleEnvironment} +import org.lfdecentralizedtrust.splice.integration.tests.SpliceTests.{ + IntegrationTest, + SpliceTestConsoleEnvironment, +} import org.lfdecentralizedtrust.splice.scan.admin.http.ProtobufJsonScanHttpEncodings import org.lfdecentralizedtrust.splice.scan.automation.ScanHistoryBackfillingTrigger import org.lfdecentralizedtrust.splice.store.{PageLimit, TreeUpdateWithMigrationId} @@ -21,14 +27,12 @@ import scala.math.BigDecimal.javaBigDecimal2bigDecimal import com.digitalasset.canton.{HasActorSystem, HasExecutionContext} import org.lfdecentralizedtrust.splice.automation.TxLogBackfillingTrigger import org.lfdecentralizedtrust.splice.http.v0.definitions.TransactionHistoryRequest.SortOrder -import org.lfdecentralizedtrust.splice.http.v0.definitions.UpdateHistoryItemV2 import org.lfdecentralizedtrust.splice.scan.store.TxLogEntry import org.lfdecentralizedtrust.splice.store.MultiDomainAcsStore.TxLogBackfillingState import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingState import org.scalactic.source.Position import scala.annotation.nowarn -import scala.collection.immutable.SortedMap import scala.jdk.CollectionConverters.* import scala.jdk.OptionConverters.* @@ -401,10 +405,6 @@ class ScanHistoryBackfillingIntegrationTest commonLength should be > 10 val sv1Items = sv1HttpUpdates.take(commonLength) val sv2Items = sv2HttpUpdates.take(commonLength) - def collectEventsById(items: Seq[UpdateHistoryItemV2]) = items.collect { - case definitions.UpdateHistoryItemV2.members.UpdateHistoryTransactionV2(http) => - http.eventsById - } sv1Items should contain theSameElementsInOrderAs sv2Items } diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/api/client/commands/HttpScanAppClient.scala 
b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/api/client/commands/HttpScanAppClient.scala index 6f17d20259..cde52db0c5 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/api/client/commands/HttpScanAppClient.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/api/client/commands/HttpScanAppClient.scala @@ -1259,16 +1259,16 @@ object HttpScanAppClient { } case class GetUpdateHistoryV2( - count: Int, - after: Option[(Long, String)], - damlValueEncoding: definitions.DamlValueEncoding, - ) extends InternalBaseCommand[http.GetUpdateHistoryV2Response, Seq[ - definitions.UpdateHistoryItemV2 - ]] { - override def submitRequest( - client: http.ScanClient, - headers: List[HttpHeader], - ): EitherT[Future, Either[ + count: Int, + after: Option[(Long, String)], + damlValueEncoding: definitions.DamlValueEncoding, + ) extends InternalBaseCommand[http.GetUpdateHistoryV2Response, Seq[ + definitions.UpdateHistoryItemV2 + ]] { + override def submitRequest( + client: http.ScanClient, + headers: List[HttpHeader], + ): EitherT[Future, Either[ Throwable, HttpResponse, ], http.GetUpdateHistoryV2Response] = { diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala index 5360bbf406..9158883bb9 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala @@ -853,24 +853,6 @@ class HttpScanHandler( ) } - override def getUpdateByIdV2(respond: ScanResource.GetUpdateByIdV2Response.type)( - updateId: String, - damlValueEncoding: Option[DamlValueEncoding], - )(extracted: TraceContext): Future[ScanResource.GetUpdateByIdV2Response] = { - getUpdateById( - updateId = updateId, - encoding = damlValueEncoding.getOrElse(definitions.DamlValueEncoding.members.CompactJson), - consistentResponses = true, - extracted, - ) - .map { - case Left(error) => - ScanResource.GetUpdateByIdV2Response.NotFound(error) - case Right(update) => - ScanResource.GetUpdateByIdV2Response.OK(toUpdateV2(update)) - } - } - override def listActivity( respond: v0.ScanResource.ListActivityResponse.type )( @@ -1484,24 +1466,22 @@ class HttpScanHandler( extracted: TraceContext, ): Future[Either[definitions.ErrorResponse, definitions.UpdateHistoryItem]] = { implicit val tc = extracted - withSpan(s"$workflowId.getUpdateById") { _ => _ => - for { - tx <- store.updateHistory.getUpdate(updateId) - } yield { - tx.fold[Either[definitions.ErrorResponse, definitions.UpdateHistoryItem]]( - Left( - definitions.ErrorResponse(s"Transaction with id $updateId not found") - ) - )(txWithMigration => - Right( - ScanHttpEncodings.encodeUpdate( - txWithMigration, - encoding = encoding, - version = if (consistentResponses) ScanHttpEncodings.V1 else ScanHttpEncodings.V0, - ) + for { + tx <- store.updateHistory.getUpdate(updateId) + } yield { + tx.fold[Either[definitions.ErrorResponse, definitions.UpdateHistoryItem]]( + Left( + definitions.ErrorResponse(s"Transaction with id $updateId not found") + ) + )(txWithMigration => + Right( + ScanHttpEncodings.encodeUpdate( + txWithMigration, + encoding = encoding, + version = if (consistentResponses) ScanHttpEncodings.V1 else ScanHttpEncodings.V0, ) ) - } + ) } } @@ -1510,18 +1490,27 @@ class HttpScanHandler( )(updateId: String, lossless: Option[Boolean])( extracted: 
TraceContext ): Future[ScanResource.GetUpdateByIdResponse] = { - val encoding = if (lossless.getOrElse(false)) { - definitions.DamlValueEncoding.ProtobufJson - } else { - definitions.DamlValueEncoding.CompactJson - } - getUpdateById(updateId = updateId, encoding = encoding, consistentResponses = false, extracted) - .map { - case Left(error) => - ScanResource.GetUpdateByIdResponse.NotFound(error) - case Right(update) => - ScanResource.GetUpdateByIdResponse.OK(update) + implicit val tc = extracted + // in openAPI the operationID for /v0/updates/{update_id} is `getUpdateById`, logging as `getUpdateByIdV0` for clarity + withSpan(s"$workflowId.getUpdateByIdV0") { _ => _ => + val encoding = if (lossless.getOrElse(false)) { + definitions.DamlValueEncoding.ProtobufJson + } else { + definitions.DamlValueEncoding.CompactJson } + getUpdateById( + updateId = updateId, + encoding = encoding, + consistentResponses = false, + extracted, + ) + .map { + case Left(error) => + ScanResource.GetUpdateByIdResponse.NotFound(error) + case Right(update) => + ScanResource.GetUpdateByIdResponse.OK(update) + } + } } override def getUpdateByIdV1( @@ -1529,18 +1518,42 @@ class HttpScanHandler( )(updateId: String, damlValueEncoding: Option[definitions.DamlValueEncoding])( extracted: TraceContext ): Future[ScanResource.GetUpdateByIdV1Response] = { - getUpdateById( - updateId = updateId, - encoding = damlValueEncoding.getOrElse(definitions.DamlValueEncoding.members.CompactJson), - consistentResponses = true, - extracted, - ) - .map { - case Left(error) => - ScanResource.GetUpdateByIdV1Response.NotFound(error) - case Right(update) => - ScanResource.GetUpdateByIdV1Response.OK(update) - } + implicit val tc = extracted + withSpan(s"$workflowId.getUpdateByIdV1") { _ => _ => + getUpdateById( + updateId = updateId, + encoding = damlValueEncoding.getOrElse(definitions.DamlValueEncoding.members.CompactJson), + consistentResponses = true, + extracted, + ) + .map { + case Left(error) => + ScanResource.GetUpdateByIdV1Response.NotFound(error) + case Right(update) => + ScanResource.GetUpdateByIdV1Response.OK(update) + } + } + } + + override def getUpdateByIdV2(respond: ScanResource.GetUpdateByIdV2Response.type)( + updateId: String, + damlValueEncoding: Option[DamlValueEncoding], + )(extracted: TraceContext): Future[ScanResource.GetUpdateByIdV2Response] = { + implicit val tc = extracted + withSpan(s"$workflowId.getUpdateByIdV2") { _ => _ => + getUpdateById( + updateId = updateId, + encoding = damlValueEncoding.getOrElse(definitions.DamlValueEncoding.members.CompactJson), + consistentResponses = true, + extracted, + ) + .map { + case Left(error) => + ScanResource.GetUpdateByIdV2Response.NotFound(error) + case Right(update) => + ScanResource.GetUpdateByIdV2Response.OK(toUpdateV2(update)) + } + } } private def ensureValidRange[T](start: Long, end: Long, maxRounds: Int)(