@@ -0,0 +1,11 @@
ALTER TABLE validator_internal_config
RENAME TO key_value_store;

ALTER TABLE key_value_store
RENAME COLUMN config_key TO key;

ALTER TABLE key_value_store
RENAME COLUMN config_value TO value;

ALTER TABLE key_value_store
RENAME CONSTRAINT uc_validator_internal_config TO uc_key_value_store;
@@ -0,0 +1,72 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package org.lfdecentralizedtrust.splice.store

import cats.data.OptionT
import cats.implicits.catsSyntaxOptionId
import com.digitalasset.canton.lifecycle.CloseContext
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.resource.DbStorage
import com.digitalasset.canton.tracing.TraceContext
import io.circe.{Decoder, Encoder}
import org.lfdecentralizedtrust.splice.store.db.{DbKeyValueStore, StoreDescriptor}

import scala.concurrent.{ExecutionContext, Future}

trait KeyValueStore extends NamedLogging {

  def setValue[T](key: String, value: T)(implicit
      tc: TraceContext,
      encoder: Encoder[T],
  ): Future[Unit]

  def getValue[T](
      key: String
  )(implicit tc: TraceContext, decoder: Decoder[T]): OptionT[Future, Decoder.Result[T]]

  def deleteKey(key: String)(implicit
      tc: TraceContext
  ): Future[Unit]

  def readValueAndLogOnDecodingFailure[T: Decoder](
      key: String
  )(implicit tc: TraceContext, ec: ExecutionContext): OptionT[Future, T] = {
    getValue[T](key)
      .subflatMap(
        _.fold(
          { failure =>
            logger.warn(s"Failed to decode $key from the db: $failure")
            None
          },
          _.some,
        )
      )
  }

}

object KeyValueStore {

  def apply(
      descriptor: StoreDescriptor,
      storage: DbStorage,
      loggerFactory: NamedLoggerFactory,
  )(implicit
      ec: ExecutionContext,
      lc: ErrorLoggingContext,
      cc: CloseContext,
      tc: TraceContext,
  ): Future[KeyValueStore] = {
    storage match {
      case storage: DbStorage =>
        DbKeyValueStore(
          descriptor,
          storage,
          loggerFactory,
        )

      case storageType => throw new RuntimeException(s"Unsupported storage type $storageType")
    }
  }
}
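
For orientation, a minimal usage sketch of the new interface, not taken from this PR: it wires KeyValueStore.apply to a DbStorage and round-trips a Long through circe. The function name, the key string, and the descriptor's version and name are hypothetical placeholders; the implicit contexts are assumed to be available from the surrounding node setup.

import scala.concurrent.{ExecutionContext, Future}

import com.digitalasset.canton.lifecycle.CloseContext
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory}
import com.digitalasset.canton.resource.DbStorage
import com.digitalasset.canton.topology.{ParticipantId, PartyId}
import com.digitalasset.canton.tracing.TraceContext
import org.lfdecentralizedtrust.splice.store.KeyValueStore
import org.lfdecentralizedtrust.splice.store.db.StoreDescriptor

object KeyValueStoreUsageSketch {

  // Stores and re-reads a Long under a fixed key; circe's built-in codecs for
  // Long satisfy the Encoder/Decoder implicits required by setValue/getValue.
  def lastProcessedRound(
      storage: DbStorage,
      party: PartyId,
      participant: ParticipantId,
      loggerFactory: NamedLoggerFactory,
  )(implicit
      ec: ExecutionContext,
      lc: ErrorLoggingContext,
      cc: CloseContext,
      tc: TraceContext,
  ): Future[Option[Long]] =
    for {
      // Resolves the store id for this descriptor and returns a DbKeyValueStore
      // behind the KeyValueStore interface.
      store <- KeyValueStore(
        StoreDescriptor(
          version = 1, // placeholder version
          name = "ExampleKeyValueStore", // placeholder name
          party = party,
          participant = participant,
          key = Map.empty,
        ),
        storage,
        loggerFactory,
      )
      _ <- store.setValue("last-processed-round", 42L)
      // readValueAndLogOnDecodingFailure warns about and drops values that fail
      // to decode; .value turns the OptionT into a Future[Option[Long]].
      round <- store.readValueAndLogOnDecodingFailure[Long]("last-processed-round").value
    } yield round
}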
@@ -1,50 +1,47 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package org.lfdecentralizedtrust.splice.validator.store.db
package org.lfdecentralizedtrust.splice.store.db

import cats.data.OptionT
import com.digitalasset.canton.discard.Implicits.DiscardOps
import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown}
import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging}
import com.digitalasset.canton.discard.Implicits.DiscardOps
import com.digitalasset.canton.resource.DbStorage
import com.digitalasset.canton.tracing.TraceContext
import org.lfdecentralizedtrust.splice.util.FutureUnlessShutdownUtil.futureUnlessShutdownToFuture
import com.digitalasset.canton.topology.{ParticipantId, PartyId}
import org.lfdecentralizedtrust.splice.validator.store.ValidatorInternalStore
import io.circe.syntax.EncoderOps
import io.circe.{Decoder, Encoder, Json}
import org.lfdecentralizedtrust.splice.store.db.AcsJdbcTypes
import org.lfdecentralizedtrust.splice.store.KeyValueStore
import org.lfdecentralizedtrust.splice.util.FutureUnlessShutdownUtil.futureUnlessShutdownToFuture
import slick.jdbc.JdbcProfile
import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.actionBasedSQLInterpolationCanton

import scala.concurrent.{ExecutionContext, Future}
import org.lfdecentralizedtrust.splice.store.db.{StoreDescriptor, StoreDescriptorStore}

class DbValidatorInternalStore private (
class DbKeyValueStore private (
storage: DbStorage,
val storeId: Int,
val loggerFactory: NamedLoggerFactory,
)(implicit
val ec: ExecutionContext,
val loggingContext: ErrorLoggingContext,
val closeContext: CloseContext,
) extends ValidatorInternalStore
) extends KeyValueStore
with AcsJdbcTypes
with NamedLogging {

val profile: JdbcProfile = storage.profile.jdbc

override def setConfig[T](key: String, value: T)(implicit
override def setValue[T](key: String, value: T)(implicit
tc: TraceContext,
encoder: Encoder[T],
): Future[Unit] = {
val jsonValue: Json = value.asJson

val action = sql"""INSERT INTO validator_internal_config (config_key, config_value, store_id)
val action = sql"""INSERT INTO key_value_store (key, value, store_id)
Contributor: changing the table name, what about existing data? (does this matter for the validator store version?)

Contributor: We're only renaming a table and some columns/constraints, that shouldn't affect existing data, no?

Contributor (Author): The table itself is also renamed accordingly in flyway, so data would be preserved.

Contributor: thanks, missed that (flyway)

VALUES ($key, $jsonValue, $storeId)
ON CONFLICT (store_id, config_key) DO UPDATE
SET config_value = excluded.config_value""".asUpdate

ON CONFLICT (store_id, key) DO UPDATE
SET value = excluded.value""".asUpdate
val updateAction = storage.update(action, "set-validator-internal-config")

logger.debug(
@@ -53,15 +50,15 @@ class DbValidatorInternalStore private (
updateAction.map(_ => ())
}

override def getConfig[T](
override def getValue[T](
key: String
)(implicit tc: TraceContext, decoder: Decoder[T]): OptionT[Future, Decoder.Result[T]] = {

logger.debug(s"Retrieving config key $key")

val queryAction = sql"""SELECT config_value
FROM validator_internal_config
WHERE config_key = $key AND store_id = $storeId
val queryAction = sql"""SELECT value
FROM key_value_store
WHERE key = $key AND store_id = $storeId
""".as[Json].headOption

val jsonOptionT: OptionT[FutureUnlessShutdown, Json] =
@@ -87,50 +84,38 @@
OptionT(futureUnlessShutdownToFuture(resultOptionT.value))
}

override def deleteConfig(key: String)(implicit
override def deleteKey(key: String)(implicit
tc: TraceContext
): Future[Unit] = {
logger.debug(
s"Deleting config key $key"
)
storage
.update(
sqlu"delete from validator_internal_config WHERE config_key = $key AND store_id = $storeId",
sqlu"delete from key_value_store WHERE key = $key AND store_id = $storeId",
"delete config key",
)
.map(_.discard)
}
}

object DbValidatorInternalStore {
object DbKeyValueStore {

def apply(
participant: ParticipantId,
validatorParty: PartyId,
storeDescriptor: StoreDescriptor,
storage: DbStorage,
loggerFactory: NamedLoggerFactory,
)(implicit
ec: ExecutionContext,
lc: ErrorLoggingContext,
cc: CloseContext,
tc: TraceContext,
): Future[DbValidatorInternalStore] = {
): Future[DbKeyValueStore] = {

StoreDescriptorStore
.getStoreIdForDescriptor(
StoreDescriptor(
version = 2,
name = "DbValidatorInternalConfigStore",
party = validatorParty,
participant = participant,
key = Map(
"validatorParty" -> validatorParty.toProtoPrimitive
),
),
storage,
)
.getStoreIdForDescriptor(storeDescriptor, storage)
.map(storeId => {
new DbValidatorInternalStore(
new DbKeyValueStore(
storage,
storeId,
loggerFactory,
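
Because values are stored as circe Json in the value column, any type with an Encoder/Decoder pair can go through the store. A hedged sketch under the assumption that circe-generic's semiauto derivation is available on the classpath; the MigrationMarker case class, its fields, and the key used are hypothetical, not from this PR.

import io.circe.{Decoder, Encoder}
import io.circe.generic.semiauto.{deriveDecoder, deriveEncoder}

// Hypothetical payload: serialized as a JSON object such as
// {"schemaVersion": 2, "note": "..."} in the `value` column, keyed by (store_id, key).
final case class MigrationMarker(schemaVersion: Int, note: String)

object MigrationMarker {
  implicit val encoder: Encoder[MigrationMarker] = deriveEncoder[MigrationMarker]
  implicit val decoder: Decoder[MigrationMarker] = deriveDecoder[MigrationMarker]
}

// Given a `store: KeyValueStore` (see the earlier sketch):
//   store.setValue("migration-marker", MigrationMarker(2, "renamed table"))
//     upserts via ON CONFLICT (store_id, key) DO UPDATE, as in the SQL above;
//   store.getValue[MigrationMarker]("migration-marker")
//     yields OptionT[Future, Decoder.Result[MigrationMarker]], so a decoding
//     failure surfaces as Left(DecodingFailure) rather than a thrown exception.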
@@ -0,0 +1,133 @@
// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package org.lfdecentralizedtrust.splice.store

import com.digitalasset.canton.lifecycle.FutureUnlessShutdown
import com.digitalasset.canton.resource.DbStorage
import com.digitalasset.canton.tracing.TraceContext
import com.digitalasset.canton.HasExecutionContext
import org.lfdecentralizedtrust.splice.store.db.{SplicePostgresTest, StoreDescriptor}
import org.scalatest.matchers.should.Matchers

class KeyValueStoreTest
    extends StoreTest
    with Matchers
    with HasExecutionContext
    with SplicePostgresTest {
  private val storeDescriptor =
    StoreDescriptor(
      version = 1,
      name = "DbMultiDomainAcsStoreTest",
      party = dsoParty,
      participant = mkParticipantId("participant"),
      key = Map(),
    )
  private val storeDescriptor2 =
    StoreDescriptor(
      version = 2,
      name = "DbMultiDomainAcsStoreTest",
      party = dsoParty,
      participant = mkParticipantId("participant"),
      key = Map(),
    )

  private def mkStore(descriptor: StoreDescriptor) = KeyValueStore(
    descriptor,
    storage,
    loggerFactory,
  )

  "KeyValueStore" should {

    val configKey = "key1"
    val configValue = "jsonPayload1"
    val otherKey = "key2"
    val otherValue = "jsonPayload2"

    "set and get a payload successfully" in {
Contributor: Suggestion for one more test: create two stores with different descriptors, check that they don't affect each other's data.

Contributor (Author): 👍 (note that these tests are not new, it's just a refactoring of the existing ones, separating the generic key-value store ones from the ones specific to validator data, but I'll add a test nevertheless)

      for {
        store <- mkStore(storeDescriptor)
        _ <- store.setValue(configKey, configValue)
        retrievedValue <- store.getValue[String](configKey).value
      } yield {
        retrievedValue.value.value shouldBe configValue
      }
    }

    "return None for a non-existent key" in {
      for {
        store <- mkStore(storeDescriptor)
        retrievedValue <- store.getValue[String]("non-existent-key").value
      } yield {
        retrievedValue shouldBe None
      }
    }

    "update an existing payload" in {
      for {
        store <- mkStore(storeDescriptor)
        _ <- store.setValue(configKey, configValue)
        _ <- store.setValue(configKey, otherValue)
        retrievedValue <- store.getValue[String](configKey).value
      } yield {
        retrievedValue.value.value shouldBe otherValue
      }
    }

    "handle multiple different keys independently" in {
      for {
        store <- mkStore(storeDescriptor)
        _ <- store.setValue(configKey, configValue)
        _ <- store.setValue(otherKey, otherValue)

        configKeyValue <- store.getValue[String](configKey).value
        otherKeyValue <- store.getValue[String](otherKey).value
      } yield {
        configKeyValue.value.value shouldBe configValue
        otherKeyValue.value.value shouldBe otherValue
      }
    }

    "delete single key" in {
      for {
        store <- mkStore(storeDescriptor)
        _ <- store.setValue(configKey, configValue)
        _ <- store.setValue(otherKey, otherValue)

        _ <- store.deleteKey(configKey)
        configKeyValue <- store.getValue[String](configKey).value
        otherKeyValue <- store.getValue[String](otherKey).value
      } yield {
        configKeyValue shouldBe None
        otherKeyValue.value.value shouldBe otherValue
      }
    }

    "different storeDescriptors should not interfere" in {
      for {
        store1 <- mkStore(storeDescriptor)
        store2 <- mkStore(storeDescriptor2)
        _ <- store1.setValue("key", "value1")
        _ <- store2.setValue("key", "value2")
        _ <- store2.setValue("key2", "something")

        k1s1 <- store1.getValue[String]("key").value
        k2s1 <- store1.getValue[String]("key2").value

        k1s2 <- store2.getValue[String]("key").value
        k2s2 <- store2.getValue[String]("key2").value
      } yield {
        k1s1.value.value shouldBe "value1"
        k2s1 shouldBe None
        k1s2.value.value shouldBe "value2"
        k2s2.value.value shouldBe "something"
      }
    }
  }

  override protected def cleanDb(
      storage: DbStorage
  )(implicit traceContext: TraceContext): FutureUnlessShutdown[?] =
    resetAllAppTables(storage)
}
@@ -92,7 +92,7 @@ trait SpliceDbTest extends DbTest with BeforeAndAfterAll { this: Suite =>
acs_snapshot,
scan_verdict_store,
scan_verdict_transaction_view_store,
validator_internal_config
key_value_store
RESTART IDENTITY CASCADE""".asUpdate
_ <- debugPrintPgActivity()
} yield (),