diff --git a/cluster/pulumi/canton-network/src/bigQuery.ts b/cluster/pulumi/canton-network/src/bigQuery.ts
index 3adbc454c3..2d66e02470 100644
--- a/cluster/pulumi/canton-network/src/bigQuery.ts
+++ b/cluster/pulumi/canton-network/src/bigQuery.ts
@@ -311,8 +311,9 @@ function createPostgresReplicatorUser(
   postgres: CloudPostgres,
   password: PostgresPassword
 ): gcp.sql.User {
+  const name = `${postgres.namespace.logicalName}-user-${replicatorUserName}`;
   return new gcp.sql.User(
-    `${postgres.namespace.logicalName}-user-${replicatorUserName}`,
+    name,
     {
       instance: postgres.databaseInstance.name,
       name: replicatorUserName,
@@ -321,12 +322,51 @@ function createPostgresReplicatorUser(
     {
       parent: postgres,
       deletedWith: postgres.databaseInstance,
+      retainOnDelete: true,
       protect: protectCloudSql,
       dependsOn: [postgres.databaseInstance, password.secret],
     }
   );
 }
 
+function databaseCommandBracket(postgres: CloudPostgres) {
+  return {
+    header: pulumi.interpolate`
+      set -e
+      TMP_BUCKET="da-cn-tmp-sql-$(date +%s)-$RANDOM"
+      TMP_SQL_FILE="$(mktemp tmp_pub_rep_slots_XXXXXXXXXX.sql --tmpdir)"
+      GCS_URI="gs://$TMP_BUCKET/$(basename "$TMP_SQL_FILE")"
+
+      # create temporary bucket
+      gsutil mb --pap enforced -p "${privateNetwork.project}" \
+        -l "${cloudsdkComputeRegion()}" "gs://$TMP_BUCKET"
+
+      # grant DB service account access to the bucket
+      gsutil iam ch "serviceAccount:${postgres.databaseInstance.serviceAccountEmailAddress}:roles/storage.objectAdmin" \
+        "gs://$TMP_BUCKET"
+
+      cat > "$TMP_SQL_FILE" <<'EOT'
+    `,
+    footer: pulumi.interpolate`
+EOT
+
+      # upload SQL to temporary bucket
+      gsutil cp "$TMP_SQL_FILE" "$GCS_URI"
+
+      # then import into Cloud SQL
+      gcloud sql import sql ${postgres.databaseInstance.name} "$GCS_URI" \
+        --database="${scanAppDatabaseName(postgres)}" \
+        --user="${postgres.user.name}" \
+        --quiet
+
+      # cleanup: remove the file from GCS, delete the bucket, remove the local file
+      gsutil rm "$GCS_URI"
+      gsutil rb "gs://$TMP_BUCKET"
+      rm "$TMP_SQL_FILE"
+    `,
+  };
+}
+
 /* For the SQL below to apply, the user/operator applying the pulumi needs
    the 'Cloud SQL Editor' IAM role in the relevant GCP project
@@ -339,6 +379,7 @@ function createPublicationAndReplicationSlots(
 ) {
   const dbName = scanAppDatabaseName(postgres);
   const schemaName = dbName;
+  const { header, footer } = databaseCommandBracket(postgres);
   return new command.local.Command(
     `${postgres.namespace.logicalName}-${replicatorUserName}-pub-replicate-slots`,
     {
@@ -346,20 +387,7 @@ function createPublicationAndReplicationSlots(
       // ----
      // from https://cloud.google.com/datastream/docs/configure-cloudsql-psql
       create: pulumi.interpolate`
-      set -e
-      TMP_BUCKET="da-cn-tmp-sql-$(date +%s)-$RANDOM"
-      TMP_SQL_FILE="$(mktemp tmp_pub_rep_slots_XXXXXXXXXX.sql --tmpdir)"
-      GCS_URI="gs://$TMP_BUCKET/$(basename "$TMP_SQL_FILE")"
-
-      # create temporary bucket
-      gsutil mb --pap enforced -p "${privateNetwork.project}" \
-        -l "${cloudsdkComputeRegion()}" "gs://$TMP_BUCKET"
-
-      # grant DB service account access to the bucket
-      gsutil iam ch "serviceAccount:${postgres.databaseInstance.serviceAccountEmailAddress}:roles/storage.objectAdmin" \
-        "gs://$TMP_BUCKET"
-
-      cat > "$TMP_SQL_FILE" <<'EOT'
+      ${header}
       DO $$
       DECLARE
         migration_complete BOOLEAN := FALSE;
@@ -414,21 +442,23 @@ function createPublicationAndReplicationSlots(
       ALTER DEFAULT PRIVILEGES IN SCHEMA ${schemaName} GRANT SELECT ON TABLES TO ${replicatorUserName};
       COMMIT;
-EOT
-
-      # upload SQL to temporary bucket
-      gsutil cp "$TMP_SQL_FILE" "$GCS_URI"
-
-      # then import into Cloud SQL
-      gcloud sql import sql ${postgres.databaseInstance.name} "$GCS_URI" \
-        --database="${scanAppDatabaseName(postgres)}" \
-        --user="${postgres.user.name}" \
-        --quiet
-
-      # cleanup: remove the file from GCS, delete the bucket, remove the local file
-      gsutil rm "$GCS_URI"
-      gsutil rb "gs://$TMP_BUCKET"
-      rm "$TMP_SQL_FILE"
+      ${footer}
+      `,
+      delete: pulumi.interpolate`
+      ${header}
+      DO $$
+      BEGIN
+        IF EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = '${replicationSlotName}') THEN
+          PERFORM PG_DROP_REPLICATION_SLOT('${replicationSlotName}');
+        END IF;
+      END $$;
+      DO $$
+      BEGIN
+        IF EXISTS (SELECT 1 FROM pg_publication WHERE pubname = '${publicationName}') THEN
+          DROP PUBLICATION ${publicationName};
+        END IF;
+      END $$;
+      ${footer}
       `,
     },
     {
diff --git a/cluster/pulumi/common-sv/src/svConfigs.ts b/cluster/pulumi/common-sv/src/svConfigs.ts
index b35d541048..ede91cdf56 100644
--- a/cluster/pulumi/common-sv/src/svConfigs.ts
+++ b/cluster/pulumi/common-sv/src/svConfigs.ts
@@ -15,6 +15,8 @@ import { StaticSvConfig } from './config';
 import { dsoSize } from './dsoConfig';
 import { cometbftRetainBlocks } from './synchronizer/cometbftConfig';
 
+const sv1ScanBigQuery = spliceEnvConfig.envFlag('SV1_SCAN_BIGQUERY', false);
+
 const svCometBftSecrets: pulumi.Output[] = isMainNet
   ? [svCometBftKeysFromSecret('sv1-cometbft-keys')]
   : [
@@ -58,6 +60,9 @@ export const svConfigs: StaticSvConfig[] = isMainNet
         },
       },
       sweep: sweepConfigFromEnv('SV1'),
+      ...(sv1ScanBigQuery
+        ? { scanBigQuery: { dataset: 'mainnet_da2_scan', prefix: 'da2' } }
+        : {}),
     },
   ]
   : [
@@ -83,6 +88,7 @@ export const svConfigs: StaticSvConfig[] = isMainNet
         },
       },
       sweep: sweepConfigFromEnv('SV1'),
+      ...(sv1ScanBigQuery ? { scanBigQuery: { dataset: 'devnet_da2_scan', prefix: 'da2' } } : {}),
     },
     {
       // TODO(#12169): consider making nodeName and ingressName the same (also for all other SVs)
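
A minimal sketch of the bracket pattern this diff introduces, assuming the `databaseCommandBracket` defined above; `wrapSql`, `dropSlotAndPublicationSql`, and the trailing usage line are hypothetical illustrations, not part of the change. Because the header opens a quoted heredoc (`<<'EOT'`), the shell leaves the `$$` dollar-quoting in the SQL body unexpanded, and the same stage-to-GCS / `gcloud sql import sql` / cleanup scaffolding serves both the `create` and the new `delete` command:

import * as pulumi from '@pulumi/pulumi';

// Hypothetical helper: any SQL body slotted between header and footer lands
// inside the shell heredoc, is uploaded to the temporary bucket, and is
// imported into Cloud SQL by the footer before the bucket and local file
// are removed.
function wrapSql(
  bracket: { header: pulumi.Output<string>; footer: pulumi.Output<string> },
  sqlBody: pulumi.Input<string>
): pulumi.Output<string> {
  return pulumi.interpolate`
      ${bracket.header}
      ${sqlBody}
      ${bracket.footer}
      `;
}

// Usage sketch: the delete path guards each drop with an EXISTS check, so
// re-running teardown after a partial failure is safe.
// const deleteCmd = wrapSql(databaseCommandBracket(postgres), dropSlotAndPublicationSql);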