90 changes: 60 additions & 30 deletions cluster/pulumi/canton-network/src/bigQuery.ts
@@ -311,8 +311,9 @@ function createPostgresReplicatorUser(
postgres: CloudPostgres,
password: PostgresPassword
): gcp.sql.User {
const name = `${postgres.namespace.logicalName}-user-${replicatorUserName}`;
return new gcp.sql.User(
`${postgres.namespace.logicalName}-user-${replicatorUserName}`,
name,
{
instance: postgres.databaseInstance.name,
name: replicatorUserName,
@@ -321,12 +322,51 @@
{
parent: postgres,
deletedWith: postgres.databaseInstance,
retainOnDelete: true,
protect: protectCloudSql,
dependsOn: [postgres.databaseInstance, password.secret],
}
);
}

Copilot AI Jun 10, 2025

[nitpick] Add a doc comment above databaseCommandBracket explaining its purpose and the structure of the header/footer shapes to improve readability and maintainability.

Suggested change
/**
* Generates a set of shell commands for managing temporary SQL files and importing them into a Cloud SQL database.
*
* @param postgres - The CloudPostgres instance containing database and service account details.
* @returns An object with `header` and `footer` properties:
* - `header`: A shell script segment that sets up a temporary GCS bucket, grants access, and prepares a SQL file.
* - `footer`: A shell script segment that uploads the SQL file to GCS, imports it into the database, and cleans up resources.
*/

function databaseCommandBracket(postgres: CloudPostgres) {
return {
header: pulumi.interpolate`
set -e
TMP_BUCKET="da-cn-tmp-sql-$(date +%s)-$RANDOM"
TMP_SQL_FILE="$(mktemp tmp_pub_rep_slots_XXXXXXXXXX.sql --tmpdir)"
GCS_URI="gs://$TMP_BUCKET/$(basename "$TMP_SQL_FILE")"

# create temporary bucket
gsutil mb --pap enforced -p "${privateNetwork.project}" \
-l "${cloudsdkComputeRegion()}" "gs://$TMP_BUCKET"

# grant DB service account access to the bucket
gsutil iam ch "serviceAccount:${postgres.databaseInstance.serviceAccountEmailAddress}:roles/storage.objectAdmin" \
"gs://$TMP_BUCKET"

cat > "$TMP_SQL_FILE" <<'EOT'
`,
footer: pulumi.interpolate`
EOT

# upload SQL to temporary bucket
gsutil cp "$TMP_SQL_FILE" "$GCS_URI"

# then import into Cloud SQL
gcloud sql import sql ${postgres.databaseInstance.name} "$GCS_URI" \
--database="${scanAppDatabaseName(postgres)}" \
--user="${postgres.user.name}" \
--quiet

# cleanup: remove the file from GCS, delete the bucket, remove the local file
gsutil rm "$GCS_URI"
gsutil rb "gs://$TMP_BUCKET"
rm "$TMP_SQL_FILE"
`,
};
}

/*
For the SQL below to apply, the user/operator applying the pulumi
needs the 'Cloud SQL Editor' IAM role in the relevant GCP project
@@ -339,27 +379,15 @@ function createPublicationAndReplicationSlots(
) {
const dbName = scanAppDatabaseName(postgres);
const schemaName = dbName;
const { header, footer } = databaseCommandBracket(postgres);
return new command.local.Command(
`${postgres.namespace.logicalName}-${replicatorUserName}-pub-replicate-slots`,
{
// TODO (#19809) refactor to invoke external shell script
// ----
// from https://cloud.google.com/datastream/docs/configure-cloudsql-psql
create: pulumi.interpolate`
set -e
TMP_BUCKET="da-cn-tmp-sql-$(date +%s)-$RANDOM"
TMP_SQL_FILE="$(mktemp tmp_pub_rep_slots_XXXXXXXXXX.sql --tmpdir)"
GCS_URI="gs://$TMP_BUCKET/$(basename "$TMP_SQL_FILE")"

# create temporary bucket
gsutil mb --pap enforced -p "${privateNetwork.project}" \
-l "${cloudsdkComputeRegion()}" "gs://$TMP_BUCKET"

# grant DB service account access to the bucket
gsutil iam ch "serviceAccount:${postgres.databaseInstance.serviceAccountEmailAddress}:roles/storage.objectAdmin" \
"gs://$TMP_BUCKET"

cat > "$TMP_SQL_FILE" <<'EOT'
${header}
DO $$
DECLARE
migration_complete BOOLEAN := FALSE;
@@ -414,21 +442,23 @@ function createPublicationAndReplicationSlots(
ALTER DEFAULT PRIVILEGES IN SCHEMA ${schemaName}
GRANT SELECT ON TABLES TO ${replicatorUserName};
COMMIT;
EOT

# upload SQL to temporary bucket
gsutil cp "$TMP_SQL_FILE" "$GCS_URI"

# then import into Cloud SQL
gcloud sql import sql ${postgres.databaseInstance.name} "$GCS_URI" \
--database="${scanAppDatabaseName(postgres)}" \
--user="${postgres.user.name}" \
--quiet

# cleanup: remove the file from GCS, delete the bucket, remove the local file
gsutil rm "$GCS_URI"
gsutil rb "gs://$TMP_BUCKET"
rm "$TMP_SQL_FILE"
${footer}
`,
delete: pulumi.interpolate`
${header}
DO $$
BEGIN
IF EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = '${replicationSlotName}') THEN
PERFORM PG_DROP_REPLICATION_SLOT('${replicationSlotName}');
END IF;
END $$;
DO $$
BEGIN
IF EXISTS (SELECT 1 FROM pg_publication WHERE pubname = '${publicationName}') THEN
DROP PUBLICATION ${publicationName};
END IF;
END $$;
${footer}
`,
},
{
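The block comment in the bigQuery.ts hunk above notes that the operator applying this Pulumi code needs the 'Cloud SQL Editor' IAM role in the relevant GCP project. Below is a minimal sketch of granting that role with the Pulumi GCP provider; the project ID and member are hypothetical placeholders, not values taken from this PR.

```typescript
import * as gcp from '@pulumi/gcp';

// Sketch only: bind the Cloud SQL Editor role to the operator identity so the
// `gcloud sql import sql` step in the command footer above is authorized.
// Both the project ID and the member are hypothetical placeholders.
const operatorCloudSqlEditor = new gcp.projects.IAMMember('operator-cloudsql-editor', {
  project: 'my-gcp-project',            // placeholder project ID
  role: 'roles/cloudsql.editor',        // Cloud SQL Editor
  member: 'user:operator@example.com',  // placeholder operator identity
});
```

Granting the role out of band with `gcloud projects add-iam-policy-binding` works just as well; the point is only that the import in the footer needs it to succeed.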
6 changes: 6 additions & 0 deletions cluster/pulumi/common-sv/src/svConfigs.ts
@@ -15,6 +15,8 @@ import { StaticSvConfig } from './config';
import { dsoSize } from './dsoConfig';
import { cometbftRetainBlocks } from './synchronizer/cometbftConfig';

const sv1ScanBigQuery = spliceEnvConfig.envFlag('SV1_SCAN_BIGQUERY', false);

const svCometBftSecrets: pulumi.Output<SvCometBftKeys>[] = isMainNet
? [svCometBftKeysFromSecret('sv1-cometbft-keys')]
: [
@@ -58,6 +60,9 @@ export const svConfigs: StaticSvConfig[] = isMainNet
},
},
sweep: sweepConfigFromEnv('SV1'),
...(sv1ScanBigQuery
? { scanBigQuery: { dataset: 'mainnet_da2_scan', prefix: 'da2' } }
: {}),
},
]
: [
@@ -83,6 +88,7 @@
},
},
sweep: sweepConfigFromEnv('SV1'),
...(sv1ScanBigQuery ? { scanBigQuery: { dataset: 'devnet_da2_scan', prefix: 'da2' } } : {}),
},
{
// TODO(#12169): consider making nodeName and ingressName the same (also for all other SVs)
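The svConfigs.ts hunks gate the new scanBigQuery entry behind an SV1_SCAN_BIGQUERY environment flag and merge it in with a conditional spread, so the field is simply absent when the flag is off. A minimal sketch of that pattern, assuming envFlag just reads the variable and falls back to a default (the real spliceEnvConfig helper may behave differently):

```typescript
// Hypothetical stand-in for spliceEnvConfig.envFlag: treat the variable as a
// boolean with a default when it is unset. The real helper may differ.
function envFlag(name: string, defaultValue: boolean): boolean {
  const raw = process.env[name];
  return raw === undefined ? defaultValue : raw.toLowerCase() === 'true';
}

interface ScanBigQueryConfig {
  dataset: string;
  prefix: string;
}

// Conditional spread: when the flag is false the spread contributes nothing,
// so `scanBigQuery` is omitted from the resulting config object entirely.
const svConfigFragment: { scanBigQuery?: ScanBigQueryConfig } = {
  ...(envFlag('SV1_SCAN_BIGQUERY', false)
    ? { scanBigQuery: { dataset: 'devnet_da2_scan', prefix: 'da2' } }
    : {}),
};
```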