Commit fa97119

Merge branch 'main' into martinflorian-da/cntf-6839-dont-rate-limit-simtime-test

2 parents 522420a + b1eb83a

8 files changed: +118 −60 lines changed

build-tools/cncluster

Lines changed: 9 additions & 17 deletions
@@ -1735,23 +1735,16 @@ subcommand_whitelist[backup_nodes]='Backup one or more CN nodes in the cluster'
 function subcmd_backup_nodes() {
   _cluster_must_exist
 
-  if [ "$#" -lt 2 ]; then
-    echo "Usage: $0 <migration_id> <internal_stack> [node...]"
+  if [ "$#" -lt 1 ]; then
+    echo "Usage: $0 <migration_id> [node...]"
     exit 1
   fi
 
   if [[ ! "$1" =~ ^[0-9]+$ ]]; then
     echo "Usage: $0 <migration_id> [node...]"
     _error "<migration_id> must be a positive integer"
   fi
-
-  if [[ "$2" != "true" && "$2" != "false" ]]; then
-    echo "Usage: $0 <internal_stack> [node...]"
-    _error "<internal_stack> must be either \"true\" or \"false\""
-  fi
-
   migration_id=$1
-  internal_stack=$2
 
   if [ $# -eq 2 ]; then
     nodes="sv-1 sv-2 sv-3 sv-da-1 validator1 splitwell"
@@ -1764,11 +1757,11 @@ function subcmd_backup_nodes() {
     case "$node" in
       sv-1|sv-2|sv-3|sv-4|sv-da-1)
        _info "Backing up sv node $node"
-        SPLICE_SV=$node SPLICE_MIGRATION_ID=$migration_id "$SPLICE_ROOT"/cluster/scripts/node-backup.sh sv "$node" "$migration_id" "$internal_stack"
+        SPLICE_SV=$node SPLICE_MIGRATION_ID=$migration_id "$SPLICE_ROOT"/cluster/scripts/node-backup.sh sv "$node" "$migration_id"
        ;;
      validator1|splitwell)
        _info "Backing up validator node $node"
-        SPLICE_SV=$node SPLICE_MIGRATION_ID=$migration_id "$SPLICE_ROOT"/cluster/scripts/node-backup.sh validator "$node" "$migration_id" "$internal_stack"
+        SPLICE_SV=$node SPLICE_MIGRATION_ID=$migration_id "$SPLICE_ROOT"/cluster/scripts/node-backup.sh validator "$node" "$migration_id"
        ;;
      *)
        _error "Unknown node $node"
@@ -1805,16 +1798,15 @@ function subcmd_restore_node() {
 
   node="${1-}"
   migration_id="${2-}"
-  internal_stack="${3-}"
-  backup_run_id="${4-}"
+  backup_run_id="${3-}"
 
   if [ -z "$node" ] || [ -z "$migration_id" ]; then
-    _error "Usage: $SCRIPTNAME restore_node <namespace_name> <migration_id> <internal_stack> [<backup_run_id>]"
+    _error "Usage: $SCRIPTNAME restore_node <namespace_name> <migration_id> [<backup_run_id>]"
   fi
 
   if [ -z "$backup_run_id" ]; then
     _info "No backup run_id given, looking for the latest full backup"
-    backup_run_id=$(SPLICE_SV=$node SPLICE_MIGRATION_ID=$migration_id "$SPLICE_ROOT"/cluster/scripts/find-recent-backup.sh "$node" "$migration_id" "$internal_stack")
+    backup_run_id=$(SPLICE_SV=$node SPLICE_MIGRATION_ID=$migration_id "$SPLICE_ROOT"/cluster/scripts/find-recent-backup.sh "$node" "$migration_id")
     if [[ -z "$backup_run_id" || "$backup_run_id" == "null" ]]; then
       _error "No recent backup found for $node"
     fi
@@ -1831,11 +1823,11 @@ function subcmd_restore_node() {
      _info "Silencing SV report creation alerts for $sv_name for 1 hour"
      subcmd_silence_grafana_alerts "1 hour" "alertname=Report Creation Time Lag" "report_publisher=$sv_name"
      _info "Restoring sv node $node"
-      SPLICE_SV=$node SPLICE_MIGRATION_ID=$migration_id "$SPLICE_ROOT"/cluster/scripts/node-restore.sh $force "$node" "$migration_id" "$backup_run_id" "$internal_stack" cometbft sequencer participant mediator cn-apps
+      SPLICE_SV=$node SPLICE_MIGRATION_ID=$migration_id "$SPLICE_ROOT"/cluster/scripts/node-restore.sh $force "$node" "$migration_id" "$backup_run_id" cometbft sequencer participant mediator cn-apps
      ;;
    validator1|splitwell)
      _info "Restoring validator node $node"
-      SPLICE_SV=$node SPLICE_MIGRATION_ID=$migration_id "$SPLICE_ROOT"/cluster/scripts/node-restore.sh $force "$node" "$migration_id" "$backup_run_id" "$internal_stack" participant validator
+      SPLICE_SV=$node SPLICE_MIGRATION_ID=$migration_id "$SPLICE_ROOT"/cluster/scripts/node-restore.sh $force "$node" "$migration_id" "$backup_run_id" participant validator
      ;;
    *)
      _error "Unknown node $node"

cluster/pulumi/common/src/index.ts

Lines changed: 1 addition & 0 deletions
@@ -24,6 +24,7 @@ export * from './dockerConfig';
 export * from './serviceAccount';
 export * from './participantKms';
 export * from './config/migrationSchema';
+export * from './postgres';
 export * from './pruning';
 export * from './config/loadTesterConfig';
 export * from './config/networkWideConfig';

cluster/pulumi/common/src/postgres.ts

Lines changed: 8 additions & 3 deletions
@@ -65,7 +65,12 @@ export class CloudPostgres extends pulumi.ComponentResource implements Postgres
     secretName: string,
     cloudSqlConfig: CloudSqlConfig,
     active: boolean = true,
-    opts: { disableProtection?: boolean; migrationId?: string; logicalDecoding?: boolean } = {}
+    opts: {
+      disableProtection?: boolean;
+      migrationId?: string;
+      logicalDecoding?: boolean;
+      disableBackups?: boolean;
+    } = {}
   ) {
     const instanceLogicalName = xns.logicalName + '-' + instanceName;
     const instanceLogicalNameAlias = xns.logicalName + '-' + alias; // pulumi name before #12391
@@ -93,8 +98,8 @@ export class CloudPostgres extends pulumi.ComponentResource implements Postgres
           ...(opts.logicalDecoding ? [{ name: 'cloudsql.logical_decoding', value: 'on' }] : []),
         ],
         backupConfiguration: {
-          enabled: true,
-          pointInTimeRecoveryEnabled: true,
+          enabled: !opts.disableBackups,
+          pointInTimeRecoveryEnabled: !opts.disableBackups,
           ...(spliceConfig.pulumiProjectConfig.cloudSql.backupsToRetain
             ? {
                 backupRetentionSettings: {
cluster/pulumi/gha/src/performanceTests.ts

Lines changed: 27 additions & 0 deletions

@@ -0,0 +1,27 @@
+// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+import { CloudPostgres, ExactNamespace } from '@lfdecentralizedtrust/splice-pulumi-common';
+
+export function createCloudSQLInstanceForPerformanceTests(
+  ghaNamespace: ExactNamespace
+): CloudPostgres {
+  return new CloudPostgres(
+    ghaNamespace,
+    'performance-test-db',
+    'performance-test-db',
+    'performance-test-db-secret',
+    {
+      enabled: true,
+      maintenanceWindow: { day: 2, hour: 8 },
+      protected: false,
+      tier: 'db-custom-2-7680', // same as devnet & testnet as of Jan 2026
+      enterprisePlus: false,
+    },
+    true,
+    {
+      disableProtection: true,
+      disableBackups: true,
+      logicalDecoding: false,
+    }
+  );
+}

cluster/pulumi/gha/src/runners.test.ts

Lines changed: 4 additions & 0 deletions
@@ -15,13 +15,17 @@ jest.mock('./config', () => ({
     runnerHookVersion: '1.1',
   },
 }));
+class FakeCloudPostgres extends pulumi.Resource {}
 jest.mock('@lfdecentralizedtrust/splice-pulumi-common', () => ({
   __esModule: true,
   appsAffinityAndTolerations: {},
   DOCKER_REPO: 'https://dummy-docker-repo.com',
   HELM_MAX_HISTORY_SIZE: 42,
   imagePullSecretByNamespaceNameForServiceAccount: () => [],
   infraAffinityAndTolerations: {},
+  CloudPostgres: function CloudPostgres() {
+    return new FakeCloudPostgres('CloudPostgres', 'cloud-postgres', true);
+  },
 }));
 jest.mock('@lfdecentralizedtrust/splice-pulumi-common/src/config/envConfig', () => ({
   __esModule: true,

cluster/pulumi/gha/src/runners.ts

Lines changed: 41 additions & 7 deletions
@@ -3,7 +3,9 @@
 import * as k8s from '@pulumi/kubernetes';
 import {
   appsAffinityAndTolerations,
+  CloudPostgres,
   DOCKER_REPO,
+  ExactNamespace,
   HELM_MAX_HISTORY_SIZE,
   imagePullSecretByNamespaceNameForServiceAccount,
   infraAffinityAndTolerations,
@@ -18,6 +20,7 @@ import yaml from 'js-yaml';
 
 import { createCachePvc } from './cache';
 import { ghaConfig } from './config';
+import { createCloudSQLInstanceForPerformanceTests } from './performanceTests';
 
 type ResourcesSpec = {
   requests?: {
@@ -403,7 +406,8 @@ function installK8sRunnerScaleSet(
   cachePvcName: string,
   resources: ResourcesSpec,
   serviceAccountName: string,
-  dependsOn: Resource[]
+  dependsOn: Resource[],
+  performanceTestsDb: CloudPostgres
 ): Release {
   const podConfigMapName = `${name}-pod-config`;
   // A configMap that will be mounted to runner pods and provide additional pod spec for the workflow pods
@@ -534,6 +538,21 @@ function installK8sRunnerScaleSet(
              name: 'ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE',
              value: '/pod.yaml',
            },
+           {
+             name: 'PERFORMANCE_TESTS_DB_HOST',
+             value: performanceTestsDb.address,
+           },
+           {
+             name: 'PERFORMANCE_TESTS_DB_USER',
+             value: 'cnadmin',
+           },
+           {
+             name: 'PERFORMANCE_TESTS_DB_PASSWORD',
+             valueFrom: {
+               key: 'postgresPassword',
+               name: performanceTestsDb.secretName,
+             },
+           },
          ],
          volumeMounts: [
            {
@@ -701,9 +720,10 @@ function installK8sRunnerScaleSets(
   runnersNamespace: Namespace,
   tokenSecret: Secret,
   cachePvcName: string,
-  serviceAccountName: string
+  serviceAccountName: string,
+  performanceTestsDb: CloudPostgres
 ): void {
-  const dependsOn = [controller, runnersNamespace, tokenSecret];
+  const dependsOn = [controller, runnersNamespace, tokenSecret, performanceTestsDb];
 
   runnerSpecs
     .filter(spec => spec.k8s)
@@ -715,7 +735,8 @@ function installK8sRunnerScaleSets(
         cachePvcName,
         spec.resources,
         serviceAccountName,
-        dependsOn
+        dependsOn,
+        performanceTestsDb
       );
     });
 }
@@ -754,12 +775,17 @@ function installPodMonitor(runnersNamespace) {
   );
 }
 
+const GHA_NAMESPACE_NAME = 'gha-runners';
 export function installRunnerScaleSets(controller: k8s.helm.v3.Release): void {
-  const runnersNamespace = new Namespace('gha-runners', {
+  const runnersNamespace = new Namespace(GHA_NAMESPACE_NAME, {
     metadata: {
-      name: 'gha-runners',
+      name: GHA_NAMESPACE_NAME,
     },
   });
+  const exactNs: ExactNamespace = {
+    ns: runnersNamespace,
+    logicalName: GHA_NAMESPACE_NAME,
+  };
 
   const tokenSecret = new k8s.core.v1.Secret(
     'gh-access-token',
@@ -791,7 +817,15 @@ export function installRunnerScaleSets(controller: k8s.helm.v3.Release): void {
   const saName = 'k8s-runners';
   installRunnersServiceAccount(runnersNamespace, saName);
 
+  const performanceTestsDb = createCloudSQLInstanceForPerformanceTests(exactNs);
   installDockerRunnerScaleSets(controller, runnersNamespace, tokenSecret, cachePvc, saName);
-  installK8sRunnerScaleSets(controller, runnersNamespace, tokenSecret, cachePvcName, saName);
+  installK8sRunnerScaleSets(
+    controller,
+    runnersNamespace,
+    tokenSecret,
+    cachePvcName,
+    saName,
+    performanceTestsDb
+  );
   installPodMonitor(runnersNamespace);
 }
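
The PERFORMANCE_TESTS_DB_HOST, PERFORMANCE_TESTS_DB_USER, and PERFORMANCE_TESTS_DB_PASSWORD variables end up in the environment of workflow pods spawned by this runner scale set. A minimal sketch of how a performance-test job step might consume them, assuming a psql client is available on the runner image; the target database name here is illustrative:

# Connect to the shared performance-test CloudSQL instance from a workflow step.
# PGPASSWORD is the standard libpq mechanism for passing the password non-interactively.
PGPASSWORD="$PERFORMANCE_TESTS_DB_PASSWORD" psql \
  --host "$PERFORMANCE_TESTS_DB_HOST" \
  --username "$PERFORMANCE_TESTS_DB_USER" \
  --dbname postgres \
  --command 'SELECT version();'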

cluster/scripts/find-recent-backup.sh

Lines changed: 7 additions & 9 deletions
@@ -11,7 +11,7 @@ source "${TOOLS_LIB}/libcli.source"
 source "${SPLICE_ROOT}/cluster/scripts/utils.source"
 
 function usage() {
-  _info "Usage: $0 <namespace> <migration_id> <internal (true|false)>"
+  _info "Usage: $0 <namespace> <migration_id>"
 }
 
 function is_full_backup_kube() {
@@ -64,16 +64,15 @@ function latest_full_backup_run_id_gcloud() {
   local migration_id=$2
   local is_sv=$3
   local expected_components=$4
-  local internal=$5
   local num_components
   num_components=$(echo "$expected_components" | wc -w)
   local stack
 
   declare -A run_ids_dict
 
   for component in $expected_components; do
-    stack=$(get_stack_for_namespace_component "$namespace" "$component" "$internal")
-    instance="$(create_component_instance "$component" "$migration_id" "$namespace" "$internal")"
+    stack=$(get_stack_for_namespace_component "$namespace" "$component")
+    instance="$(create_component_instance "$component" "$migration_id" "$namespace")"
     local full_component_instance="$namespace-$instance-pg"
 
     local cloudsql_id
@@ -110,27 +109,26 @@ function latest_full_backup_run_id_gcloud() {
 }
 
 function main() {
-  if [ "$#" -lt 3 ]; then
+  if [ "$#" -lt 2 ]; then
    usage
    exit 1
  fi
 
  local namespace=$1
  local migration_id=$2
-  local internal=$3
 
  case "$namespace" in
    sv-1|sv-2|sv-3|sv-4|sv-da-1)
      is_sv=true
      full_instance="$namespace-cn-apps-pg"
      expected_components="cn-apps sequencer participant mediator"
-      stack=$(get_stack_for_namespace_component "$namespace" "cn-apps" "$internal")
+      stack=$(get_stack_for_namespace_component "$namespace" "cn-apps")
      ;;
    *)
      is_sv=false
      full_instance="$namespace-validator-pg"
      expected_components="validator participant"
-      stack=$(get_stack_for_namespace_component "$namespace" "participant" "$internal")
+      stack=$(get_stack_for_namespace_component "$namespace" "participant")
      ;;
  esac
 
@@ -140,7 +138,7 @@ function main() {
    backup_run_id=$(latest_full_backup_run_id_kube "$namespace" "$migration_id" "$is_sv" "$expected_components")
    echo "$backup_run_id"
  elif [ "$type" == "canton:cloud:postgres" ]; then
-    backup_run_id=$(latest_full_backup_run_id_gcloud "$namespace" "$migration_id" "$is_sv" "$expected_components" "$internal")
+    backup_run_id=$(latest_full_backup_run_id_gcloud "$namespace" "$migration_id" "$is_sv" "$expected_components")
    echo "$backup_run_id"
  elif [ -z "$type" ]; then
    _error "No postgres instance $full_instance found in stack ${stack}. Is the cluster deployed with split DB instances?"
