
Commit 6940ca6

Merge pull request #520 from GoogleCloudPlatform/release-v1.4.0
Release v1.4.0
2 parents: 8525bf2 + 87f4b06

131 files changed: 1341 additions, 978 deletions

Some content is hidden: large commits show only a subset of the changed files by default. The diffs below cover a selection of those files.

cmd/root.go

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ HPC deployments on the Google Cloud Platform.`,
 			log.Fatalf("cmd.Help function failed: %s", err)
 		}
 	},
-	Version: "v1.3.0",
+	Version: "v1.4.0",
 }
 )

community/examples/cloud-batch.yaml

Lines changed: 10 additions & 10 deletions
@@ -27,19 +27,19 @@ vars:
 deployment_groups:
 - group: primary
   modules:
-  - source: modules/network/pre-existing-vpc
+  - id: network1
+    source: modules/network/pre-existing-vpc
     kind: terraform
-    id: network1

-  - source: modules/file-system/filestore
+  - id: appfs
+    source: modules/file-system/filestore
     kind: terraform
-    id: appfs
     use: [network1]
     settings: {local_mount: /sw}

-  - source: modules/scripts/startup-script
+  - id: hello-startup-script
+    source: modules/scripts/startup-script
     kind: terraform
-    id: hello-startup-script
     settings:
       runners:
       - type: shell
@@ -53,9 +53,9 @@ deployment_groups:
           #!/bin/sh
           echo "Hello World" > /sw/hello.txt

-  - source: community/modules/scheduler/cloud-batch-job
+  - id: batch-job
+    source: community/modules/scheduler/cloud-batch-job
     kind: terraform
-    id: batch-job
     use: [network1, appfs, hello-startup-script]
     settings:
       runnable: "cat /sw/hello.txt"
@@ -64,8 +64,8 @@ deployment_groups:
       family: centos-7
       project: centos-cloud

-  - source: community/modules/scheduler/cloud-batch-login-node
+  - id: batch-login
+    source: community/modules/scheduler/cloud-batch-login-node
     kind: terraform
-    id: batch-login
     use: [batch-job]
     outputs: [instructions]
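
Every blueprint touched by this release follows the same pattern: each module entry now leads with its id, followed by source and kind, instead of listing the id after kind. A minimal sketch of the reordered form, reusing the network1 and appfs entries exactly as they appear in the cloud-batch.yaml diff above:

  modules:
  - id: network1
    source: modules/network/pre-existing-vpc
    kind: terraform

  - id: appfs
    source: modules/file-system/filestore
    kind: terraform
    use: [network1]
    settings: {local_mount: /sw}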

community/examples/hpc-cluster-small-sharedvpc.yaml

Lines changed: 12 additions & 12 deletions
@@ -41,27 +41,27 @@ vars:
 deployment_groups:
 - group: primary
   modules:
-  - source: modules/network/pre-existing-vpc
+  - id: network1
+    source: modules/network/pre-existing-vpc
     kind: terraform
-    id: network1
     settings:
       project_id: $(vars.host_project_id)
       network_name: your-shared-network
       subnetwork_name: your-shared-subnetwork

-  - source: modules/file-system/filestore
+  - id: homefs
+    source: modules/file-system/filestore
     kind: terraform
-    id: homefs
     use: [network1]
     settings:
       local_mount: /home
       connect_mode: PRIVATE_SERVICE_ACCESS
       network_name: $(network1.network_id)

   # This debug_partition will work out of the box without requesting additional GCP quota.
-  - source: community/modules/compute/SchedMD-slurm-on-gcp-partition
+  - id: debug_partition
+    source: community/modules/compute/SchedMD-slurm-on-gcp-partition
     kind: terraform
-    id: debug_partition
     use:
     - network1
     - homefs
@@ -73,19 +73,19 @@ deployment_groups:
       machine_type: n2-standard-2

   # This compute_partition is far more performant than debug_partition but may require requesting GCP quotas first.
-  - source: community/modules/compute/SchedMD-slurm-on-gcp-partition
+  - id: compute_partition
+    source: community/modules/compute/SchedMD-slurm-on-gcp-partition
     kind: terraform
-    id: compute_partition
     use:
     - network1
     - homefs
     settings:
       partition_name: compute
       max_node_count: 20

-  - source: community/modules/scheduler/SchedMD-slurm-on-gcp-controller
+  - id: slurm_controller
+    source: community/modules/scheduler/SchedMD-slurm-on-gcp-controller
     kind: terraform
-    id: slurm_controller
     use:
     - network1
     - homefs
@@ -95,9 +95,9 @@ deployment_groups:
       login_node_count: 1
       shared_vpc_host_project: $(vars.host_project_id)

-  - source: community/modules/scheduler/SchedMD-slurm-on-gcp-login-node
+  - id: slurm_login
+    source: community/modules/scheduler/SchedMD-slurm-on-gcp-login-node
     kind: terraform
-    id: slurm_login
     use:
     - network1
     - homefs

community/examples/htcondor-pool.yaml

Lines changed: 20 additions & 20 deletions
@@ -27,32 +27,32 @@ vars:
 deployment_groups:
 - group: htcondor
   modules:
-  - source: modules/network/vpc
+  - id: network1
+    source: modules/network/vpc
     kind: terraform
-    id: network1
     settings:
       network_name: htcondor-pool
       subnetwork_name: htcondor-pool-usc1
     outputs:
     - network_name

-  - source: community/modules/scripts/htcondor-install
+  - id: htcondor_install
+    source: community/modules/scripts/htcondor-install
     kind: terraform
-    id: htcondor_install

-  - source: community/modules/project/service-enablement
+  - id: htcondor_services
+    source: community/modules/project/service-enablement
     kind: terraform
-    id: htcondor_services
     use:
     - htcondor_install

-  - source: community/modules/scheduler/htcondor-configure
+  - id: htcondor_configure
+    source: community/modules/scheduler/htcondor-configure
     kind: terraform
-    id: htcondor_configure

-  - source: modules/scripts/startup-script
+  - id: htcondor_configure_central_manager
+    source: modules/scripts/startup-script
     kind: terraform
-    id: htcondor_configure_central_manager
     settings:
       runners:
       - type: shell
@@ -61,9 +61,9 @@ deployment_groups:
       - $(htcondor_install.install_htcondor_runner)
       - $(htcondor_configure.central_manager_runner)

-  - source: modules/compute/vm-instance
+  - id: htcondor_cm
+    source: modules/compute/vm-instance
     kind: terraform
-    id: htcondor_cm
     use:
     - network1
     - htcondor_configure_central_manager
@@ -78,9 +78,9 @@ deployment_groups:
     outputs:
     - internal_ip

-  - source: modules/scripts/startup-script
+  - id: htcondor_configure_execute_point
+    source: modules/scripts/startup-script
     kind: terraform
-    id: htcondor_configure_execute_point
     settings:
       runners:
       - type: shell
@@ -89,9 +89,9 @@ deployment_groups:
       - $(htcondor_install.install_htcondor_runner)
       - $(htcondor_configure.execute_point_runner)

-  - source: community/modules/compute/htcondor-execute-point
+  - id: htcondor_execute_point
+    source: community/modules/compute/htcondor-execute-point
     kind: terraform
-    id: htcondor_execute_point
     use:
     - network1
     - htcondor_configure_execute_point
@@ -104,9 +104,9 @@ deployment_groups:
       scopes:
       - cloud-platform

-  - source: modules/scripts/startup-script
+  - id: htcondor_configure_access_point
+    source: modules/scripts/startup-script
     kind: terraform
-    id: htcondor_configure_access_point
     settings:
       runners:
       - type: shell
@@ -128,9 +128,9 @@ deployment_groups:
           request_cpus = 1
           request_memory = 100MB
           queue
-  - source: modules/compute/vm-instance
+  - id: htcondor_access
+    source: modules/compute/vm-instance
     kind: terraform
-    id: htcondor_access
     use:
     - network1
     - htcondor_configure_access_point

community/examples/intel/daos-cluster.yaml

Lines changed: 6 additions & 6 deletions
@@ -28,16 +28,16 @@ vars:
 deployment_groups:
 - group: primary
   modules:
-  - source: modules/network/pre-existing-vpc
+  - id: network1
+    source: modules/network/pre-existing-vpc
     kind: terraform
-    id: network1

   # This module creates a DAOS server. Server images MUST be created before running this.
   # https://github.com/daos-stack/google-cloud-daos/tree/main/images
   # more info: https://github.com/daos-stack/google-cloud-daos/tree/main/terraform/modules/daos_server
-  - source: github.com/daos-stack/google-cloud-daos.git//terraform/modules/daos_server?ref=v0.2.1
+  - id: daos-server
+    source: github.com/daos-stack/google-cloud-daos.git//terraform/modules/daos_server?ref=v0.2.1
     kind: terraform
-    id: daos-server
     use: [network1]
     settings:
       number_of_instances: 2
@@ -46,9 +46,9 @@ deployment_groups:
   # This module creates a MIG with DAOS clients. Client images MUST be created before running this.
   # https://github.com/daos-stack/google-cloud-daos/tree/main/images
   # more info: https://github.com/daos-stack/google-cloud-daos/tree/main/terraform/modules/daos_client
-  - source: github.com/daos-stack/google-cloud-daos.git//terraform/modules/daos_client?ref=v0.2.1
+  - id: daos-client
+    source: github.com/daos-stack/google-cloud-daos.git//terraform/modules/daos_client?ref=v0.2.1
     kind: terraform
-    id: daos-client
     use: [network1, daos-server]
     settings:
       number_of_instances: 2
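
Unlike the other entries, the two DAOS modules point at Terraform hosted in the external daos-stack repository rather than at this repo's modules tree; the ?ref=v0.2.1 suffix pins the source to a specific git ref. A minimal sketch of such a remote-source entry, with values copied from the diff above:

  - id: daos-server
    source: github.com/daos-stack/google-cloud-daos.git//terraform/modules/daos_server?ref=v0.2.1
    kind: terraform
    use: [network1]
    settings:
      number_of_instances: 2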

community/examples/intel/daos-slurm.yaml

Lines changed: 16 additions & 16 deletions
@@ -28,23 +28,23 @@ vars:
 deployment_groups:
 - group: primary
   modules:
-  - source: modules/network/pre-existing-vpc
+  - id: network1
+    source: modules/network/pre-existing-vpc
     kind: terraform
-    id: network1

-  - source: modules/file-system/filestore
+  - id: homefs
+    source: modules/file-system/filestore
     kind: terraform
-    id: homefs
     use: [network1]
     settings:
       local_mount: "/home"

   # This module creates a DAOS server. Server images MUST be created before running this.
   # https://github.com/daos-stack/google-cloud-daos/tree/main/images
   # more info: https://github.com/daos-stack/google-cloud-daos/tree/main/terraform/modules/daos_server
-  - source: github.com/daos-stack/google-cloud-daos.git//terraform/modules/daos_server?ref=v0.2.1
+  - id: daos
+    source: github.com/daos-stack/google-cloud-daos.git//terraform/modules/daos_server?ref=v0.2.1
     kind: terraform
-    id: daos
     use: [network1]
     settings:
       labels: {ghpc_role: file-system}
@@ -68,9 +68,9 @@ deployment_groups:
         reclaim: "lazy"
         containers: []

-  - source: modules/scripts/startup-script
+  - id: daos-client-script
+    source: modules/scripts/startup-script
     kind: terraform
-    id: daos-client-script
     settings:
       runners:
       - type: shell
@@ -87,9 +87,9 @@ deployment_groups:
         destination: /var/daos/daos_client_config.sh

   ## This debug_partition will work out of the box without requesting additional GCP quota.
-  - source: community/modules/compute/SchedMD-slurm-on-gcp-partition
+  - id: debug_partition
+    source: community/modules/compute/SchedMD-slurm-on-gcp-partition
     kind: terraform
-    id: debug_partition
     use:
     - network1
     - homefs
@@ -100,19 +100,19 @@ deployment_groups:
       machine_type: n2-standard-2

   # This compute_partition is far more performant than debug_partition but may require requesting GCP quotas first.
-  - source: community/modules/compute/SchedMD-slurm-on-gcp-partition
+  - id: compute_partition
+    source: community/modules/compute/SchedMD-slurm-on-gcp-partition
     kind: terraform
-    id: compute_partition
     use:
     - network1
     - homefs
     settings:
       partition_name: compute
       max_node_count: 20

-  - source: community/modules/scheduler/SchedMD-slurm-on-gcp-controller
+  - id: slurm_controller
+    source: community/modules/scheduler/SchedMD-slurm-on-gcp-controller
     kind: terraform
-    id: slurm_controller
     use:
     - network1
     - homefs
@@ -127,9 +127,9 @@ deployment_groups:
       - "https://www.googleapis.com/auth/devstorage.read_only"
       - "https://www.googleapis.com/auth/cloud-platform"

-  - source: community/modules/scheduler/SchedMD-slurm-on-gcp-login-node
+  - id: slurm_login
+    source: community/modules/scheduler/SchedMD-slurm-on-gcp-login-node
     kind: terraform
-    id: slurm_login
     use:
     - network1
     - homefs
