# Dualstack Cluster Provisioning workflow (weekly schedule, manual dispatch,
# and reusable workflow_call entry point).
---
name: Dualstack Cluster Provisioning
on:
schedule:
- cron: "0 9 * * 1"
workflow_dispatch:
inputs:
rancher_version:
description: "Rancher tag version"
rancher_chart_version:
description: "Rancher chart version"
run_all_versions:
description: "Run all supported versions if manually triggered"
required: false
default: false
type: boolean
workflow_call:
inputs:
rancher_version:
description: "Rancher tag version provided from check-rancher-tag workflow"
required: true
type: string
rancher_chart_version:
description: "Rancher chart version provided from check-rancher-tag workflow"
required: true
type: string
permissions:
id-token: write
contents: read
env:
CLOUD_PROVIDER_VERSION: "5.95.0"
HOSTNAME_PREFIX: "gha-ds-prov"
jobs:
v2-13:
if: |
github.event_name == 'schedule' ||
github.event.inputs.run_all_versions == 'true' ||
(github.event_name == 'workflow_dispatch' && startsWith(github.event.inputs.rancher_version, 'v2.13'))
name: ${{ github.event.inputs.rancher_version }}
runs-on: ubuntu-latest
environment: latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
submodules: recursive
- name: Checkout tfp-automation repository
uses: actions/checkout@v4
with:
repository: rancher/tfp-automation
path: tfp-automation
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: ${{ secrets.IAM_ROLE }}
aws-region: ${{ secrets.AWS_REGION }}
- name: Get AWS credentials from Secrets Manager
uses: aws-actions/aws-secretsmanager-get-secrets@v2
with:
secret-ids: |
AWS_ACCESS_KEY, ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_KEY, ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- name: "Fetch and Set DockerHub Credentials"
uses: rancher-eio/read-vault-secrets@main
with:
secrets: |
secret/data/github/repo/${{ github.repository }}/dockerhub/org-token/credentials username | DOCKERHUB_USERNAME ;
secret/data/github/repo/${{ github.repository }}/dockerhub/org-token/credentials password | DOCKERHUB_PASSWORD
- name: Mask Dockerhub Credentials
run: |
echo "::add-mask::${{ env.DOCKERHUB_USERNAME }}"
echo "::add-mask::${{ env.DOCKERHUB_PASSWORD }}"
- name: Whitelist Runner IP
uses: ./.github/actions/whitelist-runner-ip
with:
prefix-list-id: ${{ secrets.AWS_MANAGED_PREFIX_LIST_ID }}
region: "${{ secrets.AWS_REGION }}"
- name: Set up SSH Keys
uses: ./.github/actions/setup-ssh-keys
with:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
ssh-private-key-name: ${{ secrets.SSH_PRIVATE_KEY_NAME }}
windows-ssh-private-key: ${{ secrets.WINDOWS_SSH_PRIVATE_KEY }}
windows-ssh-private-key-name: ${{ secrets.WINDOWS_SSH_PRIVATE_KEY_NAME }}
- name: Uniquify hostname prefix
uses: ./.github/actions/uniquify-hostname
- name: Set Rancher version
uses: ./.github/actions/set-env-var
with:
key: RANCHER_VERSION
value: |
${{
github.event.inputs.rancher_version ||
(github.event_name == 'workflow_dispatch' && github.event.inputs.rancher_version) ||
(github.event_name == 'schedule' && vars.RANCHER_VERSION_2_13_HEAD) ||
(github.event.inputs.run_all_versions == 'true' && vars.RANCHER_VERSION_2_13_HEAD)
}}
- name: Set Rancher chart version
uses: ./.github/actions/set-env-var
with:
key: RANCHER_CHART_VERSION
value: |
${{
github.event.inputs.rancher_chart_version ||
(github.event_name == 'workflow_dispatch' && github.event.inputs.rancher_chart_version) ||
(github.event_name == 'schedule' && vars.RELEASED_RANCHER_CHART_VERSION_2_12) ||
(github.event.inputs.run_all_versions == 'true' && vars.RELEASED_RANCHER_CHART_VERSION_2_12)
}}
- name: Set Rancher repo
uses: ./.github/actions/set-rancher-repo
with:
rancher-version: ${{ env.RANCHER_VERSION }}
fallback-repo: ${{ secrets.RANCHER_REPO }}
- name: Get Qase ID
id: get-qase-id
uses: ./.github/actions/get-qase-id
with:
triggered_tag: ${{ github.event.inputs.rancher_version }}
qase_release_id: ${{ vars.HB_QASE_RELEASE_TEST_RUN_ID_2_13 }}
qase_recurring_id: ${{ vars.HB_QASE_RECURRING_TEST_RUN_ID_2_13 }}
- name: Create config.yaml
run: |
cat > config.yaml <<EOF
rancher:
host: "${{ env.HOSTNAME_PREFIX }}.${{ secrets.AWS_ROUTE_53_ZONE }}"
adminPassword: "${{ secrets.RANCHER_ADMIN_PASSWORD }}"
insecure: true
cleanup: true
terraform:
cni: "${{ secrets.CNI }}"
defaultClusterRoleForProjectMembers: "true"
enableNetworkPolicy: false
provider: "${{ vars.PROVIDER_AMAZON }}"
privateKeyPath: "${{ secrets.TFP_SSH_PRIVATE_KEY_PATH }}"
resourcePrefix: "${{ env.HOSTNAME_PREFIX }}"
awsCredentials:
awsAccessKey: "$AWS_ACCESS_KEY"
awsSecretKey: "$AWS_SECRET_KEY"
awsConfig:
ami: "${{ secrets.AWS_AMI }}"
awsKeyName: "${{ secrets.SSH_PRIVATE_KEY_NAME }}"
awsInstanceType: "${{ vars.AWS_INSTANCE_TYPE }}"
awsVolumeType: "${{ vars.AWS_VOLUME_TYPE }}"
region: "${{ secrets.AWS_REGION }}"
awsSecurityGroups: [${{ secrets.AWS_SECURITY_GROUPS }}]
awsSecurityGroupNames: [${{ secrets.AWS_SECURITY_GROUP_NAMES }}]
awsSubnetID: "${{ secrets.AWS_SUBNET_ID }}"
awsVpcID: "${{ secrets.AWS_VPC_ID }}"
awsZoneLetter: "${{ vars.AWS_ZONE_LETTER }}"
awsRootSize: ${{ vars.AWS_ROOT_SIZE }}
awsRoute53Zone: "${{ secrets.AWS_ROUTE_53_ZONE }}"
awsUser: "${{ secrets.DUAL_STACK_AWS_USER }}"
clusterCIDR: "${{ secrets.DUALSTACK_CLUSTER_CIDR }}"
serviceCIDR: "${{ secrets.DUALSTACK_SERVICE_CIDR }}"
sshConnectionType: "${{ vars.SSH_CONNECTION_TYPE }}"
timeout: "${{ vars.TIMEOUT }}"
ipAddressType: "${{ vars.IP_ADDRESS_TYPE }}"
loadBalancerType: "${{ vars.LOAD_BALANCER_DUALSTACK_TYPE }}"
targetType: "${{ vars.TARGET_TYPE }}"
standalone:
bootstrapPassword: "${{ secrets.RANCHER_ADMIN_PASSWORD }}"
certManagerVersion: "${{ vars.CERT_MANAGER_VERSION }}"
certType: "${{ vars.CERT_TYPE }}"
chartVersion: "${{ env.RANCHER_CHART_VERSION }}"
osUser: "${{ secrets.DUAL_STACK_OS_USER }}"
osGroup: "${{ secrets.DUAL_STACK_OS_GROUP }}"
rancherChartRepository: "${{ secrets.RANCHER_HELM_CHART_URL }}"
rancherHostname: "${{ env.HOSTNAME_PREFIX }}.${{ secrets.AWS_ROUTE_53_ZONE }}"
rancherImage: "${{ secrets.RANCHER_IMAGE }}"
rancherTagVersion: "${{ env.RANCHER_VERSION }}"
registryPassword: "${{ env.DOCKERHUB_PASSWORD }}"
registryUsername: "${{ env.DOCKERHUB_USERNAME }}"
repo: "${{ env.RANCHER_REPO }}"
rke2Version: "${{ vars.RKE2_VERSION_2_12 }}"
terratest:
pathToRepo: "${{ secrets.PATH_TO_REPO }}"
standaloneLogging: ${{ vars.TERRAFORM_LOGGING }}
clusterConfig:
cni: "${{ secrets.CNI }}"
provider: "${{ vars.PROVIDER_AMAZON }}"
nodeProvider: "ec2"
pathToRepo: "${{ secrets.PATH_TO_TESTS_REPO }}"
compliance: true
networking:
clusterCIDR: "${{ secrets.DUALSTACK_CLUSTER_CIDR }}"
serviceCIDR: "${{ secrets.DUALSTACK_SERVICE_CIDR }}"
stackPreference: "${{ vars.STACK_PREFERENCE_DUAL }}"
registries:
rke2Registries:
mirrors:
"docker.io":
endpoint: ["https://${{ secrets.QA_PRIVATE_REGISTRY_NAME }}"]
configs:
"${{ secrets.QA_PRIVATE_REGISTRY_NAME }}":
"auth":
username: "${{ env.DOCKERHUB_USERNAME }}"
password: "${{ env.DOCKERHUB_PASSWORD }}"
awsCredentials:
secretKey: "$AWS_SECRET_KEY"
accessKey: "$AWS_ACCESS_KEY"
defaultRegion: "${{ secrets.AWS_REGION }}"
awsMachineConfigs:
region: "${{ secrets.AWS_REGION }}"
awsMachineConfig:
- roles: ["etcd", "controlplane", "worker"]
ami: "${{ secrets.AWS_DUALSTACK_AMI }}"
enablePrimaryIPv6: true
httpProtocolIpv6: "enabled"
ipv6AddressOnly: true
ipv6AddressCount: "1"
instanceType: "${{ vars.AWS_INSTANCE_TYPE }}"
sshUser: "${{ secrets.AWS_USER }}"
subnetId: "${{ secrets.AWS_SUBNET_ID }}"
vpcId: "${{ secrets.AWS_VPC_ID }}"
volumeType: "${{ vars.AWS_VOLUME_TYPE }}"
zone: "${{ vars.AWS_ZONE_LETTER }}"
retries: "5"
rootSize: "${{ vars.AWS_ROOT_SIZE }}"
securityGroup: [${{ secrets.AWS_SECURITY_GROUP_NAMES }}]
subnetId: "${{ secrets.AWS_SUBNET_ID }}"
awsEC2Configs:
region: "${{ secrets.AWS_REGION }}"
awsSecretAccessKey: "$AWS_SECRET_KEY"
awsAccessKeyID: "$AWS_ACCESS_KEY"
awsEC2Config:
- instanceType: "${{ vars.AWS_INSTANCE_TYPE }}"
awsRegionAZ: "${{ secrets.AWS_REGION }}${{ vars.AWS_ZONE_LETTER }}"
awsAMI: "${{ secrets.AWS_DUALSTACK_AMI }}"
awsSecurityGroups: [${{ secrets.AWS_SECURITY_GROUPS }}]
awsSubnetID: "${{ secrets.AWS_SUBNET_ID }}"
awsSSHKeyName: "${{ secrets.SSH_PRIVATE_KEY_NAME }}.pem"
awsCICDInstanceTag: "hb-daily-provisioning"
awsIAMProfile: "${{ secrets.AWS_IAM_PROFILE }}"
awsUser: "${{ secrets.AWS_USER }}"
volumeSize: ${{ vars.AWS_ROOT_SIZE }}
roles: ["etcd", "controlplane", "worker"]
sshPath:
sshPath: "${{ secrets.SSH_PRIVATE_KEY_PATH }}"
EOF
- name: Export CATTLE_TEST_CONFIG
run: echo "CATTLE_TEST_CONFIG=${{ github.workspace }}/config.yaml" >> $GITHUB_ENV
shell: bash
- name: Set up Go environment
uses: actions/setup-go@v5
with:
go-version-file: "./go.mod"
- name: Build Packages
run: ./.github/scripts/go-build.sh
- name: Install gotestsum
run: go install gotest.tools/gotestsum@latest
- name: Set up Terraform
uses: hashicorp/setup-terraform@v2
with:
terraform_version: "${{ vars.TERRAFORM_VERSION }}"
terraform_wrapper: false
- name: Creating Rancher server
run: go run /home/runner/work/tests/tests/validation/recurring/infrastructure/setuprancher/createRancherServer.go
- name: Run Dualstack Provisioning tests
env:
QASE_TEST_RUN_ID: ${{ steps.get-qase-id.outputs.id }}
QASE_AUTOMATION_TOKEN: ${{ secrets.QASE_AUTOMATION_TOKEN }}
QASE_PROJECT_ID: ${{ secrets.HB_QASE_PROJECT_ID }}
uses: ./.github/actions/run-hostbusters-dualstack-provisioning
- name: Cleanup Infrastructure
if: always()
working-directory: tfp-automation/modules/dualstack/aws
run: terraform destroy -auto-approve > /dev/null 2>&1
- name: Refresh AWS credentials
if: always()
uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: ${{ secrets.IAM_ROLE }}
aws-region: ${{ secrets.AWS_REGION }}
- name: Revoke Runner IP
if: always()
uses: ./.github/actions/revoke-runner-ip
with:
prefix-list-id: ${{ secrets.AWS_MANAGED_PREFIX_LIST_ID }}
region: "${{ secrets.AWS_REGION }}"
- name: Set job status output
if: always()
run: echo "job_status=${{ job.status }}" >> $GITHUB_OUTPUT
id: set-job-status
- name: Reporting Results to Slack
if: always()
uses: ./.github/actions/report-to-slack
with:
job-status: ${{ steps.set-job-status.outputs.job_status }}
slack-channel: ${{ secrets.SLACK_CHANNEL }}
slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}