From d90cdcf63f0838911c6e3ea6631dc46e6e80364a Mon Sep 17 00:00:00 2001 From: Jonathan Davenport Date: Thu, 7 Nov 2024 12:24:13 -0500 Subject: [PATCH 01/12] readme updates, removes unused role input --- eks-hosted/01-iam/Pulumi.README.yaml | 6 ----- eks-hosted/01-iam/Pulumi.dev.yaml | 18 ++++++++++++++ eks-hosted/01-iam/config.ts | 1 - eks-hosted/01-iam/index.ts | 4 ---- eks-hosted/02-networking/Pulumi.dev.yaml | 29 +++++++++++++++++++++++ eks-hosted/05-eks-cluster/Pulumi.dev.yaml | 23 ++++++++++++++++++ eks-hosted/05-eks-cluster/config.ts | 7 +++--- eks-hosted/05-eks-cluster/index.ts | 3 +-- eks-hosted/README.md | 19 ++++++++------- 9 files changed, 85 insertions(+), 25 deletions(-) create mode 100644 eks-hosted/01-iam/Pulumi.dev.yaml create mode 100644 eks-hosted/02-networking/Pulumi.dev.yaml create mode 100644 eks-hosted/05-eks-cluster/Pulumi.dev.yaml diff --git a/eks-hosted/01-iam/Pulumi.README.yaml b/eks-hosted/01-iam/Pulumi.README.yaml index 6833a814..8e5c5d0e 100644 --- a/eks-hosted/01-iam/Pulumi.README.yaml +++ b/eks-hosted/01-iam/Pulumi.README.yaml @@ -7,12 +7,6 @@ config: # Note, you will be setting this value for each stack you create, so use a value that will make sense across all the infrastructure. baseName: pulumiselfhost - # Provide an SSO role arn that can be assumed by the Pulumi cli to deploy the infrastructure. - # Currently this is just passed through and consumed by later stacks to enable the k8s provider to assume the role and deploy - # k8s infra to the eks cluster. - # A future iteration may create this sso role as part of the stack. - ssoRoleArn: arn:aws:iam::123456789012:role/SSO-Role-Name - #### BRINGING YOUR OWN IAM INFRASTRUCTURE ### # If you are not using the `01-iam` stack, then set the following values that would have otherwise been provided by the iam stack. # The stack will then "pretend" it created the resources and output the values for the other stacks to use. diff --git a/eks-hosted/01-iam/Pulumi.dev.yaml b/eks-hosted/01-iam/Pulumi.dev.yaml new file mode 100644 index 00000000..17ccd447 --- /dev/null +++ b/eks-hosted/01-iam/Pulumi.dev.yaml @@ -0,0 +1,18 @@ +encryptionsalt: v1:fpWgKidMtzw=:v1:AOY9DqFslMZ7S/Kt:iWJuCzpaM3AsU3ke8VWg5DchlajAXg== +config: + # Set the AWS region to deploy the infrastructure to. + # This should be the same for all the stacks. + aws:region: us-east-1 + + # Set a base name to be used when creating the resources. + # Note, you will be setting this value for each stack you create, so use a value that will make sense across all the infrastructure. + baseName: pulumiselfhost + + #### BRINGING YOUR OWN IAM INFRASTRUCTURE ### + # If you are not using the `01-iam` stack, then set the following values that would have otherwise been provided by the iam stack. + # The stack will then "pretend" it created the resources and output the values for the other stacks to use. + databaseMonitoringRoleArn: + eksServiceRoleName: + eksInstanceRoleName: + instanceProfileName: + diff --git a/eks-hosted/01-iam/config.ts b/eks-hosted/01-iam/config.ts index fa3f8b38..ee3115df 100644 --- a/eks-hosted/01-iam/config.ts +++ b/eks-hosted/01-iam/config.ts @@ -4,7 +4,6 @@ const pulumiConfig = new pulumi.Config(); export const config = { baseName: pulumiConfig.require("baseName"), - ssoRoleArn: pulumiConfig.require("ssoRoleArn"), // These may not be set - see Pulumi.README.yaml for more information. 
eksServiceRoleName: pulumiConfig.get("eksServiceRoleName"),
    eksInstanceRoleName: pulumiConfig.get("eksInstanceRoleName"),
diff --git a/eks-hosted/01-iam/index.ts b/eks-hosted/01-iam/index.ts
index cb68d719..05816b46 100644
--- a/eks-hosted/01-iam/index.ts
+++ b/eks-hosted/01-iam/index.ts
@@ -3,10 +3,6 @@ import * as aws from "@pulumi/aws";
 import { config } from "./config";
 import { albControllerPolicyStatement } from "./albControllerPolicy";
 
-/// SSO Role ///
-// This is currently managed outside of the stack and passed through for later stacks to use.
-export const ssoRoleArn = config.ssoRoleArn;
-
 // These roles are either provided by the user or created in this stack.
 export let eksServiceRoleName: string | pulumi.Output<string>;
 export let eksInstanceRoleName: string | pulumi.Output<string>;
diff --git a/eks-hosted/02-networking/Pulumi.dev.yaml b/eks-hosted/02-networking/Pulumi.dev.yaml
new file mode 100644
index 00000000..461ce693
--- /dev/null
+++ b/eks-hosted/02-networking/Pulumi.dev.yaml
@@ -0,0 +1,29 @@
+encryptionsalt: v1:PNMFgSF+l2Q=:v1:R1kzoaceVMFrzr1F:Ycr9wciJlGhqHIylG01DTSVbrUWqqw==
+config:
+  # Set the AWS region to deploy the infrastructure to.
+  # This should be the same for all the stacks.
+  aws:region: us-east-1
+
+  # Set a base name to be used when creating the resources.
+  # Note, you will be setting this value for each stack you create, so use a value that will make sense across all the infrastructure.
+  baseName: pulumiselfhost
+
+  # Set the name of the EKS cluster to create.
+  # This is needed to allow subnet matching for the k8s workloads.
+  eksClusterName: pulumiselfhost-eks
+
+  # Set the CIDR block for the VPC and related subnets.
+  networkCidrBlock: 172.16.0.0/16
+
+  ##### BRINGING YOUR OWN NETWORKING INFRASTRUCTURE ###
+  # If you are not using the `02-networking` stack, then set the commented out values that would have otherwise been provided by the stack.
+  # The stack will then "pretend" it created the resources and output the values for the other stacks to use.
+  # vpcId: #"vpc-aaaaaaaaaa"
+  # privateSubnetIds:
+  #- "subnet-fdafafsadasdfddd"
+  #- "subnet-dfdfasddsafdsfsd"
+  #- "subnet-dsfdsfdsfsdasadf"
+  # publicSubnetIds:
+  #- "subnet-dsafdfsadfdsafas"
+  #- "subnet-sasdfsadfsdsdafd"
+  #- "subnet-dasdfdssdafffdsf"
diff --git a/eks-hosted/05-eks-cluster/Pulumi.dev.yaml b/eks-hosted/05-eks-cluster/Pulumi.dev.yaml
new file mode 100644
index 00000000..ae3f594c
--- /dev/null
+++ b/eks-hosted/05-eks-cluster/Pulumi.dev.yaml
@@ -0,0 +1,23 @@
+encryptionsalt: v1:BNqq8PDTHtw=:v1:Z+rurRi7bBa8Nekd:SBLxXoeVjnaig3g52QWL9TfzD+za5w==
+config:
+  # Set the AWS region to deploy the infrastructure to.
+  # This should be the same for all the stacks.
+  aws:region: us-east-1
+
+  # Set a base name to be used when creating the resources.
+  # Note, you will be setting this value for each stack you create, so use a value that will make sense across all the infrastructure.
+  baseName: pulumiselfhost
+
+  # Set a cluster version for the EKS cluster. If not set a default (see config.ts) will be used.
+  clusterVersion: "1.30"
+
+  # EKS cluster settings. If not set, defaults (see config.ts) will be used.
+  instanceType: "t3.xlarge"
+  standardNodeGroupDesiredCapacity: 2
+  standardNodeGroupMinSize: 2
+  standardNodeGroupMaxSize: 5
+
+  pulumiNodeGroupInstanceType: "t3.xlarge"
+  pulumiNodeGroupDesiredCapacity: 3
+  pulumiNodeGroupMinSize: 3
+  pulumiNodeGroupMaxSize: 5
diff --git a/eks-hosted/05-eks-cluster/config.ts b/eks-hosted/05-eks-cluster/config.ts
index 1e0cebb8..1d768ecd 100644
--- a/eks-hosted/05-eks-cluster/config.ts
+++ b/eks-hosted/05-eks-cluster/config.ts
@@ -12,7 +12,6 @@ const iamStackRef = new pulumi.StackReference(`${orgName}/selfhosted-01-iam/${st
 const eksInstanceRoleName = iamStackRef.requireOutput("eksInstanceRoleName");
 const instanceProfileName = iamStackRef.requireOutput("instanceProfileName");
 const eksServiceRoleName = iamStackRef.requireOutput("eksServiceRoleName");
-const ssoRoleArn = iamStackRef.requireOutput("ssoRoleArn");
 
 // Networking Stack values
 // Get the needed values from the networking stack.
@@ -35,7 +34,8 @@ export const config = {
     /**
      * EKS Node Group
      */
-    standardNodeGroupInstanceType: pulumiConfig.get("standardNodeGroupInstanceType") || "t3.xlarge",
+    // standardNodeGroupInstanceType: pulumiConfig.get("standardNodeGroupInstanceType") || "t3.xlarge",
+    standardNodeGroupInstanceType: "t3.xlarge",
     standardNodeGroupDesiredCapacity: pulumiConfig.getNumber("standardNodeGroupDesiredCapacity") ?? 2,
     standardNodeGroupMinSize: pulumiConfig.getNumber("standardNodeGroupMinSize") ?? 2,
     standardNodeGroupMaxSize: pulumiConfig.getNumber("standardNodeGroupMaxSize") ?? 5,
@@ -49,7 +49,6 @@ export const config = {
     eksInstanceRoleName: eksInstanceRoleName,
     instanceProfileName: instanceProfileName,
     eksServiceRoleName: eksServiceRoleName,
-    ssoRoleArn: ssoRoleArn,
 
     // Networking stack values
     clusterName: clusterName,
@@ -58,3 +57,5 @@ export const config = {
     privateSubnetIds: privateSubnetIds,
 
 };
+
+console.log(typeof(config.standardNodeGroupInstanceType))
diff --git a/eks-hosted/05-eks-cluster/index.ts b/eks-hosted/05-eks-cluster/index.ts
index 27019b92..848dbb6f 100644
--- a/eks-hosted/05-eks-cluster/index.ts
+++ b/eks-hosted/05-eks-cluster/index.ts
@@ -67,8 +67,7 @@ const ngStandard = new eks.NodeGroup(`${baseName}-ng-standard`, {
     nodeSecurityGroup: cluster.nodeSecurityGroup,
     clusterIngressRule: cluster.eksClusterIngressRule,
     amiId: amiId,
-
-    instanceType: config.standardNodeGroupInstanceType,
+    instanceType: config.standardNodeGroupInstanceType,
     desiredCapacity: config.standardNodeGroupDesiredCapacity,
     minSize: config.standardNodeGroupMinSize,
     maxSize: config.standardNodeGroupMaxSize,
diff --git a/eks-hosted/README.md b/eks-hosted/README.md
index 0b5ec533..d0568185 100644
--- a/eks-hosted/README.md
+++ b/eks-hosted/README.md
@@ -27,14 +27,14 @@ That said, one can use Pulumi Cloud for the state backend as well. However, thes
 
 ### Configuration
 
-Each project has its own configuration requirements. Each project folder has a `Pulumi.EXAMPLE.yaml` file that includes instructions for setting up the configuration and can be used as a template for the actual stack config file (see [Pulumi stack config](https://www.pulumi.com/docs/iac/concepts/config/)).
+Each project has its own configuration requirements. Each project folder has a `Pulumi.README.yaml` file that includes instructions for setting up the configuration and can be used as a template for the actual stack config file (see [Pulumi stack config](https://www.pulumi.com/docs/iac/concepts/config/)).
 
 ### Deployment Order
 
 Each subfolder is its own Pulumi project (and by extension stack). 
The numbering represents the order of deployment.
 
 ### Using Existing Infrastructure
-In some cases, you man need to use existing infrastructure.
+In some cases, you may need to use existing infrastructure.
 Currently, the following installer projects support the case where the infrastructure already exists:
 
 * 01-iam: IAM resources
@@ -43,8 +43,9 @@ Currently, the following installer projects support the case where the infrastru
 * 30-esc: S3 bucket for ESC-related storage
 
 If using pre-existing resources, you will still run the given stacks (i.e. `01-iam` and `02-networking`) but you will provide the values for the resources you created - see the project's `Pulumi.README.yaml` for details.
+
 The stack will then pretend to create the resources and output the values so that downstream stacks can use the values as needed.
-- Review the `Pulumi.README.yaml` file to understand some of the inputs for the given stack.
+- Review the `Pulumi.README.yaml` file to understand the inputs for the given stack.
 - Review `index.ts` and any related files to understand how the given infrastructure is created.
 
 ### Deployment Instructions
 
 These instructions assume you are using "prod" for the name of your stacks. Of course you can name the stack anything you want.
 
 The process is the same for each microstack:
 - cd to the given project folder (e.g. `01-iam`)
 - `npm install` to install the package dependencies
-- Run `pulumi stack init prod` (or whatever name of stack you want to use)
-- copy "Pulumi.README.yaml" to a file where "README" is replaced with the name of your stack.
-    - For example, if you are naming the stacks "prod", then you would run `cp Pulumi.README.yaml Pulumi.prod.yaml`
+- Run `pulumi stack init prod` (or whatever name of stack you want to use). This will create a new empty stack, and will create a stack config file with the "encryptionsalt" key (if using the passphrase secrets provider).
+- copy the contents of the "Pulumi.README.yaml" file into the new "Pulumi.prod.yaml" stack config file, with the "config" key at the top level.
 - edit "Pulumi.prod.yaml" and follow the instructions in the file about setting configuration values.
   - In a number of cases you can use the default values copied from "Pulumi.README.yaml".
 - Run `pulumi up` to deploy the infrastructure.
 - Move to the next project folder and repeat the above steps.
 
-#### Helpful Tips about Stack Depenencies
-The following stacks manage stateful resources or resources that are foundational to other stacks. So careful thought should be given before destroying them:
+#### Helpful Tips about Stack Dependencies
+The following stacks manage stateful resources or resources that are foundational to other stacks, so think carefully before destroying them:
 * 01-iam
 * 02-networking
 * 05-eks-cluster
@@ -71,5 +71,6 @@ The following stacks manage stateful resources or resources that are foundationa
 * 30-esc
 
 The following stacks do not manage stateful resources and so can be destroyed/re-created without losing data. Destroying/recreating these stacks will cause a service disruption but no permanent data loss:
-* 25-insights: If restarted, use the service UI "selfhosted" page to reindex the searchclsuter.. See: [Re-index opensearch](https://www.pulumi.com/docs/pulumi-cloud/admin/self-hosted/components/search/#backfilling-data)
+
+* 25-insights: If restarted, use the service UI "selfhosted" page to reindex the searchcluster. 
See: [Re-index opensearch](https://www.pulumi.com/docs/pulumi-cloud/admin/self-hosted/components/search/#backfilling-data)
 * 90-pulumi-service
From 8fcae274919faf73997e065c9f51a47607181cd3 Mon Sep 17 00:00:00 2001
From: Jonathan Davenport
Date: Thu, 7 Nov 2024 16:18:13 -0500
Subject: [PATCH 02/12] changes stack outputs to Output<string> from Output<any> using interpolate, comments out labels on nodegroups as needs string, not Output<string>
---
 eks-hosted/05-eks-cluster/config.ts | 13 +++++--------
 eks-hosted/05-eks-cluster/index.ts | 10 ++++++----
 2 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/eks-hosted/05-eks-cluster/config.ts b/eks-hosted/05-eks-cluster/config.ts
index 1d768ecd..acd1e818 100644
--- a/eks-hosted/05-eks-cluster/config.ts
+++ b/eks-hosted/05-eks-cluster/config.ts
@@ -34,8 +34,7 @@ export const config = {
     /**
      * EKS Node Group
      */
-    // standardNodeGroupInstanceType: pulumiConfig.get("standardNodeGroupInstanceType") || "t3.xlarge",
-    standardNodeGroupInstanceType: "t3.xlarge",
+    standardNodeGroupInstanceType: pulumiConfig.get("standardNodeGroupInstanceType") || "t3.xlarge",
     standardNodeGroupDesiredCapacity: pulumiConfig.getNumber("standardNodeGroupDesiredCapacity") ?? 2,
     standardNodeGroupMinSize: pulumiConfig.getNumber("standardNodeGroupMinSize") ?? 2,
     standardNodeGroupMaxSize: pulumiConfig.getNumber("standardNodeGroupMaxSize") ?? 5,
@@ -46,9 +45,9 @@ export const config = {
     pulumiNodeGroupMaxSize: pulumiConfig.getNumber("pulumiNodeGroupMaxSize") ?? 5,
 
     // IAM stack values
-    eksInstanceRoleName: eksInstanceRoleName,
-    instanceProfileName: instanceProfileName,
-    eksServiceRoleName: eksServiceRoleName,
+    eksInstanceRoleName: pulumi.interpolate `${eksInstanceRoleName}`,
+    instanceProfileName: pulumi.interpolate `${instanceProfileName}`,
+    eksServiceRoleName: pulumi.interpolate `${eksServiceRoleName}`,
 
     // Networking stack values
     clusterName: clusterName,
@@ -56,6 +55,4 @@ export const config = {
     publicSubnetIds: publicSubnetIds,
     privateSubnetIds: privateSubnetIds,
 
-};
-
-console.log(typeof(config.standardNodeGroupInstanceType))
+};
\ No newline at end of file
diff --git a/eks-hosted/05-eks-cluster/index.ts b/eks-hosted/05-eks-cluster/index.ts
index 848dbb6f..e5ec7035 100644
--- a/eks-hosted/05-eks-cluster/index.ts
+++ b/eks-hosted/05-eks-cluster/index.ts
@@ -10,6 +10,7 @@ const tags = { "Project": "pulumi-k8s-aws-cluster", "Owner": "pulumi"};
 // --- EKS Cluster ---
 const serviceRole = aws.iam.Role.get("eksServiceRole", config.eksServiceRoleName)
 const instanceRole = aws.iam.Role.get("instanceRole", config.eksInstanceRoleName)
+
 const instanceProfile = aws.iam.InstanceProfile.get("ng-standard", config.instanceProfileName)
 
 // Create an EKS cluster.
@@ -50,14 +51,14 @@ export const clusterName = cluster.core.cluster.name;
 export const region = aws.config.region;
 export const nodeSecurityGroupId = cluster.nodeSecurityGroup.id; // For RDS
 export const nodeGroupInstanceType = config.pulumiNodeGroupInstanceType;
-
 /////////////////////
 /// Build node groups
 const ssmParam = pulumi.output(aws.ssm.getParameter({
     // https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html
     name: `/aws/service/eks/optimized-ami/${config.clusterVersion}/amazon-linux-2/recommended`,
 }))
-const amiId = ssmParam.value.apply(s => JSON.parse(s).image_id)
+
+export const amiId = ssmParam.value.apply(s => JSON.parse(s).image_id)
 
 // Create a standard node group. 
const ngStandard = new eks.NodeGroup(`${baseName}-ng-standard`, { @@ -72,7 +73,7 @@ const ngStandard = new eks.NodeGroup(`${baseName}-ng-standard`, { minSize: config.standardNodeGroupMinSize, maxSize: config.standardNodeGroupMaxSize, - labels: {"amiId": `${amiId}`}, + // labels: {"amiId": pulumi.interpolate `${amiId}`}, cloudFormationTags: clusterName.apply(clusterName => ({ "k8s.io/cluster-autoscaler/enabled": "true", [`k8s.io/cluster-autoscaler/${clusterName}`]: "true", @@ -82,6 +83,7 @@ const ngStandard = new eks.NodeGroup(`${baseName}-ng-standard`, { providers: { kubernetes: cluster.provider}, }); + // Create a standard node group tainted for use only by self-hosted pulumi. const ngStandardPulumi = new eks.NodeGroup(`${baseName}-ng-standard-pulumi`, { cluster: cluster, @@ -96,7 +98,7 @@ const ngStandardPulumi = new eks.NodeGroup(`${baseName}-ng-standard-pulumi`, { minSize: config.pulumiNodeGroupMinSize, maxSize: config.pulumiNodeGroupMaxSize, - labels: {"amiId": `${amiId}`}, + // labels: {"amiId": amiId}, taints: { "self-hosted-pulumi": { value: "true", effect: "NoSchedule"}}, cloudFormationTags: clusterName.apply(clusterName => ({ "k8s.io/cluster-autoscaler/enabled": "true", From 932f4964137cb58f2592afe697f55e29832ec882 Mon Sep 17 00:00:00 2001 From: Jonathan Davenport Date: Fri, 8 Nov 2024 09:43:19 -0500 Subject: [PATCH 03/12] removes unused import, updates gitignore with stack config files and bun.lockb --- eks-hosted/.gitignore | 2 ++ eks-hosted/10-cluster-svcs/index.ts | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/eks-hosted/.gitignore b/eks-hosted/.gitignore index 6fc672d7..ba5d054d 100644 --- a/eks-hosted/.gitignore +++ b/eks-hosted/.gitignore @@ -9,3 +9,5 @@ package-lock.json alb-ingress-chart-local/ */aws-load-balancer-controller/* Pulumi.pulumitest.yaml +Pulumi.*.yaml +bun.lockb \ No newline at end of file diff --git a/eks-hosted/10-cluster-svcs/index.ts b/eks-hosted/10-cluster-svcs/index.ts index c3053209..2e94b987 100644 --- a/eks-hosted/10-cluster-svcs/index.ts +++ b/eks-hosted/10-cluster-svcs/index.ts @@ -1,6 +1,5 @@ import * as aws from "@pulumi/aws"; import * as k8s from "@pulumi/kubernetes"; -import * as pulumi from "@pulumi/pulumi"; import { config } from "./config"; import { createAlbSecurityGroup, createAlbIngressController } from "./ingress-controller"; From 06208a203136e0b1e696057785dc24c26f03b739 Mon Sep 17 00:00:00 2001 From: Jonathan Davenport Date: Fri, 8 Nov 2024 09:54:23 -0500 Subject: [PATCH 04/12] removes stack config files, cleans up whitespace --- eks-hosted/01-iam/Pulumi.dev.yaml | 18 -------------- eks-hosted/02-networking/Pulumi.dev.yaml | 29 ----------------------- eks-hosted/05-eks-cluster/Pulumi.dev.yaml | 23 ------------------ eks-hosted/05-eks-cluster/config.ts | 3 +-- eks-hosted/05-eks-cluster/index.ts | 8 +++---- 5 files changed, 4 insertions(+), 77 deletions(-) delete mode 100644 eks-hosted/01-iam/Pulumi.dev.yaml delete mode 100644 eks-hosted/02-networking/Pulumi.dev.yaml delete mode 100644 eks-hosted/05-eks-cluster/Pulumi.dev.yaml diff --git a/eks-hosted/01-iam/Pulumi.dev.yaml b/eks-hosted/01-iam/Pulumi.dev.yaml deleted file mode 100644 index 17ccd447..00000000 --- a/eks-hosted/01-iam/Pulumi.dev.yaml +++ /dev/null @@ -1,18 +0,0 @@ -encryptionsalt: v1:fpWgKidMtzw=:v1:AOY9DqFslMZ7S/Kt:iWJuCzpaM3AsU3ke8VWg5DchlajAXg== -config: - # Set the AWS region to deploy the infrastructure to. - # This should be the same for all the stacks. - aws:region: us-east-1 - - # Set a base name to be used when creating the resources. 
-  # Note, you will be setting this value for each stack you create, so use a value that will make sense across all the infrastructure.
-  baseName: pulumiselfhost
-
-  #### BRINGING YOUR OWN IAM INFRASTRUCTURE ###
-  # If you are not using the `01-iam` stack, then set the following values that would have otherwise been provided by the iam stack.
-  # The stack will then "pretend" it created the resources and output the values for the other stacks to use.
-  databaseMonitoringRoleArn:
-  eksServiceRoleName:
-  eksInstanceRoleName:
-  instanceProfileName:
-
diff --git a/eks-hosted/02-networking/Pulumi.dev.yaml b/eks-hosted/02-networking/Pulumi.dev.yaml
deleted file mode 100644
index 461ce693..00000000
--- a/eks-hosted/02-networking/Pulumi.dev.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-encryptionsalt: v1:PNMFgSF+l2Q=:v1:R1kzoaceVMFrzr1F:Ycr9wciJlGhqHIylG01DTSVbrUWqqw==
-config:
-  # Set the AWS region to deploy the infrastructure to.
-  # This should be the same for all the stacks.
-  aws:region: us-east-1
-
-  # Set a base name to be used when creating the resources.
-  # Note, you will be setting this value for each stack you create, so use a value that will make sense across all the infrastructure.
-  baseName: pulumiselfhost
-
-  # Set the name of the EKS cluster to create.
-  # This is needed to allow subnet matching for the k8s workloads.
-  eksClusterName: pulumiselfhost-eks
-
-  # Set the CIDR block for the VPC and related subnets.
-  networkCidrBlock: 172.16.0.0/16
-
-  ##### BRINGING YOUR OWN NETWORKING INFRASTRUCTURE ###
-  # If you are not using the `02-networking` stack, then set the commented out values that would have otherwise been provided by the stack.
-  # The stack will then "pretend" it created the resources and output the values for the other stacks to use.
-  # vpcId: #"vpc-aaaaaaaaaa"
-  # privateSubnetIds:
-  #- "subnet-fdafafsadasdfddd"
-  #- "subnet-dfdfasddsafdsfsd"
-  #- "subnet-dsfdsfdsfsdasadf"
-  # publicSubnetIds:
-  #- "subnet-dsafdfsadfdsafas"
-  #- "subnet-sasdfsadfsdsdafd"
-  #- "subnet-dasdfdssdafffdsf"
diff --git a/eks-hosted/05-eks-cluster/Pulumi.dev.yaml b/eks-hosted/05-eks-cluster/Pulumi.dev.yaml
deleted file mode 100644
index ae3f594c..00000000
--- a/eks-hosted/05-eks-cluster/Pulumi.dev.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-encryptionsalt: v1:BNqq8PDTHtw=:v1:Z+rurRi7bBa8Nekd:SBLxXoeVjnaig3g52QWL9TfzD+za5w==
-config:
-  # Set the AWS region to deploy the infrastructure to.
-  # This should be the same for all the stacks.
-  aws:region: us-east-1
-
-  # Set a base name to be used when creating the resources.
-  # Note, you will be setting this value for each stack you create, so use a value that will make sense across all the infrastructure.
-  baseName: pulumiselfhost
-
-  # Set a cluster version for the EKS cluster. If not set a default (see config.ts) will be used.
-  clusterVersion: "1.30"
-
-  # EKS cluster settings. If not set, defaults (see config.ts) will be used. 
- instanceType: "t3.xlarge" - standardNodeGroupDesiredCapacity: 2 - standardNodeGroupMinSize: 2 - standardNodeGroupMaxSize: 5 - - pulumiNodeGroupInstanceType: "t3.xlarge" - pulumiNodeGroupDesiredCapacity: 3 - pulumiNodeGroupMinSize: 3 - pulumiNodeGroupMaxSize: 5 diff --git a/eks-hosted/05-eks-cluster/config.ts b/eks-hosted/05-eks-cluster/config.ts index acd1e818..9c4c337c 100644 --- a/eks-hosted/05-eks-cluster/config.ts +++ b/eks-hosted/05-eks-cluster/config.ts @@ -54,5 +54,4 @@ export const config = { vpcId: vpcId, publicSubnetIds: publicSubnetIds, privateSubnetIds: privateSubnetIds, - -}; \ No newline at end of file +} \ No newline at end of file diff --git a/eks-hosted/05-eks-cluster/index.ts b/eks-hosted/05-eks-cluster/index.ts index e5ec7035..9602b773 100644 --- a/eks-hosted/05-eks-cluster/index.ts +++ b/eks-hosted/05-eks-cluster/index.ts @@ -10,7 +10,6 @@ const tags = { "Project": "pulumi-k8s-aws-cluster", "Owner": "pulumi"}; // --- EKS Cluster --- const serviceRole = aws.iam.Role.get("eksServiceRole", config.eksServiceRoleName) const instanceRole = aws.iam.Role.get("instanceRole", config.eksInstanceRoleName) - const instanceProfile = aws.iam.InstanceProfile.get("ng-standard", config.instanceProfileName) // Create an EKS cluster. @@ -51,13 +50,13 @@ export const clusterName = cluster.core.cluster.name; export const region = aws.config.region; export const nodeSecurityGroupId = cluster.nodeSecurityGroup.id; // For RDS export const nodeGroupInstanceType = config.pulumiNodeGroupInstanceType; + ///////////////////// /// Build node groups const ssmParam = pulumi.output(aws.ssm.getParameter({ // https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html name: `/aws/service/eks/optimized-ami/${config.clusterVersion}/amazon-linux-2/recommended`, })) - export const amiId = ssmParam.value.apply(s => JSON.parse(s).image_id) // Create a standard node group. @@ -68,12 +67,12 @@ const ngStandard = new eks.NodeGroup(`${baseName}-ng-standard`, { nodeSecurityGroup: cluster.nodeSecurityGroup, clusterIngressRule: cluster.eksClusterIngressRule, amiId: amiId, - instanceType: config.standardNodeGroupInstanceType, + instanceType: config.standardNodeGroupInstanceType, desiredCapacity: config.standardNodeGroupDesiredCapacity, minSize: config.standardNodeGroupMinSize, maxSize: config.standardNodeGroupMaxSize, - // labels: {"amiId": pulumi.interpolate `${amiId}`}, + // labels: {"amiId": amiId}, cloudFormationTags: clusterName.apply(clusterName => ({ "k8s.io/cluster-autoscaler/enabled": "true", [`k8s.io/cluster-autoscaler/${clusterName}`]: "true", @@ -83,7 +82,6 @@ const ngStandard = new eks.NodeGroup(`${baseName}-ng-standard`, { providers: { kubernetes: cluster.provider}, }); - // Create a standard node group tainted for use only by self-hosted pulumi. 
const ngStandardPulumi = new eks.NodeGroup(`${baseName}-ng-standard-pulumi`, { cluster: cluster, From 06bbe6d515144272a6d90c56d7c2ec9d91f448ab Mon Sep 17 00:00:00 2001 From: Jonathan Davenport Date: Fri, 8 Nov 2024 10:18:22 -0500 Subject: [PATCH 05/12] runs markdownlint on README --- eks-hosted/05-eks-cluster/package.json | 4 +- eks-hosted/README.md | 51 +++++++++++++++----------- eks-hosted/package.json | 6 +++ 3 files changed, 37 insertions(+), 24 deletions(-) create mode 100644 eks-hosted/package.json diff --git a/eks-hosted/05-eks-cluster/package.json b/eks-hosted/05-eks-cluster/package.json index de4cd1be..bd74f701 100644 --- a/eks-hosted/05-eks-cluster/package.json +++ b/eks-hosted/05-eks-cluster/package.json @@ -5,7 +5,7 @@ }, "dependencies": { "@pulumi/aws": "^6.54.0", - "@pulumi/pulumi": "^3.136.0", - "@pulumi/eks": "^2.8.1" + "@pulumi/eks": "^2.8.1", + "@pulumi/pulumi": "^3.136.0" } } diff --git a/eks-hosted/README.md b/eks-hosted/README.md index d0568185..d0e04609 100644 --- a/eks-hosted/README.md +++ b/eks-hosted/README.md @@ -3,16 +3,19 @@ This version of the EKS installer for Pulumi self-hosted is broken into smaller, individual stacks. This new architecture is being implemented to meet the following requirements: -- Allow users to bring their own infrastructure for sections of the solution. + +- Allow users to bring their own infrastructure for sections of the solution. - For example IAM is managed as a separate stack since some customers cannot allow the installer to create and manage the IAM resources needed for the service infrastructure. Similarly, networking may be handled by a different team, etc. -- Support mixing and matching capabilities based on the license. Different features such as insights, ESC, deployments etc. require their own infrastructure. -- Make it easier to maintain and test the overall solution. By breaking the overall deployment into smaller stacks, it makes it easier to test the different parts of the solution since individual stacks can be upped and destroyed. +- Support mixing and matching capabilities based on the license. Different features such as insights, ESC, deployments etc. require their own infrastructure. +- Make it easier to maintain and test the overall solution. By breaking the overall deployment into smaller stacks, it makes it easier to test the different parts of the solution since individual stacks can be upped and destroyed. This architecture does impose some design requirements: + - Make each stack as self-contained as possible. - In those cases where the provided installer is not used (i.e. the user stands up the resources on their own), then a mechanism is needed to pass in the ids, etc for that externally managed infrastructure while still supporting those cases where the infra is managed by the installers. ## Installer Revision History + Version ID | Date | K8s Version Supported | Note ---|---|---|-- 1.0 | Oct, 2024 | 1.30.3 | Initial version of the new eks installer. @@ -22,29 +25,30 @@ Version ID | Date | K8s Version Supported | Note ### State Management It is generally assumed one is using an S3 state backend. -See [AWS S3 state Backend](https://www.pulumi.com/docs/iac/concepts/state-and-backends/#aws-s3) for instructions on how to set up and login to an s3 backend. +See [AWS S3 state Backend](https://www.pulumi.com/docs/iac/concepts/state-and-backends/#aws-s3) for instructions on how to set up and login to an s3 backend. That said, one can use Pulumi Cloud for the state backend as well. 
However, these instructions will generally assume an S3 backend is being used.

### Configuration

Each project has its own configuration requirements. Each project folder has a `Pulumi.README.yaml` file that includes instructions for setting up the configuration and can be used as a template for the actual stack config file (see [Pulumi stack config](https://www.pulumi.com/docs/iac/concepts/config/)).

### Deployment Order

Each subfolder is its own Pulumi project (and by extension stack). The numbering represents the order of deployment.

### Using Existing Infrastructure

In some cases, you may need to use existing infrastructure.
Currently, the following installer projects support the case where the infrastructure already exists:

- 01-iam: IAM resources
- 02-networking: VPC and subnets
- 15-state-policies-mgmt: S3 buckets for state and policy storage.
- 30-esc: S3 bucket for ESC-related storage

If using pre-existing resources, you will still run the given stacks (i.e. `01-iam` and `02-networking`) but you will provide the values for the resources you created - see the project's `Pulumi.README.yaml` for details.
The stack will skip creating the relevant resources and pass the input values through to stack outputs so that downstream stacks can use the values as needed.

- Review the `Pulumi.README.yaml` file to understand the inputs for the given stack.
- Review `index.ts` and any related files to understand how the given infrastructure is created.

### Deployment Instructions

These instructions assume you are using "prod" for the name of your stacks. Of course you can name the stack anything you want.

The process is the same for each microstack:

- cd to the given project folder (e.g. `01-iam`)
- `npm install` to install the package dependencies
- Run `pulumi stack init prod` (or whatever name of stack you want to use). This will create a new empty stack, and will create a stack config file with the "encryptionsalt" key (if using the passphrase secrets provider).
- copy the contents of the "Pulumi.README.yaml" file into the new "Pulumi.prod.yaml" stack config file, with the "config" key at the top level.
- edit "Pulumi.prod.yaml" and follow the instructions in the file about setting configuration values.
  - In a number of cases you can use the default values copied from "Pulumi.README.yaml".
- Run `pulumi up` to deploy the infrastructure.
- Move to the next project folder and repeat the above steps.
#### Helpful Tips about Stack Dependencies

The following stacks manage stateful resources or resources that are foundational to other stacks, so think carefully before destroying them:

- 01-iam
- 02-networking
- 05-eks-cluster
- 15-state-policies-mgmt
- 20-database
- 30-esc

The following stacks do not manage stateful resources and so can be destroyed/re-created without losing data. Destroying/recreating these stacks will cause a service disruption but no permanent data loss:

- 25-insights: If restarted, use the service UI "selfhosted" page to reindex the searchcluster. See: [Re-index opensearch](https://www.pulumi.com/docs/pulumi-cloud/admin/self-hosted/components/search/#backfilling-data)
- 90-pulumi-service
diff --git a/eks-hosted/package.json b/eks-hosted/package.json
new file mode 100644
index 00000000..227de65e
--- /dev/null
+++ b/eks-hosted/package.json
@@ -0,0 +1,6 @@
+{
+  "dependencies": {},
+  "devDependencies": {
+    "markdownlint-cli": "^0.42.0"
+  }
+}
\ No newline at end of file
From 670e5da4808dcd15f934fc1ad6473a786fa19f96 Mon Sep 17 00:00:00 2001
From: Jonathan Davenport
Date: Mon, 11 Nov 2024 09:53:39 -0500
Subject: [PATCH 06/12] changes vpcSecurityGroupIds input to type that works with PULUMI_ERROR_OUTPUT_STRING
---
 eks-hosted/20-database/rds-db/index.ts | 2 +-
 eks-hosted/README.md | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/eks-hosted/20-database/rds-db/index.ts b/eks-hosted/20-database/rds-db/index.ts
index bab54f54..da970f99 100644
--- a/eks-hosted/20-database/rds-db/index.ts
+++ b/eks-hosted/20-database/rds-db/index.ts
@@ -59,7 +59,7 @@ export class RdsDatabase extends pulumi.ComponentResource {
         masterUsername: "pulumi",
         masterPassword: this.password,
         storageEncrypted: true,
-        vpcSecurityGroupIds: [args.securityGroupId], // Must be able to communicate with EKS nodes.
+        vpcSecurityGroupIds: pulumi.output([args.securityGroupId]),
         finalSnapshotIdentifier: finalSnapshotIdentifier.hex,
         tags,
     }, { protect: true, });
diff --git a/eks-hosted/README.md b/eks-hosted/README.md
index d0e04609..441536e3 100644
--- a/eks-hosted/README.md
+++ b/eks-hosted/README.md
@@ -78,6 +78,5 @@ The following stacks manage stateful resources or resources that are foundationa
 - 30-esc
 
 The following stacks do not manage stateful resources and so can be destroyed/re-created without losing data. Destroying/recreating these stacks will cause a service disruption but no permanent data loss:
-
 - 25-insights: If restarted, use the service UI "selfhosted" page to reindex the searchcluster. 
See: [Re-index opensearch](https://www.pulumi.com/docs/pulumi-cloud/admin/self-hosted/components/search/#backfilling-data)
 - 90-pulumi-service
From 1fb05294001040f5d4e76118609bf73ec648ea3d Mon Sep 17 00:00:00 2001
From: Jonathan Davenport
Date: Mon, 11 Nov 2024 18:57:37 -0500
Subject: [PATCH 07/12] updates to make typechecker happy and to make inputs work with PULUMI_ERROR_OUTPUT_STRING
---
 components-microstacks/.gitignore | 1 +
 eks-hosted/25-insights/Pulumi.README.yaml | 2 ++
 eks-hosted/90-pulumi-service/config.ts | 4 ++--
 eks-hosted/90-pulumi-service/index.ts | 10 +++++-----
 4 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/components-microstacks/.gitignore b/components-microstacks/.gitignore
index c2658d7d..c2b8d7c0 100644
--- a/components-microstacks/.gitignore
+++ b/components-microstacks/.gitignore
@@ -1 +1,2 @@
 node_modules/
+bun.lockb
\ No newline at end of file
diff --git a/eks-hosted/25-insights/Pulumi.README.yaml b/eks-hosted/25-insights/Pulumi.README.yaml
index 051055ba..c4ba4744 100644
--- a/eks-hosted/25-insights/Pulumi.README.yaml
+++ b/eks-hosted/25-insights/Pulumi.README.yaml
@@ -9,4 +9,6 @@ config:
 
   # Opensearch configuration
   # Use "pulumi config set opensearchPassword --secret <password>"
+  # Password requirements: minimum 8 characters and must contain at least one uppercase letter,
+  # one lowercase letter, one digit, and one special character
   opensearchPassword:
diff --git a/eks-hosted/90-pulumi-service/config.ts b/eks-hosted/90-pulumi-service/config.ts
index 5fbc0f8f..46e932b3 100644
--- a/eks-hosted/90-pulumi-service/config.ts
+++ b/eks-hosted/90-pulumi-service/config.ts
@@ -67,8 +67,8 @@ export const config = {
 
     // reCAPTCHA Config
     // If the config is not set, then recaptcha will be disabled.
-    recaptchaSiteKey: pulumiConfig.get("recaptchaSiteKey"),
-    recaptchaSecretKey: pulumiConfig.get("recaptchaSecretKey"),
+    recaptchaSiteKey: pulumiConfig.get("recaptchaSiteKey") || "",
+    recaptchaSecretKey: pulumiConfig.get("recaptchaSecretKey") || "",
 
     // Insights Config
     openSearchEndpoint: insightsStackRef.requireOutput("openSearchEndpoint"),
diff --git a/eks-hosted/90-pulumi-service/index.ts b/eks-hosted/90-pulumi-service/index.ts
index 62b21a8b..18f14492 100644
--- a/eks-hosted/90-pulumi-service/index.ts
+++ b/eks-hosted/90-pulumi-service/index.ts
@@ -238,7 +238,7 @@ const consolePodBuilder = new kx.PodBuilder({
                 "SAML_SSO_ENABLED": `${config.samlSsoEnabled}`,
                 ...recaptchaConsoleConfig,
                 ...consoleEmailLoginConfig
-            },
+            } as EnvMap,
             resources: consoleResources,
         }],
 });
@@ -288,14 +288,14 @@ const zone = aws.route53.getZoneOutput({
 
 const certValidation = new aws.route53.Record("certValidation", {
     name: certCertificate.domainValidationOptions[0].resourceRecordName,
-    records: [certCertificate.domainValidationOptions[0].resourceRecordValue],
+    records: pulumi.output([certCertificate.domainValidationOptions[0].resourceRecordValue]),
     ttl: 60,
     type: certCertificate.domainValidationOptions[0].resourceRecordType,
     zoneId: zone.id,
 });
 const certCertificateValidation = new aws.acm.CertificateValidation("cert", {
     certificateArn: certCertificate.arn,
-    validationRecordFqdns: [certValidation.fqdn],
+    validationRecordFqdns: pulumi.output([certValidation.fqdn]),
 });
 
 //////////////
@@ -400,7 +400,7 @@ const consoleDnsRecord = new aws.route53.Record("consoleEndDnsRecord", {
     name: consoleEndpoint,
     type: "CNAME",
     ttl: 300,
-    records: [ consoleLoadbalancerDnsName]
+    records: pulumi.output([consoleLoadbalancerDnsName])
 })
 
 const serviceDnsRecord = new aws.route53.Record("serviceEndDnsRecord", {
@@ -408,5 +408,5 @@ const serviceDnsRecord = new aws.route53.Record("serviceEndDnsRecord", { name: serviceEndpoint, type: "CNAME", ttl: 300, - records: [ serviceLoadbalancerDnsName] + records: pulumi.output([serviceLoadbalancerDnsName]) }) From 1fb05294001040f5d4e76118609bf73ec648ea3d Mon Sep 17 00:00:00 2001 From: Jonathan Davenport Date: Tue, 12 Nov 2024 11:15:16 -0500 Subject: [PATCH 08/12] try updating to eks 3.0.1 --- eks-hosted/05-eks-cluster/_index.ts | 110 +++++++++++++++++++++++++ eks-hosted/05-eks-cluster/config.ts | 6 +- eks-hosted/05-eks-cluster/index.ts | 8 +- eks-hosted/20-database/rds-db/index.ts | 2 +- 4 files changed, 118 insertions(+), 8 deletions(-) create mode 100644 eks-hosted/05-eks-cluster/_index.ts diff --git a/eks-hosted/05-eks-cluster/_index.ts b/eks-hosted/05-eks-cluster/_index.ts new file mode 100644 index 00000000..47351457 --- /dev/null +++ b/eks-hosted/05-eks-cluster/_index.ts @@ -0,0 +1,110 @@ +// import * as aws from "@pulumi/aws"; +// import * as eks from "@pulumi/eks"; +// import * as pulumi from "@pulumi/pulumi"; +// import { config } from "./config"; + +// const baseName = config.baseName +// const tags = { "Project": "pulumi-k8s-aws-cluster", "Owner": "pulumi"}; + +// ///////////////////// +// // --- EKS Cluster --- +// const serviceRole = aws.iam.Role.get("eksServiceRole", config.eksServiceRoleName) +// const instanceRole = aws.iam.Role.get("instanceRole", config.eksInstanceRoleName) +// const instanceProfile = aws.iam.InstanceProfile.get("ng-standard", config.instanceProfileName) + +// // Create an EKS cluster. +// const cluster = new eks.Cluster(`${baseName}`, { +// name: config.clusterName, +// authenticationMode: "API", +// // We keep these serviceRole and instanceRole properties to prevent the EKS provider from creating its own roles. +// serviceRole: serviceRole, +// instanceRole: instanceRole, +// vpcId: config.vpcId, +// publicSubnetIds: config.publicSubnetIds, +// privateSubnetIds: config.privateSubnetIds, +// providerCredentialOpts: { profileName: process.env.AWS_PROFILE}, +// nodeAssociatePublicIpAddress: false, +// skipDefaultNodeGroup: true, +// version: config.clusterVersion, +// createOidcProvider: false, +// tags: tags, +// enabledClusterLogTypes: ["api", "audit", "authenticator", "controllerManager", "scheduler"], +// }, { +// transformations: [(args) => { +// if (args.type === "aws:eks/cluster:Cluster") { +// return { +// props: args.props, +// opts: pulumi.mergeOptions(args.opts, { +// protect: true, +// }) +// } +// } +// return undefined; +// }], +// }); + +// // Export the cluster details. +// export const kubeconfig = pulumi.secret(cluster.kubeconfig.apply(JSON.stringify)); +// export const clusterName = cluster.core.cluster.name; +// export const region = aws.config.region; +// cluster.nodeSecurityGroup.apply(sg => { +// if (sg) { return sg.id } +// else return null +// }); // For RDS +// export const nodeGroupInstanceType = config.pulumiNodeGroupInstanceType; + +// ///////////////////// +// /// Build node groups +// const ssmParam = pulumi.output(aws.ssm.getParameter({ +// // https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html +// name: `/aws/service/eks/optimized-ami/${config.clusterVersion}/amazon-linux-2/recommended`, +// })) +// export const amiId = ssmParam.value.apply(s => JSON.parse(s).image_id) + +// // Create a standard node group. 
+// const ngStandard = new eks.NodeGroupV2(`${baseName}-ng-standard`, { +// cluster: cluster, +// instanceProfile: instanceProfile, +// nodeAssociatePublicIpAddress: false, +// nodeSecurityGroupId: cluster.nodeSecurityGroupId, +// clusterIngressRuleId: cluster.clusterIngressRuleId, +// amiId: amiId, +// instanceType: config.standardNodeGroupInstanceType, +// desiredCapacity: config.standardNodeGroupDesiredCapacity, +// minSize: config.standardNodeGroupMinSize, +// maxSize: config.standardNodeGroupMaxSize, + +// labels: {"amiId": amiId}, +// cloudFormationTags: clusterName.apply(clusterName => ({ +// "k8s.io/cluster-autoscaler/enabled": "true", +// [`k8s.io/cluster-autoscaler/${clusterName}`]: "true", +// ...tags, +// })), +// }, { +// providers: { kubernetes: cluster.provider}, +// }); + +// // Create a standard node group tainted for use only by self-hosted pulumi. +// const ngStandardPulumi = new eks.NodeGroupV2(`${baseName}-ng-standard-pulumi`, { +// cluster: cluster, +// instanceProfile: instanceProfile, +// nodeAssociatePublicIpAddress: false, +// nodeSecurityGroupId: cluster.nodeSecurityGroupId, +// clusterIngressRuleId: cluster.clusterIngressRuleId, +// amiId: amiId, + +// instanceType: config.pulumiNodeGroupInstanceType, +// desiredCapacity: config.pulumiNodeGroupDesiredCapacity, +// minSize: config.pulumiNodeGroupMinSize, +// maxSize: config.pulumiNodeGroupMaxSize, + +// labels: {"amiId": amiId}, +// taints: { "self-hosted-pulumi": { value: "true", effect: "NoSchedule"}}, +// cloudFormationTags: clusterName.apply(clusterName => ({ +// "k8s.io/cluster-autoscaler/enabled": "true", +// [`k8s.io/cluster-autoscaler/${clusterName}`]: "true", +// ...tags, +// })), +// }, { +// providers: { kubernetes: cluster.provider}, +// }); diff --git a/eks-hosted/05-eks-cluster/config.ts b/eks-hosted/05-eks-cluster/config.ts index 9c4c337c..2a91eec2 100644 --- a/eks-hosted/05-eks-cluster/config.ts +++ b/eks-hosted/05-eks-cluster/config.ts @@ -45,9 +45,9 @@ export const config = { pulumiNodeGroupMaxSize: pulumiConfig.getNumber("pulumiNodeGroupMaxSize") ?? 
5, // IAM stack values - eksInstanceRoleName: pulumi.interpolate `${eksInstanceRoleName}`, - instanceProfileName: pulumi.interpolate `${instanceProfileName}`, - eksServiceRoleName: pulumi.interpolate `${eksServiceRoleName}`, + eksInstanceRoleName: eksInstanceRoleName, + instanceProfileName: instanceProfileName, + eksServiceRoleName: eksServiceRoleName, // Networking stack values clusterName: clusterName, diff --git a/eks-hosted/05-eks-cluster/index.ts b/eks-hosted/05-eks-cluster/index.ts index 9602b773..f83a3f46 100644 --- a/eks-hosted/05-eks-cluster/index.ts +++ b/eks-hosted/05-eks-cluster/index.ts @@ -25,7 +25,6 @@ const cluster = new eks.Cluster(`${baseName}`, { providerCredentialOpts: { profileName: process.env.AWS_PROFILE}, nodeAssociatePublicIpAddress: false, skipDefaultNodeGroup: true, - deployDashboard: false, version: config.clusterVersion, createOidcProvider: false, tags: tags, @@ -48,7 +47,8 @@ const cluster = new eks.Cluster(`${baseName}`, { export const kubeconfig = pulumi.secret(cluster.kubeconfig.apply(JSON.stringify)); export const clusterName = cluster.core.cluster.name; export const region = aws.config.region; -export const nodeSecurityGroupId = cluster.nodeSecurityGroup.id; // For RDS + +// For RDS export const nodeGroupInstanceType = config.pulumiNodeGroupInstanceType; ///////////////////// @@ -60,7 +60,7 @@ const ssmParam = pulumi.output(aws.ssm.getParameter({ export const amiId = ssmParam.value.apply(s => JSON.parse(s).image_id) // Create a standard node group. -const ngStandard = new eks.NodeGroup(`${baseName}-ng-standard`, { +const ngStandard = new eks.NodeGroupV2(`${baseName}-ng-standard`, { cluster: cluster, instanceProfile: instanceProfile, nodeAssociatePublicIpAddress: false, @@ -83,7 +83,7 @@ const ngStandard = new eks.NodeGroup(`${baseName}-ng-standard`, { }); // Create a standard node group tainted for use only by self-hosted pulumi. -const ngStandardPulumi = new eks.NodeGroup(`${baseName}-ng-standard-pulumi`, { +const ngStandardPulumi = new eks.NodeGroupV2(`${baseName}-ng-standard-pulumi`, { cluster: cluster, instanceProfile: instanceProfile, nodeAssociatePublicIpAddress: false, diff --git a/eks-hosted/20-database/rds-db/index.ts b/eks-hosted/20-database/rds-db/index.ts index da970f99..e27b6cb5 100644 --- a/eks-hosted/20-database/rds-db/index.ts +++ b/eks-hosted/20-database/rds-db/index.ts @@ -59,7 +59,7 @@ export class RdsDatabase extends pulumi.ComponentResource { masterUsername: "pulumi", masterPassword: this.password, storageEncrypted: true, - vpcSecurityGroupIds: pulumi.output([args.securityGroupId]), + vpcSecurityGroupIds: pulumi.output([args.securityGroupId]), // Must be able to communicate with EKS nodes. 
finalSnapshotIdentifier: finalSnapshotIdentifier.hex,
         tags,
     }, { protect: true, });
From 3cb7e749514a12162a80ed52a8354ec47da666da Mon Sep 17 00:00:00 2001
From: Jonathan Davenport
Date: Tue, 12 Nov 2024 12:11:21 -0500
Subject: [PATCH 09/12] upgrades eks to 3.0.1, makes iam stack export entire roles and moves instanceProfile to eks stack to work around https://github.com/pulumi/pulumi/issues/17515
---
 eks-hosted/01-iam/index.ts | 19 +++++--
 eks-hosted/05-eks-cluster/_index.ts | 110 -------------------
 eks-hosted/05-eks-cluster/config.ts | 4 +
 eks-hosted/05-eks-cluster/index.ts | 25 +++---
 eks-hosted/05-eks-cluster/package.json | 4 +-
 5 files changed, 32 insertions(+), 130 deletions(-)
 delete mode 100644 eks-hosted/05-eks-cluster/_index.ts

diff --git a/eks-hosted/01-iam/index.ts b/eks-hosted/01-iam/index.ts
index 05816b46..7da86e4c 100644
--- a/eks-hosted/01-iam/index.ts
+++ b/eks-hosted/01-iam/index.ts
@@ -5,7 +5,9 @@ import { albControllerPolicyStatement } from "./albControllerPolicy";
 
 // These roles are either provided by the user or created in this stack.
 export let eksServiceRoleName: string | pulumi.Output<string>;
+export let eksServiceRole: aws.iam.Role | pulumi.Output<aws.iam.Role>;
 export let eksInstanceRoleName: string | pulumi.Output<string>;
+export let eksInstanceRole: aws.iam.Role | pulumi.Output<aws.iam.Role>;
 export let instanceProfileName: string | pulumi.Output<string>;
 export let databaseMonitoringRoleArn: string | pulumi.Output<string>;
 
@@ -14,13 +16,16 @@ export let databaseMonitoringRoleArn: string | pulumi.Output<string>;
 // It's an all-or-nothing situation, so if one is provided, they all must be.
 if (config.eksServiceRoleName && config.eksInstanceRoleName && config.instanceProfileName && config.databaseMonitoringRoleArn) {
     eksServiceRoleName = config.eksServiceRoleName;
+    eksServiceRole = aws.iam.Role.get("eksServiceRole", config.eksServiceRoleName)
     eksInstanceRoleName = config.eksInstanceRoleName;
+    eksInstanceRole = aws.iam.Role.get("instanceRole", config.eksInstanceRoleName)
     instanceProfileName = config.instanceProfileName;
     databaseMonitoringRoleArn = config.databaseMonitoringRoleArn;
+
 } else {
     // Create the roles.
/// Cluster Role /// - const eksRole = new aws.iam.Role(`${config.baseName}-eksRole`, { + eksServiceRole = new aws.iam.Role(`${config.baseName}-eksRole`, { assumeRolePolicy: { Statement: [ { Action:"sts:AssumeRole", @@ -37,10 +42,10 @@ if (config.eksServiceRoleName && config.eksInstanceRoleName && config.instancePr "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" ], }); - eksServiceRoleName = eksRole.name; + eksServiceRoleName = eksServiceRole.name; /// Instance Role /// - const instanceRole = new aws.iam.Role(`${config.baseName}-instanceRole`, { + eksInstanceRole = new aws.iam.Role(`${config.baseName}-instanceRole`, { assumeRolePolicy: aws.iam.assumeRolePolicyForPrincipal(aws.iam.Principals.Ec2Principal), managedPolicyArns: [ "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", @@ -52,7 +57,7 @@ if (config.eksServiceRoleName && config.eksInstanceRoleName && config.instancePr // S3 policy used by Pulumi services const instanceRoleS3Policy = new aws.iam.RolePolicyAttachment("instanceRoleS3Policy", { policyArn: "arn:aws:iam::aws:policy/AmazonS3FullAccess", - role: instanceRole + role: eksInstanceRole }) // ALB management used by ingress controller @@ -61,7 +66,7 @@ if (config.eksServiceRoleName && config.eksInstanceRoleName && config.instancePr }); const rpaAlbPolicy = new aws.iam.RolePolicyAttachment("albPolicy", { policyArn: albControllerPolicy.arn, - role: instanceRole + role: eksInstanceRole }) // Opensearch access @@ -81,10 +86,10 @@ if (config.eksServiceRoleName && config.eksInstanceRoleName && config.instancePr }); const openSearchPolicyAttachment = new aws.iam.RolePolicyAttachment("opensearchPolicy", { policyArn: opensearchPolicy.arn, - role: instanceRole + role: eksInstanceRole }) - eksInstanceRoleName = instanceRole.name; + eksInstanceRoleName = eksInstanceRole.name; const instanceProfile = new aws.iam.InstanceProfile("ng-standard", {role: eksInstanceRoleName}) instanceProfileName = instanceProfile.name; diff --git a/eks-hosted/05-eks-cluster/_index.ts b/eks-hosted/05-eks-cluster/_index.ts deleted file mode 100644 index 47351457..00000000 --- a/eks-hosted/05-eks-cluster/_index.ts +++ /dev/null @@ -1,110 +0,0 @@ -// import * as aws from "@pulumi/aws"; -// import * as eks from "@pulumi/eks"; -// import * as pulumi from "@pulumi/pulumi"; -// import { config } from "./config"; - -// const baseName = config.baseName -// const tags = { "Project": "pulumi-k8s-aws-cluster", "Owner": "pulumi"}; - -// ///////////////////// -// // --- EKS Cluster --- -// const serviceRole = aws.iam.Role.get("eksServiceRole", config.eksServiceRoleName) -// const instanceRole = aws.iam.Role.get("instanceRole", config.eksInstanceRoleName) -// const instanceProfile = aws.iam.InstanceProfile.get("ng-standard", config.instanceProfileName) - -// // Create an EKS cluster. -// const cluster = new eks.Cluster(`${baseName}`, { -// name: config.clusterName, -// authenticationMode: "API", -// // We keep these serviceRole and instanceRole properties to prevent the EKS provider from creating its own roles. 
-// serviceRole: serviceRole, -// instanceRole: instanceRole, -// vpcId: config.vpcId, -// publicSubnetIds: config.publicSubnetIds, -// privateSubnetIds: config.privateSubnetIds, -// providerCredentialOpts: { profileName: process.env.AWS_PROFILE}, -// nodeAssociatePublicIpAddress: false, -// skipDefaultNodeGroup: true, -// version: config.clusterVersion, -// createOidcProvider: false, -// tags: tags, -// enabledClusterLogTypes: ["api", "audit", "authenticator", "controllerManager", "scheduler"], -// }, { -// transformations: [(args) => { -// if (args.type === "aws:eks/cluster:Cluster") { -// return { -// props: args.props, -// opts: pulumi.mergeOptions(args.opts, { -// protect: true, -// }) -// } -// } -// return undefined; -// }], -// }); - -// // Export the cluster details. -// export const kubeconfig = pulumi.secret(cluster.kubeconfig.apply(JSON.stringify)); -// export const clusterName = cluster.core.cluster.name; -// export const region = aws.config.region; -// cluster.nodeSecurityGroup.apply(sg => { -// if (sg) { return sg.id } -// else return null -// }); // For RDS -// export const nodeGroupInstanceType = config.pulumiNodeGroupInstanceType; - -// ///////////////////// -// /// Build node groups -// const ssmParam = pulumi.output(aws.ssm.getParameter({ -// // https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html -// name: `/aws/service/eks/optimized-ami/${config.clusterVersion}/amazon-linux-2/recommended`, -// })) -// export const amiId = ssmParam.value.apply(s => JSON.parse(s).image_id) - -// // Create a standard node group. -// const ngStandard = new eks.NodeGroupV2(`${baseName}-ng-standard`, { -// cluster: cluster, -// instanceProfile: instanceProfile, -// nodeAssociatePublicIpAddress: false, -// nodeSecurityGroupId: cluster.nodeSecurityGroupId, -// clusterIngressRuleId: cluster.clusterIngressRuleId, -// amiId: amiId, -// instanceType: config.standardNodeGroupInstanceType, -// desiredCapacity: config.standardNodeGroupDesiredCapacity, -// minSize: config.standardNodeGroupMinSize, -// maxSize: config.standardNodeGroupMaxSize, - -// labels: {"amiId": amiId}, -// cloudFormationTags: clusterName.apply(clusterName => ({ -// "k8s.io/cluster-autoscaler/enabled": "true", -// [`k8s.io/cluster-autoscaler/${clusterName}`]: "true", -// ...tags, -// })), -// }, { -// providers: { kubernetes: cluster.provider}, -// }); - -// // Create a standard node group tainted for use only by self-hosted pulumi. 
-// const ngStandardPulumi = new eks.NodeGroupV2(`${baseName}-ng-standard-pulumi`, { -// cluster: cluster, -// instanceProfile: instanceProfile, -// nodeAssociatePublicIpAddress: false, -// nodeSecurityGroupId: cluster.nodeSecurityGroupId, -// clusterIngressRuleId: cluster.clusterIngressRuleId, -// amiId: amiId, - -// instanceType: config.pulumiNodeGroupInstanceType, -// desiredCapacity: config.pulumiNodeGroupDesiredCapacity, -// minSize: config.pulumiNodeGroupMinSize, -// maxSize: config.pulumiNodeGroupMaxSize, - -// labels: {"amiId": amiId}, -// taints: { "self-hosted-pulumi": { value: "true", effect: "NoSchedule"}}, -// cloudFormationTags: clusterName.apply(clusterName => ({ -// "k8s.io/cluster-autoscaler/enabled": "true", -// [`k8s.io/cluster-autoscaler/${clusterName}`]: "true", -// ...tags, -// })), -// }, { -// providers: { kubernetes: cluster.provider}, -// }); diff --git a/eks-hosted/05-eks-cluster/config.ts b/eks-hosted/05-eks-cluster/config.ts index 2a91eec2..843ab666 100644 --- a/eks-hosted/05-eks-cluster/config.ts +++ b/eks-hosted/05-eks-cluster/config.ts @@ -12,6 +12,8 @@ const iamStackRef = new pulumi.StackReference(`${orgName}/selfhosted-01-iam/${st const eksInstanceRoleName = iamStackRef.requireOutput("eksInstanceRoleName"); const instanceProfileName = iamStackRef.requireOutput("instanceProfileName"); const eksServiceRoleName = iamStackRef.requireOutput("eksServiceRoleName"); +const eksServiceRole = iamStackRef.requireOutput("eksServiceRole") +const eksInstanceRole = iamStackRef.requireOutput("eksInstanceRole") // Networking Stack values // Get the needed values from the networking stack. @@ -48,6 +50,8 @@ export const config = { eksInstanceRoleName: eksInstanceRoleName, instanceProfileName: instanceProfileName, eksServiceRoleName: eksServiceRoleName, + eksInstanceRole: eksInstanceRole, + eksServiceRole: eksServiceRole, // Networking stack values clusterName: clusterName, diff --git a/eks-hosted/05-eks-cluster/index.ts b/eks-hosted/05-eks-cluster/index.ts index f83a3f46..72f22201 100644 --- a/eks-hosted/05-eks-cluster/index.ts +++ b/eks-hosted/05-eks-cluster/index.ts @@ -10,15 +10,15 @@ const tags = { "Project": "pulumi-k8s-aws-cluster", "Owner": "pulumi"}; // --- EKS Cluster --- const serviceRole = aws.iam.Role.get("eksServiceRole", config.eksServiceRoleName) const instanceRole = aws.iam.Role.get("instanceRole", config.eksInstanceRoleName) -const instanceProfile = aws.iam.InstanceProfile.get("ng-standard", config.instanceProfileName) +// const instanceProfile = aws.iam.InstanceProfile.get("ng-standard", config.instanceProfileName) // Create an EKS cluster. const cluster = new eks.Cluster(`${baseName}`, { name: config.clusterName, authenticationMode: "API", // We keep these serviceRole and instanceRole properties to prevent the EKS provider from creating its own roles. 
diff --git a/eks-hosted/05-eks-cluster/index.ts b/eks-hosted/05-eks-cluster/index.ts
index f83a3f46..72f22201 100644
--- a/eks-hosted/05-eks-cluster/index.ts
+++ b/eks-hosted/05-eks-cluster/index.ts
@@ -10,15 +10,15 @@ const tags = { "Project": "pulumi-k8s-aws-cluster", "Owner": "pulumi"};
 // --- EKS Cluster ---
 const serviceRole = aws.iam.Role.get("eksServiceRole", config.eksServiceRoleName)
 const instanceRole = aws.iam.Role.get("instanceRole", config.eksInstanceRoleName)
-const instanceProfile = aws.iam.InstanceProfile.get("ng-standard", config.instanceProfileName)
+// const instanceProfile = aws.iam.InstanceProfile.get("ng-standard", config.instanceProfileName)
 
 // Create an EKS cluster.
 const cluster = new eks.Cluster(`${baseName}`, {
     name: config.clusterName,
     authenticationMode: "API",
     // We keep these serviceRole and instanceRole properties to prevent the EKS provider from creating its own roles.
-    serviceRole: serviceRole,
-    instanceRole: instanceRole,
+    serviceRole: config.eksServiceRole,
+    instanceRole: config.eksInstanceRole,
     vpcId: config.vpcId,
     publicSubnetIds: config.publicSubnetIds,
     privateSubnetIds: config.privateSubnetIds,
@@ -47,8 +47,10 @@ const cluster = new eks.Cluster(`${baseName}`, {
 export const kubeconfig = pulumi.secret(cluster.kubeconfig.apply(JSON.stringify));
 export const clusterName = cluster.core.cluster.name;
 export const region = aws.config.region;
-
-// For RDS
+cluster.nodeSecurityGroup.apply(sg => {
+    if (sg) { return sg.id }
+    else return null
+}); // For RDS
 export const nodeGroupInstanceType = config.pulumiNodeGroupInstanceType;
 
 /////////////////////
@@ -59,20 +61,21 @@ const ssmParam = pulumi.output(aws.ssm.getParameter({
 }))
 export const amiId = ssmParam.value.apply(s => JSON.parse(s).image_id)
 
+const instanceProfile = new aws.iam.InstanceProfile("ng-standard", {role: config.eksInstanceRoleName})
 // Create a standard node group.
 const ngStandard = new eks.NodeGroupV2(`${baseName}-ng-standard`, {
     cluster: cluster,
     instanceProfile: instanceProfile,
     nodeAssociatePublicIpAddress: false,
-    nodeSecurityGroup: cluster.nodeSecurityGroup,
-    clusterIngressRule: cluster.eksClusterIngressRule,
+    nodeSecurityGroupId: cluster.nodeSecurityGroupId,
+    clusterIngressRuleId: cluster.clusterIngressRuleId,
     amiId: amiId,
     instanceType: config.standardNodeGroupInstanceType,
     desiredCapacity: config.standardNodeGroupDesiredCapacity,
     minSize: config.standardNodeGroupMinSize,
     maxSize: config.standardNodeGroupMaxSize,
-    // labels: {"amiId": amiId},
+    labels: {"amiId": amiId},
     cloudFormationTags: clusterName.apply(clusterName => ({
         "k8s.io/cluster-autoscaler/enabled": "true",
         [`k8s.io/cluster-autoscaler/${clusterName}`]: "true",
@@ -87,8 +90,8 @@ const ngStandardPulumi = new eks.NodeGroupV2(`${baseName}-ng-standard-pulumi`, {
     cluster: cluster,
     instanceProfile: instanceProfile,
     nodeAssociatePublicIpAddress: false,
-    nodeSecurityGroup: cluster.nodeSecurityGroup,
-    clusterIngressRule: cluster.eksClusterIngressRule,
+    nodeSecurityGroupId: cluster.nodeSecurityGroupId,
+    clusterIngressRuleId: cluster.clusterIngressRuleId,
     amiId: amiId,
 
     instanceType: config.pulumiNodeGroupInstanceType,
@@ -96,7 +99,7 @@ const ngStandardPulumi = new eks.NodeGroupV2(`${baseName}-ng-standard-pulumi`, {
     minSize: config.pulumiNodeGroupMinSize,
     maxSize: config.pulumiNodeGroupMaxSize,
 
-    // labels: {"amiId": amiId},
+    labels: {"amiId": amiId},
     taints: { "self-hosted-pulumi": { value: "true", effect: "NoSchedule"}},
     cloudFormationTags: clusterName.apply(clusterName => ({
         "k8s.io/cluster-autoscaler/enabled": "true",
diff --git a/eks-hosted/05-eks-cluster/package.json b/eks-hosted/05-eks-cluster/package.json
index bd74f701..122ae12c 100644
--- a/eks-hosted/05-eks-cluster/package.json
+++ b/eks-hosted/05-eks-cluster/package.json
@@ -4,8 +4,8 @@
     "typescript": "^3.0.0"
   },
   "dependencies": {
-    "@pulumi/aws": "^6.54.0",
-    "@pulumi/eks": "^2.8.1",
+    "@pulumi/aws": "^6.59.0",
+    "@pulumi/eks": "^3.0.1",
     "@pulumi/pulumi": "^3.136.0"
   }
 }
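Besides sourcing the roles from the 01-iam stack outputs instead of Role.get lookups, the hunks above track the breaking NodeGroupV2 changes that come with the @pulumi/eks 3.x bump in package.json: node groups now take security-group and ingress-rule IDs rather than resource objects, and the instance profile is created from the IAM role name instead of being fetched. A condensed sketch of the property rename (the node-group name and instance type below are illustrative):

import * as aws from "@pulumi/aws";
import * as eks from "@pulumi/eks";

// Assume an existing cluster and instance profile, as created above.
declare const cluster: eks.Cluster;
declare const instanceProfile: aws.iam.InstanceProfile;

// @pulumi/eks 2.x accepted resource objects:
//     nodeSecurityGroup: cluster.nodeSecurityGroup,
//     clusterIngressRule: cluster.eksClusterIngressRule,
// @pulumi/eks 3.x takes the IDs exposed on the cluster instead:
const ng = new eks.NodeGroupV2("example-ng", {
    cluster: cluster,
    instanceProfile: instanceProfile,
    nodeSecurityGroupId: cluster.nodeSecurityGroupId,
    clusterIngressRuleId: cluster.clusterIngressRuleId,
    instanceType: "t3.large",
});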
"Project": "pulumi-k8s-aws-cluster", "Owner": "pulumi"}; ///////////////////// // --- EKS Cluster --- -const serviceRole = aws.iam.Role.get("eksServiceRole", config.eksServiceRoleName) -const instanceRole = aws.iam.Role.get("instanceRole", config.eksInstanceRoleName) -// const instanceProfile = aws.iam.InstanceProfile.get("ng-standard", config.instanceProfileName) // Create an EKS cluster. const cluster = new eks.Cluster(`${baseName}`, { From 187915e8fe3ee03c462dfa417f9cd3ac4fb1b581 Mon Sep 17 00:00:00 2001 From: Jonathan Davenport Date: Tue, 12 Nov 2024 12:23:06 -0500 Subject: [PATCH 11/12] removes unused code --- eks-hosted/05-eks-cluster/index.ts | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/eks-hosted/05-eks-cluster/index.ts b/eks-hosted/05-eks-cluster/index.ts index f3cf2195..5a1f56a7 100644 --- a/eks-hosted/05-eks-cluster/index.ts +++ b/eks-hosted/05-eks-cluster/index.ts @@ -44,10 +44,8 @@ const cluster = new eks.Cluster(`${baseName}`, { export const kubeconfig = pulumi.secret(cluster.kubeconfig.apply(JSON.stringify)); export const clusterName = cluster.core.cluster.name; export const region = aws.config.region; -cluster.nodeSecurityGroup.apply(sg => { - if (sg) { return sg.id } - else return null -}); // For RDS + +// For RDS export const nodeGroupInstanceType = config.pulumiNodeGroupInstanceType; ///////////////////// From ffaf951e1126e6647178e851e52a29b9d92c1104 Mon Sep 17 00:00:00 2001 From: Jonathan Davenport Date: Tue, 12 Nov 2024 12:26:08 -0500 Subject: [PATCH 12/12] cleanup --- eks-hosted/05-eks-cluster/index.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/eks-hosted/05-eks-cluster/index.ts b/eks-hosted/05-eks-cluster/index.ts index 5a1f56a7..3cd18e16 100644 --- a/eks-hosted/05-eks-cluster/index.ts +++ b/eks-hosted/05-eks-cluster/index.ts @@ -44,6 +44,7 @@ const cluster = new eks.Cluster(`${baseName}`, { export const kubeconfig = pulumi.secret(cluster.kubeconfig.apply(JSON.stringify)); export const clusterName = cluster.core.cluster.name; export const region = aws.config.region; +export const nodeSecurityGroupId = cluster.nodeSecurityGroupId; // For RDS export const nodeGroupInstanceType = config.pulumiNodeGroupInstanceType; @@ -54,7 +55,7 @@ const ssmParam = pulumi.output(aws.ssm.getParameter({ // https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html name: `/aws/service/eks/optimized-ami/${config.clusterVersion}/amazon-linux-2/recommended`, })) -export const amiId = ssmParam.value.apply(s => JSON.parse(s).image_id) +const amiId = ssmParam.value.apply(s => JSON.parse(s).image_id) const instanceProfile = new aws.iam.InstanceProfile("ng-standard", {role: config.eksInstanceRoleName}) // Create a standard node group.