diff --git a/components-microstacks/.gitignore b/components-microstacks/.gitignore index c2658d7d..c2b8d7c0 100644 --- a/components-microstacks/.gitignore +++ b/components-microstacks/.gitignore @@ -1 +1,2 @@ node_modules/ +bun.lockb \ No newline at end of file diff --git a/eks-hosted/.gitignore b/eks-hosted/.gitignore index 6fc672d7..ba5d054d 100644 --- a/eks-hosted/.gitignore +++ b/eks-hosted/.gitignore @@ -9,3 +9,5 @@ package-lock.json alb-ingress-chart-local/ */aws-load-balancer-controller/* Pulumi.pulumitest.yaml +Pulumi.*.yaml +bun.lockb \ No newline at end of file diff --git a/eks-hosted/01-iam/Pulumi.README.yaml b/eks-hosted/01-iam/Pulumi.README.yaml index 6833a814..8e5c5d0e 100644 --- a/eks-hosted/01-iam/Pulumi.README.yaml +++ b/eks-hosted/01-iam/Pulumi.README.yaml @@ -7,12 +7,6 @@ config: # Note, you will be setting this value for each stack you create, so use a value that will make sense across all the infrastructure. baseName: pulumiselfhost - # Provide an SSO role arn that can be assumed by the Pulumi cli to deploy the infrastructure. - # Currently this is just passed through and consumed by later stacks to enable the k8s provider to assume the role and deploy - # k8s infra to the eks cluster. - # A future iteration may create this sso role as part of the stack. - ssoRoleArn: arn:aws:iam::123456789012:role/SSO-Role-Name - #### BRINGING YOUR OWN IAM INFRASTRUCTURE ### # If you are not using the `01-iam` stack, then set the following values that would have otherwise been provided by the iam stack. # The stack will then "pretend" it created the resources and output the values for the other stacks to use. diff --git a/eks-hosted/01-iam/config.ts b/eks-hosted/01-iam/config.ts index fa3f8b38..ee3115df 100644 --- a/eks-hosted/01-iam/config.ts +++ b/eks-hosted/01-iam/config.ts @@ -4,7 +4,6 @@ const pulumiConfig = new pulumi.Config(); export const config = { baseName: pulumiConfig.require("baseName"), - ssoRoleArn: pulumiConfig.require("ssoRoleArn"), // These may not be set - see Pulumi.README.yaml for more information. eksServiceRoleName: pulumiConfig.get("eksServiceRoleName"), eksInstanceRoleName: pulumiConfig.get("eksInstanceRoleName"), diff --git a/eks-hosted/01-iam/index.ts b/eks-hosted/01-iam/index.ts index cb68d719..7da86e4c 100644 --- a/eks-hosted/01-iam/index.ts +++ b/eks-hosted/01-iam/index.ts @@ -3,13 +3,11 @@ import * as aws from "@pulumi/aws"; import { config } from "./config"; import { albControllerPolicyStatement } from "./albControllerPolicy"; -/// SSO Role /// -// This is currently managed outside of the stack and passed through for later stacks to use. -export const ssoRoleArn = config.ssoRoleArn; - // These roles are either provided by the user or created in this stack. export let eksServiceRoleName: string | pulumi.Output<string>; +export let eksServiceRole: aws.iam.Role | pulumi.Output<aws.iam.Role>; export let eksInstanceRoleName: string | pulumi.Output<string>; +export let eksInstanceRole: aws.iam.Role | pulumi.Output<aws.iam.Role>; export let instanceProfileName: string | pulumi.Output<string>; export let databaseMonitoringRoleArn: string | pulumi.Output<string>; @@ -18,13 +16,16 @@ export let databaseMonitoringRoleArn: string | pulumi.Output<string>; // These roles are either provided by the user or created in this stack. // It's an all-or-nothing situation, so if one is provided, they all must be.
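// For illustration, a hypothetical bring-your-own-IAM stack config could look like this
// (placeholder values; the keys match the pulumiConfig.get() calls in config.ts above):
//   config:
//     selfhosted-01-iam:eksServiceRoleName: existing-eks-service-role
//     selfhosted-01-iam:eksInstanceRoleName: existing-eks-instance-role
//     selfhosted-01-iam:instanceProfileName: existing-instance-profile
//     selfhosted-01-iam:databaseMonitoringRoleArn: arn:aws:iam::111111111111:role/existing-monitoring-role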
if (config.eksServiceRoleName && config.eksInstanceRoleName && config.instanceProfileName && config.databaseMonitoringRoleArn) { eksServiceRoleName = config.eksServiceRoleName; + eksServiceRole = aws.iam.Role.get("eksServiceRole", config.eksServiceRoleName) eksInstanceRoleName = config.eksInstanceRoleName; + eksInstanceRole = aws.iam.Role.get("instanceRole", config.eksInstanceRoleName) instanceProfileName = config.instanceProfileName; databaseMonitoringRoleArn = config.databaseMonitoringRoleArn; + } else { // Create the roles. /// Cluster Role /// - const eksRole = new aws.iam.Role(`${config.baseName}-eksRole`, { + eksServiceRole = new aws.iam.Role(`${config.baseName}-eksRole`, { assumeRolePolicy: { Statement: [ { Action:"sts:AssumeRole", @@ -41,10 +42,10 @@ if (config.eksServiceRoleName && config.eksInstanceRoleName && config.instancePr "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" ], }); - eksServiceRoleName = eksRole.name; + eksServiceRoleName = eksServiceRole.name; /// Instance Role /// - const instanceRole = new aws.iam.Role(`${config.baseName}-instanceRole`, { + eksInstanceRole = new aws.iam.Role(`${config.baseName}-instanceRole`, { assumeRolePolicy: aws.iam.assumeRolePolicyForPrincipal(aws.iam.Principals.Ec2Principal), managedPolicyArns: [ "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", @@ -56,7 +57,7 @@ if (config.eksServiceRoleName && config.eksInstanceRoleName && config.instancePr // S3 policy used by Pulumi services const instanceRoleS3Policy = new aws.iam.RolePolicyAttachment("instanceRoleS3Policy", { policyArn: "arn:aws:iam::aws:policy/AmazonS3FullAccess", - role: instanceRole + role: eksInstanceRole }) // ALB management used by ingress controller @@ -65,7 +66,7 @@ if (config.eksServiceRoleName && config.eksInstanceRoleName && config.instancePr }); const rpaAlbPolicy = new aws.iam.RolePolicyAttachment("albPolicy", { policyArn: albControllerPolicy.arn, - role: instanceRole + role: eksInstanceRole }) // Opensearch access @@ -85,10 +86,10 @@ if (config.eksServiceRoleName && config.eksInstanceRoleName && config.instancePr }); const openSearchPolicyAttachment = new aws.iam.RolePolicyAttachment("opensearchPolicy", { policyArn: opensearchPolicy.arn, - role: instanceRole + role: eksInstanceRole }) - eksInstanceRoleName = instanceRole.name; + eksInstanceRoleName = eksInstanceRole.name; const instanceProfile = new aws.iam.InstanceProfile("ng-standard", {role: eksInstanceRoleName}) instanceProfileName = instanceProfile.name; diff --git a/eks-hosted/05-eks-cluster/config.ts b/eks-hosted/05-eks-cluster/config.ts index 1e0cebb8..843ab666 100644 --- a/eks-hosted/05-eks-cluster/config.ts +++ b/eks-hosted/05-eks-cluster/config.ts @@ -12,7 +12,8 @@ const iamStackRef = new pulumi.StackReference(`${orgName}/selfhosted-01-iam/${st const eksInstanceRoleName = iamStackRef.requireOutput("eksInstanceRoleName"); const instanceProfileName = iamStackRef.requireOutput("instanceProfileName"); const eksServiceRoleName = iamStackRef.requireOutput("eksServiceRoleName"); -const ssoRoleArn = iamStackRef.requireOutput("ssoRoleArn"); +const eksServiceRole = iamStackRef.requireOutput("eksServiceRole") +const eksInstanceRole = iamStackRef.requireOutput("eksInstanceRole") // Networking Stack values // Get the needed values from the networking stack. 
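// Note: requireOutput returns pulumi.Output<any>, so the eksServiceRole and eksInstanceRole values
// arrive untyped here; 05-eks-cluster/index.ts passes them directly to the eks.Cluster
// serviceRole/instanceRole inputs.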
@@ -49,12 +50,12 @@ export const config = { eksInstanceRoleName: eksInstanceRoleName, instanceProfileName: instanceProfileName, eksServiceRoleName: eksServiceRoleName, - ssoRoleArn: ssoRoleArn, + eksInstanceRole: eksInstanceRole, + eksServiceRole: eksServiceRole, // Networking stack values clusterName: clusterName, vpcId: vpcId, publicSubnetIds: publicSubnetIds, privateSubnetIds: privateSubnetIds, - -}; +} \ No newline at end of file diff --git a/eks-hosted/05-eks-cluster/index.ts b/eks-hosted/05-eks-cluster/index.ts index 27019b92..3cd18e16 100644 --- a/eks-hosted/05-eks-cluster/index.ts +++ b/eks-hosted/05-eks-cluster/index.ts @@ -8,24 +8,20 @@ const tags = { "Project": "pulumi-k8s-aws-cluster", "Owner": "pulumi"}; ///////////////////// // --- EKS Cluster --- -const serviceRole = aws.iam.Role.get("eksServiceRole", config.eksServiceRoleName) -const instanceRole = aws.iam.Role.get("instanceRole", config.eksInstanceRoleName) -const instanceProfile = aws.iam.InstanceProfile.get("ng-standard", config.instanceProfileName) // Create an EKS cluster. const cluster = new eks.Cluster(`${baseName}`, { name: config.clusterName, authenticationMode: "API", // We keep these serviceRole and instanceRole properties to prevent the EKS provider from creating its own roles. - serviceRole: serviceRole, - instanceRole: instanceRole, + serviceRole: config.eksServiceRole, + instanceRole: config.eksInstanceRole, vpcId: config.vpcId, publicSubnetIds: config.publicSubnetIds, privateSubnetIds: config.privateSubnetIds, providerCredentialOpts: { profileName: process.env.AWS_PROFILE}, nodeAssociatePublicIpAddress: false, skipDefaultNodeGroup: true, - deployDashboard: false, version: config.clusterVersion, createOidcProvider: false, tags: tags, @@ -48,7 +44,9 @@ const cluster = new eks.Cluster(`${baseName}`, { export const kubeconfig = pulumi.secret(cluster.kubeconfig.apply(JSON.stringify)); export const clusterName = cluster.core.cluster.name; export const region = aws.config.region; -export const nodeSecurityGroupId = cluster.nodeSecurityGroup.id; // For RDS +export const nodeSecurityGroupId = cluster.nodeSecurityGroupId; + +// For RDS export const nodeGroupInstanceType = config.pulumiNodeGroupInstanceType; ///////////////////// @@ -57,23 +55,23 @@ const ssmParam = pulumi.output(aws.ssm.getParameter({ // https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html name: `/aws/service/eks/optimized-ami/${config.clusterVersion}/amazon-linux-2/recommended`, })) -const amiId = ssmParam.value.apply(s => JSON.parse(s).image_id) +const amiId = ssmParam.value.apply(s => JSON.parse(s).image_id) +const instanceProfile = new aws.iam.InstanceProfile("ng-standard", {role: config.eksInstanceRoleName}) // Create a standard node group. 
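// Migration note: the switch from eks.NodeGroup to eks.NodeGroupV2 below goes hand in hand with the
// @pulumi/eks ^2.8.1 -> ^3.0.1 bump in package.json. NodeGroupV2 accepts plain IDs
// (nodeSecurityGroupId, clusterIngressRuleId) where NodeGroup took resource objects
// (nodeSecurityGroup, eksClusterIngressRule), which is also why the cluster stack now exports
// cluster.nodeSecurityGroupId instead of cluster.nodeSecurityGroup.id.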
-const ngStandard = new eks.NodeGroup(`${baseName}-ng-standard`, { +const ngStandard = new eks.NodeGroupV2(`${baseName}-ng-standard`, { cluster: cluster, instanceProfile: instanceProfile, nodeAssociatePublicIpAddress: false, - nodeSecurityGroup: cluster.nodeSecurityGroup, - clusterIngressRule: cluster.eksClusterIngressRule, + nodeSecurityGroupId: cluster.nodeSecurityGroupId, + clusterIngressRuleId: cluster.clusterIngressRuleId, amiId: amiId, - instanceType: config.standardNodeGroupInstanceType, desiredCapacity: config.standardNodeGroupDesiredCapacity, minSize: config.standardNodeGroupMinSize, maxSize: config.standardNodeGroupMaxSize, - labels: {"amiId": `${amiId}`}, + labels: {"amiId": amiId}, cloudFormationTags: clusterName.apply(clusterName => ({ "k8s.io/cluster-autoscaler/enabled": "true", [`k8s.io/cluster-autoscaler/${clusterName}`]: "true", @@ -84,12 +82,12 @@ const ngStandard = new eks.NodeGroup(`${baseName}-ng-standard`, { }); // Create a standard node group tainted for use only by self-hosted pulumi. -const ngStandardPulumi = new eks.NodeGroup(`${baseName}-ng-standard-pulumi`, { +const ngStandardPulumi = new eks.NodeGroupV2(`${baseName}-ng-standard-pulumi`, { cluster: cluster, instanceProfile: instanceProfile, nodeAssociatePublicIpAddress: false, - nodeSecurityGroup: cluster.nodeSecurityGroup, - clusterIngressRule: cluster.eksClusterIngressRule, + nodeSecurityGroupId: cluster.nodeSecurityGroupId, + clusterIngressRuleId: cluster.clusterIngressRuleId, amiId: amiId, instanceType: config.pulumiNodeGroupInstanceType, @@ -97,7 +95,7 @@ const ngStandardPulumi = new eks.NodeGroup(`${baseName}-ng-standard-pulumi`, { minSize: config.pulumiNodeGroupMinSize, maxSize: config.pulumiNodeGroupMaxSize, - labels: {"amiId": `${amiId}`}, + labels: {"amiId": amiId}, taints: { "self-hosted-pulumi": { value: "true", effect: "NoSchedule"}}, cloudFormationTags: clusterName.apply(clusterName => ({ "k8s.io/cluster-autoscaler/enabled": "true", diff --git a/eks-hosted/05-eks-cluster/package.json b/eks-hosted/05-eks-cluster/package.json index de4cd1be..122ae12c 100644 --- a/eks-hosted/05-eks-cluster/package.json +++ b/eks-hosted/05-eks-cluster/package.json @@ -4,8 +4,8 @@ "typescript": "^3.0.0" }, "dependencies": { - "@pulumi/aws": "^6.54.0", - "@pulumi/pulumi": "^3.136.0", - "@pulumi/eks": "^2.8.1" + "@pulumi/aws": "^6.59.0", + "@pulumi/eks": "^3.0.1", + "@pulumi/pulumi": "^3.136.0" } } diff --git a/eks-hosted/10-cluster-svcs/index.ts b/eks-hosted/10-cluster-svcs/index.ts index c3053209..2e94b987 100644 --- a/eks-hosted/10-cluster-svcs/index.ts +++ b/eks-hosted/10-cluster-svcs/index.ts @@ -1,6 +1,5 @@ import * as aws from "@pulumi/aws"; import * as k8s from "@pulumi/kubernetes"; -import * as pulumi from "@pulumi/pulumi"; import { config } from "./config"; import { createAlbSecurityGroup, createAlbIngressController } from "./ingress-controller"; diff --git a/eks-hosted/20-database/rds-db/index.ts b/eks-hosted/20-database/rds-db/index.ts index 0cc2753e..e27b6cb5 100644 --- a/eks-hosted/20-database/rds-db/index.ts +++ b/eks-hosted/20-database/rds-db/index.ts @@ -59,7 +59,7 @@ export class RdsDatabase extends pulumi.ComponentResource { masterUsername: "pulumi", masterPassword: this.password, storageEncrypted: true, - vpcSecurityGroupIds: pulumi.output(args.securityGroupId).apply(id => [id]), // Must be able to communicate with EKS nodes. + vpcSecurityGroupIds: pulumi.output([args.securityGroupId]), // Must be able to communicate with EKS nodes. 
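// (pulumi.output([args.securityGroupId]) lifts the one-element array into a single Output<string[]>,
// equivalent to the previous .apply(id => [id]) form.)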
finalSnapshotIdentifier: finalSnapshotIdentifier.hex, tags, }, { protect: true, }); diff --git a/eks-hosted/25-insights/Pulumi.README.yaml b/eks-hosted/25-insights/Pulumi.README.yaml index 051055ba..c4ba4744 100644 --- a/eks-hosted/25-insights/Pulumi.README.yaml +++ b/eks-hosted/25-insights/Pulumi.README.yaml @@ -9,4 +9,6 @@ config: # Opensearch configuration # Use "pulumi config set opensearchPassword --secret " + # Password requirements: minimum 8 characters and must contain at least one uppercase letter, + # one lowercase letter, one digit, and one special character opensearchPassword: diff --git a/eks-hosted/90-pulumi-service/config.ts b/eks-hosted/90-pulumi-service/config.ts index 5fbc0f8f..46e932b3 100644 --- a/eks-hosted/90-pulumi-service/config.ts +++ b/eks-hosted/90-pulumi-service/config.ts @@ -67,8 +67,8 @@ export const config = { // reCAPTCHA Config // If the config is not set, then recaptcha will be disabled. - recaptchaSiteKey: pulumiConfig.get("recaptchaSiteKey"), - recaptchaSecretKey: pulumiConfig.get("recaptchaSecretKey"), + recaptchaSiteKey: pulumiConfig.get("recaptchaSiteKey") || "", + recaptchaSecretKey: pulumiConfig.get("recaptchaSecretKey") || "", // Insights Config openSearchEndpoint: insightsStackRef.requireOutput("openSearchEndpoint"), diff --git a/eks-hosted/90-pulumi-service/index.ts b/eks-hosted/90-pulumi-service/index.ts index 62b21a8b..18f14492 100644 --- a/eks-hosted/90-pulumi-service/index.ts +++ b/eks-hosted/90-pulumi-service/index.ts @@ -238,7 +238,7 @@ const consolePodBuilder = new kx.PodBuilder({ "SAML_SSO_ENABLED": `${config.samlSsoEnabled}`, ...recaptchaConsoleConfig, ...consoleEmailLoginConfig - }, + } as EnvMap, resources: consoleResources, }], }); @@ -288,14 +288,14 @@ const zone = aws.route53.getZoneOutput({ const certValidation = new aws.route53.Record("certValidation", { name: certCertificate.domainValidationOptions[0].resourceRecordName, - records: [certCertificate.domainValidationOptions[0].resourceRecordValue], + records: pulumi.output([certCertificate.domainValidationOptions[0].resourceRecordValue]), ttl: 60, type: certCertificate.domainValidationOptions[0].resourceRecordType, zoneId: zone.id, }); const certCertificateValidation = new aws.acm.CertificateValidation("cert", { certificateArn: certCertificate.arn, - validationRecordFqdns: [certValidation.fqdn], + validationRecordFqdns: pulumi.output([certValidation.fqdn]), }); ////////////// @@ -400,7 +400,7 @@ const consoleDnsRecord = new aws.route53.Record("consoleEndDnsRecord", { name: consoleEndpoint, type: "CNAME", ttl: 300, - records: [ consoleLoadbalancerDnsName] + records: pulumi.output([consoleLoadbalancerDnsName]) }) const serviceDnsRecord = new aws.route53.Record("serviceEndDnsRecord", { @@ -408,5 +408,5 @@ const serviceDnsRecord = new aws.route53.Record("serviceEndDnsRecord", { name: serviceEndpoint, type: "CNAME", ttl: 300, - records: [ serviceLoadbalancerDnsName] + records: pulumi.output([serviceLoadbalancerDnsName]) }) diff --git a/eks-hosted/README.md b/eks-hosted/README.md index 0b5ec533..441536e3 100644 --- a/eks-hosted/README.md +++ b/eks-hosted/README.md @@ -3,16 +3,19 @@ This version of the EKS installer for Pulumi self-hosted is broken into smaller, individual stacks. This new architecture is being implemented to meet the following requirements: -- Allow users to bring their own infrastructure for sections of the solution. + +- Allow users to bring their own infrastructure for sections of the solution. 
- For example, IAM is managed as a separate stack since some customers cannot allow the installer to create and manage the IAM resources needed for the service infrastructure. Similarly, networking may be handled by a different team, etc. -- Support mixing and matching capabilities based on the license. Different features such as insights, ESC, deployments etc. require their own infrastructure. -- Make it easier to maintain and test the overall solution. By breaking the overall deployment into smaller stacks, it makes it easier to test the different parts of the solution since individual stacks can be upped and destroyed. +- Support mixing and matching capabilities based on the license. Different features such as insights, ESC, and deployments require their own infrastructure. +- Make it easier to maintain and test the overall solution. Breaking the overall deployment into smaller stacks makes it easier to test the different parts of the solution, since individual stacks can be upped and destroyed. This architecture does impose some design requirements: + - Make each stack as self-contained as possible. - In those cases where the provided installer is not used (i.e. the user stands up the resources on their own), then a mechanism is needed to pass in the IDs, etc., for that externally managed infrastructure while still supporting those cases where the infra is managed by the installers. ## Installer Revision History + Version ID | Date | K8s Version Supported | Note ---|---|---|-- 1.0 | Oct, 2024 | 1.30.3 | Initial version of the new eks installer. @@ -22,54 +25,58 @@ Version ID | Date | K8s Version Supported | Note ### State Management It is generally assumed one is using an S3 state backend. -See [AWS S3 state Backend](https://www.pulumi.com/docs/iac/concepts/state-and-backends/#aws-s3) for instructions on how to set up and login to an s3 backend. +See [AWS S3 state Backend](https://www.pulumi.com/docs/iac/concepts/state-and-backends/#aws-s3) for instructions on how to set up and log in to an S3 backend. That said, one can use Pulumi Cloud for the state backend as well. However, these instructions will generally assume an S3 backend is being used. ### Configuration -Each project has its own configuration requirements. Each project folder has a `Pulumi.EXAMPLE.yaml` file that includes instructions for setting up the configuration and can be used as a template for the actual stack config file (see [Pulumi stack config](https://www.pulumi.com/docs/iac/concepts/config/)). +Each project has its own configuration requirements. Each project folder has a `Pulumi.README.yaml` file that includes instructions for setting up the configuration and can be used as a template for the actual stack config file (see [Pulumi stack config](https://www.pulumi.com/docs/iac/concepts/config/)). ### Deployment Order -Each subfolder is it's own Pulumi project (and by extension stack). The numbering represents the order of deployment. +Each subfolder is its own Pulumi project (and, by extension, stack). The numbering represents the order of deployment. + +### Using Existing Infrastructure -### Using Existing Infrastructure -In some cases, you man need to use existing infrastructure. +In some cases, you may need to use existing infrastructure. Currently, the following installer projects support the case where the infrastructure already exists: -* 01-iam: IAM resources -* 02-networking: VPC and subnets -* 15-state-policies-mgmt: S3 buckets for state and policy storage.
-* 30-esc: S3 bucket for ESC-related storage +- 01-iam: IAM resources +- 02-networking: VPC and subnets +- 15-state-policies-mgmt: S3 buckets for state and policy storage. +- 30-esc: S3 bucket for ESC-related storage If using pre-existing resources, you will still run the given stacks (e.g. `01-iam` and `02-networking`) but you will provide the values for the resources you created - see the project's `Pulumi.README.yaml` for details. -The stack will then pretend to create the resources and output the values so that downstream stacks can use the values as needed. -- Review the `Pulumi.README.yaml` file to understand some of the inputs for the given stack. +The stack will skip creating the relevant resources and pass the input values through to stack outputs so that downstream stacks can use the values as needed. + +- Review the `Pulumi.README.yaml` file to understand the inputs for the given stack. - Review `index.ts` and any related files to understand how the given infrastructure is created. ### Deployment Instructions These instructions assume you are using "prod" for the name of your stacks. Of course, you can name the stack anything you want. The process is the same for each microstack: + - cd to the given project folder (e.g. `01-iam`) - `npm install` to install the package dependencies -- Run `pulumi stack init prod` (or whatever name of stack you want to use) -- copy "Pulumi.README.yaml" to a file where "README" is replaced with the name of your stack. - - For example, if you are naming the stacks "prod", then you would run `cp Pulumi.README.yaml Pulumi.prod.yaml` +- Run `pulumi stack init prod` (or whatever stack name you want to use). This creates a new, empty stack along with a stack config file containing the "encryptionsalt" key (if using the passphrase secrets provider). +- copy the contents of the "Pulumi.README.yaml" file into the new "Pulumi.prod.yaml" stack config file, keeping the "config" key at the top level. - edit "Pulumi.prod.yaml" and follow the instructions in the file about setting configuration values. - In a number of cases you can use the default values copied from "Pulumi.README.yaml". - Run `pulumi up` to deploy the infrastructure. - Move to the next project folder and repeat the above steps. -#### Helpful Tips about Stack Depenencies -The following stacks manage stateful resources or resources that are foundational to other stacks. So careful thought should be given before destroying them: -* 01-iam -* 02-networking -* 05-eks-cluster -* 15-state-policies-mgmt -* 20-database -* 30-esc +#### Helpful Tips about Stack Dependencies + +The following stacks manage stateful resources or resources that are foundational to other stacks, so think carefully before destroying them: + +- 01-iam +- 02-networking +- 05-eks-cluster +- 15-state-policies-mgmt +- 20-database +- 30-esc The following stacks do not manage stateful resources and so can be destroyed/re-created without losing data. Destroying/recreating these stacks will cause a service disruption but no permanent data loss: -* 25-insights: If restarted, use the service UI "selfhosted" page to reindex the searchclsuter.. See: [Re-index opensearch](https://www.pulumi.com/docs/pulumi-cloud/admin/self-hosted/components/search/#backfilling-data) -* 90-pulumi-service +- 25-insights: If restarted, use the service UI "selfhosted" page to reindex the search cluster.
See: [Re-index opensearch](https://www.pulumi.com/docs/pulumi-cloud/admin/self-hosted/components/search/#backfilling-data) +- 90-pulumi-service diff --git a/eks-hosted/package.json b/eks-hosted/package.json new file mode 100644 index 00000000..227de65e --- /dev/null +++ b/eks-hosted/package.json @@ -0,0 +1,6 @@ +{ + "dependencies": {}, + "devDependencies": { + "markdownlint-cli": "^0.42.0" + } +} \ No newline at end of file
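A note on the cross-stack wiring that the README's deployment order relies on: each project consumes the outputs of earlier stacks via `pulumi.StackReference`, which is also how the bring-your-own-infrastructure passthrough values reach downstream stacks. A minimal sketch of the pattern, mirroring `05-eks-cluster/config.ts` (the org name is a placeholder; project and stack names follow the deployment instructions above):

```typescript
import * as pulumi from "@pulumi/pulumi";

const orgName = "my-org"; // placeholder organization segment
const stackName = pulumi.getStack(); // e.g. "prod"

// Reference the 01-iam stack deployed earlier in the sequence.
const iamStackRef = new pulumi.StackReference(`${orgName}/selfhosted-01-iam/${stackName}`);

// requireOutput fails fast if an output is missing, which surfaces ordering mistakes
// (e.g. running 05-eks-cluster before 01-iam has been deployed).
const eksServiceRole = iamStackRef.requireOutput("eksServiceRole");
const eksInstanceRole = iamStackRef.requireOutput("eksInstanceRole");
```

The same pattern appears in `90-pulumi-service/config.ts`, which pulls `openSearchEndpoint` from the insights stack with `insightsStackRef.requireOutput("openSearchEndpoint")`.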