diff --git a/examples/configurations/dedicated-privatelink-aws-kafka-rbac-nginx/README.md b/examples/configurations/dedicated-privatelink-aws-kafka-rbac-nginx/README.md new file mode 100644 index 000000000..3a3f5166c --- /dev/null +++ b/examples/configurations/dedicated-privatelink-aws-kafka-rbac-nginx/README.md @@ -0,0 +1,24 @@ +### Notes + +1. See [Sample Project for Confluent Terraform Provider](https://registry.terraform.io/providers/confluentinc/confluent/latest/docs/guides/sample-project) that provides step-by-step instructions of running this example. + +2. This example assumes that Terraform is run from a host in the private network (you could also leverage the ["Agent" Execution Mode](https://developer.hashicorp.com/terraform/cloud-docs/agents) if you are using Terraform Enterprise), where it will have connectivity to the [Kafka REST API](https://docs.confluent.io/cloud/current/api.html#tag/Topic-(v3)) in other words, to the [REST endpoint](https://docs.confluent.io/cloud/current/clusters/broker-config.html#access-cluster-settings-in-the-ccloud-console) on the provisioned Kafka cluster. If it is not, you must make these changes: + + * Update the `confluent_api_key` resources by setting their `disable_wait_for_ready` flag to `true`. Otherwise, Terraform will attempt to validate API key creation by listing topics, which will fail without access to the Kafka REST API. 
Otherwise, you might see errors like: + + ``` + Error: error waiting for Kafka API Key "[REDACTED]" to sync: error listing Kafka Topics using Kafka API Key "[REDACTED]": Get "https://[REDACTED]/kafka/v3/clusters/[REDACTED]/topics": GET https://[REDACTED]/kafka/v3/clusters/[REDACTED]/topics giving up after 5 attempt(s): Get "https://[REDACTED]/kafka/v3/clusters/[REDACTED]/topics": dial tcp [REDACTED]:443: i/o timeout + ``` + + * Remove the `confluent_kafka_topic` resource. These resources are provisioned using the Kafka REST API, which is only accessible from the private network. + +3. One common deployment workflow for environments with private networking is as follows: + + * An initial (centrally-run) Terraform deployment provisions infrastructure: network, Kafka cluster, and other resources on the cloud provider of your choice to set up private network connectivity (like DNS records) + + * A secondary Terraform deployment (run from within the private network) provisions data-plane resources (Kafka Topics and ACLs) + + * Note that RBAC role bindings can be provisioned in either the first or second step, as they are provisioned through the [Confluent Cloud API](https://docs.confluent.io/cloud/current/api.html), not the [Kafka REST API](https://docs.confluent.io/cloud/current/api.html#tag/Topic-(v3)) + + +4. See [AWS PrivateLink](https://docs.confluent.io/cloud/current/networking/private-links/aws-privatelink.html) for more details. 
diff --git a/examples/configurations/dedicated-privatelink-aws-kafka-rbac-nginx/main.tf b/examples/configurations/dedicated-privatelink-aws-kafka-rbac-nginx/main.tf new file mode 100644 index 000000000..548e71a4b --- /dev/null +++ b/examples/configurations/dedicated-privatelink-aws-kafka-rbac-nginx/main.tf @@ -0,0 +1,595 @@ +terraform { + required_version = ">= 0.14.0" + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 5.17.0" + } + confluent = { + source = "confluentinc/confluent" + version = "2.36.0" + } + random = { + source = "hashicorp/random" + version = "~> 3.1" + } + tls = { + source = "hashicorp/tls" + version = "~> 4.0" + } + } +} + +provider "confluent" { + cloud_api_key = var.confluent_cloud_api_key + cloud_api_secret = var.confluent_cloud_api_secret +} + +provider "aws" { + region = var.region +} + +# Generate random CIDR block for VPC +resource "random_integer" "network_prefix_1" { + min = 0 + max = 255 +} + +resource "random_integer" "network_prefix_2" { + min = 0 + max = 255 +} + +locals { + network_addr_prefix = "10.${random_integer.network_prefix_1.result}.${random_integer.network_prefix_2.result}" + vpc_cidr_block = "${local.network_addr_prefix}.0/24" + + # Calculate subnet CIDRs for 3 availability zones + subnet_cidrs = [ + "${local.network_addr_prefix}.0/26", # i=0: 0/26 + "${local.network_addr_prefix}.64/26", # i=1: 64/26 + "${local.network_addr_prefix}.128/26" # i=2: 128/26 + ] + + topic_name = "orders" + topics_confluent_cloud_url = "https://confluent.cloud/environments/${confluent_environment.staging.id}/clusters/${confluent_kafka_cluster.dedicated.id}/topics?topics_filter=showAll" +} + +# Get available AZs for the region +data "aws_availability_zones" "available" { + state = "available" +} + +resource "aws_vpc" "main" { + cidr_block = local.vpc_cidr_block + enable_dns_hostnames = true + enable_dns_support = true + + tags = { + Name = "confluent-privatelink-vpc" + } +} + +resource "aws_internet_gateway" "main" { + 
vpc_id = aws_vpc.main.id + + tags = { + Name = "confluent-privatelink-igw" + } +} + +# Create subnets in 3 availability zones +resource "aws_subnet" "main" { + count = 3 + + vpc_id = aws_vpc.main.id + cidr_block = local.subnet_cidrs[count.index] + availability_zone = data.aws_availability_zones.available.names[count.index] + + tags = { + Name = "confluent-privatelink-subnet-${count.index}" + } +} + +# Create route table for public access +resource "aws_route_table" "public" { + vpc_id = aws_vpc.main.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.main.id + } + + tags = { + Name = "confluent-privatelink-rt" + } +} + +# Associate route table with first subnet (for EC2 if needed) +resource "aws_route_table_association" "public" { + subnet_id = aws_subnet.main[0].id + route_table_id = aws_route_table.public.id +} + +# Generate SSH key pair automatically +resource "tls_private_key" "main" { + algorithm = "RSA" + rsa_bits = 2048 +} + +# Create key pair for EC2 access +resource "aws_key_pair" "main" { + key_name = "confluent-privatelink-key" + public_key = tls_private_key.main.public_key_openssh +} + +# Create single security group for demo (both EC2 and PL) +resource "aws_security_group" "main" { + name = "pni-demo-sg-${confluent_environment.staging.id}" + description = "Demo security group for PNI test (EC2 + PL)" + vpc_id = aws_vpc.main.id + + ingress { + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = [aws_vpc.main.cidr_block] + } + + # SSH access for EC2 + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = var.client_cidr_blocks + description = "SSH access" + } + + # HTTPS access + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = concat(var.client_cidr_blocks, [aws_vpc.main.cidr_block]) + description = "HTTPS access" + } + + # Kafka broker access for ENIs + ingress { + from_port = 9092 + to_port = 9092 + protocol = "tcp" + cidr_blocks = 
concat(var.client_cidr_blocks, [aws_vpc.main.cidr_block]) + description = "Kafka broker access" + } + + # https://docs.confluent.io/cloud/current/networking/aws-pni.html#update-the-security-group-to-block-outbound-traffic + # SECURITY WARNING: For production deployments, restrict egress to egress = [] to remove the default 0.0.0.0/0 egress rule. + # This demo intentionally uses 0.0.0.0/0 to allow downloading Confluent CLI, Terraform provider, and related dependencies. + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + description = "All outbound traffic" + } + + tags = { + Name = "confluent-privatelink-vpc" + } +} + +# Get Amazon Linux 2023 AMI +data "aws_ami" "amazon_linux_2023" { + most_recent = true + owners = ["amazon"] + + filter { + name = "name" + values = ["al2023-ami-2023.*-x86_64"] + } +} + +# Create EC2 instance for testing +resource "aws_instance" "test" { + ami = data.aws_ami.amazon_linux_2023.id + instance_type = "t2.micro" + key_name = aws_key_pair.main.key_name + vpc_security_group_ids = [aws_security_group.main.id] + subnet_id = aws_subnet.main[0].id + associate_public_ip_address = true + + user_data = <<-EOF +#!/bin/bash +set -e + +yum update -y +# Install nginx and stream module (Amazon Linux 2023 specific) +yum install -y wget yum-utils nginx nginx-mod-stream bind-utils + +# START of setting up https://docs.confluent.io/cloud/current/networking/ccloud-console-access.html#configure-a-proxy +BOOTSTRAP_HOST="${confluent_kafka_cluster.dedicated.bootstrap_endpoint}" + +echo "Setting up NGINX proxy for Confluent Cloud PNI" >> /var/log/user-data.log +echo "Bootstrap host: $BOOTSTRAP_HOST" >> /var/log/user-data.log + +# Step 3: Test NGINX configuration (before we modify it) +echo "Testing initial NGINX configuration..." >> /var/log/user-data.log +nginx -t >> /var/log/user-data.log 2>&1 + +# Step 4: Check if ngx_stream_module.so exists and set MODULE_PATH +echo "Checking for stream module..." 
>> /var/log/user-data.log +if [ -f /usr/lib64/nginx/modules/ngx_stream_module.so ]; then + MODULE_PATH="/usr/lib64/nginx/modules/ngx_stream_module.so" + echo "Found stream module at: $MODULE_PATH" >> /var/log/user-data.log +elif [ -f /usr/lib/nginx/modules/ngx_stream_module.so ]; then + MODULE_PATH="/usr/lib/nginx/modules/ngx_stream_module.so" + echo "Found stream module at: $MODULE_PATH" >> /var/log/user-data.log +else + echo "ERROR: ngx_stream_module.so not found!" >> /var/log/user-data.log + exit 1 +fi + +# Step 5: Use AWS resolver directly (we know it works on EC2) +RESOLVER="169.254.169.253" +echo "Using AWS resolver: $RESOLVER" >> /var/log/user-data.log + +# Step 6: Update NGINX configuration +# NOTE(review): the nginx.conf heredoc below was reconstructed from Confluent's +# "Configure a proxy" documentation (SNI-based stream passthrough on 9092/443); +# the original content was lost to extraction garbling — verify before merging. +cat > /etc/nginx/nginx.conf <<NGINXEOF +load_module $MODULE_PATH; + +events {} + +stream { + map \$ssl_preread_server_name \$targetBackend { + default \$ssl_preread_server_name; + } + + server { + listen 9092; + proxy_connect_timeout 1s; + proxy_timeout 7200s; + resolver $RESOLVER; + proxy_pass \$targetBackend:9092; + ssl_preread on; + } + + server { + listen 443; + proxy_connect_timeout 1s; + proxy_timeout 7200s; + resolver $RESOLVER; + proxy_pass \$targetBackend:443; + ssl_preread on; + } +} +NGINXEOF + +# Step 7: Test updated NGINX configuration +echo "Testing updated NGINX configuration..." >> /var/log/user-data.log +if nginx -t >> /var/log/user-data.log 2>&1; then + echo "NGINX configuration test passed" >> /var/log/user-data.log +else + echo "NGINX configuration test failed:" >> /var/log/user-data.log + nginx -t >> /var/log/user-data.log 2>&1 + exit 1 +fi + +# Step 8: Restart NGINX +echo "Restarting NGINX..." >> /var/log/user-data.log +systemctl restart nginx + +# Step 9: Verify NGINX is running +echo "Verifying NGINX status..." >> /var/log/user-data.log +if systemctl is-active --quiet nginx; then + echo "NGINX is running successfully" >> /var/log/user-data.log + systemctl status nginx >> /var/log/user-data.log 2>&1 +else + echo "NGINX failed to start:" >> /var/log/user-data.log + systemctl status nginx >> /var/log/user-data.log 2>&1 + # Check error logs as suggested in Confluent docs + echo "NGINX error log:" >> /var/log/user-data.log + tail -20 /var/log/nginx/error.log >> /var/log/user-data.log 2>&1 + exit 1 +fi + +# Enable NGINX to start on boot +systemctl enable nginx + +# Install Confluent CLI +echo "Installing Confluent CLI..." >> /var/log/user-data.log +mkdir -p /usr/local/bin +curl -sL --http1.1 https://cnfl.io/cli | sh -s -- -b /usr/local/bin + +# Install Terraform +echo "Installing Terraform..." 
>> /var/log/user-data.log +yum-config-manager --add-repo https://rpm.releases.hashicorp.com/AmazonLinux/hashicorp.repo +yum -y install terraform + +# Verify installations +if /usr/local/bin/confluent version >> /var/log/user-data.log 2>&1; then + echo "Confluent CLI installed successfully" >> /var/log/user-data.log +else + echo "Confluent CLI installation failed" >> /var/log/user-data.log +fi + +if terraform version >> /var/log/user-data.log 2>&1; then + echo "Terraform installed successfully" >> /var/log/user-data.log +else + echo "Terraform installation failed" >> /var/log/user-data.log +fi + +echo "Proxy setup completed successfully!" >> /var/log/user-data.log +echo "You can now test with: nslookup $BOOTSTRAP_HOST $RESOLVER" >> /var/log/user-data.log +EOF + + tags = { + Name = "confluent-privatelink-test" + } +} + +# Create Confluent Environment +resource "confluent_environment" "staging" { + display_name = "Staging" + + stream_governance { + package = "ESSENTIALS" + } +} + +# Create locals for zone mapping +locals { + # Create zone ID to subnet ID mapping for PrivateLink + subnets_to_privatelink = { + for i, subnet in aws_subnet.main : + data.aws_availability_zones.available.zone_ids[i] => subnet.id + } + dns_domain = confluent_network.private-link.dns_domain + bootstrap_prefix = split(".", confluent_kafka_cluster.dedicated.bootstrap_endpoint)[0] +} + +resource "confluent_network" "private-link" { + display_name = "Private Link Network" + cloud = "AWS" + region = var.region + connection_types = ["PRIVATELINK"] + zones = keys(local.subnets_to_privatelink) + environment { + id = confluent_environment.staging.id + } + dns_config { + resolution = "PRIVATE" + } +} + +# Create PrivateLink access +resource "confluent_private_link_access" "aws" { + display_name = "AWS Private Link Access" + aws { + account = var.aws_account_id + } + environment { + id = confluent_environment.staging.id + } + network { + id = confluent_network.private-link.id + } +} + +resource 
"confluent_kafka_cluster" "dedicated" { + display_name = "inventory" + availability = "MULTI_ZONE" + cloud = confluent_network.private-link.cloud + region = confluent_network.private-link.region + dedicated { + cku = 2 + } + environment { + id = confluent_environment.staging.id + } + network { + id = confluent_network.private-link.id + } +} + +data "confluent_schema_registry_cluster" "essentials" { + environment { + id = confluent_environment.staging.id + } + + depends_on = [ + confluent_kafka_cluster.dedicated + ] +} + +resource "confluent_service_account" "app-manager" { + display_name = "app-manager" + description = "Service account to manage 'inventory' Kafka cluster" +} + +resource "confluent_role_binding" "app-manager-kafka-cluster-admin" { + principal = "User:${confluent_service_account.app-manager.id}" + role_name = "CloudClusterAdmin" + crn_pattern = confluent_kafka_cluster.dedicated.rbac_crn +} + +resource "confluent_api_key" "app-manager-kafka-api-key" { + display_name = "app-manager-kafka-api-key" + description = "Kafka API Key that is owned by 'app-manager' service account" + disable_wait_for_ready = true + owner { + id = confluent_service_account.app-manager.id + api_version = confluent_service_account.app-manager.api_version + kind = confluent_service_account.app-manager.kind + } + + managed_resource { + id = confluent_kafka_cluster.dedicated.id + api_version = confluent_kafka_cluster.dedicated.api_version + kind = confluent_kafka_cluster.dedicated.kind + + environment { + id = confluent_environment.staging.id + } + } + + depends_on = [ + confluent_role_binding.app-manager-kafka-cluster-admin, + confluent_private_link_access.aws, + aws_vpc_endpoint.privatelink, + aws_route53_record.privatelink, + aws_route53_record.privatelink-zonal, + ] +} + +resource "confluent_service_account" "app-consumer" { + display_name = "app-consumer" + description = "Service account to consume from '${local.topic_name}' topic of 'inventory' Kafka cluster" +} + +resource 
"confluent_api_key" "app-consumer-kafka-api-key" { + display_name = "app-consumer-kafka-api-key" + description = "Kafka API Key that is owned by 'app-consumer' service account" + disable_wait_for_ready = true + owner { + id = confluent_service_account.app-consumer.id + api_version = confluent_service_account.app-consumer.api_version + kind = confluent_service_account.app-consumer.kind + } + + managed_resource { + id = confluent_kafka_cluster.dedicated.id + api_version = confluent_kafka_cluster.dedicated.api_version + kind = confluent_kafka_cluster.dedicated.kind + + environment { + id = confluent_environment.staging.id + } + } + + depends_on = [ + confluent_private_link_access.aws, + aws_vpc_endpoint.privatelink, + aws_route53_record.privatelink, + aws_route53_record.privatelink-zonal, + ] +} + +resource "confluent_service_account" "app-producer" { + display_name = "app-producer" + description = "Service account to produce to '${local.topic_name}' topic of 'inventory' Kafka cluster" +} + +resource "confluent_api_key" "app-producer-kafka-api-key" { + display_name = "app-producer-kafka-api-key" + description = "Kafka API Key that is owned by 'app-producer' service account" + disable_wait_for_ready = true + owner { + id = confluent_service_account.app-producer.id + api_version = confluent_service_account.app-producer.api_version + kind = confluent_service_account.app-producer.kind + } + + managed_resource { + id = confluent_kafka_cluster.dedicated.id + api_version = confluent_kafka_cluster.dedicated.api_version + kind = confluent_kafka_cluster.dedicated.kind + + environment { + id = confluent_environment.staging.id + } + } + + depends_on = [ + confluent_private_link_access.aws, + aws_vpc_endpoint.privatelink, + aws_route53_record.privatelink, + aws_route53_record.privatelink-zonal, + ] +} + +resource "confluent_role_binding" "app-producer-developer-write" { + principal = "User:${confluent_service_account.app-producer.id}" + role_name = "DeveloperWrite" + crn_pattern = 
"${confluent_kafka_cluster.dedicated.rbac_crn}/kafka=${confluent_kafka_cluster.dedicated.id}/topic=${local.topic_name}" +} + +resource "confluent_role_binding" "app-consumer-developer-read-from-topic" { + principal = "User:${confluent_service_account.app-consumer.id}" + role_name = "DeveloperRead" + crn_pattern = "${confluent_kafka_cluster.dedicated.rbac_crn}/kafka=${confluent_kafka_cluster.dedicated.id}/topic=${local.topic_name}" +} + +resource "confluent_role_binding" "app-consumer-developer-read-from-group" { + principal = "User:${confluent_service_account.app-consumer.id}" + role_name = "DeveloperRead" + crn_pattern = "${confluent_kafka_cluster.dedicated.rbac_crn}/kafka=${confluent_kafka_cluster.dedicated.id}/group=confluent_cli_consumer_*" +} + +# AWS PrivateLink infrastructure +data "aws_availability_zone" "privatelink" { + for_each = local.subnets_to_privatelink + zone_id = each.key +} + +resource "aws_vpc_endpoint" "privatelink" { + vpc_id = aws_vpc.main.id + service_name = confluent_network.private-link.aws[0].private_link_endpoint_service + vpc_endpoint_type = "Interface" + + security_group_ids = [ + aws_security_group.main.id, + ] + + subnet_ids = [for zone, subnet_id in local.subnets_to_privatelink : subnet_id] + private_dns_enabled = false + + depends_on = [ + confluent_private_link_access.aws, + ] +} + +resource "aws_route53_zone" "privatelink" { + name = local.dns_domain + + vpc { + vpc_id = aws_vpc.main.id + } +} + +resource "aws_route53_record" "privatelink" { + count = length(local.subnets_to_privatelink) == 1 ? 
0 : 1 + zone_id = aws_route53_zone.privatelink.zone_id + name = "*.${aws_route53_zone.privatelink.name}" + type = "CNAME" + ttl = "60" + records = [ + aws_vpc_endpoint.privatelink.dns_entry[0]["dns_name"] + ] +} + +locals { + endpoint_prefix = split(".", aws_vpc_endpoint.privatelink.dns_entry[0]["dns_name"])[0] +} + +resource "aws_route53_record" "privatelink-zonal" { + for_each = local.subnets_to_privatelink + + zone_id = aws_route53_zone.privatelink.zone_id + name = length(local.subnets_to_privatelink) == 1 ? "*" : "*.${each.key}" + type = "CNAME" + ttl = "60" + records = [ + format("%s-%s%s", + local.endpoint_prefix, + data.aws_availability_zone.privatelink[each.key].name, + replace(aws_vpc_endpoint.privatelink.dns_entry[0]["dns_name"], local.endpoint_prefix, "") + ) + ] +} diff --git a/examples/configurations/dedicated-privatelink-aws-kafka-rbac-nginx/outputs.tf b/examples/configurations/dedicated-privatelink-aws-kafka-rbac-nginx/outputs.tf new file mode 100644 index 000000000..1a0acffd0 --- /dev/null +++ b/examples/configurations/dedicated-privatelink-aws-kafka-rbac-nginx/outputs.tf @@ -0,0 +1,131 @@ +output "instructions" { + value = <<-EOT + Environment ID: ${confluent_environment.staging.id} + Kafka Cluster ID: ${confluent_kafka_cluster.dedicated.id} + Kafka topic name: ${local.topic_name} + + Service Accounts and their Kafka API Keys (API Keys inherit the permissions granted to the owner): + ${confluent_service_account.app-manager.display_name}: ${confluent_service_account.app-manager.id} + ${confluent_service_account.app-manager.display_name}'s Kafka API Key: "${confluent_api_key.app-manager-kafka-api-key.id}" + ${confluent_service_account.app-manager.display_name}'s Kafka API Secret: "${confluent_api_key.app-manager-kafka-api-key.secret}" + + ${confluent_service_account.app-producer.display_name}: ${confluent_service_account.app-producer.id} + ${confluent_service_account.app-producer.display_name}'s Kafka API Key: 
"${confluent_api_key.app-producer-kafka-api-key.id}" + ${confluent_service_account.app-producer.display_name}'s Kafka API Secret: "${confluent_api_key.app-producer-kafka-api-key.secret}" + + ${confluent_service_account.app-consumer.display_name}: ${confluent_service_account.app-consumer.id} + ${confluent_service_account.app-consumer.display_name}'s Kafka API Key: "${confluent_api_key.app-consumer-kafka-api-key.id}" + ${confluent_service_account.app-consumer.display_name}'s Kafka API Secret: "${confluent_api_key.app-consumer-kafka-api-key.secret}" + + 🔑 SSH SETUP INSTRUCTIONS: + + 1. First, save your private key: + echo '${tls_private_key.main.private_key_pem}' > ~/.ssh/pni-test-key.pem + + 2. Set correct permissions on the private key: + chmod 600 ~/.ssh/pni-test-key.pem + + 3. Verify the key file permissions and content: + ls -la ~/.ssh/pni-test-key.pem + head -n 1 ~/.ssh/pni-test-key.pem # Should show "-----BEGIN RSA PRIVATE KEY-----" or similar + + 4. Connect to your EC2 instance: + ssh -i ~/.ssh/pni-test-key.pem ec2-user@${aws_instance.test.public_ip} + + 📝 Note: The private key is automatically generated by Terraform. + + 5. Test connectivity (port 443): + curl --request GET \ + --url ${confluent_kafka_cluster.dedicated.rest_endpoint}/kafka/v3/clusters/${confluent_kafka_cluster.dedicated.id}/topics \ + -u "${confluent_api_key.app-manager-kafka-api-key.id}:${confluent_api_key.app-manager-kafka-api-key.secret}" + + 6. Testing installation commands: + + # First, check if user_data script ran successfully + sudo cat /var/log/cloud-init-output.log | tail -20 # Check for any errors during boot + sudo cat /var/log/user-data.log | tail -20 # Check for any errors during boot + + # Verify Confluent CLI installation + confluent version + + # Verify Terraform installation + terraform version + + 🏗️ TERRAFORM SETUP INSTRUCTIONS: + + 1. 
Create main.tf file on your EC2 instance with the following content: + + cat << 'MAINEOF' > main.tf +terraform { + required_version = ">= 0.14.0" + required_providers { + confluent = { + source = "confluentinc/confluent" + version = "2.36.0" + } + } +} + +provider "confluent" { + kafka_id = "${confluent_kafka_cluster.dedicated.id}" + kafka_rest_endpoint = "${confluent_kafka_cluster.dedicated.rest_endpoint}" + kafka_api_key = "${confluent_api_key.app-manager-kafka-api-key.id}" + kafka_api_secret = "${confluent_api_key.app-manager-kafka-api-key.secret}" +} + +resource "confluent_kafka_topic" "orders" { + topic_name = "${local.topic_name}" + lifecycle { + prevent_destroy = true + } +} +MAINEOF + 2. Initialize and apply Terraform: + terraform init + terraform apply + + + In order to use the Confluent CLI v4 to produce and consume messages from topic '${local.topic_name}' using Kafka API Keys + of ${confluent_service_account.app-producer.display_name} and ${confluent_service_account.app-consumer.display_name} service accounts + run the following commands: + + # 1. Log in to Confluent Cloud + $ confluent login + + # 2. Produce key-value records to topic '${local.topic_name}' by using ${confluent_service_account.app-producer.display_name}'s Kafka API Key + $ confluent kafka topic produce ${local.topic_name} --environment ${confluent_environment.staging.id} --cluster ${confluent_kafka_cluster.dedicated.id} --api-key "${confluent_api_key.app-producer-kafka-api-key.id}" --api-secret "${confluent_api_key.app-producer-kafka-api-key.secret}" --bootstrap "${confluent_kafka_cluster.dedicated.bootstrap_endpoint}" + # Enter a few records and then press 'Ctrl-C' when you're done. 
+ # Sample records: + # {"number":1,"date":18500,"shipping_address":"899 W Evelyn Ave, Mountain View, CA 94041, USA","cost":15.00} + # {"number":2,"date":18501,"shipping_address":"1 Bedford St, London WC2E 9HG, United Kingdom","cost":5.00} + # {"number":3,"date":18502,"shipping_address":"3307 Northland Dr Suite 400, Austin, TX 78731, USA","cost":10.00} + + # 3. Consume records from topic '${local.topic_name}' by using ${confluent_service_account.app-consumer.display_name}'s Kafka API Key + $ confluent kafka topic consume ${local.topic_name} --from-beginning --environment ${confluent_environment.staging.id} --cluster ${confluent_kafka_cluster.dedicated.id} --api-key "${confluent_api_key.app-consumer-kafka-api-key.id}" --api-secret "${confluent_api_key.app-consumer-kafka-api-key.secret}" --bootstrap "${confluent_kafka_cluster.dedicated.bootstrap_endpoint}" + # When you are done, press 'Ctrl-C'. + + 🔑 CONFLUENT CLOUD CONSOLE ACCESS INSTRUCTIONS: + + 1. Exit the EC2 SSH session. + + 2. You should see "Create topic" button grayed out on ${local.topics_confluent_cloud_url}. + + 3. Update the /etc/hosts file on your laptop (the NGINX proxy was set up via Terraform already): + echo "\n${aws_instance.test.public_ip} ${trimprefix(trimsuffix(confluent_kafka_cluster.dedicated.rest_endpoint, ":443"), "https://")}" | sudo tee -a /etc/hosts + + 4. You should now see the "orders" topic on ${local.topics_confluent_cloud_url}. + + 5. (Optional) Alternatively, you can also send a direct cURL request from your laptop to verify the NGINX proxy was set up correctly: + + curl --request GET \ + --url ${confluent_kafka_cluster.dedicated.rest_endpoint}/kafka/v3/clusters/${confluent_kafka_cluster.dedicated.id}/topics \ + -u "${confluent_api_key.app-manager-kafka-api-key.id}:${confluent_api_key.app-manager-kafka-api-key.secret}" + + + 6. See https://docs.confluent.io/cloud/current/networking/ccloud-console-access.html#configure-a-proxy for more details. 
+ + EOT + + sensitive = true +} diff --git a/examples/configurations/dedicated-privatelink-aws-kafka-rbac-nginx/variables.tf b/examples/configurations/dedicated-privatelink-aws-kafka-rbac-nginx/variables.tf new file mode 100644 index 000000000..bd338ce85 --- /dev/null +++ b/examples/configurations/dedicated-privatelink-aws-kafka-rbac-nginx/variables.tf @@ -0,0 +1,25 @@ +variable "confluent_cloud_api_key" { + description = "Confluent Cloud API Key (also referred as Cloud API ID)" + type = string +} + +variable "confluent_cloud_api_secret" { + description = "Confluent Cloud API Secret" + type = string + sensitive = true +} + +variable "aws_account_id" { + description = "The AWS Account ID (12 digits)" + type = string +} + +variable "region" { + description = "The AWS Region" + type = string +} + +variable "client_cidr_blocks" { + description = "List of client CIDR blocks allowed to access EC2 via SSH" + type = list(string) +}