diff --git a/README.md b/README.md
index 43494f6d4..d4a459282 100644
--- a/README.md
+++ b/README.md
@@ -63,6 +63,7 @@
- `ts#mcp-server` - Add a [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) server to a TypeScript project.
- `ts#strands-agent` - Add a [Strands Agent](https://strandsagents.com/) to a TypeScript project.
- `ts#lambda-function` - Generate a TypeScript lambda function with optional type-safe event sources.
+- `ts#rdb` - Generate a TypeScript relational database project with Aurora and [Prisma](https://www.prisma.io/docs) support.
- `terraform#project` - Generate a new Terraform project.
- `py#project` - Generate a uv based Python project.
- `py#fast-api` - Generate a FastAPI backend service with [AWS Powertools](https://github.com/aws-powertools/powertools-lambda-python) pre-configured.
diff --git a/docs/astro.config.mjs b/docs/astro.config.mjs
index 5b3994e6e..dbd71608c 100644
--- a/docs/astro.config.mjs
+++ b/docs/astro.config.mjs
@@ -307,6 +307,7 @@ export default defineConfig({
items: [
{ label: 'ts#project', link: '/guides/typescript-project' },
{ label: 'ts#infra', link: '/guides/typescript-infrastructure' },
+ { label: 'ts#rdb', link: '/guides/ts-rdb' },
{ label: 'ts#trpc-api', link: '/guides/trpc' },
{ label: 'ts#smithy-api', link: '/guides/ts-smithy-api' },
{
diff --git a/docs/src/content/docs/en/guides/ts-rdb.mdx b/docs/src/content/docs/en/guides/ts-rdb.mdx
new file mode 100644
index 000000000..ce79abe71
--- /dev/null
+++ b/docs/src/content/docs/en/guides/ts-rdb.mdx
@@ -0,0 +1,558 @@
+---
+title: Relational Database
+description: Create a relational database project
+---
+
+import { FileTree, Tabs, TabItem } from '@astrojs/starlight/components';
+import RunGenerator from '@components/run-generator.astro';
+import GeneratorParameters from '@components/generator-parameters.astro';
+import Infrastructure from '@components/infrastructure.astro';
+import Snippet from '@components/snippet.astro';
+
+This generator creates a new relational database project backed by [Amazon Aurora](https://aws.amazon.com/rds/aurora/). It generates the application code and infrastructure needed to provision a database using AWS CDK or Terraform, with support for Aurora PostgreSQL and Aurora MySQL.
+
+The generated infrastructure includes a reusable Aurora construct along with an application-specific database construct. For CDK projects, the generated construct also includes a migration handler that runs your database migrations after the cluster is created, helping you bootstrap your schema as part of deployment.
+
+## Usage
+
+### Generate a Relational Database
+
+You can generate a new relational database project in two ways:
+
+
+
+### Options
+
+
+
+## Generator Output
+
+The generator will create the following project structure in the `/` directory:
+
+
+ - lib
+ - prisma.ts Prisma client setup for connecting to the database
+ - prisma
+ - schema.prisma Prisma schema and example model
+ - src
+ - index.ts Project entry point
+ - migration-handler.ts Lambda handler used to run database migrations during deployment
+ - Dockerfile Container image definition for the migration handler
+ - project.json Project configuration and build targets
+ - prisma.config.ts Configuration for Prisma CLI
+ - tsconfig.json Base TypeScript configuration for source and tests
+ - tsconfig.lib.json TypeScript configuration for source code
+ - tsconfig.spec.json TypeScript configuration for tests
+ - eslint.config.mjs Configuration for ESLint
+
+
+### Infrastructure
+
+
+
+
+
+
+ - packages/common/constructs/src
+ - app
+ - dbs
+ - \.ts Infrastructure specific to your database
+ - core
+ - rdb
+ - aurora.ts Generic Aurora database construct
+
+
+
+
+ - packages/common/terraform/src
+ - app
+ - dbs
+ - \
+ - \.tf Module specific to your database
+ - core
+ - rdb
+ - aurora
+ - aurora.tf Generic Aurora module
+
+
+
+
+## Implementing your Database
+
+### Data Modelling
+
+The generated project uses [Prisma ORM](https://www.prisma.io/docs/orm) for defining your database schema and generating a type-safe client. You can model your tables, relations and field types in `prisma/schema.prisma`, then use Prisma to generate the client and create migrations for your database.
+
+For more details on how to define models with Prisma, refer to the official guide on [data modeling with Prisma ORM](https://www.prisma.io/docs/orm/core-concepts/data-modeling#data-modeling-with-prisma-orm).
+
+As an example, a `User` model would be represented as follows in the Prisma schema:
+
+```ts title="packages/postgres/prisma/schema.prisma"
+model User {
+ id Int @id @default(autoincrement())
+ firstName String
+ lastName String
+}
+```
+
+### Data Migration
+
+When generating Prisma migration files, Prisma Migrate requires access to a shadow database. This is used during migration generation to compare your Prisma schema with the current state of the database and determine the SQL changes that need to be created.
+
+For more details on why this is required and how Prisma uses it, refer to the Prisma documentation on [shadow databases](https://www.prisma.io/docs/orm/prisma-migrate/understanding-prisma-migrate/shadow-database).
+
+:::note[Run Prisma from the Package Directory]
+When using the Prisma CLI, run the commands from your generated database package directory, for example `packages/postgres`, so Prisma can find `prisma/schema.prisma` and `prisma.config.ts`.
+:::
+
+To generate migrations locally, you can point Prisma at a local database instance. First export `DATABASE_URL` so Prisma can connect to your database:
+
+```bash
+export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/myapp"
+```
+
+Then create the migration files without applying them:
+
+
+
+
+```bash
+npx prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+pnpm prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+yarn prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate dev --create-only
+```
+
+
+
+
+Running `prisma migrate dev --create-only` will keep the `prisma/migrations` directory up to date by generating a new migration folder each time your schema changes require a new migration.
+
+Your Prisma directory will then look something like this:
+
+
+ - prisma
+ - migrations
+ - 20260405013911_initial_migrations
+ - migration.sql
+ - migration_lock.toml
+ - schema.prisma
+
+
+To apply these migrations to the local database that `DATABASE_URL` is pointing to, you can run:
+
+
+
+
+```bash
+npx prisma migrate deploy
+```
+
+
+
+
+```bash
+pnpm prisma migrate deploy
+```
+
+
+
+
+```bash
+yarn prisma migrate deploy
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate deploy
+```
+
+
+
+
+:::note[Automatic AWS Migrations]
+For deployments to AWS, you do not need to run `prisma migrate deploy` manually. The generated migration handler applies your migrations automatically during deployment, as long as the `prisma/migrations` directory is up to date.
+:::
+
+
+### Generated Database Client
+
+The generator automatically configures the `generate` target to generate the type-safe TypeScript Prisma client whenever you build the project.
+
+The generated client is structured as follows:
+
+
+ - generated
+ - prisma
+ - internal
+ - models
+ - browser.ts
+ - client.ts
+ - commonInputTypes.ts
+ - enums.ts
+ - models.ts
+
+
+The generated client is used via `lib/prisma.ts`, which configures the appropriate Prisma adapter and exports a ready-to-use `prisma` client instance:
+
+```ts title="packages/postgres/lib/prisma.ts"
+import { PrismaPg } from '@prisma/adapter-pg';
+import { PrismaClient } from '../generated/prisma/client.js';
+
+const adapter = new PrismaPg({
+ connectionString: `${process.env.DATABASE_URL}`,
+});
+const prisma = new PrismaClient({ adapter });
+
+export { prisma };
+```
+
+You can then use the exported `prisma` client in other projects in your workspace, such as a `tRPC` API:
+
+```ts title="packages/api/src/procedures/list-user.ts"
+import { prisma } from ':my-scope/postgres';
+import { publicProcedure } from '../init.js';
+import {
+ ListUserOutputSchema,
+} from '../schema/index.js';
+
+export const listUser = publicProcedure
+ .output(ListUserOutputSchema)
+ .query(async () => {
+ const users = await prisma.user.findMany({
+ orderBy: { id: 'asc' },
+ });
+
+ return { users };
+ });
+```
+
+## Deploying your Database
+
+### Infrastructure
+
+The relational database generator creates CDK or Terraform infrastructure as code based on your selected `iacProvider`. You can use this to deploy your database.
+
+
+
+The CDK construct for deploying your database is created in the `common/constructs` folder. You can consume this in a CDK application, for example:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { Postgres } from ':my-scope/common-constructs';
+
+export class ApplicationStack extends Stack {
+ constructor(scope: Construct, id: string, props?: StackProps) {
+ super(scope, id, props);
+
+ const vpc = new Vpc(this, 'Vpc', {
+ subnetConfiguration: [
+ {
+ name: 'private_isolated',
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ ],
+ });
+
+ const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ }
+ });
+ }
+}
+```
+
+This provisions an Aurora cluster in your VPC, along with the generated credentials secret and migration handler. The database should typically be deployed into private isolated subnets so that it is not directly accessible from the public internet.
+
+If you need to customise the cluster topology, you can override Aurora cluster properties such as `writer` and `readers` when instantiating the construct.
+
+
+The Terraform module for deploying your database is created in the `common/terraform` folder. You can use this in a Terraform configuration:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ tags = local.common_tags
+}
+```
+
+This provisions an Aurora cluster, the generated credentials secret, the migration Lambda function and the supporting resources needed to publish and run the migration container image.
+
+If you need to customise the cluster topology or sizing, you can override module inputs such as `instance_count`, `serverless_min_capacity`, `serverless_max_capacity`, `engine_version` and `port`.
+
+
+
+### Granting Access
+
+To allow your API to connect to the database, deploy the API into the same VPC as the database. You can then provide the database connection string to the API and allow the API handlers to connect to the database port.
+
+:::note[Prisma Environment Variables]
+When using PostgreSQL with Prisma, the generated client expects `DATABASE_URL`.
+
+When using MySQL with Prisma, the generated client expects `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD` and `DATABASE_NAME`, as configured in `lib/prisma.ts`.
+:::
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const api = new Api(this, 'Api', {
+ integrations: Api.defaultIntegrations(this)
+ .withDefaultOptions({
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ environment: {
+ DATABASE_URL: db.databaseUrl,
+ },
+ })
+ .build(),
+});
+
+Object.entries(api.integrations).forEach(([operation, integration]) => {
+ db.allowDefaultPortFrom(
+ integration.handler,
+ `Allow ${operation} to connect to the database`,
+ );
+});
+```
+
+This ensures that each API Lambda function runs inside the same VPC as the database and is explicitly allowed to connect to the database on its default port.
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "api" {
+ source = "../../common/terraform/src/app/apis/api"
+
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ env = {
+ DATABASE_URL = module.postgres.database_url
+ }
+
+ tags = local.common_tags
+}
+
+resource "aws_vpc_security_group_ingress_rule" "api_to_postgres" {
+ security_group_id = module.postgres.security_group_id
+ referenced_security_group_id = module.api.default_security_group_id
+ from_port = module.postgres.cluster_port
+ to_port = module.postgres.cluster_port
+ ip_protocol = "tcp"
+}
+```
+
+This configures the API to run in the same VPC as the database, passes the generated `DATABASE_URL` to the API and explicitly allows traffic from the API security group to the database security group on the database port.
+
+
+
+## Customising your Database Architecture
+
+The generated database construct exposes the underlying Aurora configuration so you can adapt it to your workload.
+
+### Overriding the Writer
+
+
+
+In CDK, you can override the default writer instance if you want to change how the primary Aurora instance is provisioned:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ writer: ClusterInstance.serverlessV2('writer'),
+});
+```
+
+
+In Terraform, Aurora instances are managed through module inputs rather than separate `writer` and `readers` definitions. The generated module always provisions the primary instance for you, and you customise the cluster around it using inputs such as `instance_count`, `engine_version` and the serverless capacity settings:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 1
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Adding Readers
+
+
+
+If your workload needs read scaling, you can add reader instances in CDK using the `readers` property:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ readers: [ClusterInstance.serverlessV2('reader')],
+});
+```
+
+
+In Terraform, the equivalent control is `instance_count`, which determines how many Aurora instances are created:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 2
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Configuring Serverless Capacity
+
+You can control Aurora Serverless v2 scaling limits to match your workload.
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ serverlessV2MinCapacity: 0.5,
+ serverlessV2MaxCapacity: 8,
+});
+```
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Pinning the Engine Version
+
+If you want to use a specific Aurora engine version, you can pin it explicitly rather than relying on the default.
+
+
+
+In CDK, pass `engineVersion` to the generated construct:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ engineVersion: AuroraPostgresEngineVersion.VER_17_7,
+});
+```
+
+
+In Terraform, use `engine_version`:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ engine_version = "17.7"
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Selecting Subnets and Lifecycle Options
+
+You can also customise where the database is deployed and how it behaves during deletion.
+
+
+
+Use `vpcSubnets` to control which subnets the database is placed into, and lifecycle-related properties such as `deletionProtection` and `removalPolicy` to control deletion behaviour:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { RemovalPolicy } from 'aws-cdk-lib';
+
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ deletionProtection: true,
+ removalPolicy: RemovalPolicy.SNAPSHOT,
+});
+```
+
+
+Use `subnet_ids` to control where the database and migration Lambda are deployed.
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ deletion_protection = true
+ skip_final_snapshot = false
+ port = 5432
+
+ tags = local.common_tags
+}
+```
+
+You can use `deletion_protection` to prevent accidental deletion, `skip_final_snapshot` to control snapshot behaviour on deletion, and `port` to override the default engine port.
+
+
diff --git a/docs/src/content/docs/es/guides/ts-rdb.mdx b/docs/src/content/docs/es/guides/ts-rdb.mdx
new file mode 100644
index 000000000..4f7a4f8ea
--- /dev/null
+++ b/docs/src/content/docs/es/guides/ts-rdb.mdx
@@ -0,0 +1,558 @@
+---
+title: "Base de Datos Relacional"
+description: "Crear un proyecto de base de datos relacional"
+---
+
+import { FileTree, Tabs, TabItem } from '@astrojs/starlight/components';
+import RunGenerator from '@components/run-generator.astro';
+import GeneratorParameters from '@components/generator-parameters.astro';
+import Infrastructure from '@components/infrastructure.astro';
+import Snippet from '@components/snippet.astro';
+
+Este generador crea un nuevo proyecto de base de datos relacional respaldado por [Amazon Aurora](https://aws.amazon.com/rds/aurora/). Genera el código de aplicación y la infraestructura necesaria para aprovisionar una base de datos usando AWS CDK o Terraform, con soporte para Aurora PostgreSQL y Aurora MySQL.
+
+La infraestructura generada incluye un constructo Aurora reutilizable junto con un constructo de base de datos específico de la aplicación. Para proyectos CDK, el constructo generado también incluye un manejador de migración que ejecuta las migraciones de tu base de datos después de que se crea el clúster, ayudándote a inicializar tu esquema como parte del despliegue.
+
+## Uso
+
+### Generar una Base de Datos Relacional
+
+Puedes generar un nuevo proyecto de base de datos relacional de dos maneras:
+
+
+
+### Opciones
+
+
+
+## Salida del Generador
+
+El generador creará la siguiente estructura de proyecto en el directorio `/`:
+
+
+ - lib
+ - prisma.ts Configuración del cliente Prisma para conectarse a la base de datos
+ - prisma
+ - schema.prisma Esquema de Prisma y modelo de ejemplo
+ - src
+ - index.ts Punto de entrada del proyecto
+ - migration-handler.ts Manejador Lambda usado para ejecutar migraciones de base de datos durante el despliegue
+ - Dockerfile Definición de imagen de contenedor para el manejador de migración
+ - project.json Configuración del proyecto y objetivos de construcción
+ - prisma.config.ts Configuración para Prisma CLI
+ - tsconfig.json Configuración base de TypeScript para código fuente y pruebas
+ - tsconfig.lib.json Configuración de TypeScript para código fuente
+ - tsconfig.spec.json Configuración de TypeScript para pruebas
+ - eslint.config.mjs Configuración para ESLint
+
+
+### Infraestructura
+
+
+
+
+
+
+ - packages/common/constructs/src
+ - app
+ - dbs
+ - \.ts Infraestructura específica para tu base de datos
+ - core
+ - rdb
+ - aurora.ts Constructo genérico de base de datos Aurora
+
+
+
+
+ - packages/common/terraform/src
+ - app
+ - dbs
+ - \
+ - \.tf Módulo específico para tu base de datos
+ - core
+ - rdb
+ - aurora
+ - aurora.tf Módulo genérico Aurora
+
+
+
+
+## Implementando tu Base de Datos
+
+### Modelado de Datos
+
+El proyecto generado usa [Prisma ORM](https://www.prisma.io/docs/orm) para definir tu esquema de base de datos y generar un cliente con tipos seguros. Puedes modelar tus tablas, relaciones y tipos de campos en `prisma/schema.prisma`, luego usar Prisma para generar el cliente y crear migraciones para tu base de datos.
+
+Para más detalles sobre cómo definir modelos con Prisma, consulta la guía oficial sobre [modelado de datos con Prisma ORM](https://www.prisma.io/docs/orm/core-concepts/data-modeling#data-modeling-with-prisma-orm).
+
+Como ejemplo, un modelo `User` se representaría de la siguiente manera en el esquema de Prisma:
+
+```ts title="packages/postgres/prisma/schema.prisma"
+model User {
+ id Int @id @default(autoincrement())
+ firstName String
+ lastName String
+}
+```
+
+### Migración de Datos
+
+Al generar archivos de migración de Prisma, Prisma Migrate requiere acceso a una base de datos sombra. Esta se usa durante la generación de migraciones para comparar tu esquema de Prisma con el estado actual de la base de datos y determinar los cambios SQL que necesitan ser creados.
+
+Para más detalles sobre por qué esto es necesario y cómo Prisma lo usa, consulta la documentación de Prisma sobre [bases de datos sombra](https://www.prisma.io/docs/orm/prisma-migrate/understanding-prisma-migrate/shadow-database).
+
+:::note[Ejecutar Prisma desde el Directorio del Paquete]
+Al usar Prisma CLI, ejecuta los comandos desde el directorio del paquete de base de datos generado, por ejemplo `packages/postgres`, para que Prisma pueda encontrar `prisma/schema.prisma` y `prisma.config.ts`.
+:::
+
+Para generar migraciones localmente, puedes apuntar Prisma a una instancia de base de datos local. Primero exporta `DATABASE_URL` para que Prisma pueda conectarse a tu base de datos:
+
+```bash
+export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/myapp"
+```
+
+Luego crea los archivos de migración sin aplicarlos:
+
+
+
+
+```bash
+npx prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+pnpm prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+yarn prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate dev --create-only
+```
+
+
+
+
+Ejecutar `prisma migrate dev --create-only` mantendrá actualizado el directorio `prisma/migrations` generando una nueva carpeta de migración cada vez que los cambios en tu esquema requieran una nueva migración.
+
+Tu directorio Prisma se verá entonces algo así:
+
+
+ - prisma
+ - migrations
+ - 20260405013911_initial_migrations
+ - migration.sql
+ - migration_lock.toml
+ - schema.prisma
+
+
+Para aplicar estas migraciones a la base de datos local a la que apunta `DATABASE_URL`, puedes ejecutar:
+
+
+
+
+```bash
+npx prisma migrate deploy
+```
+
+
+
+
+```bash
+pnpm prisma migrate deploy
+```
+
+
+
+
+```bash
+yarn prisma migrate deploy
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate deploy
+```
+
+
+
+
+:::note[Migraciones Automáticas en AWS]
+Para despliegues en AWS, no necesitas ejecutar `prisma migrate deploy` manualmente. El manejador de migración generado aplica tus migraciones automáticamente durante el despliegue, siempre que el directorio `prisma/migrations` esté actualizado.
+:::
+
+
+### Cliente de Base de Datos Generado
+
+El generador configura automáticamente el objetivo `generate` para generar el cliente TypeScript de Prisma con tipos seguros cada vez que construyes el proyecto.
+
+El cliente generado está estructurado de la siguiente manera:
+
+
+ - generated
+ - prisma
+ - internal
+ - models
+ - browser.ts
+ - client.ts
+ - commonInputTypes.ts
+ - enums.ts
+ - models.ts
+
+
+El cliente generado se usa a través de `lib/prisma.ts`, que configura el adaptador de Prisma apropiado y exporta una instancia del cliente `prisma` lista para usar:
+
+```ts title="packages/postgres/lib/prisma.ts"
+import { PrismaPg } from '@prisma/adapter-pg';
+import { PrismaClient } from '../generated/prisma/client.js';
+
+const adapter = new PrismaPg({
+ connectionString: `${process.env.DATABASE_URL}`,
+});
+const prisma = new PrismaClient({ adapter });
+
+export { prisma };
+```
+
+Luego puedes usar el cliente `prisma` exportado en otros proyectos en tu espacio de trabajo, como una API `tRPC`:
+
+```ts title="packages/api/src/procedures/list-user.ts"
+import { prisma } from ':my-scope/postgres';
+import { publicProcedure } from '../init.js';
+import {
+ ListUserOutputSchema,
+} from '../schema/index.js';
+
+export const listUser = publicProcedure
+ .output(ListUserOutputSchema)
+ .query(async () => {
+ const users = await prisma.user.findMany({
+ orderBy: { id: 'asc' },
+ });
+
+ return { users };
+ });
+```
+
+## Desplegando tu Base de Datos
+
+### Infraestructura
+
+El generador de base de datos relacional crea infraestructura como código CDK o Terraform basándose en tu `iacProvider` seleccionado. Puedes usar esto para desplegar tu base de datos.
+
+
+
+El constructo CDK para desplegar tu base de datos se crea en la carpeta `common/constructs`. Puedes consumir esto en una aplicación CDK, por ejemplo:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { Postgres } from ':my-scope/common-constructs';
+
+export class ApplicationStack extends Stack {
+ constructor(scope: Construct, id: string, props?: StackProps) {
+ super(scope, id, props);
+
+ const vpc = new Vpc(this, 'Vpc', {
+ subnetConfiguration: [
+ {
+ name: 'private_isolated',
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ ],
+ });
+
+ const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ }
+ });
+ }
+}
+```
+
+Esto aprovisiona un clúster Aurora en tu VPC, junto con el secreto de credenciales generado y el manejador de migración. La base de datos típicamente debería desplegarse en subredes privadas aisladas para que no sea directamente accesible desde internet público.
+
+Si necesitas personalizar la topología del clúster, puedes sobrescribir propiedades del clúster Aurora como `writer` y `readers` al instanciar el constructo.
+
+
+El módulo Terraform para desplegar tu base de datos se crea en la carpeta `common/terraform`. Puedes usar esto en una configuración de Terraform:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ tags = local.common_tags
+}
+```
+
+Esto aprovisiona un clúster Aurora, el secreto de credenciales generado, la función Lambda de migración y los recursos de soporte necesarios para publicar y ejecutar la imagen de contenedor de migración.
+
+Si necesitas personalizar la topología o el tamaño del clúster, puedes sobrescribir entradas del módulo como `instance_count`, `serverless_min_capacity`, `serverless_max_capacity`, `engine_version` y `port`.
+
+
+
+### Otorgando Acceso
+
+Para permitir que tu API se conecte a la base de datos, despliega la API en la misma VPC que la base de datos. Luego puedes proporcionar la cadena de conexión de la base de datos a la API y permitir que los manejadores de la API se conecten al puerto de la base de datos.
+
+:::note[Variables de Entorno de Prisma]
+Al usar PostgreSQL con Prisma, el cliente generado espera `DATABASE_URL`.
+
+Al usar MySQL con Prisma, el cliente generado espera `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD` y `DATABASE_NAME`, como se configura en `lib/prisma.ts`.
+:::
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const api = new Api(this, 'Api', {
+ integrations: Api.defaultIntegrations(this)
+ .withDefaultOptions({
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ environment: {
+ DATABASE_URL: db.databaseUrl,
+ },
+ })
+ .build(),
+});
+
+Object.entries(api.integrations).forEach(([operation, integration]) => {
+ db.allowDefaultPortFrom(
+ integration.handler,
+ `Allow ${operation} to connect to the database`,
+ );
+});
+```
+
+Esto asegura que cada función Lambda de la API se ejecute dentro de la misma VPC que la base de datos y tenga permiso explícito para conectarse a la base de datos en su puerto predeterminado.
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "api" {
+ source = "../../common/terraform/src/app/apis/api"
+
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ env = {
+ DATABASE_URL = module.postgres.database_url
+ }
+
+ tags = local.common_tags
+}
+
+resource "aws_vpc_security_group_ingress_rule" "api_to_postgres" {
+ security_group_id = module.postgres.security_group_id
+ referenced_security_group_id = module.api.default_security_group_id
+ from_port = module.postgres.cluster_port
+ to_port = module.postgres.cluster_port
+ ip_protocol = "tcp"
+}
+```
+
+Esto configura la API para ejecutarse en la misma VPC que la base de datos, pasa el `DATABASE_URL` generado a la API y permite explícitamente el tráfico del grupo de seguridad de la API al grupo de seguridad de la base de datos en el puerto de la base de datos.
+
+
+
+## Personalizando la Arquitectura de tu Base de Datos
+
+El constructo de base de datos generado expone la configuración subyacente de Aurora para que puedas adaptarla a tu carga de trabajo.
+
+### Sobrescribiendo el Writer
+
+
+
+En CDK, puedes sobrescribir la instancia writer predeterminada si deseas cambiar cómo se aprovisiona la instancia Aurora principal:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ writer: ClusterInstance.serverlessV2('writer'),
+});
+```
+
+
+En Terraform, las instancias Aurora se gestionan a través de entradas del módulo en lugar de definiciones separadas de `writer` y `readers`. El módulo generado siempre aprovisiona la instancia principal para ti, y personalizas el clúster alrededor de ella usando entradas como `instance_count`, `engine_version` y la configuración de capacidad serverless:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 1
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Agregando Readers
+
+
+
+Si tu carga de trabajo necesita escalado de lectura, puedes agregar instancias reader en CDK usando la propiedad `readers`:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ readers: [ClusterInstance.serverlessV2('reader')],
+});
+```
+
+
+En Terraform, el control equivalente es `instance_count`, que determina cuántas instancias Aurora se crean:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 2
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Configurando la Capacidad Serverless
+
+Puedes controlar los límites de escalado de Aurora Serverless v2 para que coincidan con tu carga de trabajo.
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ serverlessV2MinCapacity: 0.5,
+ serverlessV2MaxCapacity: 8,
+});
+```
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Fijando la Versión del Motor
+
+Si deseas usar una versión específica del motor Aurora, puedes fijarla explícitamente en lugar de depender del valor predeterminado.
+
+
+
+En CDK, pasa `engineVersion` al constructo generado:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ engineVersion: AuroraPostgresEngineVersion.VER_17_7,
+});
+```
+
+
+En Terraform, usa `engine_version`:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ engine_version = "17.7"
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Seleccionando Subredes y Opciones de Ciclo de Vida
+
+También puedes personalizar dónde se despliega la base de datos y cómo se comporta durante la eliminación.
+
+
+
+Usa `vpcSubnets` para controlar en qué subredes se coloca la base de datos, y propiedades relacionadas con el ciclo de vida como `deletionProtection` y `removalPolicy` para controlar el comportamiento de eliminación:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { RemovalPolicy } from 'aws-cdk-lib';
+
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ deletionProtection: true,
+ removalPolicy: RemovalPolicy.SNAPSHOT,
+});
+```
+
+
+Usa `subnet_ids` para controlar dónde se despliegan la base de datos y la Lambda de migración.
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ deletion_protection = true
+ skip_final_snapshot = false
+ port = 5432
+
+ tags = local.common_tags
+}
+```
+
+Puedes usar `deletion_protection` para prevenir eliminación accidental, `skip_final_snapshot` para controlar el comportamiento de snapshot al eliminar, y `port` para sobrescribir el puerto predeterminado del motor.
+
+
\ No newline at end of file
diff --git a/docs/src/content/docs/fr/guides/ts-rdb.mdx b/docs/src/content/docs/fr/guides/ts-rdb.mdx
new file mode 100644
index 000000000..f0bd69f04
--- /dev/null
+++ b/docs/src/content/docs/fr/guides/ts-rdb.mdx
@@ -0,0 +1,558 @@
+---
+title: "Base de données relationnelle"
+description: "Créer un projet de base de données relationnelle"
+---
+
+import { FileTree, Tabs, TabItem } from '@astrojs/starlight/components';
+import RunGenerator from '@components/run-generator.astro';
+import GeneratorParameters from '@components/generator-parameters.astro';
+import Infrastructure from '@components/infrastructure.astro';
+import Snippet from '@components/snippet.astro';
+
+Ce générateur crée un nouveau projet de base de données relationnelle basé sur [Amazon Aurora](https://aws.amazon.com/rds/aurora/). Il génère le code d'application et l'infrastructure nécessaires pour provisionner une base de données en utilisant AWS CDK ou Terraform, avec prise en charge d'Aurora PostgreSQL et Aurora MySQL.
+
+L'infrastructure générée comprend une construction Aurora réutilisable ainsi qu'une construction de base de données spécifique à l'application. Pour les projets CDK, la construction générée inclut également un gestionnaire de migration qui exécute vos migrations de base de données après la création du cluster, vous aidant à initialiser votre schéma dans le cadre du déploiement.
+
+## Utilisation
+
+### Générer une base de données relationnelle
+
+Vous pouvez générer un nouveau projet de base de données relationnelle de deux manières :
+
+
+
+### Options
+
+
+
+## Sortie du générateur
+
+Le générateur créera la structure de projet suivante dans le répertoire `/` :
+
+
+ - lib
+ - prisma.ts Configuration du client Prisma pour se connecter à la base de données
+ - prisma
+ - schema.prisma Schéma Prisma et modèle d'exemple
+ - src
+ - index.ts Point d'entrée du projet
+ - migration-handler.ts Gestionnaire Lambda utilisé pour exécuter les migrations de base de données pendant le déploiement
+ - Dockerfile Définition de l'image conteneur pour le gestionnaire de migration
+ - project.json Configuration du projet et cibles de build
+ - prisma.config.ts Configuration pour Prisma CLI
+ - tsconfig.json Configuration TypeScript de base pour les sources et les tests
+ - tsconfig.lib.json Configuration TypeScript pour le code source
+ - tsconfig.spec.json Configuration TypeScript pour les tests
+ - eslint.config.mjs Configuration pour ESLint
+
+
+### Infrastructure
+
+
+
+
+
+
+ - packages/common/constructs/src
+ - app
+ - dbs
+ - \.ts Infrastructure spécifique à votre base de données
+ - core
+ - rdb
+ - aurora.ts Construction de base de données Aurora générique
+
+
+
+
+ - packages/common/terraform/src
+ - app
+ - dbs
+ - \
+ - \.tf Module spécifique à votre base de données
+ - core
+ - rdb
+ - aurora
+ - aurora.tf Module Aurora générique
+
+
+
+
+## Implémenter votre base de données
+
+### Modélisation des données
+
+Le projet généré utilise [Prisma ORM](https://www.prisma.io/docs/orm) pour définir votre schéma de base de données et générer un client type-safe. Vous pouvez modéliser vos tables, relations et types de champs dans `prisma/schema.prisma`, puis utiliser Prisma pour générer le client et créer des migrations pour votre base de données.
+
+Pour plus de détails sur la façon de définir des modèles avec Prisma, consultez le guide officiel sur la [modélisation des données avec Prisma ORM](https://www.prisma.io/docs/orm/core-concepts/data-modeling#data-modeling-with-prisma-orm).
+
+À titre d'exemple, un modèle `User` serait représenté comme suit dans le schéma Prisma :
+
+```ts title="packages/postgres/prisma/schema.prisma"
+model User {
+ id Int @id @default(autoincrement())
+ firstName String
+ lastName String
+}
+```
+
+### Migration des données
+
+Lors de la génération de fichiers de migration Prisma, Prisma Migrate nécessite l'accès à une base de données fantôme (shadow database). Celle-ci est utilisée pendant la génération de migration pour comparer votre schéma Prisma avec l'état actuel de la base de données et déterminer les modifications SQL qui doivent être créées.
+
+Pour plus de détails sur les raisons de cette exigence et la façon dont Prisma l'utilise, consultez la documentation Prisma sur les [bases de données fantômes](https://www.prisma.io/docs/orm/prisma-migrate/understanding-prisma-migrate/shadow-database).
+
+:::note[Exécuter Prisma depuis le répertoire du package]
+Lors de l'utilisation de la CLI Prisma, exécutez les commandes depuis le répertoire de votre package de base de données généré, par exemple `packages/postgres`, afin que Prisma puisse trouver `prisma/schema.prisma` et `prisma.config.ts`.
+:::
+
+Pour générer des migrations localement, vous pouvez pointer Prisma vers une instance de base de données locale. Exportez d'abord `DATABASE_URL` pour que Prisma puisse se connecter à votre base de données :
+
+```bash
+export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/myapp"
+```
+
+Ensuite, créez les fichiers de migration sans les appliquer :
+
+
+
+
+```bash
+npx prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+pnpm prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+yarn prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate dev --create-only
+```
+
+
+
+
+L'exécution de `prisma migrate dev --create-only` maintiendra le répertoire `prisma/migrations` à jour en générant un nouveau dossier de migration chaque fois que vos modifications de schéma nécessitent une nouvelle migration.
+
+Votre répertoire Prisma ressemblera alors à ceci :
+
+
+ - prisma
+ - migrations
+ - 20260405013911_initial_migrations
+ - migration.sql
+ - migration_lock.toml
+ - schema.prisma
+
+
+Pour appliquer ces migrations à la base de données locale vers laquelle `DATABASE_URL` pointe, vous pouvez exécuter :
+
+
+
+
+```bash
+npx prisma migrate deploy
+```
+
+
+
+
+```bash
+pnpm prisma migrate deploy
+```
+
+
+
+
+```bash
+yarn prisma migrate deploy
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate deploy
+```
+
+
+
+
+:::note[Migrations AWS automatiques]
+Pour les déploiements sur AWS, vous n'avez pas besoin d'exécuter `prisma migrate deploy` manuellement. Le gestionnaire de migration généré applique vos migrations automatiquement pendant le déploiement, tant que le répertoire `prisma/migrations` est à jour.
+:::
+
+
+### Client de base de données généré
+
+Le générateur configure automatiquement la cible `generate` pour générer le client Prisma TypeScript type-safe chaque fois que vous construisez le projet.
+
+Le client généré est structuré comme suit :
+
+
+ - generated
+ - prisma
+ - internal
+ - models
+ - browser.ts
+ - client.ts
+ - commonInputTypes.ts
+ - enums.ts
+ - models.ts
+
+
+Le client généré est utilisé via `lib/prisma.ts`, qui configure l'adaptateur Prisma approprié et exporte une instance de client `prisma` prête à l'emploi :
+
+```ts title="packages/postgres/lib/prisma.ts"
+import { PrismaPg } from '@prisma/adapter-pg';
+import { PrismaClient } from '../generated/prisma/client.js';
+
+const adapter = new PrismaPg({
+ connectionString: `${process.env.DATABASE_URL}`,
+});
+const prisma = new PrismaClient({ adapter });
+
+export { prisma };
+```
+
+Vous pouvez ensuite utiliser le client `prisma` exporté dans d'autres projets de votre espace de travail, comme une API `tRPC` :
+
+```ts title="packages/api/src/procedures/list-user.ts"
+import { prisma } from ':my-scope/postgres';
+import { publicProcedure } from '../init.js';
+import {
+ ListUserOutputSchema,
+} from '../schema/index.js';
+
+export const listUser = publicProcedure
+ .output(ListUserOutputSchema)
+ .query(async () => {
+ const users = await prisma.user.findMany({
+ orderBy: { id: 'asc' },
+ });
+
+ return { users };
+ });
+```
+
+## Déployer votre base de données
+
+### Infrastructure
+
+Le générateur de base de données relationnelle crée une infrastructure en tant que code CDK ou Terraform en fonction de votre `iacProvider` sélectionné. Vous pouvez l'utiliser pour déployer votre base de données.
+
+
+
+La construction CDK pour déployer votre base de données est créée dans le dossier `common/constructs`. Vous pouvez l'utiliser dans une application CDK, par exemple :
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { Postgres } from ':my-scope/common-constructs';
+
+export class ApplicationStack extends Stack {
+ constructor(scope: Construct, id: string, props?: StackProps) {
+ super(scope, id, props);
+
+ const vpc = new Vpc(this, 'Vpc', {
+ subnetConfiguration: [
+ {
+ name: 'private_isolated',
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ ],
+ });
+
+ const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ }
+ });
+ }
+}
+```
+
+Cela provisionne un cluster Aurora dans votre VPC, ainsi que le secret d'identifiants généré et le gestionnaire de migration. La base de données devrait généralement être déployée dans des sous-réseaux privés isolés afin qu'elle ne soit pas directement accessible depuis l'internet public.
+
+Si vous devez personnaliser la topologie du cluster, vous pouvez remplacer les propriétés du cluster Aurora telles que `writer` et `readers` lors de l'instanciation de la construction.
+
+
+Le module Terraform pour déployer votre base de données est créé dans le dossier `common/terraform`. Vous pouvez l'utiliser dans une configuration Terraform :
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ tags = local.common_tags
+}
+```
+
+Cela provisionne un cluster Aurora, le secret d'identifiants généré, la fonction Lambda de migration et les ressources de support nécessaires pour publier et exécuter l'image conteneur de migration.
+
+Si vous devez personnaliser la topologie ou la taille du cluster, vous pouvez remplacer les entrées du module telles que `instance_count`, `serverless_min_capacity`, `serverless_max_capacity`, `engine_version` et `port`.
+
+
+
+### Accorder l'accès
+
+Pour permettre à votre API de se connecter à la base de données, déployez l'API dans le même VPC que la base de données. Vous pouvez ensuite fournir la chaîne de connexion de la base de données à l'API et autoriser les gestionnaires d'API à se connecter au port de la base de données.
+
+:::note[Variables d'environnement Prisma]
+Lors de l'utilisation de PostgreSQL avec Prisma, le client généré attend `DATABASE_URL`.
+
+Lors de l'utilisation de MySQL avec Prisma, le client généré attend `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD` et `DATABASE_NAME`, comme configuré dans `lib/prisma.ts`.
+:::
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const api = new Api(this, 'Api', {
+ integrations: Api.defaultIntegrations(this)
+ .withDefaultOptions({
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ environment: {
+ DATABASE_URL: db.databaseUrl,
+ },
+ })
+ .build(),
+});
+
+Object.entries(api.integrations).forEach(([operation, integration]) => {
+ db.allowDefaultPortFrom(
+ integration.handler,
+ `Allow ${operation} to connect to the database`,
+ );
+});
+```
+
+Cela garantit que chaque fonction Lambda d'API s'exécute à l'intérieur du même VPC que la base de données et est explicitement autorisée à se connecter à la base de données sur son port par défaut.
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "api" {
+ source = "../../common/terraform/src/app/apis/api"
+
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ env = {
+ DATABASE_URL = module.postgres.database_url
+ }
+
+ tags = local.common_tags
+}
+
+resource "aws_vpc_security_group_ingress_rule" "api_to_postgres" {
+ security_group_id = module.postgres.security_group_id
+ referenced_security_group_id = module.api.default_security_group_id
+ from_port = module.postgres.cluster_port
+ to_port = module.postgres.cluster_port
+ ip_protocol = "tcp"
+}
+```
+
+Cela configure l'API pour qu'elle s'exécute dans le même VPC que la base de données, transmet le `DATABASE_URL` généré à l'API et autorise explicitement le trafic du groupe de sécurité de l'API vers le groupe de sécurité de la base de données sur le port de la base de données.
+
+
+
+## Personnaliser l'architecture de votre base de données
+
+La construction de base de données générée expose la configuration Aurora sous-jacente afin que vous puissiez l'adapter à votre charge de travail.
+
+### Remplacer le Writer
+
+
+
+Dans CDK, vous pouvez remplacer l'instance writer par défaut si vous souhaitez modifier la façon dont l'instance Aurora principale est provisionnée :
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ writer: ClusterInstance.serverlessV2('writer'),
+});
+```
+
+
+Dans Terraform, les instances Aurora sont gérées via les entrées du module plutôt que par des définitions `writer` et `readers` séparées. Le module généré provisionne toujours l'instance principale pour vous, et vous personnalisez le cluster autour de celle-ci en utilisant des entrées telles que `instance_count`, `engine_version` et les paramètres de capacité serverless :
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 1
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Ajouter des Readers
+
+
+
+Si votre charge de travail nécessite une mise à l'échelle en lecture, vous pouvez ajouter des instances reader dans CDK en utilisant la propriété `readers` :
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ readers: [ClusterInstance.serverlessV2('reader')],
+});
+```
+
+
+Dans Terraform, le contrôle équivalent est `instance_count`, qui détermine combien d'instances Aurora sont créées :
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 2
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Configurer la capacité Serverless
+
+Vous pouvez contrôler les limites de mise à l'échelle d'Aurora Serverless v2 pour correspondre à votre charge de travail.
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ serverlessV2MinCapacity: 0.5,
+ serverlessV2MaxCapacity: 8,
+});
+```
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Épingler la version du moteur
+
+Si vous souhaitez utiliser une version spécifique du moteur Aurora, vous pouvez l'épingler explicitement plutôt que de vous fier à la valeur par défaut.
+
+
+
+Dans CDK, passez `engineVersion` à la construction générée :
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ engineVersion: AuroraPostgresEngineVersion.VER_17_7,
+});
+```
+
+
+Dans Terraform, utilisez `engine_version` :
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ engine_version = "17.7"
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Sélectionner les sous-réseaux et les options de cycle de vie
+
+Vous pouvez également personnaliser l'emplacement de déploiement de la base de données et son comportement lors de la suppression.
+
+
+
+Utilisez `vpcSubnets` pour contrôler dans quels sous-réseaux la base de données est placée, et les propriétés liées au cycle de vie telles que `deletionProtection` et `removalPolicy` pour contrôler le comportement de suppression :
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { RemovalPolicy } from 'aws-cdk-lib';
+
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ deletionProtection: true,
+ removalPolicy: RemovalPolicy.SNAPSHOT,
+});
+```
+
+
+Utilisez `subnet_ids` pour contrôler où la base de données et la Lambda de migration sont déployées.
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ deletion_protection = true
+ skip_final_snapshot = false
+ port = 5432
+
+ tags = local.common_tags
+}
+```
+
+Vous pouvez utiliser `deletion_protection` pour empêcher la suppression accidentelle, `skip_final_snapshot` pour contrôler le comportement des snapshots lors de la suppression, et `port` pour remplacer le port du moteur par défaut.
+
+
\ No newline at end of file
diff --git a/docs/src/content/docs/it/guides/ts-rdb.mdx b/docs/src/content/docs/it/guides/ts-rdb.mdx
new file mode 100644
index 000000000..b373d6695
--- /dev/null
+++ b/docs/src/content/docs/it/guides/ts-rdb.mdx
@@ -0,0 +1,558 @@
+---
+title: "Database Relazionale"
+description: "Crea un progetto di database relazionale"
+---
+
+import { FileTree, Tabs, TabItem } from '@astrojs/starlight/components';
+import RunGenerator from '@components/run-generator.astro';
+import GeneratorParameters from '@components/generator-parameters.astro';
+import Infrastructure from '@components/infrastructure.astro';
+import Snippet from '@components/snippet.astro';
+
+Questo generatore crea un nuovo progetto di database relazionale basato su [Amazon Aurora](https://aws.amazon.com/rds/aurora/). Genera il codice applicativo e l'infrastruttura necessaria per effettuare il provisioning di un database utilizzando AWS CDK o Terraform, con supporto per Aurora PostgreSQL e Aurora MySQL.
+
+L'infrastruttura generata include un costrutto Aurora riutilizzabile insieme a un costrutto di database specifico per l'applicazione. Per i progetti CDK, il costrutto generato include anche un gestore delle migrazioni che esegue le migrazioni del database dopo la creazione del cluster, aiutandoti a inizializzare il tuo schema come parte del deployment.
+
+## Utilizzo
+
+### Generare un Database Relazionale
+
+Puoi generare un nuovo progetto di database relazionale in due modi:
+
+
+
+### Opzioni
+
+
+
+## Output del Generatore
+
+Il generatore creerà la seguente struttura di progetto nella directory `/`:
+
+
+ - lib
+ - prisma.ts Configurazione del client Prisma per la connessione al database
+ - prisma
+ - schema.prisma Schema Prisma e modello di esempio
+ - src
+ - index.ts Punto di ingresso del progetto
+ - migration-handler.ts Gestore Lambda utilizzato per eseguire le migrazioni del database durante il deployment
+ - Dockerfile Definizione dell'immagine container per il gestore delle migrazioni
+ - project.json Configurazione del progetto e target di build
+ - prisma.config.ts Configurazione per la CLI di Prisma
+ - tsconfig.json Configurazione TypeScript di base per sorgenti e test
+ - tsconfig.lib.json Configurazione TypeScript per il codice sorgente
+ - tsconfig.spec.json Configurazione TypeScript per i test
+ - eslint.config.mjs Configurazione per ESLint
+
+
+### Infrastruttura
+
+
+
+
+
+
+ - packages/common/constructs/src
+ - app
+ - dbs
+ - \.ts Infrastruttura specifica per il tuo database
+ - core
+ - rdb
+ - aurora.ts Costrutto generico del database Aurora
+
+
+
+
+ - packages/common/terraform/src
+ - app
+ - dbs
+ - \
+ - \.tf Modulo specifico per il tuo database
+ - core
+ - rdb
+ - aurora
+ - aurora.tf Modulo generico Aurora
+
+
+
+
+## Implementazione del Database
+
+### Modellazione dei Dati
+
+Il progetto generato utilizza [Prisma ORM](https://www.prisma.io/docs/orm) per definire lo schema del database e generare un client type-safe. Puoi modellare le tue tabelle, relazioni e tipi di campo in `prisma/schema.prisma`, quindi utilizzare Prisma per generare il client e creare migrazioni per il tuo database.
+
+Per maggiori dettagli su come definire i modelli con Prisma, consulta la guida ufficiale sulla [modellazione dei dati con Prisma ORM](https://www.prisma.io/docs/orm/core-concepts/data-modeling#data-modeling-with-prisma-orm).
+
+Come esempio, un modello `User` sarebbe rappresentato come segue nello schema Prisma:
+
+```ts title="packages/postgres/prisma/schema.prisma"
+model User {
+ id Int @id @default(autoincrement())
+ firstName String
+ lastName String
+}
+```
+
+### Migrazione dei Dati
+
+Quando si generano i file di migrazione Prisma, Prisma Migrate richiede l'accesso a un database shadow. Questo viene utilizzato durante la generazione della migrazione per confrontare il tuo schema Prisma con lo stato attuale del database e determinare le modifiche SQL che devono essere create.
+
+Per maggiori dettagli sul perché questo è richiesto e su come Prisma lo utilizza, consulta la documentazione di Prisma sui [database shadow](https://www.prisma.io/docs/orm/prisma-migrate/understanding-prisma-migrate/shadow-database).
+
+:::note[Esegui Prisma dalla Directory del Package]
+Quando utilizzi la CLI di Prisma, esegui i comandi dalla directory del package del database generato, ad esempio `packages/postgres`, in modo che Prisma possa trovare `prisma/schema.prisma` e `prisma.config.ts`.
+:::
+
+Per generare le migrazioni localmente, puoi puntare Prisma a un'istanza di database locale. Prima esporta `DATABASE_URL` in modo che Prisma possa connettersi al tuo database:
+
+```bash
+export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/myapp"
+```
+
+Quindi crea i file di migrazione senza applicarli:
+
+
+
+
+```bash
+npx prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+pnpm prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+yarn prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate dev --create-only
+```
+
+
+
+
+Eseguire `prisma migrate dev --create-only` manterrà aggiornata la directory `prisma/migrations` generando una nuova cartella di migrazione ogni volta che le modifiche allo schema richiedono una nuova migrazione.
+
+La tua directory Prisma apparirà quindi così:
+
+
+ - prisma
+ - migrations
+ - 20260405013911_initial_migrations
+ - migration.sql
+ - migration_lock.toml
+ - schema.prisma
+
+
+Per applicare queste migrazioni al database locale a cui punta `DATABASE_URL`, puoi eseguire:
+
+
+
+
+```bash
+npx prisma migrate deploy
+```
+
+
+
+
+```bash
+pnpm prisma migrate deploy
+```
+
+
+
+
+```bash
+yarn prisma migrate deploy
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate deploy
+```
+
+
+
+
+:::note[Migrazioni AWS Automatiche]
+Per i deployment su AWS, non è necessario eseguire `prisma migrate deploy` manualmente. Il gestore delle migrazioni generato applica automaticamente le tue migrazioni durante il deployment, purché la directory `prisma/migrations` sia aggiornata.
+:::
+
+
+### Client Database Generato
+
+Il generatore configura automaticamente il target `generate` per generare il client Prisma TypeScript type-safe ogni volta che costruisci il progetto.
+
+Il client generato è strutturato come segue:
+
+
+ - generated
+ - prisma
+ - internal
+ - models
+ - browser.ts
+ - client.ts
+ - commonInputTypes.ts
+ - enums.ts
+ - models.ts
+
+
+Il client generato viene utilizzato tramite `lib/prisma.ts`, che configura l'adattatore Prisma appropriato ed esporta un'istanza del client `prisma` pronta all'uso:
+
+```ts title="packages/postgres/lib/prisma.ts"
+import { PrismaPg } from '@prisma/adapter-pg';
+import { PrismaClient } from '../generated/prisma/client.js';
+
+const adapter = new PrismaPg({
+ connectionString: `${process.env.DATABASE_URL}`,
+});
+const prisma = new PrismaClient({ adapter });
+
+export { prisma };
+```
+
+Puoi quindi utilizzare il client `prisma` esportato in altri progetti nel tuo workspace, come un'API `tRPC`:
+
+```ts title="packages/api/src/procedures/list-user.ts"
+import { prisma } from ':my-scope/postgres';
+import { publicProcedure } from '../init.js';
+import {
+ ListUserOutputSchema,
+} from '../schema/index.js';
+
+export const listUser = publicProcedure
+ .output(ListUserOutputSchema)
+ .query(async () => {
+ const users = await prisma.user.findMany({
+ orderBy: { id: 'asc' },
+ });
+
+ return { users };
+ });
+```
+
+## Deployment del Database
+
+### Infrastruttura
+
+Il generatore di database relazionale crea infrastruttura come codice CDK o Terraform in base al tuo `iacProvider` selezionato. Puoi utilizzarlo per effettuare il deployment del tuo database.
+
+
+
+Il costrutto CDK per il deployment del tuo database viene creato nella cartella `common/constructs`. Puoi utilizzarlo in un'applicazione CDK, ad esempio:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { Postgres } from ':my-scope/common-constructs';
+
+export class ApplicationStack extends Stack {
+ constructor(scope: Construct, id: string, props?: StackProps) {
+ super(scope, id, props);
+
+ const vpc = new Vpc(this, 'Vpc', {
+ subnetConfiguration: [
+ {
+ name: 'private_isolated',
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ ],
+ });
+
+ const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ }
+ });
+ }
+}
+```
+
+Questo effettua il provisioning di un cluster Aurora nel tuo VPC, insieme al secret delle credenziali generato e al gestore delle migrazioni. Il database dovrebbe tipicamente essere deployato in subnet isolate private in modo che non sia direttamente accessibile da Internet pubblico.
+
+Se hai bisogno di personalizzare la topologia del cluster, puoi sovrascrivere le proprietà del cluster Aurora come `writer` e `readers` durante l'istanziazione del costrutto.
+
+
+Il modulo Terraform per il deployment del tuo database viene creato nella cartella `common/terraform`. Puoi utilizzarlo in una configurazione Terraform:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ tags = local.common_tags
+}
+```
+
+Questo effettua il provisioning di un cluster Aurora, del secret delle credenziali generato, della funzione Lambda di migrazione e delle risorse di supporto necessarie per pubblicare ed eseguire l'immagine container di migrazione.
+
+Se hai bisogno di personalizzare la topologia o il dimensionamento del cluster, puoi sovrascrivere gli input del modulo come `instance_count`, `serverless_min_capacity`, `serverless_max_capacity`, `engine_version` e `port`.
+
+
+
+### Concessione dell'Accesso
+
+Per consentire alla tua API di connettersi al database, deploya l'API nello stesso VPC del database. Puoi quindi fornire la stringa di connessione del database all'API e consentire ai gestori dell'API di connettersi alla porta del database.
+
+:::note[Variabili d'Ambiente Prisma]
+Quando si utilizza PostgreSQL con Prisma, il client generato si aspetta `DATABASE_URL`.
+
+Quando si utilizza MySQL con Prisma, il client generato si aspetta `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD` e `DATABASE_NAME`, come configurato in `lib/prisma.ts`.
+:::
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const api = new Api(this, 'Api', {
+ integrations: Api.defaultIntegrations(this)
+ .withDefaultOptions({
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ environment: {
+ DATABASE_URL: db.databaseUrl,
+ },
+ })
+ .build(),
+});
+
+Object.entries(api.integrations).forEach(([operation, integration]) => {
+ db.allowDefaultPortFrom(
+ integration.handler,
+ `Allow ${operation} to connect to the database`,
+ );
+});
+```
+
+Questo garantisce che ogni funzione Lambda dell'API venga eseguita all'interno dello stesso VPC del database e sia esplicitamente autorizzata a connettersi al database sulla sua porta predefinita.
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "api" {
+ source = "../../common/terraform/src/app/apis/api"
+
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ env = {
+ DATABASE_URL = module.postgres.database_url
+ }
+
+ tags = local.common_tags
+}
+
+resource "aws_vpc_security_group_ingress_rule" "api_to_postgres" {
+ security_group_id = module.postgres.security_group_id
+ referenced_security_group_id = module.api.default_security_group_id
+ from_port = module.postgres.cluster_port
+ to_port = module.postgres.cluster_port
+ ip_protocol = "tcp"
+}
+```
+
+Questo configura l'API per essere eseguita nello stesso VPC del database, passa il `DATABASE_URL` generato all'API e consente esplicitamente il traffico dal security group dell'API al security group del database sulla porta del database.
+
+
+
+## Personalizzazione dell'Architettura del Database
+
+Il costrutto del database generato espone la configurazione Aurora sottostante in modo da poterla adattare al tuo carico di lavoro.
+
+### Override del Writer
+
+
+
+In CDK, puoi sovrascrivere l'istanza writer predefinita se desideri modificare il modo in cui viene effettuato il provisioning dell'istanza Aurora primaria:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ writer: ClusterInstance.serverlessV2('writer'),
+});
+```
+
+
+In Terraform, le istanze Aurora sono gestite tramite input del modulo piuttosto che definizioni separate di `writer` e `readers`. Il modulo generato effettua sempre il provisioning dell'istanza primaria per te, e personalizzi il cluster attorno ad essa utilizzando input come `instance_count`, `engine_version` e le impostazioni di capacità serverless:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 1
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Aggiunta di Reader
+
+
+
+Se il tuo carico di lavoro richiede scalabilità in lettura, puoi aggiungere istanze reader in CDK utilizzando la proprietà `readers`:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ readers: [ClusterInstance.serverlessV2('reader')],
+});
+```
+
+
+In Terraform, il controllo equivalente è `instance_count`, che determina quante istanze Aurora vengono create:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 2
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Configurazione della Capacità Serverless
+
+Puoi controllare i limiti di scaling di Aurora Serverless v2 per adattarli al tuo carico di lavoro.
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ serverlessV2MinCapacity: 0.5,
+ serverlessV2MaxCapacity: 8,
+});
+```
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Fissaggio della Versione del Motore
+
+Se desideri utilizzare una versione specifica del motore Aurora, puoi fissarla esplicitamente invece di affidarti a quella predefinita.
+
+
+
+In CDK, passa `engineVersion` al costrutto generato:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ engineVersion: AuroraPostgresEngineVersion.VER_17_7,
+});
+```
+
+
+In Terraform, utilizza `engine_version`:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ engine_version = "17.7"
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Selezione di Subnet e Opzioni del Ciclo di Vita
+
+Puoi anche personalizzare dove viene deployato il database e come si comporta durante l'eliminazione.
+
+
+
+Utilizza `vpcSubnets` per controllare in quali subnet viene posizionato il database, e proprietà relative al ciclo di vita come `deletionProtection` e `removalPolicy` per controllare il comportamento di eliminazione:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { RemovalPolicy } from 'aws-cdk-lib';
+
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ deletionProtection: true,
+ removalPolicy: RemovalPolicy.SNAPSHOT,
+});
+```
+
+
+Utilizza `subnet_ids` per controllare dove vengono deployati il database e la Lambda di migrazione.
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ deletion_protection = true
+ skip_final_snapshot = false
+ port = 5432
+
+ tags = local.common_tags
+}
+```
+
+Puoi utilizzare `deletion_protection` per prevenire l'eliminazione accidentale, `skip_final_snapshot` per controllare il comportamento degli snapshot durante l'eliminazione, e `port` per sovrascrivere la porta predefinita del motore.
+
+
\ No newline at end of file
diff --git a/docs/src/content/docs/jp/guides/ts-rdb.mdx b/docs/src/content/docs/jp/guides/ts-rdb.mdx
new file mode 100644
index 000000000..be4b7e642
--- /dev/null
+++ b/docs/src/content/docs/jp/guides/ts-rdb.mdx
@@ -0,0 +1,558 @@
+---
+title: "リレーショナルデータベース"
+description: "リレーショナルデータベースプロジェクトを作成する"
+---
+
+import { FileTree, Tabs, TabItem } from '@astrojs/starlight/components';
+import RunGenerator from '@components/run-generator.astro';
+import GeneratorParameters from '@components/generator-parameters.astro';
+import Infrastructure from '@components/infrastructure.astro';
+import Snippet from '@components/snippet.astro';
+
+このジェネレーターは、[Amazon Aurora](https://aws.amazon.com/rds/aurora/)を基盤とする新しいリレーショナルデータベースプロジェクトを作成します。AWS CDKまたはTerraformを使用してデータベースをプロビジョニングするために必要なアプリケーションコードとインフラストラクチャを生成し、Aurora PostgreSQLとAurora MySQLをサポートします。
+
+生成されるインフラストラクチャには、再利用可能なAuroraコンストラクトと、アプリケーション固有のデータベースコンストラクトが含まれます。CDKプロジェクトの場合、生成されたコンストラクトには、クラスター作成後にデータベースマイグレーションを実行するマイグレーションハンドラーも含まれており、デプロイの一部としてスキーマをブートストラップするのに役立ちます。
+
+## 使用方法
+
+### リレーショナルデータベースの生成
+
+新しいリレーショナルデータベースプロジェクトは、2つの方法で生成できます:
+
+
+
+### オプション
+
+
+
+## ジェネレーターの出力
+
+ジェネレーターは、`/`ディレクトリに以下のプロジェクト構造を作成します:
+
+
+ - lib
+ - prisma.ts データベースに接続するためのPrismaクライアントのセットアップ
+ - prisma
+ - schema.prisma Prismaスキーマとサンプルモデル
+ - src
+ - index.ts プロジェクトのエントリーポイント
+ - migration-handler.ts デプロイ時にデータベースマイグレーションを実行するために使用されるLambdaハンドラー
+ - Dockerfile マイグレーションハンドラーのコンテナイメージ定義
+ - project.json プロジェクト設定とビルドターゲット
+ - prisma.config.ts Prisma CLIの設定
+ - tsconfig.json ソースとテストのベースTypeScript設定
+ - tsconfig.lib.json ソースコードのTypeScript設定
+ - tsconfig.spec.json テストのTypeScript設定
+ - eslint.config.mjs ESLintの設定
+
+
+### インフラストラクチャ
+
+
+
+
+
+
+ - packages/common/constructs/src
+ - app
+ - dbs
+ - \.ts データベース固有のインフラストラクチャ
+ - core
+ - rdb
+ - aurora.ts 汎用Auroraデータベースコンストラクト
+
+
+
+
+ - packages/common/terraform/src
+ - app
+ - dbs
+ - \
+ - \.tf データベース固有のモジュール
+ - core
+ - rdb
+ - aurora
+ - aurora.tf 汎用Auroraモジュール
+
+
+
+
+## データベースの実装
+
+### データモデリング
+
+生成されたプロジェクトは、データベーススキーマの定義と型安全なクライアントの生成に[Prisma ORM](https://www.prisma.io/docs/orm)を使用します。`prisma/schema.prisma`でテーブル、リレーション、フィールドタイプをモデル化し、Prismaを使用してクライアントを生成し、データベースのマイグレーションを作成できます。
+
+Prismaでモデルを定義する方法の詳細については、[Prisma ORMによるデータモデリング](https://www.prisma.io/docs/orm/core-concepts/data-modeling#data-modeling-with-prisma-orm)の公式ガイドを参照してください。
+
+例として、`User`モデルは、Prismaスキーマで次のように表現されます:
+
+```prisma title="packages/postgres/prisma/schema.prisma"
+model User {
+ id Int @id @default(autoincrement())
+ firstName String
+ lastName String
+}
+```
+
+### データマイグレーション
+
+Prismaマイグレーションファイルを生成する際、Prisma Migrateはシャドウデータベースへのアクセスを必要とします。これは、マイグレーション生成中にPrismaスキーマをデータベースの現在の状態と比較し、作成する必要があるSQL変更を決定するために使用されます。
+
+これが必要な理由とPrismaがどのように使用するかの詳細については、[シャドウデータベース](https://www.prisma.io/docs/orm/prisma-migrate/understanding-prisma-migrate/shadow-database)に関するPrismaドキュメントを参照してください。
+
+:::note[パッケージディレクトリからPrismaを実行]
+Prisma CLIを使用する場合は、生成されたデータベースパッケージディレクトリ(例: `packages/postgres`)からコマンドを実行してください。そうすることで、Prismaが`prisma/schema.prisma`と`prisma.config.ts`を見つけることができます。
+:::
+
+ローカルでマイグレーションを生成するには、Prismaをローカルデータベースインスタンスに向けることができます。まず、Prismaがデータベースに接続できるように`DATABASE_URL`をエクスポートします:
+
+```bash
+export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/myapp"
+```
+
+次に、マイグレーションファイルを適用せずに作成します:
+
+
+
+
+```bash
+npx prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+pnpm prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+yarn prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate dev --create-only
+```
+
+
+
+
+`prisma migrate dev --create-only`を実行すると、スキーマの変更が新しいマイグレーションを必要とするたびに新しいマイグレーションフォルダを生成することで、`prisma/migrations`ディレクトリを最新の状態に保ちます。
+
+Prismaディレクトリは次のようになります:
+
+
+ - prisma
+ - migrations
+ - 20260405013911_initial_migrations
+ - migration.sql
+ - migration_lock.toml
+ - schema.prisma
+
+
+これらのマイグレーションを`DATABASE_URL`が指しているローカルデータベースに適用するには、次を実行できます:
+
+
+
+
+```bash
+npx prisma migrate deploy
+```
+
+
+
+
+```bash
+pnpm prisma migrate deploy
+```
+
+
+
+
+```bash
+yarn prisma migrate deploy
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate deploy
+```
+
+
+
+
+:::note[AWSでの自動マイグレーション]
+AWSへのデプロイの場合、`prisma migrate deploy`を手動で実行する必要はありません。生成されたマイグレーションハンドラーは、`prisma/migrations`ディレクトリが最新である限り、デプロイ中にマイグレーションを自動的に適用します。
+:::
+
+
+### 生成されたデータベースクライアント
+
+ジェネレーターは、プロジェクトをビルドするたびに型安全なTypeScript Prismaクライアントを生成するように`generate`ターゲットを自動的に構成します。
+
+生成されたクライアントは次のように構造化されています:
+
+
+ - generated
+ - prisma
+ - internal
+ - models
+ - browser.ts
+ - client.ts
+ - commonInputTypes.ts
+ - enums.ts
+ - models.ts
+
+
+生成されたクライアントは`lib/prisma.ts`を介して使用され、適切なPrismaアダプターを構成し、すぐに使用できる`prisma`クライアントインスタンスをエクスポートします:
+
+```ts title="packages/postgres/lib/prisma.ts"
+import { PrismaPg } from '@prisma/adapter-pg';
+import { PrismaClient } from '../generated/prisma/client.js';
+
+const adapter = new PrismaPg({
+ connectionString: `${process.env.DATABASE_URL}`,
+});
+const prisma = new PrismaClient({ adapter });
+
+export { prisma };
+```
+
+その後、エクスポートされた`prisma`クライアントをワークスペース内の他のプロジェクト(例: `tRPC` API)で使用できます:
+
+```ts title="packages/api/src/procedures/list-user.ts"
+import { prisma } from ':my-scope/postgres';
+import { publicProcedure } from '../init.js';
+import {
+ ListUserOutputSchema,
+} from '../schema/index.js';
+
+export const listUser = publicProcedure
+ .output(ListUserOutputSchema)
+ .query(async () => {
+ const users = await prisma.user.findMany({
+ orderBy: { id: 'asc' },
+ });
+
+ return { users };
+ });
+```
+
+## データベースのデプロイ
+
+### インフラストラクチャ
+
+リレーショナルデータベースジェネレーターは、選択した`iacProvider`に基づいてCDKまたはTerraformのInfrastructure as Codeを作成します。これを使用してデータベースをデプロイできます。
+
+
+
+データベースをデプロイするためのCDKコンストラクトは、`common/constructs`フォルダに作成されます。これをCDKアプリケーションで使用できます。例:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { Postgres } from ':my-scope/common-constructs';
+
+export class ApplicationStack extends Stack {
+ constructor(scope: Construct, id: string, props?: StackProps) {
+ super(scope, id, props);
+
+ const vpc = new Vpc(this, 'Vpc', {
+ subnetConfiguration: [
+ {
+ name: 'private_isolated',
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ ],
+ });
+
+ const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ }
+ });
+ }
+}
+```
+
+これにより、VPC内にAuroraクラスターが、生成された認証情報シークレットとマイグレーションハンドラーと共にプロビジョニングされます。データベースは通常、パブリックインターネットから直接アクセスできないように、プライベート分離サブネットにデプロイする必要があります。
+
+クラスタートポロジをカスタマイズする必要がある場合は、コンストラクトをインスタンス化する際に`writer`や`readers`などのAuroraクラスタープロパティをオーバーライドできます。
+
+
+データベースをデプロイするためのTerraformモジュールは、`common/terraform`フォルダに作成されます。これをTerraform構成で使用できます:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ tags = local.common_tags
+}
+```
+
+これにより、Auroraクラスター、生成された認証情報シークレット、マイグレーションLambda関数、およびマイグレーションコンテナイメージの公開と実行に必要なサポートリソースがプロビジョニングされます。
+
+クラスタートポロジまたはサイジングをカスタマイズする必要がある場合は、`instance_count`、`serverless_min_capacity`、`serverless_max_capacity`、`engine_version`、`port`などのモジュール入力をオーバーライドできます。
+
+
+
+### アクセスの許可
+
+APIがデータベースに接続できるようにするには、APIをデータベースと同じVPCにデプロイします。その後、データベース接続文字列をAPIに提供し、APIハンドラーがデータベースポートに接続できるようにします。
+
+:::note[Prisma環境変数]
+PrismaでPostgreSQLを使用する場合、生成されたクライアントは`DATABASE_URL`を期待します。
+
+PrismaでMySQLを使用する場合、生成されたクライアントは`DATABASE_HOST`、`DATABASE_USER`、`DATABASE_PASSWORD`、`DATABASE_NAME`を期待します。これは`lib/prisma.ts`で構成されています。
+:::
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const api = new Api(this, 'Api', {
+ integrations: Api.defaultIntegrations(this)
+ .withDefaultOptions({
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ environment: {
+ DATABASE_URL: db.databaseUrl,
+ },
+ })
+ .build(),
+});
+
+Object.entries(api.integrations).forEach(([operation, integration]) => {
+ db.allowDefaultPortFrom(
+ integration.handler,
+ `Allow ${operation} to connect to the database`,
+ );
+});
+```
+
+これにより、各API Lambda関数がデータベースと同じVPC内で実行され、デフォルトポートでデータベースに接続することが明示的に許可されます。
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "api" {
+ source = "../../common/terraform/src/app/apis/api"
+
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ env = {
+ DATABASE_URL = module.postgres.database_url
+ }
+
+ tags = local.common_tags
+}
+
+resource "aws_vpc_security_group_ingress_rule" "api_to_postgres" {
+ security_group_id = module.postgres.security_group_id
+ referenced_security_group_id = module.api.default_security_group_id
+ from_port = module.postgres.cluster_port
+ to_port = module.postgres.cluster_port
+ ip_protocol = "tcp"
+}
+```
+
+これにより、APIがデータベースと同じVPCで実行されるように構成され、生成された`DATABASE_URL`がAPIに渡され、データベースポート上でAPIセキュリティグループからデータベースセキュリティグループへのトラフィックが明示的に許可されます。
+
+
+
+## データベースアーキテクチャのカスタマイズ
+
+生成されたデータベースコンストラクトは、基盤となるAurora構成を公開しているため、ワークロードに適応させることができます。
+
+### Writerのオーバーライド
+
+
+
+CDKでは、プライマリAuroraインスタンスのプロビジョニング方法を変更したい場合、デフォルトのwriterインスタンスをオーバーライドできます:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ writer: ClusterInstance.serverlessV2('writer'),
+});
+```
+
+
+Terraformでは、Auroraインスタンスは個別の`writer`と`readers`の定義ではなく、モジュール入力を通じて管理されます。生成されたモジュールは常にプライマリインスタンスをプロビジョニングし、`instance_count`、`engine_version`、サーバーレスキャパシティ設定などの入力を使用してクラスターをカスタマイズします:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 1
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Readerの追加
+
+
+
+ワークロードに読み取りスケーリングが必要な場合、CDKで`readers`プロパティを使用してreaderインスタンスを追加できます:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ readers: [ClusterInstance.serverlessV2('reader')],
+});
+```
+
+
+Terraformでは、同等の制御は`instance_count`で、作成されるAuroraインスタンスの数を決定します:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 2
+
+ tags = local.common_tags
+}
+```
+
+
+
+### サーバーレスキャパシティの構成
+
+ワークロードに合わせてAurora Serverless v2のスケーリング制限を制御できます。
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ serverlessV2MinCapacity: 0.5,
+ serverlessV2MaxCapacity: 8,
+});
+```
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### エンジンバージョンの固定
+
+特定のAuroraエンジンバージョンを使用したい場合は、デフォルトに依存するのではなく、明示的に固定できます。
+
+
+
+CDKでは、生成されたコンストラクトに`engineVersion`を渡します:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ engineVersion: AuroraPostgresEngineVersion.VER_17_7,
+});
+```
+
+
+Terraformでは、`engine_version`を使用します:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ engine_version = "17.7"
+
+ tags = local.common_tags
+}
+```
+
+
+
+### サブネットとライフサイクルオプションの選択
+
+データベースのデプロイ場所と削除時の動作もカスタマイズできます。
+
+
+
+`vpcSubnets`を使用してデータベースを配置するサブネットを制御し、`deletionProtection`や`removalPolicy`などのライフサイクル関連のプロパティを使用して削除動作を制御します:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { RemovalPolicy } from 'aws-cdk-lib';
+
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ deletionProtection: true,
+ removalPolicy: RemovalPolicy.SNAPSHOT,
+});
+```
+
+
+`subnet_ids`を使用して、データベースとマイグレーションLambdaのデプロイ場所を制御します。
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ deletion_protection = true
+ skip_final_snapshot = false
+ port = 5432
+
+ tags = local.common_tags
+}
+```
+
+`deletion_protection`を使用して誤削除を防ぎ、`skip_final_snapshot`を使用して削除時のスナップショット動作を制御し、`port`を使用してデフォルトのエンジンポートをオーバーライドできます。
+
+
\ No newline at end of file
diff --git a/docs/src/content/docs/ko/guides/ts-rdb.mdx b/docs/src/content/docs/ko/guides/ts-rdb.mdx
new file mode 100644
index 000000000..6758c3ae2
--- /dev/null
+++ b/docs/src/content/docs/ko/guides/ts-rdb.mdx
@@ -0,0 +1,558 @@
+---
+title: "관계형 데이터베이스"
+description: "관계형 데이터베이스 프로젝트 생성"
+---
+
+import { FileTree, Tabs, TabItem } from '@astrojs/starlight/components';
+import RunGenerator from '@components/run-generator.astro';
+import GeneratorParameters from '@components/generator-parameters.astro';
+import Infrastructure from '@components/infrastructure.astro';
+import Snippet from '@components/snippet.astro';
+
+이 생성기는 [Amazon Aurora](https://aws.amazon.com/rds/aurora/)를 기반으로 하는 새로운 관계형 데이터베이스 프로젝트를 생성합니다. AWS CDK 또는 Terraform을 사용하여 데이터베이스를 프로비저닝하는 데 필요한 애플리케이션 코드와 인프라를 생성하며, Aurora PostgreSQL 및 Aurora MySQL을 지원합니다.
+
+생성된 인프라에는 재사용 가능한 Aurora 구성 요소와 애플리케이션별 데이터베이스 구성 요소가 포함됩니다. CDK 프로젝트의 경우, 생성된 구성 요소에는 클러스터 생성 후 데이터베이스 마이그레이션을 실행하는 마이그레이션 핸들러도 포함되어 있어 배포의 일부로 스키마를 부트스트랩하는 데 도움이 됩니다.
+
+## 사용법
+
+### 관계형 데이터베이스 생성
+
+새로운 관계형 데이터베이스 프로젝트는 두 가지 방법으로 생성할 수 있습니다:
+
+
+
+### 옵션
+
+
+
+## 생성기 출력
+
+생성기는 `/` 디렉토리에 다음과 같은 프로젝트 구조를 생성합니다:
+
+
+ - lib
+ - prisma.ts 데이터베이스 연결을 위한 Prisma 클라이언트 설정
+ - prisma
+ - schema.prisma Prisma 스키마 및 예제 모델
+ - src
+ - index.ts 프로젝트 진입점
+ - migration-handler.ts 배포 중 데이터베이스 마이그레이션을 실행하는 데 사용되는 Lambda 핸들러
+ - Dockerfile 마이그레이션 핸들러를 위한 컨테이너 이미지 정의
+ - project.json 프로젝트 구성 및 빌드 타겟
+ - prisma.config.ts Prisma CLI 구성
+ - tsconfig.json 소스 및 테스트를 위한 기본 TypeScript 구성
+ - tsconfig.lib.json 소스 코드를 위한 TypeScript 구성
+ - tsconfig.spec.json 테스트를 위한 TypeScript 구성
+ - eslint.config.mjs ESLint 구성
+
+
+### 인프라
+
+
+
+
+
+
+ - packages/common/constructs/src
+ - app
+ - dbs
+ - \.ts 데이터베이스에 특정한 인프라
+ - core
+ - rdb
+ - aurora.ts 범용 Aurora 데이터베이스 구성 요소
+
+
+
+
+ - packages/common/terraform/src
+ - app
+ - dbs
+ - \
+ - \.tf 데이터베이스에 특정한 모듈
+ - core
+ - rdb
+ - aurora
+ - aurora.tf 범용 Aurora 모듈
+
+
+
+
+## 데이터베이스 구현
+
+### 데이터 모델링
+
+생성된 프로젝트는 데이터베이스 스키마를 정의하고 타입 안전 클라이언트를 생성하기 위해 [Prisma ORM](https://www.prisma.io/docs/orm)을 사용합니다. `prisma/schema.prisma`에서 테이블, 관계 및 필드 타입을 모델링한 다음 Prisma를 사용하여 클라이언트를 생성하고 데이터베이스에 대한 마이그레이션을 생성할 수 있습니다.
+
+Prisma로 모델을 정의하는 방법에 대한 자세한 내용은 [data modeling with Prisma ORM](https://www.prisma.io/docs/orm/core-concepts/data-modeling#data-modeling-with-prisma-orm) 공식 가이드를 참조하세요.
+
+예를 들어, `User` 모델은 Prisma 스키마에서 다음과 같이 표현됩니다:
+
+```prisma title="packages/postgres/prisma/schema.prisma"
+model User {
+ id Int @id @default(autoincrement())
+ firstName String
+ lastName String
+}
+```
+
+### 데이터 마이그레이션
+
+Prisma 마이그레이션 파일을 생성할 때 Prisma Migrate는 섀도우 데이터베이스에 대한 액세스가 필요합니다. 이는 마이그레이션 생성 중에 Prisma 스키마를 데이터베이스의 현재 상태와 비교하고 생성해야 하는 SQL 변경 사항을 결정하는 데 사용됩니다.
+
+이것이 필요한 이유와 Prisma가 이를 사용하는 방법에 대한 자세한 내용은 [shadow databases](https://www.prisma.io/docs/orm/prisma-migrate/understanding-prisma-migrate/shadow-database)에 대한 Prisma 문서를 참조하세요.
+
+:::note[패키지 디렉토리에서 Prisma 실행]
+Prisma CLI를 사용할 때는 생성된 데이터베이스 패키지 디렉토리(예: `packages/postgres`)에서 명령을 실행하여 Prisma가 `prisma/schema.prisma` 및 `prisma.config.ts`를 찾을 수 있도록 하세요.
+:::
+
+로컬에서 마이그레이션을 생성하려면 Prisma를 로컬 데이터베이스 인스턴스로 지정할 수 있습니다. 먼저 Prisma가 데이터베이스에 연결할 수 있도록 `DATABASE_URL`을 내보냅니다:
+
+```bash
+export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/myapp"
+```
+
+그런 다음 적용하지 않고 마이그레이션 파일을 생성합니다:
+
+
+
+
+```bash
+npx prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+pnpm prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+yarn prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate dev --create-only
+```
+
+
+
+
+`prisma migrate dev --create-only`를 실행하면 스키마 변경이 새로운 마이그레이션을 필요로 할 때마다 새 마이그레이션 폴더를 생성하여 `prisma/migrations` 디렉토리를 최신 상태로 유지합니다.
+
+그러면 Prisma 디렉토리는 다음과 같이 보일 것입니다:
+
+
+ - prisma
+ - migrations
+ - 20260405013911_initial_migrations
+ - migration.sql
+ - migration_lock.toml
+ - schema.prisma
+
+
+`DATABASE_URL`이 가리키는 로컬 데이터베이스에 이러한 마이그레이션을 적용하려면 다음을 실행할 수 있습니다:
+
+
+
+
+```bash
+npx prisma migrate deploy
+```
+
+
+
+
+```bash
+pnpm prisma migrate deploy
+```
+
+
+
+
+```bash
+yarn prisma migrate deploy
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate deploy
+```
+
+
+
+
+:::note[자동 AWS 마이그레이션]
+AWS에 배포하는 경우 `prisma migrate deploy`를 수동으로 실행할 필요가 없습니다. 생성된 마이그레이션 핸들러는 `prisma/migrations` 디렉토리가 최신 상태인 한 배포 중에 자동으로 마이그레이션을 적용합니다.
+:::
+
+
+### 생성된 데이터베이스 클라이언트
+
+생성기는 프로젝트를 빌드할 때마다 타입 안전 TypeScript Prisma 클라이언트를 생성하도록 `generate` 타겟을 자동으로 구성합니다.
+
+생성된 클라이언트는 다음과 같이 구조화됩니다:
+
+
+ - generated
+ - prisma
+ - internal
+ - models
+ - browser.ts
+ - client.ts
+ - commonInputTypes.ts
+ - enums.ts
+ - models.ts
+
+
+생성된 클라이언트는 적절한 Prisma 어댑터를 구성하고 바로 사용할 수 있는 `prisma` 클라이언트 인스턴스를 내보내는 `lib/prisma.ts`를 통해 사용됩니다:
+
+```ts title="packages/postgres/lib/prisma.ts"
+import { PrismaPg } from '@prisma/adapter-pg';
+import { PrismaClient } from '../generated/prisma/client.js';
+
+const adapter = new PrismaPg({
+ connectionString: `${process.env.DATABASE_URL}`,
+});
+const prisma = new PrismaClient({ adapter });
+
+export { prisma };
+```
+
+그런 다음 `tRPC` API와 같은 워크스페이스의 다른 프로젝트에서 내보낸 `prisma` 클라이언트를 사용할 수 있습니다:
+
+```ts title="packages/api/src/procedures/list-user.ts"
+import { prisma } from ':my-scope/postgres';
+import { publicProcedure } from '../init.js';
+import {
+ ListUserOutputSchema,
+} from '../schema/index.js';
+
+export const listUser = publicProcedure
+ .output(ListUserOutputSchema)
+ .query(async () => {
+ const users = await prisma.user.findMany({
+ orderBy: { id: 'asc' },
+ });
+
+ return { users };
+ });
+```
+
+## 데이터베이스 배포
+
+### 인프라
+
+관계형 데이터베이스 생성기는 선택한 `iacProvider`에 따라 CDK 또는 Terraform 인프라 코드를 생성합니다. 이를 사용하여 데이터베이스를 배포할 수 있습니다.
+
+
+
+데이터베이스를 배포하기 위한 CDK 구성 요소는 `common/constructs` 폴더에 생성됩니다. 이를 CDK 애플리케이션에서 사용할 수 있습니다. 예를 들어:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { Postgres } from ':my-scope/common-constructs';
+
+export class ApplicationStack extends Stack {
+ constructor(scope: Construct, id: string, props?: StackProps) {
+ super(scope, id, props);
+
+ const vpc = new Vpc(this, 'Vpc', {
+ subnetConfiguration: [
+ {
+ name: 'private_isolated',
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ ],
+ });
+
+ const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ }
+ });
+ }
+}
+```
+
+이것은 생성된 자격 증명 시크릿 및 마이그레이션 핸들러와 함께 VPC에 Aurora 클러스터를 프로비저닝합니다. 데이터베이스는 일반적으로 공용 인터넷에서 직접 액세스할 수 없도록 프라이빗 격리 서브넷에 배포되어야 합니다.
+
+클러스터 토폴로지를 사용자 정의해야 하는 경우 구성 요소를 인스턴스화할 때 `writer` 및 `readers`와 같은 Aurora 클러스터 속성을 재정의할 수 있습니다.
+
+
+데이터베이스를 배포하기 위한 Terraform 모듈은 `common/terraform` 폴더에 생성됩니다. 이를 Terraform 구성에서 사용할 수 있습니다:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ tags = local.common_tags
+}
+```
+
+이것은 Aurora 클러스터, 생성된 자격 증명 시크릿, 마이그레이션 Lambda 함수 및 마이그레이션 컨테이너 이미지를 게시하고 실행하는 데 필요한 지원 리소스를 프로비저닝합니다.
+
+클러스터 토폴로지 또는 크기를 사용자 정의해야 하는 경우 `instance_count`, `serverless_min_capacity`, `serverless_max_capacity`, `engine_version` 및 `port`와 같은 모듈 입력을 재정의할 수 있습니다.
+
+
+
+### 액세스 권한 부여
+
+API가 데이터베이스에 연결할 수 있도록 하려면 API를 데이터베이스와 동일한 VPC에 배포하세요. 그런 다음 데이터베이스 연결 문자열을 API에 제공하고 API 핸들러가 데이터베이스 포트에 연결할 수 있도록 허용할 수 있습니다.
+
+:::note[Prisma 환경 변수]
+Prisma와 함께 PostgreSQL을 사용할 때 생성된 클라이언트는 `DATABASE_URL`을 기대합니다.
+
+Prisma와 함께 MySQL을 사용할 때 생성된 클라이언트는 `lib/prisma.ts`에 구성된 대로 `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD` 및 `DATABASE_NAME`을 기대합니다.
+:::
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const api = new Api(this, 'Api', {
+ integrations: Api.defaultIntegrations(this)
+ .withDefaultOptions({
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ environment: {
+ DATABASE_URL: db.databaseUrl,
+ },
+ })
+ .build(),
+});
+
+Object.entries(api.integrations).forEach(([operation, integration]) => {
+ db.allowDefaultPortFrom(
+ integration.handler,
+ `Allow ${operation} to connect to the database`,
+ );
+});
+```
+
+이렇게 하면 각 API Lambda 함수가 데이터베이스와 동일한 VPC 내에서 실행되고 기본 포트에서 데이터베이스에 연결할 수 있도록 명시적으로 허용됩니다.
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "api" {
+ source = "../../common/terraform/src/app/apis/api"
+
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ env = {
+ DATABASE_URL = module.postgres.database_url
+ }
+
+ tags = local.common_tags
+}
+
+resource "aws_vpc_security_group_ingress_rule" "api_to_postgres" {
+ security_group_id = module.postgres.security_group_id
+ referenced_security_group_id = module.api.default_security_group_id
+ from_port = module.postgres.cluster_port
+ to_port = module.postgres.cluster_port
+ ip_protocol = "tcp"
+}
+```
+
+이것은 API가 데이터베이스와 동일한 VPC에서 실행되도록 구성하고, 생성된 `DATABASE_URL`을 API에 전달하며, API 보안 그룹에서 데이터베이스 보안 그룹으로 데이터베이스 포트의 트래픽을 명시적으로 허용합니다.
+
+
+
+## 데이터베이스 아키텍처 사용자 정의
+
+생성된 데이터베이스 구성 요소는 워크로드에 맞게 조정할 수 있도록 기본 Aurora 구성을 노출합니다.
+
+### Writer 재정의
+
+
+
+CDK에서는 기본 Aurora 인스턴스가 프로비저닝되는 방식을 변경하려는 경우 기본 writer 인스턴스를 재정의할 수 있습니다:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ writer: ClusterInstance.serverlessV2('writer'),
+});
+```
+
+
+Terraform에서는 Aurora 인스턴스가 별도의 `writer` 및 `readers` 정의가 아닌 모듈 입력을 통해 관리됩니다. 생성된 모듈은 항상 기본 인스턴스를 프로비저닝하며, `instance_count`, `engine_version` 및 서버리스 용량 설정과 같은 입력을 사용하여 클러스터를 사용자 정의할 수 있습니다:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 1
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Reader 추가
+
+
+
+워크로드에 읽기 확장이 필요한 경우 CDK에서 `readers` 속성을 사용하여 reader 인스턴스를 추가할 수 있습니다:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ readers: [ClusterInstance.serverlessV2('reader')],
+});
+```
+
+
+Terraform에서 동등한 제어는 생성되는 Aurora 인스턴스 수를 결정하는 `instance_count`입니다:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 2
+
+ tags = local.common_tags
+}
+```
+
+
+
+### 서버리스 용량 구성
+
+워크로드에 맞게 Aurora Serverless v2 확장 제한을 제어할 수 있습니다.
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ serverlessV2MinCapacity: 0.5,
+ serverlessV2MaxCapacity: 8,
+});
+```
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### 엔진 버전 고정
+
+특정 Aurora 엔진 버전을 사용하려는 경우 기본값에 의존하지 않고 명시적으로 고정할 수 있습니다.
+
+
+
+CDK에서는 생성된 구성 요소에 `engineVersion`을 전달합니다:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ engineVersion: AuroraPostgresEngineVersion.VER_17_7,
+});
+```
+
+
+Terraform에서는 `engine_version`을 사용합니다:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ engine_version = "17.7"
+
+ tags = local.common_tags
+}
+```
+
+
+
+### 서브넷 및 수명 주기 옵션 선택
+
+데이터베이스가 배포되는 위치와 삭제 시 동작 방식도 사용자 정의할 수 있습니다.
+
+
+
+`vpcSubnets`을 사용하여 데이터베이스가 배치되는 서브넷을 제어하고, `deletionProtection` 및 `removalPolicy`와 같은 수명 주기 관련 속성을 사용하여 삭제 동작을 제어합니다:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { RemovalPolicy } from 'aws-cdk-lib';
+
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ deletionProtection: true,
+ removalPolicy: RemovalPolicy.SNAPSHOT,
+});
+```
+
+
+`subnet_ids`를 사용하여 데이터베이스 및 마이그레이션 Lambda가 배포되는 위치를 제어합니다.
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ deletion_protection = true
+ skip_final_snapshot = false
+ port = 5432
+
+ tags = local.common_tags
+}
+```
+
+`deletion_protection`을 사용하여 실수로 인한 삭제를 방지하고, `skip_final_snapshot`을 사용하여 삭제 시 스냅샷 동작을 제어하며, `port`를 사용하여 기본 엔진 포트를 재정의할 수 있습니다.
+
+
\ No newline at end of file
diff --git a/docs/src/content/docs/pt/guides/ts-rdb.mdx b/docs/src/content/docs/pt/guides/ts-rdb.mdx
new file mode 100644
index 000000000..2e5a2d0c4
--- /dev/null
+++ b/docs/src/content/docs/pt/guides/ts-rdb.mdx
@@ -0,0 +1,558 @@
+---
+title: "Banco de Dados Relacional"
+description: "Criar um projeto de banco de dados relacional"
+---
+
+import { FileTree, Tabs, TabItem } from '@astrojs/starlight/components';
+import RunGenerator from '@components/run-generator.astro';
+import GeneratorParameters from '@components/generator-parameters.astro';
+import Infrastructure from '@components/infrastructure.astro';
+import Snippet from '@components/snippet.astro';
+
+Este gerador cria um novo projeto de banco de dados relacional baseado no [Amazon Aurora](https://aws.amazon.com/rds/aurora/). Ele gera o código da aplicação e a infraestrutura necessária para provisionar um banco de dados usando AWS CDK ou Terraform, com suporte para Aurora PostgreSQL e Aurora MySQL.
+
+A infraestrutura gerada inclui um construtor Aurora reutilizável juntamente com um construtor de banco de dados específico da aplicação. Para projetos CDK, o construtor gerado também inclui um manipulador de migração que executa suas migrações de banco de dados após a criação do cluster, ajudando você a inicializar seu esquema como parte da implantação.
+
+## Uso
+
+### Gerar um Banco de Dados Relacional
+
+Você pode gerar um novo projeto de banco de dados relacional de duas maneiras:
+
+
+
+### Opções
+
+
+
+## Saída do Gerador
+
+O gerador criará a seguinte estrutura de projeto no diretório `/`:
+
+
+ - lib
+ - prisma.ts Configuração do cliente Prisma para conectar ao banco de dados
+ - prisma
+ - schema.prisma Esquema Prisma e modelo de exemplo
+ - src
+ - index.ts Ponto de entrada do projeto
+ - migration-handler.ts Manipulador Lambda usado para executar migrações de banco de dados durante a implantação
+ - Dockerfile Definição de imagem de container para o manipulador de migração
+ - project.json Configuração do projeto e alvos de build
+ - prisma.config.ts Configuração para Prisma CLI
+ - tsconfig.json Configuração base do TypeScript para código fonte e testes
+ - tsconfig.lib.json Configuração do TypeScript para código fonte
+ - tsconfig.spec.json Configuração do TypeScript para testes
+ - eslint.config.mjs Configuração para ESLint
+
+
+### Infraestrutura
+
+
+
+
+
+
+ - packages/common/constructs/src
+ - app
+ - dbs
+ - \.ts Infraestrutura específica para seu banco de dados
+ - core
+ - rdb
+ - aurora.ts Construtor genérico de banco de dados Aurora
+
+
+
+
+ - packages/common/terraform/src
+ - app
+ - dbs
+ - \
+ - \.tf Módulo específico para seu banco de dados
+ - core
+ - rdb
+ - aurora
+ - aurora.tf Módulo genérico Aurora
+
+
+
+
+## Implementando seu Banco de Dados
+
+### Modelagem de Dados
+
+O projeto gerado usa [Prisma ORM](https://www.prisma.io/docs/orm) para definir seu esquema de banco de dados e gerar um cliente com segurança de tipos. Você pode modelar suas tabelas, relações e tipos de campo em `prisma/schema.prisma`, depois usar o Prisma para gerar o cliente e criar migrações para seu banco de dados.
+
+Para mais detalhes sobre como definir modelos com Prisma, consulte o guia oficial sobre [modelagem de dados com Prisma ORM](https://www.prisma.io/docs/orm/core-concepts/data-modeling#data-modeling-with-prisma-orm).
+
+Como exemplo, um modelo `User` seria representado da seguinte forma no esquema Prisma:
+
+```ts title="packages/postgres/prisma/schema.prisma"
+model User {
+ id Int @id @default(autoincrement())
+ firstName String
+ lastName String
+}
+```
+
+### Migração de Dados
+
+Ao gerar arquivos de migração Prisma, o Prisma Migrate requer acesso a um banco de dados shadow. Isso é usado durante a geração de migração para comparar seu esquema Prisma com o estado atual do banco de dados e determinar as alterações SQL que precisam ser criadas.
+
+Para mais detalhes sobre por que isso é necessário e como o Prisma o utiliza, consulte a documentação do Prisma sobre [bancos de dados shadow](https://www.prisma.io/docs/orm/prisma-migrate/understanding-prisma-migrate/shadow-database).
+
+:::note[Execute o Prisma a partir do Diretório do Pacote]
+Ao usar a CLI do Prisma, execute os comandos a partir do diretório do pacote de banco de dados gerado, por exemplo `packages/postgres`, para que o Prisma possa encontrar `prisma/schema.prisma` e `prisma.config.ts`.
+:::
+
+Para gerar migrações localmente, você pode apontar o Prisma para uma instância de banco de dados local. Primeiro exporte `DATABASE_URL` para que o Prisma possa conectar ao seu banco de dados:
+
+```bash
+export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/myapp"
+```
+
+Em seguida, crie os arquivos de migração sem aplicá-los:
+
+
+
+
+```bash
+npx prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+pnpm prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+yarn prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate dev --create-only
+```
+
+
+
+
+Executar `prisma migrate dev --create-only` manterá o diretório `prisma/migrations` atualizado gerando uma nova pasta de migração cada vez que suas alterações de esquema exigirem uma nova migração.
+
+Seu diretório Prisma ficará então parecido com isto:
+
+
+ - prisma
+ - migrations
+ - 20260405013911_initial_migrations
+ - migration.sql
+ - migration_lock.toml
+ - schema.prisma
+
+
+Para aplicar essas migrações ao banco de dados local para o qual `DATABASE_URL` está apontando, você pode executar:
+
+
+
+
+```bash
+npx prisma migrate deploy
+```
+
+
+
+
+```bash
+pnpm prisma migrate deploy
+```
+
+
+
+
+```bash
+yarn prisma migrate deploy
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate deploy
+```
+
+
+
+
+:::note[Migrações Automáticas na AWS]
+Para implantações na AWS, você não precisa executar `prisma migrate deploy` manualmente. O manipulador de migração gerado aplica suas migrações automaticamente durante a implantação, desde que o diretório `prisma/migrations` esteja atualizado.
+:::
+
+
+### Cliente de Banco de Dados Gerado
+
+O gerador configura automaticamente o alvo `generate` para gerar o cliente Prisma TypeScript com segurança de tipos sempre que você compilar o projeto.
+
+O cliente gerado é estruturado da seguinte forma:
+
+
+ - generated
+ - prisma
+ - internal
+ - models
+ - browser.ts
+ - client.ts
+ - commonInputTypes.ts
+ - enums.ts
+ - models.ts
+
+
+O cliente gerado é usado através de `lib/prisma.ts`, que configura o adaptador Prisma apropriado e exporta uma instância do cliente `prisma` pronta para uso:
+
+```ts title="packages/postgres/lib/prisma.ts"
+import { PrismaPg } from '@prisma/adapter-pg';
+import { PrismaClient } from '../generated/prisma/client.js';
+
+const adapter = new PrismaPg({
+ connectionString: `${process.env.DATABASE_URL}`,
+});
+const prisma = new PrismaClient({ adapter });
+
+export { prisma };
+```
+
+Você pode então usar o cliente `prisma` exportado em outros projetos no seu workspace, como uma API `tRPC`:
+
+```ts title="packages/api/src/procedures/list-user.ts"
+import { prisma } from ':my-scope/postgres';
+import { publicProcedure } from '../init.js';
+import {
+ ListUserOutputSchema,
+} from '../schema/index.js';
+
+export const listUser = publicProcedure
+ .output(ListUserOutputSchema)
+ .query(async () => {
+ const users = await prisma.user.findMany({
+ orderBy: { id: 'asc' },
+ });
+
+ return { users };
+ });
+```
+
+## Implantando seu Banco de Dados
+
+### Infraestrutura
+
+O gerador de banco de dados relacional cria infraestrutura como código CDK ou Terraform com base no seu `iacProvider` selecionado. Você pode usar isso para implantar seu banco de dados.
+
+
+
+O construtor CDK para implantar seu banco de dados é criado na pasta `common/constructs`. Você pode consumir isso em uma aplicação CDK, por exemplo:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { Postgres } from ':my-scope/common-constructs';
+
+export class ApplicationStack extends Stack {
+ constructor(scope: Construct, id: string, props?: StackProps) {
+ super(scope, id, props);
+
+ const vpc = new Vpc(this, 'Vpc', {
+ subnetConfiguration: [
+ {
+ name: 'private_isolated',
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ ],
+ });
+
+ const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ }
+ });
+ }
+}
+```
+
+Isso provisiona um cluster Aurora em sua VPC, juntamente com o segredo de credenciais gerado e o manipulador de migração. O banco de dados normalmente deve ser implantado em sub-redes privadas isoladas para que não seja diretamente acessível pela internet pública.
+
+Se você precisar personalizar a topologia do cluster, pode substituir as propriedades do cluster Aurora, como `writer` e `readers`, ao instanciar o construtor.
+
+
+O módulo Terraform para implantar seu banco de dados é criado na pasta `common/terraform`. Você pode usar isso em uma configuração Terraform:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ tags = local.common_tags
+}
+```
+
+Isso provisiona um cluster Aurora, o segredo de credenciais gerado, a função Lambda de migração e os recursos de suporte necessários para publicar e executar a imagem de container de migração.
+
+Se você precisar personalizar a topologia ou dimensionamento do cluster, pode substituir entradas do módulo como `instance_count`, `serverless_min_capacity`, `serverless_max_capacity`, `engine_version` e `port`.
+
+
+
+### Concedendo Acesso
+
+Para permitir que sua API se conecte ao banco de dados, implante a API na mesma VPC do banco de dados. Você pode então fornecer a string de conexão do banco de dados para a API e permitir que os manipuladores da API se conectem à porta do banco de dados.
+
+:::note[Variáveis de Ambiente do Prisma]
+Ao usar PostgreSQL com Prisma, o cliente gerado espera `DATABASE_URL`.
+
+Ao usar MySQL com Prisma, o cliente gerado espera `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD` e `DATABASE_NAME`, conforme configurado em `lib/prisma.ts`.
+:::
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const api = new Api(this, 'Api', {
+ integrations: Api.defaultIntegrations(this)
+ .withDefaultOptions({
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ environment: {
+ DATABASE_URL: db.databaseUrl,
+ },
+ })
+ .build(),
+});
+
+Object.entries(api.integrations).forEach(([operation, integration]) => {
+ db.allowDefaultPortFrom(
+ integration.handler,
+ `Allow ${operation} to connect to the database`,
+ );
+});
+```
+
+Isso garante que cada função Lambda da API seja executada dentro da mesma VPC que o banco de dados e tenha permissão explícita para se conectar ao banco de dados em sua porta padrão.
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "api" {
+ source = "../../common/terraform/src/app/apis/api"
+
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ env = {
+ DATABASE_URL = module.postgres.database_url
+ }
+
+ tags = local.common_tags
+}
+
+resource "aws_vpc_security_group_ingress_rule" "api_to_postgres" {
+ security_group_id = module.postgres.security_group_id
+ referenced_security_group_id = module.api.default_security_group_id
+ from_port = module.postgres.cluster_port
+ to_port = module.postgres.cluster_port
+ ip_protocol = "tcp"
+}
+```
+
+Isso configura a API para ser executada na mesma VPC que o banco de dados, passa o `DATABASE_URL` gerado para a API e permite explicitamente o tráfego do grupo de segurança da API para o grupo de segurança do banco de dados na porta do banco de dados.
+
+
+
+## Personalizando a Arquitetura do seu Banco de Dados
+
+O construtor de banco de dados gerado expõe a configuração subjacente do Aurora para que você possa adaptá-la à sua carga de trabalho.
+
+### Substituindo o Writer
+
+
+
+No CDK, você pode substituir a instância writer padrão se quiser alterar como a instância primária do Aurora é provisionada:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ writer: ClusterInstance.serverlessV2('writer'),
+});
+```
+
+
+No Terraform, as instâncias Aurora são gerenciadas através de entradas de módulo em vez de definições separadas de `writer` e `readers`. O módulo gerado sempre provisiona a instância primária para você, e você personaliza o cluster ao redor dela usando entradas como `instance_count`, `engine_version` e as configurações de capacidade serverless:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 1
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Adicionando Readers
+
+
+
+Se sua carga de trabalho precisar de escalonamento de leitura, você pode adicionar instâncias reader no CDK usando a propriedade `readers`:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ readers: [ClusterInstance.serverlessV2('reader')],
+});
+```
+
+
+No Terraform, o controle equivalente é `instance_count`, que determina quantas instâncias Aurora são criadas:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 2
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Configurando Capacidade Serverless
+
+Você pode controlar os limites de escalonamento do Aurora Serverless v2 para corresponder à sua carga de trabalho.
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ serverlessV2MinCapacity: 0.5,
+ serverlessV2MaxCapacity: 8,
+});
+```
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Fixando a Versão do Engine
+
+Se você quiser usar uma versão específica do engine Aurora, pode fixá-la explicitamente em vez de confiar no padrão.
+
+
+
+No CDK, passe `engineVersion` para o construtor gerado:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ engineVersion: AuroraPostgresEngineVersion.VER_17_7,
+});
+```
+
+
+No Terraform, use `engine_version`:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ engine_version = "17.7"
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Selecionando Sub-redes e Opções de Ciclo de Vida
+
+Você também pode personalizar onde o banco de dados é implantado e como ele se comporta durante a exclusão.
+
+
+
+Use `vpcSubnets` para controlar em quais sub-redes o banco de dados é colocado, e propriedades relacionadas ao ciclo de vida, como `deletionProtection` e `removalPolicy`, para controlar o comportamento de exclusão:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { RemovalPolicy } from 'aws-cdk-lib';
+
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ deletionProtection: true,
+ removalPolicy: RemovalPolicy.SNAPSHOT,
+});
+```
+
+
+Use `subnet_ids` para controlar onde o banco de dados e o Lambda de migração são implantados.
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ deletion_protection = true
+ skip_final_snapshot = false
+ port = 5432
+
+ tags = local.common_tags
+}
+```
+
+Você pode usar `deletion_protection` para evitar exclusão acidental, `skip_final_snapshot` para controlar o comportamento de snapshot na exclusão, e `port` para substituir a porta padrão do engine.
+
+
\ No newline at end of file
diff --git a/docs/src/content/docs/vi/guides/ts-rdb.mdx b/docs/src/content/docs/vi/guides/ts-rdb.mdx
new file mode 100644
index 000000000..8d19e9239
--- /dev/null
+++ b/docs/src/content/docs/vi/guides/ts-rdb.mdx
@@ -0,0 +1,558 @@
+---
+title: "Cơ sở dữ liệu quan hệ"
+description: "Tạo một dự án cơ sở dữ liệu quan hệ"
+---
+
+import { FileTree, Tabs, TabItem } from '@astrojs/starlight/components';
+import RunGenerator from '@components/run-generator.astro';
+import GeneratorParameters from '@components/generator-parameters.astro';
+import Infrastructure from '@components/infrastructure.astro';
+import Snippet from '@components/snippet.astro';
+
+Generator này tạo một dự án cơ sở dữ liệu quan hệ mới được hỗ trợ bởi [Amazon Aurora](https://aws.amazon.com/rds/aurora/). Nó tạo ra mã ứng dụng và cơ sở hạ tầng cần thiết để cung cấp cơ sở dữ liệu bằng AWS CDK hoặc Terraform, với hỗ trợ cho Aurora PostgreSQL và Aurora MySQL.
+
+Cơ sở hạ tầng được tạo ra bao gồm một construct Aurora có thể tái sử dụng cùng với một construct cơ sở dữ liệu dành riêng cho ứng dụng. Đối với các dự án CDK, construct được tạo ra cũng bao gồm một migration handler chạy các migration cơ sở dữ liệu của bạn sau khi cluster được tạo, giúp bạn khởi tạo schema như một phần của quá trình triển khai.
+
+## Cách sử dụng
+
+### Tạo một Cơ sở dữ liệu Quan hệ
+
+Bạn có thể tạo một dự án cơ sở dữ liệu quan hệ mới theo hai cách:
+
+
+
+### Tùy chọn
+
+
+
+## Kết quả từ Generator
+
+Generator sẽ tạo cấu trúc dự án sau trong thư mục `/`:
+
+
+ - lib
+ - prisma.ts Thiết lập Prisma client để kết nối với cơ sở dữ liệu
+ - prisma
+ - schema.prisma Schema Prisma và model mẫu
+ - src
+ - index.ts Điểm vào của dự án
+ - migration-handler.ts Lambda handler được sử dụng để chạy migration cơ sở dữ liệu trong quá trình triển khai
+ - Dockerfile Định nghĩa container image cho migration handler
+ - project.json Cấu hình dự án và các build target
+ - prisma.config.ts Cấu hình cho Prisma CLI
+ - tsconfig.json Cấu hình TypeScript cơ bản cho source và test
+ - tsconfig.lib.json Cấu hình TypeScript cho source code
+ - tsconfig.spec.json Cấu hình TypeScript cho test
+ - eslint.config.mjs Cấu hình cho ESLint
+
+
+### Cơ sở hạ tầng
+
+
+
+
+
+
+ - packages/common/constructs/src
+ - app
+ - dbs
+ - \.ts Cơ sở hạ tầng dành riêng cho cơ sở dữ liệu của bạn
+ - core
+ - rdb
+ - aurora.ts Construct cơ sở dữ liệu Aurora chung
+
+
+
+
+ - packages/common/terraform/src
+ - app
+ - dbs
+ - \
+ - \.tf Module dành riêng cho cơ sở dữ liệu của bạn
+ - core
+ - rdb
+ - aurora
+ - aurora.tf Module Aurora chung
+
+
+
+
+## Xây dựng Cơ sở dữ liệu của bạn
+
+### Mô hình hóa Dữ liệu
+
+Dự án được tạo ra sử dụng [Prisma ORM](https://www.prisma.io/docs/orm) để định nghĩa schema cơ sở dữ liệu và tạo client an toàn về kiểu. Bạn có thể mô hình hóa các bảng, quan hệ và kiểu trường của mình trong `prisma/schema.prisma`, sau đó sử dụng Prisma để tạo client và tạo migration cho cơ sở dữ liệu của bạn.
+
+Để biết thêm chi tiết về cách định nghĩa model với Prisma, tham khảo hướng dẫn chính thức về [data modeling với Prisma ORM](https://www.prisma.io/docs/orm/core-concepts/data-modeling#data-modeling-with-prisma-orm).
+
+Ví dụ, một model `User` sẽ được biểu diễn như sau trong schema Prisma:
+
+```ts title="packages/postgres/prisma/schema.prisma"
+model User {
+ id Int @id @default(autoincrement())
+ firstName String
+ lastName String
+}
+```
+
+### Migration Dữ liệu
+
+Khi tạo các file migration Prisma, Prisma Migrate yêu cầu truy cập vào một shadow database. Điều này được sử dụng trong quá trình tạo migration để so sánh schema Prisma của bạn với trạng thái hiện tại của cơ sở dữ liệu và xác định các thay đổi SQL cần được tạo.
+
+Để biết thêm chi tiết về lý do tại sao điều này được yêu cầu và Prisma sử dụng nó như thế nào, tham khảo tài liệu Prisma về [shadow databases](https://www.prisma.io/docs/orm/prisma-migrate/understanding-prisma-migrate/shadow-database).
+
+:::note[Chạy Prisma từ Thư mục Package]
+Khi sử dụng Prisma CLI, hãy chạy các lệnh từ thư mục package cơ sở dữ liệu được tạo của bạn, ví dụ `packages/postgres`, để Prisma có thể tìm thấy `prisma/schema.prisma` và `prisma.config.ts`.
+:::
+
+Để tạo migration cục bộ, bạn có thể trỏ Prisma đến một instance cơ sở dữ liệu cục bộ. Đầu tiên export `DATABASE_URL` để Prisma có thể kết nối với cơ sở dữ liệu của bạn:
+
+```bash
+export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/myapp"
+```
+
+Sau đó tạo các file migration mà không áp dụng chúng:
+
+
+
+
+```bash
+npx prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+pnpm prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+yarn prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate dev --create-only
+```
+
+
+
+
+Chạy `prisma migrate dev --create-only` sẽ giữ cho thư mục `prisma/migrations` được cập nhật bằng cách tạo một thư mục migration mới mỗi khi các thay đổi schema của bạn yêu cầu một migration mới.
+
+Thư mục Prisma của bạn sau đó sẽ trông giống như thế này:
+
+
+ - prisma
+ - migrations
+ - 20260405013911_initial_migrations
+ - migration.sql
+ - migration_lock.toml
+ - schema.prisma
+
+
+Để áp dụng các migration này vào cơ sở dữ liệu cục bộ mà `DATABASE_URL` đang trỏ đến, bạn có thể chạy:
+
+
+
+
+```bash
+npx prisma migrate deploy
+```
+
+
+
+
+```bash
+pnpm prisma migrate deploy
+```
+
+
+
+
+```bash
+yarn prisma migrate deploy
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate deploy
+```
+
+
+
+
+:::note[Migration AWS Tự động]
+Đối với các triển khai lên AWS, bạn không cần chạy `prisma migrate deploy` thủ công. Migration handler được tạo ra sẽ tự động áp dụng các migration của bạn trong quá trình triển khai, miễn là thư mục `prisma/migrations` được cập nhật.
+:::
+
+
+### Database Client được Tạo ra
+
+Generator tự động cấu hình target `generate` để tạo Prisma client TypeScript an toàn về kiểu bất cứ khi nào bạn build dự án.
+
+Client được tạo ra có cấu trúc như sau:
+
+
+ - generated
+ - prisma
+ - internal
+ - models
+ - browser.ts
+ - client.ts
+ - commonInputTypes.ts
+ - enums.ts
+ - models.ts
+
+
+Client được tạo ra được sử dụng thông qua `lib/prisma.ts`, cấu hình adapter Prisma phù hợp và export một instance `prisma` client sẵn sàng sử dụng:
+
+```ts title="packages/postgres/lib/prisma.ts"
+import { PrismaPg } from '@prisma/adapter-pg';
+import { PrismaClient } from '../generated/prisma/client.js';
+
+const adapter = new PrismaPg({
+ connectionString: `${process.env.DATABASE_URL}`,
+});
+const prisma = new PrismaClient({ adapter });
+
+export { prisma };
+```
+
+Sau đó bạn có thể sử dụng `prisma` client được export trong các dự án khác trong workspace của bạn, chẳng hạn như một API `tRPC`:
+
+```ts title="packages/api/src/procedures/list-user.ts"
+import { prisma } from ':my-scope/postgres';
+import { publicProcedure } from '../init.js';
+import {
+ ListUserOutputSchema,
+} from '../schema/index.js';
+
+export const listUser = publicProcedure
+ .output(ListUserOutputSchema)
+ .query(async () => {
+ const users = await prisma.user.findMany({
+ orderBy: { id: 'asc' },
+ });
+
+ return { users };
+ });
+```
+
+## Triển khai Cơ sở dữ liệu của bạn
+
+### Cơ sở hạ tầng
+
+Generator cơ sở dữ liệu quan hệ tạo infrastructure as code CDK hoặc Terraform dựa trên `iacProvider` bạn đã chọn. Bạn có thể sử dụng điều này để triển khai cơ sở dữ liệu của mình.
+
+
+
+Construct CDK để triển khai cơ sở dữ liệu của bạn được tạo trong thư mục `common/constructs`. Bạn có thể sử dụng nó trong một ứng dụng CDK, ví dụ:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { Postgres } from ':my-scope/common-constructs';
+
+export class ApplicationStack extends Stack {
+ constructor(scope: Construct, id: string, props?: StackProps) {
+ super(scope, id, props);
+
+ const vpc = new Vpc(this, 'Vpc', {
+ subnetConfiguration: [
+ {
+ name: 'private_isolated',
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ ],
+ });
+
+ const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ }
+ });
+ }
+}
+```
+
+Điều này cung cấp một Aurora cluster trong VPC của bạn, cùng với credentials secret được tạo ra và migration handler. Cơ sở dữ liệu thường nên được triển khai vào các private isolated subnet để không thể truy cập trực tiếp từ internet công cộng.
+
+Nếu bạn cần tùy chỉnh topology của cluster, bạn có thể ghi đè các thuộc tính Aurora cluster như `writer` và `readers` khi khởi tạo construct.
+
+
+Module Terraform để triển khai cơ sở dữ liệu của bạn được tạo trong thư mục `common/terraform`. Bạn có thể sử dụng nó trong một cấu hình Terraform:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ tags = local.common_tags
+}
+```
+
+Điều này cung cấp một Aurora cluster, credentials secret được tạo ra, Lambda function migration và các tài nguyên hỗ trợ cần thiết để publish và chạy migration container image.
+
+Nếu bạn cần tùy chỉnh topology hoặc sizing của cluster, bạn có thể ghi đè các input của module như `instance_count`, `serverless_min_capacity`, `serverless_max_capacity`, `engine_version` và `port`.
+
+
+
+### Cấp quyền Truy cập
+
+Để cho phép API của bạn kết nối với cơ sở dữ liệu, hãy triển khai API vào cùng VPC với cơ sở dữ liệu. Sau đó bạn có thể cung cấp connection string cơ sở dữ liệu cho API và cho phép các API handler kết nối với cổng cơ sở dữ liệu.
+
+:::note[Biến môi trường Prisma]
+Khi sử dụng PostgreSQL với Prisma, client được tạo ra mong đợi `DATABASE_URL`.
+
+Khi sử dụng MySQL với Prisma, client được tạo ra mong đợi `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD` và `DATABASE_NAME`, như được cấu hình trong `lib/prisma.ts`.
+:::
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const api = new Api(this, 'Api', {
+ integrations: Api.defaultIntegrations(this)
+ .withDefaultOptions({
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ environment: {
+ DATABASE_URL: db.databaseUrl,
+ },
+ })
+ .build(),
+});
+
+Object.entries(api.integrations).forEach(([operation, integration]) => {
+ db.allowDefaultPortFrom(
+ integration.handler,
+ `Allow ${operation} to connect to the database`,
+ );
+});
+```
+
+Điều này đảm bảo rằng mỗi Lambda function API chạy bên trong cùng VPC với cơ sở dữ liệu và được phép rõ ràng kết nối với cơ sở dữ liệu trên cổng mặc định của nó.
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "api" {
+ source = "../../common/terraform/src/app/apis/api"
+
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ env = {
+ DATABASE_URL = module.postgres.database_url
+ }
+
+ tags = local.common_tags
+}
+
+resource "aws_vpc_security_group_ingress_rule" "api_to_postgres" {
+ security_group_id = module.postgres.security_group_id
+ referenced_security_group_id = module.api.default_security_group_id
+ from_port = module.postgres.cluster_port
+ to_port = module.postgres.cluster_port
+ ip_protocol = "tcp"
+}
+```
+
+Điều này cấu hình API chạy trong cùng VPC với cơ sở dữ liệu, truyền `DATABASE_URL` được tạo ra cho API và cho phép rõ ràng lưu lượng từ security group API đến security group cơ sở dữ liệu trên cổng cơ sở dữ liệu.
+
+
+
+## Tùy chỉnh Kiến trúc Cơ sở dữ liệu của bạn
+
+Construct cơ sở dữ liệu được tạo ra expose cấu hình Aurora bên dưới để bạn có thể điều chỉnh nó theo workload của mình.
+
+### Ghi đè Writer
+
+
+
+Trong CDK, bạn có thể ghi đè writer instance mặc định nếu bạn muốn thay đổi cách Aurora instance chính được cung cấp:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ writer: ClusterInstance.serverlessV2('writer'),
+});
+```
+
+
+Trong Terraform, các Aurora instance được quản lý thông qua các input của module thay vì các định nghĩa `writer` và `readers` riêng biệt. Module được tạo ra luôn cung cấp primary instance cho bạn, và bạn tùy chỉnh cluster xung quanh nó bằng cách sử dụng các input như `instance_count`, `engine_version` và các thiết lập serverless capacity:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 1
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Thêm Reader
+
+
+
+Nếu workload của bạn cần read scaling, bạn có thể thêm reader instance trong CDK bằng thuộc tính `readers`:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ readers: [ClusterInstance.serverlessV2('reader')],
+});
+```
+
+
+Trong Terraform, điều khiển tương đương là `instance_count`, xác định số lượng Aurora instance được tạo:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 2
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Cấu hình Serverless Capacity
+
+Bạn có thể kiểm soát giới hạn scaling của Aurora Serverless v2 để phù hợp với workload của mình.
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ serverlessV2MinCapacity: 0.5,
+ serverlessV2MaxCapacity: 8,
+});
+```
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Cố định Engine Version
+
+Nếu bạn muốn sử dụng một Aurora engine version cụ thể, bạn có thể cố định nó một cách rõ ràng thay vì dựa vào mặc định.
+
+
+
+Trong CDK, truyền `engineVersion` cho construct được tạo ra:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ engineVersion: AuroraPostgresEngineVersion.VER_17_7,
+});
+```
+
+
+Trong Terraform, sử dụng `engine_version`:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ engine_version = "17.7"
+
+ tags = local.common_tags
+}
+```
+
+
+
+### Chọn Subnet và Tùy chọn Lifecycle
+
+Bạn cũng có thể tùy chỉnh nơi cơ sở dữ liệu được triển khai và cách nó hoạt động trong quá trình xóa.
+
+
+
+Sử dụng `vpcSubnets` để kiểm soát subnet nào cơ sở dữ liệu được đặt vào, và các thuộc tính liên quan đến lifecycle như `deletionProtection` và `removalPolicy` để kiểm soát hành vi xóa:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { RemovalPolicy } from 'aws-cdk-lib';
+
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ deletionProtection: true,
+ removalPolicy: RemovalPolicy.SNAPSHOT,
+});
+```
+
+
+Sử dụng `subnet_ids` để kiểm soát nơi cơ sở dữ liệu và migration Lambda được triển khai.
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ deletion_protection = true
+ skip_final_snapshot = false
+ port = 5432
+
+ tags = local.common_tags
+}
+```
+
+Bạn có thể sử dụng `deletion_protection` để ngăn chặn xóa vô tình, `skip_final_snapshot` để kiểm soát hành vi snapshot khi xóa, và `port` để ghi đè cổng engine mặc định.
+
+
\ No newline at end of file
diff --git a/docs/src/content/docs/zh/guides/ts-rdb.mdx b/docs/src/content/docs/zh/guides/ts-rdb.mdx
new file mode 100644
index 000000000..57b7dde41
--- /dev/null
+++ b/docs/src/content/docs/zh/guides/ts-rdb.mdx
@@ -0,0 +1,558 @@
+---
+title: "关系型数据库"
+description: "创建关系型数据库项目"
+---
+
+import { FileTree, Tabs, TabItem } from '@astrojs/starlight/components';
+import RunGenerator from '@components/run-generator.astro';
+import GeneratorParameters from '@components/generator-parameters.astro';
+import Infrastructure from '@components/infrastructure.astro';
+import Snippet from '@components/snippet.astro';
+
+此生成器创建一个由 [Amazon Aurora](https://aws.amazon.com/rds/aurora/) 支持的新关系数据库项目。它生成使用 AWS CDK 或 Terraform 配置数据库所需的应用程序代码和基础设施,支持 Aurora PostgreSQL 和 Aurora MySQL。
+
+生成的基础设施包括一个可重用的 Aurora 构造以及一个特定于应用程序的数据库构造。对于 CDK 项目,生成的构造还包括一个迁移处理程序,在集群创建后运行数据库迁移,帮助您在部署过程中引导架构。
+
+## 用法
+
+### 生成关系数据库
+
+您可以通过两种方式生成新的关系数据库项目:
+
+
+
+### 选项
+
+
+
+## 生成器输出
+
+生成器将在 `<directory>/<name>` 目录中创建以下项目结构:
+
+
+ - lib
+ - prisma.ts 用于连接数据库的 Prisma 客户端设置
+ - prisma
+ - schema.prisma Prisma 架构和示例模型
+ - src
+ - index.ts 项目入口点
+ - migration-handler.ts 用于在部署期间运行数据库迁移的 Lambda 处理程序
+ - Dockerfile 迁移处理程序的容器镜像定义
+ - project.json 项目配置和构建目标
+ - prisma.config.ts Prisma CLI 的配置
+ - tsconfig.json 源代码和测试的基础 TypeScript 配置
+ - tsconfig.lib.json 源代码的 TypeScript 配置
+ - tsconfig.spec.json 测试的 TypeScript 配置
+ - eslint.config.mjs ESLint 的配置
+
+
+### 基础设施
+
+
+
+
+
+
+ - packages/common/constructs/src
+ - app
+ - dbs
+ - \<db-name\>.ts 特定于您的数据库的基础设施
+ - core
+ - rdb
+ - aurora.ts 通用 Aurora 数据库构造
+
+
+
+
+ - packages/common/terraform/src
+ - app
+ - dbs
+ - \<db-name\>
+ - \<db-name\>.tf 特定于您的数据库的模块
+ - core
+ - rdb
+ - aurora
+ - aurora.tf 通用 Aurora 模块
+
+
+
+
+## 实现您的数据库
+
+### 数据建模
+
+生成的项目使用 [Prisma ORM](https://www.prisma.io/docs/orm) 来定义数据库架构并生成类型安全的客户端。您可以在 `prisma/schema.prisma` 中对表、关系和字段类型进行建模,然后使用 Prisma 生成客户端并为数据库创建迁移。
+
+有关如何使用 Prisma 定义模型的更多详细信息,请参阅官方指南 [使用 Prisma ORM 进行数据建模](https://www.prisma.io/docs/orm/core-concepts/data-modeling#data-modeling-with-prisma-orm)。
+
+例如,`User` 模型在 Prisma 架构中的表示如下:
+
+```ts title="packages/postgres/prisma/schema.prisma"
+model User {
+ id Int @id @default(autoincrement())
+ firstName String
+ lastName String
+}
+```
+
+### 数据迁移
+
+在生成 Prisma 迁移文件时,Prisma Migrate 需要访问影子数据库。这在迁移生成期间用于将您的 Prisma 架构与数据库的当前状态进行比较,并确定需要创建的 SQL 更改。
+
+有关为什么需要这样做以及 Prisma 如何使用它的更多详细信息,请参阅 Prisma 文档中的[影子数据库](https://www.prisma.io/docs/orm/prisma-migrate/understanding-prisma-migrate/shadow-database)。
+
+:::note[从包目录运行 Prisma]
+使用 Prisma CLI 时,从生成的数据库包目录运行命令,例如 `packages/postgres`,这样 Prisma 可以找到 `prisma/schema.prisma` 和 `prisma.config.ts`。
+:::
+
+要在本地生成迁移,您可以将 Prisma 指向本地数据库实例。首先导出 `DATABASE_URL`,以便 Prisma 可以连接到您的数据库:
+
+```bash
+export DATABASE_URL="postgresql://postgres:postgres@localhost:5432/myapp"
+```
+
+然后创建迁移文件而不应用它们:
+
+
+
+
+```bash
+npx prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+pnpm prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+yarn prisma migrate dev --create-only
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate dev --create-only
+```
+
+
+
+
+运行 `prisma migrate dev --create-only` 将通过在每次架构更改需要新迁移时生成新的迁移文件夹来保持 `prisma/migrations` 目录的最新状态。
+
+您的 Prisma 目录将如下所示:
+
+
+ - prisma
+ - migrations
+ - 20260405013911_initial_migrations
+ - migration.sql
+ - migration_lock.toml
+ - schema.prisma
+
+
+要将这些迁移应用到 `DATABASE_URL` 指向的本地数据库,您可以运行:
+
+
+
+
+```bash
+npx prisma migrate deploy
+```
+
+
+
+
+```bash
+pnpm prisma migrate deploy
+```
+
+
+
+
+```bash
+yarn prisma migrate deploy
+```
+
+
+
+
+```bash
+bunx --bun prisma migrate deploy
+```
+
+
+
+
+:::note[自动 AWS 迁移]
+对于部署到 AWS,您无需手动运行 `prisma migrate deploy`。生成的迁移处理程序会在部署期间自动应用您的迁移,只要 `prisma/migrations` 目录是最新的。
+:::
+
+
+### 生成的数据库客户端
+
+生成器会自动配置 `generate` 目标,以便在构建项目时生成类型安全的 TypeScript Prisma 客户端。
+
+生成的客户端结构如下:
+
+
+ - generated
+ - prisma
+ - internal
+ - models
+ - browser.ts
+ - client.ts
+ - commonInputTypes.ts
+ - enums.ts
+ - models.ts
+
+
+生成的客户端通过 `lib/prisma.ts` 使用,该文件配置适当的 Prisma 适配器并导出一个即用的 `prisma` 客户端实例:
+
+```ts title="packages/postgres/lib/prisma.ts"
+import { PrismaPg } from '@prisma/adapter-pg';
+import { PrismaClient } from '../generated/prisma/client.js';
+
+const adapter = new PrismaPg({
+ connectionString: `${process.env.DATABASE_URL}`,
+});
+const prisma = new PrismaClient({ adapter });
+
+export { prisma };
+```
+
+然后,您可以在工作区中的其他项目中使用导出的 `prisma` 客户端,例如 `tRPC` API:
+
+```ts title="packages/api/src/procedures/list-user.ts"
+import { prisma } from ':my-scope/postgres';
+import { publicProcedure } from '../init.js';
+import {
+ ListUserOutputSchema,
+} from '../schema/index.js';
+
+export const listUser = publicProcedure
+ .output(ListUserOutputSchema)
+ .query(async () => {
+ const users = await prisma.user.findMany({
+ orderBy: { id: 'asc' },
+ });
+
+ return { users };
+ });
+```
+
+## 部署您的数据库
+
+### 基础设施
+
+关系数据库生成器根据您选择的 `iacProvider` 创建 CDK 或 Terraform 基础设施即代码。您可以使用它来部署数据库。
+
+
+
+用于部署数据库的 CDK 构造在 `common/constructs` 文件夹中创建。您可以在 CDK 应用程序中使用它,例如:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { Postgres } from ':my-scope/common-constructs';
+
+export class ApplicationStack extends Stack {
+ constructor(scope: Construct, id: string, props?: StackProps) {
+ super(scope, id, props);
+
+ const vpc = new Vpc(this, 'Vpc', {
+ subnetConfiguration: [
+ {
+ name: 'private_isolated',
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ ],
+ });
+
+ const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ }
+ });
+ }
+}
+```
+
+这会在您的 VPC 中配置一个 Aurora 集群,以及生成的凭证密钥和迁移处理程序。数据库通常应部署到私有隔离子网中,以便无法从公共互联网直接访问。
+
+如果您需要自定义集群拓扑,可以在实例化构造时覆盖 Aurora 集群属性,例如 `writer` 和 `readers`。
+
+
+用于部署数据库的 Terraform 模块在 `common/terraform` 文件夹中创建。您可以在 Terraform 配置中使用它:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ tags = local.common_tags
+}
+```
+
+这会配置一个 Aurora 集群、生成的凭证密钥、迁移 Lambda 函数以及发布和运行迁移容器镜像所需的支持资源。
+
+如果您需要自定义集群拓扑或大小,可以覆盖模块输入,例如 `instance_count`、`serverless_min_capacity`、`serverless_max_capacity`、`engine_version` 和 `port`。
+
+
+
+### 授予访问权限
+
+要允许您的 API 连接到数据库,请将 API 部署到与数据库相同的 VPC 中。然后,您可以向 API 提供数据库连接字符串,并允许 API 处理程序连接到数据库端口。
+
+:::note[Prisma 环境变量]
+当使用 PostgreSQL 与 Prisma 时,生成的客户端需要 `DATABASE_URL`。
+
+当使用 MySQL 与 Prisma 时,生成的客户端需要 `DATABASE_HOST`、`DATABASE_USER`、`DATABASE_PASSWORD` 和 `DATABASE_NAME`,如 `lib/prisma.ts` 中配置的那样。
+:::
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const api = new Api(this, 'Api', {
+ integrations: Api.defaultIntegrations(this)
+ .withDefaultOptions({
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ environment: {
+ DATABASE_URL: db.databaseUrl,
+ },
+ })
+ .build(),
+});
+
+Object.entries(api.integrations).forEach(([operation, integration]) => {
+ db.allowDefaultPortFrom(
+ integration.handler,
+ `Allow ${operation} to connect to the database`,
+ );
+});
+```
+
+这确保每个 API Lambda 函数在与数据库相同的 VPC 内运行,并明确允许连接到数据库的默认端口。
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "api" {
+ source = "../../common/terraform/src/app/apis/api"
+
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+
+ env = {
+ DATABASE_URL = module.postgres.database_url
+ }
+
+ tags = local.common_tags
+}
+
+resource "aws_vpc_security_group_ingress_rule" "api_to_postgres" {
+ security_group_id = module.postgres.security_group_id
+ referenced_security_group_id = module.api.default_security_group_id
+ from_port = module.postgres.cluster_port
+ to_port = module.postgres.cluster_port
+ ip_protocol = "tcp"
+}
+```
+
+这配置 API 在与数据库相同的 VPC 中运行,将生成的 `DATABASE_URL` 传递给 API,并明确允许从 API 安全组到数据库安全组的数据库端口流量。
+
+
+
+## 自定义您的数据库架构
+
+生成的数据库构造公开了底层 Aurora 配置,以便您可以使其适应您的工作负载。
+
+### 覆盖写入器
+
+
+
+在 CDK 中,如果您想更改主 Aurora 实例的配置方式,可以覆盖默认的写入器实例:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ writer: ClusterInstance.serverlessV2('writer'),
+});
+```
+
+
+在 Terraform 中,Aurora 实例通过模块输入进行管理,而不是单独的 `writer` 和 `readers` 定义。生成的模块始终为您配置主实例,您可以使用输入(例如 `instance_count`、`engine_version` 和无服务器容量设置)来自定义集群:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 1
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### 添加读取器
+
+
+
+如果您的工作负载需要读取扩展,可以在 CDK 中使用 `readers` 属性添加读取器实例:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ readers: [ClusterInstance.serverlessV2('reader')],
+});
+```
+
+
+在 Terraform 中,等效的控制是 `instance_count`,它确定创建多少个 Aurora 实例:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ instance_count = 2
+
+ tags = local.common_tags
+}
+```
+
+
+
+### 配置无服务器容量
+
+您可以控制 Aurora Serverless v2 扩展限制以匹配您的工作负载。
+
+
+
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ serverlessV2MinCapacity: 0.5,
+ serverlessV2MaxCapacity: 8,
+});
+```
+
+
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ serverless_min_capacity = 0.5
+ serverless_max_capacity = 8
+
+ tags = local.common_tags
+}
+```
+
+
+
+### 固定引擎版本
+
+如果您想使用特定的 Aurora 引擎版本,可以明确固定它,而不是依赖默认版本。
+
+
+
+在 CDK 中,将 `engineVersion` 传递给生成的构造:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ engineVersion: AuroraPostgresEngineVersion.VER_17_7,
+});
+```
+
+
+在 Terraform 中,使用 `engine_version`:
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ engine_version = "17.7"
+
+ tags = local.common_tags
+}
+```
+
+
+
+### 选择子网和生命周期选项
+
+您还可以自定义数据库的部署位置以及删除时的行为方式。
+
+
+
+使用 `vpcSubnets` 控制数据库放置到哪些子网中,使用与生命周期相关的属性(例如 `deletionProtection` 和 `removalPolicy`)来控制删除行为:
+
+```ts title="packages/infra/src/stacks/application-stack.ts"
+import { RemovalPolicy } from 'aws-cdk-lib';
+
+const db = new Postgres(this, 'Db', {
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ deletionProtection: true,
+ removalPolicy: RemovalPolicy.SNAPSHOT,
+});
+```
+
+
+使用 `subnet_ids` 控制数据库和迁移 Lambda 的部署位置。
+
+```hcl title="packages/infra/src/main.tf"
+module "postgres" {
+ source = "../../common/terraform/src/app/dbs/postgres"
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_isolated_subnet_ids
+ deletion_protection = true
+ skip_final_snapshot = false
+ port = 5432
+
+ tags = local.common_tags
+}
+```
+
+您可以使用 `deletion_protection` 来防止意外删除,使用 `skip_final_snapshot` 来控制删除时的快照行为,使用 `port` 来覆盖默认引擎端口。
+
+
\ No newline at end of file
diff --git a/docs/src/i18n/schema-translations.json b/docs/src/i18n/schema-translations.json
index 6963d5300..87e6689b9 100644
--- a/docs/src/i18n/schema-translations.json
+++ b/docs/src/i18n/schema-translations.json
@@ -1363,5 +1363,95 @@
"zh": "Strands Agent 组件名称",
"vi": "Tên của Strands Agent component"
}
+ },
+ "ts#rdb": {
+ "name": {
+ "en": "Name of the database project to generate",
+ "es": "Nombre del proyecto de base de datos a generar",
+ "jp": "生成するデータベースプロジェクトの名前",
+ "ko": "생성할 데이터베이스 프로젝트의 이름",
+ "fr": "Nom du projet de base de données à générer",
+ "it": "Nome del progetto database da generare",
+ "pt": "Nome do projeto de banco de dados a ser gerado",
+ "zh": "要生成的数据库项目名称",
+ "vi": "Tên của dự án cơ sở dữ liệu cần tạo"
+ },
+ "directory": {
+ "en": "The directory to store the application in.",
+ "es": "El directorio donde almacenar la aplicación.",
+ "jp": "アプリケーションを保存するディレクトリ",
+ "ko": "애플리케이션을 저장할 디렉토리",
+ "fr": "Le répertoire dans lequel stocker l'application.",
+ "it": "La directory in cui memorizzare l'applicazione.",
+ "pt": "O diretório onde armazenar a aplicação.",
+ "zh": "存储应用程序的目录。",
+ "vi": "Thư mục để lưu trữ ứng dụng."
+ },
+ "service": {
+ "en": "Relational database service to provision.",
+ "es": "Servicio de base de datos relacional a aprovisionar.",
+ "jp": "プロビジョニングするリレーショナルデータベースサービス",
+ "ko": "프로비저닝할 관계형 데이터베이스 서비스",
+ "fr": "Service de base de données relationnelle à provisionner.",
+      "it": "Servizio di database relazionale di cui effettuare il provisioning.",
+ "pt": "Serviço de banco de dados relacional a ser provisionado.",
+ "zh": "要配置的关系型数据库服务。",
+ "vi": "Dịch vụ cơ sở dữ liệu quan hệ cần cung cấp."
+ },
+ "engine": {
+ "en": "Database engine to use with the selected service.",
+ "es": "Motor de base de datos a utilizar con el servicio seleccionado.",
+ "jp": "選択したサービスで使用するデータベースエンジン",
+ "ko": "선택한 서비스와 함께 사용할 데이터베이스 엔진",
+ "fr": "Moteur de base de données à utiliser avec le service sélectionné.",
+ "it": "Motore di database da utilizzare con il servizio selezionato.",
+ "pt": "Motor de banco de dados a ser usado com o serviço selecionado.",
+ "zh": "与所选服务一起使用的数据库引擎。",
+ "vi": "Database engine sử dụng với dịch vụ đã chọn."
+ },
+ "databaseUser": {
+ "en": "Database admin username.",
+ "es": "Nombre de usuario administrador de la base de datos.",
+ "jp": "データベース管理者のユーザー名",
+ "ko": "데이터베이스 관리자 사용자 이름",
+ "fr": "Nom d'utilisateur administrateur de la base de données.",
+ "it": "Nome utente amministratore del database.",
+ "pt": "Nome de usuário do administrador do banco de dados.",
+ "zh": "数据库管理员用户名。",
+ "vi": "Tên người dùng quản trị cơ sở dữ liệu."
+ },
+ "databaseName": {
+ "en": "Initial database name.",
+ "es": "Nombre de la base de datos inicial.",
+ "jp": "初期データベース名",
+ "ko": "초기 데이터베이스 이름",
+ "fr": "Nom de la base de données initiale.",
+ "it": "Nome del database iniziale.",
+ "pt": "Nome inicial do banco de dados.",
+ "zh": "初始数据库名称。",
+ "vi": "Tên cơ sở dữ liệu ban đầu."
+ },
+ "ormFramework": {
+ "en": "ORM framework to use for the generated project.",
+ "es": "Framework ORM a utilizar para el proyecto generado.",
+ "jp": "生成されるプロジェクトで使用するORMフレームワーク",
+ "ko": "생성된 프로젝트에 사용할 ORM 프레임워크",
+ "fr": "Framework ORM à utiliser pour le projet généré.",
+ "it": "Framework ORM da utilizzare per il progetto generato.",
+ "pt": "Framework ORM a ser usado para o projeto gerado.",
+ "zh": "用于生成项目的 ORM 框架。",
+ "vi": "ORM framework sử dụng cho dự án được tạo."
+ },
+ "iacProvider": {
+ "en": "The preferred IaC provider. By default this is inherited from your initial selection.",
+ "es": "El proveedor IaC preferido. Por defecto se hereda de tu selección inicial.",
+ "jp": "優先するIaCプロバイダー。デフォルトでは初期選択から継承されます",
+ "ko": "선호하는 IaC 공급자. 기본적으로 초기 선택에서 상속됩니다.",
+ "fr": "Le fournisseur IaC préféré. Par défaut, celui-ci est hérité de votre sélection initiale.",
+ "it": "Il provider IaC preferito. Per impostazione predefinita viene ereditato dalla selezione iniziale.",
+ "pt": "O provedor IaC preferido. Por padrão, este é herdado da sua seleção inicial.",
+ "zh": "首选的 IaC 提供商。默认情况下,这将继承自您的初始选择。",
+ "vi": "Nhà cung cấp IaC ưu tiên. Mặc định được kế thừa từ lựa chọn ban đầu của bạn."
+ }
}
}
diff --git a/packages/nx-plugin/README.md b/packages/nx-plugin/README.md
index 43494f6d4..d4a459282 100644
--- a/packages/nx-plugin/README.md
+++ b/packages/nx-plugin/README.md
@@ -63,6 +63,7 @@
- `ts#mcp-server` - Add a [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) server to a TypeScript project.
- `ts#strands-agent` - Add a [Strands Agent](https://strandsagents.com/) to a TypeScript project.
- `ts#lambda-function` - Generate a TypeScript lambda function with optional type-safe event sources.
+- `ts#rdb` - Generate a TypeScript relational database project with Aurora and [Prisma](https://www.prisma.io/docs) support.
- `terraform#project` - Generate a new Terraform project.
- `py#project` - Generate a uv based Python project.
- `py#fast-api` - Generate a FastAPI backend service with [AWS Powertools](https://github.com/aws-powertools/powertools-lambda-python) pre-configured.
diff --git a/packages/nx-plugin/generators.json b/packages/nx-plugin/generators.json
index a5752ce91..8e8ec5f4d 100644
--- a/packages/nx-plugin/generators.json
+++ b/packages/nx-plugin/generators.json
@@ -232,6 +232,12 @@
"description": "provides React integration to a React website",
"metric": "g10",
"hidden": true
+ },
+ "ts#rdb": {
+ "factory": "./src/ts/rdb/generator",
+ "schema": "./src/ts/rdb/schema.json",
+ "description": "Create a relational database project",
+ "metric": "g34"
}
}
}
diff --git a/packages/nx-plugin/src/preset/__snapshots__/generator.spec.ts.snap b/packages/nx-plugin/src/preset/__snapshots__/generator.spec.ts.snap
index 04ec0e469..8bf29e90e 100644
--- a/packages/nx-plugin/src/preset/__snapshots__/generator.spec.ts.snap
+++ b/packages/nx-plugin/src/preset/__snapshots__/generator.spec.ts.snap
@@ -68,6 +68,8 @@ The following list of generators are what is currently available in the \`@aws/n
- **ts#trpc-api**: creates a trpc backend
+- **ts#rdb**: Create a relational database project
+
You also have the option of using additional [commmunity plugins](https://nx.dev/plugin-registry) as needed.
## Invoking a generator
diff --git a/packages/nx-plugin/src/ts/rdb/files/Dockerfile.template b/packages/nx-plugin/src/ts/rdb/files/Dockerfile.template
new file mode 100644
index 000000000..b2437fd27
--- /dev/null
+++ b/packages/nx-plugin/src/ts/rdb/files/Dockerfile.template
@@ -0,0 +1,12 @@
+
+FROM public.ecr.aws/lambda/nodejs:22
+
+WORKDIR ${LAMBDA_TASK_ROOT}
+
+RUN printf '%s' '{"private":true,"dependencies":{"prisma":"<%= prismaVersion %>"}}' > package.json && npm install --omit=dev
+
+COPY index.js ./index.js
+COPY prisma ./prisma
+COPY prisma.config.ts ./prisma.config.ts
+
+CMD ["index.handler"]
diff --git a/packages/nx-plugin/src/ts/rdb/files/lib/prisma.ts.template b/packages/nx-plugin/src/ts/rdb/files/lib/prisma.ts.template
new file mode 100644
index 000000000..bd44ca229
--- /dev/null
+++ b/packages/nx-plugin/src/ts/rdb/files/lib/prisma.ts.template
@@ -0,0 +1,17 @@
+import { <%= prismaAdapterClassName %> } from '<%= prismaAdapterPackage %>';
+import { PrismaClient } from '../generated/prisma/client.js';
+
+const adapter = new <%= prismaAdapterClassName %>({
+<%_ if (engine === 'MySQL') { _%>
+ host: process.env.DATABASE_HOST,
+ user: process.env.DATABASE_USER,
+ password: process.env.DATABASE_PASSWORD,
+ database: process.env.DATABASE_NAME,
+ connectionLimit: 5,
+<%_ } else { _%>
+ connectionString: `${process.env.DATABASE_URL}`,
+<%_ } _%>
+});
+const prisma = new PrismaClient({ adapter });
+
+export { prisma };
diff --git a/packages/nx-plugin/src/ts/rdb/files/prisma.config.ts.template b/packages/nx-plugin/src/ts/rdb/files/prisma.config.ts.template
new file mode 100644
index 000000000..1ee52cb2d
--- /dev/null
+++ b/packages/nx-plugin/src/ts/rdb/files/prisma.config.ts.template
@@ -0,0 +1,11 @@
+import { defineConfig } from 'prisma/config';
+
+export default defineConfig({
+ schema: 'prisma/schema.prisma',
+ migrations: {
+ path: 'prisma/migrations',
+ },
+ datasource: {
+ url: process.env['DATABASE_URL'],
+ },
+});
diff --git a/packages/nx-plugin/src/ts/rdb/files/prisma/schema.prisma.template b/packages/nx-plugin/src/ts/rdb/files/prisma/schema.prisma.template
new file mode 100644
index 000000000..06612366b
--- /dev/null
+++ b/packages/nx-plugin/src/ts/rdb/files/prisma/schema.prisma.template
@@ -0,0 +1,13 @@
+generator client {
+ provider = "prisma-client"
+ output = "../generated/prisma"
+}
+
+datasource db {
+ provider = "<%= engine === 'MySQL' ? 'mysql' : 'postgresql' %>"
+}
+
+model ExampleTable {
+ id Int @id @default(autoincrement())
+ title String
+}
diff --git a/packages/nx-plugin/src/ts/rdb/files/src/index.ts.template b/packages/nx-plugin/src/ts/rdb/files/src/index.ts.template
new file mode 100644
index 000000000..dd1d92d17
--- /dev/null
+++ b/packages/nx-plugin/src/ts/rdb/files/src/index.ts.template
@@ -0,0 +1 @@
+export { prisma } from '../lib/prisma.js';
diff --git a/packages/nx-plugin/src/ts/rdb/files/src/migration-handler.ts.template b/packages/nx-plugin/src/ts/rdb/files/src/migration-handler.ts.template
new file mode 100644
index 000000000..21576c3a7
--- /dev/null
+++ b/packages/nx-plugin/src/ts/rdb/files/src/migration-handler.ts.template
@@ -0,0 +1,37 @@
+import { execFile } from 'node:child_process';
+import path from 'node:path';
+
+export const handler = async () => {
+ const schemaPath = path.join(__dirname, 'prisma', 'schema.prisma');
+ const prismaCliPath = path.join(
+ __dirname,
+ 'node_modules',
+ 'prisma',
+ 'build',
+ 'index.js',
+ );
+
+  await new Promise<void>((resolve, reject) => {
+ execFile(
+ process.execPath,
+ [prismaCliPath, 'migrate', 'deploy', '--schema', schemaPath],
+ { cwd: __dirname },
+ (error, stdout, stderr) => {
+ if (stdout) {
+ console.log(stdout);
+ }
+
+ if (stderr) {
+ console.error(stderr);
+ }
+
+ if (error) {
+ reject(error);
+ return;
+ }
+
+ resolve();
+ },
+ );
+ });
+};
diff --git a/packages/nx-plugin/src/ts/rdb/generator.spec.ts b/packages/nx-plugin/src/ts/rdb/generator.spec.ts
new file mode 100644
index 000000000..164ccd071
--- /dev/null
+++ b/packages/nx-plugin/src/ts/rdb/generator.spec.ts
@@ -0,0 +1,255 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { joinPathFragments, Tree } from '@nx/devkit';
+import { tsRdbGenerator, TS_RDB_GENERATOR_INFO } from './generator';
+import { createTreeUsingTsSolutionSetup } from '../../utils/test';
+import { METRICS_ASPECT_FILE_PATH } from '../../utils/metrics';
+import { sharedConstructsGenerator } from '../../utils/shared-constructs';
+import { readProjectConfigurationUnqualified } from '../../utils/nx';
+
+const expectHasMetricTags = (tree: Tree, ...metrics: string[]) => {
+ const content = tree.read(METRICS_ASPECT_FILE_PATH, 'utf-8');
+ expect(content).toBeTruthy();
+
+ const tagsMatch = content!.match(
+ /const tags:\s*string\[\]\s*=\s*\[([^\]]*)\]/,
+ );
+ expect(tagsMatch).toBeTruthy();
+
+ const tagsContent = tagsMatch![1];
+ const tags = tagsContent
+ ? (tagsContent.match(/'([^']*)'/g)?.map((t) => t.slice(1, -1)) ?? [])
+ : [];
+
+ expect(tags).toEqual(expect.arrayContaining(metrics));
+};
+
+describe('ts#rdb generator', () => {
+ let tree: Tree;
+ beforeEach(() => {
+ tree = createTreeUsingTsSolutionSetup();
+ });
+
+ const defaultOptions = {
+ name: 'db',
+ directory: 'packages',
+ service: 'Aurora' as const,
+ engine: 'Postgres' as const,
+ databaseUser: 'databaseUser',
+ databaseName: 'databaseName',
+ ormFramework: 'Prisma' as const,
+ iacProvider: 'CDK' as const,
+ };
+
+ it('should generate the aurora shared construct', async () => {
+ await tsRdbGenerator(tree, defaultOptions);
+ const packageJson = JSON.parse(tree.read('package.json', 'utf-8') ?? '{}');
+ const projectConfig = readProjectConfigurationUnqualified(tree, '@proj/db');
+
+ expect(
+ tree.exists('packages/common/constructs/src/core/rdb/aurora.ts'),
+ ).toBeTruthy();
+ expect(
+ tree.exists('packages/common/constructs/src/app/dbs/db.ts'),
+ ).toBeTruthy();
+ expect(tree.exists('packages/db/lib/prisma.ts')).toBeTruthy();
+ expect(tree.exists('packages/db/prisma.config.ts')).toBeTruthy();
+ expect(tree.exists('packages/db/prisma/schema.prisma')).toBeTruthy();
+ expect(tree.exists('packages/db/src/index.ts')).toBeTruthy();
+ expect(tree.exists('packages/db/Dockerfile')).toBeTruthy();
+ expect(tree.exists('packages/db/tsconfig.lib.json')).toBeTruthy();
+ expect(tree.exists('packages/db/eslint.config.mjs')).toBeTruthy();
+ expect(tree.exists('packages/db/src/migration-handler.ts')).toBeTruthy();
+ expect(
+ tree.read('packages/common/constructs/src/core/rdb/aurora.ts', 'utf-8'),
+ ).toContain('export class Aurora');
+ expect(
+ tree.read('packages/common/constructs/src/app/dbs/db.ts', 'utf-8'),
+ ).toContain('export class Db');
+ expect(
+ tree.read('packages/common/constructs/src/app/dbs/db.ts', 'utf-8'),
+ ).toContain('const { writer, vpc, ...restProps } = props;');
+ expect(
+ tree.read('packages/common/constructs/src/app/dbs/db.ts', 'utf-8'),
+ ).toContain("databaseName: 'databaseName'");
+ expect(
+ tree.read('packages/common/constructs/src/app/dbs/db.ts', 'utf-8'),
+ ).toContain("databaseUser: 'databaseUser'");
+ expect(tree.read('packages/db/lib/prisma.ts', 'utf-8')).toContain(
+ "import { PrismaPg } from '@prisma/adapter-pg';",
+ );
+ expect(tree.read('packages/db/lib/prisma.ts', 'utf-8')).toContain(
+ 'connectionString: `${process.env.DATABASE_URL}`',
+ );
+ expect(tree.read('packages/db/prisma.config.ts', 'utf-8')).toContain(
+ "import { defineConfig } from 'prisma/config';",
+ );
+ expect(tree.read('packages/db/prisma/schema.prisma', 'utf-8')).toContain(
+ 'model ExampleTable',
+ );
+ expect(tree.read('packages/db/prisma/schema.prisma', 'utf-8')).toContain(
+ 'title String',
+ );
+ expect(tree.read('packages/db/src/index.ts', 'utf-8')?.trim()).toBe(
+ "export { prisma } from '../lib/prisma.js';",
+ );
+ expect(tree.read('packages/db/Dockerfile', 'utf-8')).toContain(
+ '"prisma":"7.6.0"',
+ );
+ expect(
+ JSON.parse(tree.read('packages/db/tsconfig.lib.json', 'utf-8') ?? '{}')
+ .include,
+ ).toEqual(['src/**/*.ts', 'lib/**/*.ts', 'generated/prisma/**/*.ts']);
+ expect(tree.read('packages/db/eslint.config.mjs', 'utf-8')).toContain(
+ "'**/generated/**'",
+ );
+ expect(tree.read('packages/db/eslint.config.mjs', 'utf-8')).toContain(
+ "'**/out-tsc'",
+ );
+ expect(
+ tree.read('packages/common/constructs/src/app/dbs/db.ts', 'utf-8'),
+ ).toContain('../../../../../../dist/packages/db/bundle/migration');
+ expect(
+ tree.read('packages/common/constructs/src/core/index.ts', 'utf-8'),
+ ).toContain('./rdb/aurora.js');
+ expect(
+ tree.read('packages/common/constructs/src/app/index.ts', 'utf-8'),
+ ).toContain('./dbs/index.js');
+ expect(
+ tree.read('packages/common/constructs/src/app/dbs/index.ts', 'utf-8'),
+ ).toContain('./db.js');
+ expect(tree.read('packages/db/rolldown.config.ts', 'utf-8')).toContain(
+ "input: 'src/migration-handler.ts'",
+ );
+ expect(tree.read('packages/db/rolldown.config.ts', 'utf-8')).toContain(
+ "file: '../../dist/packages/db/bundle/migration/index.js'",
+ );
+ expect(projectConfig.targets.bundle).toEqual({
+ cache: true,
+ outputs: ['{workspaceRoot}/dist/{projectRoot}/bundle'],
+ executor: 'nx:run-commands',
+ options: {
+ command: 'rolldown -c rolldown.config.ts',
+ cwd: '{projectRoot}',
+ },
+ dependsOn: ['compile', 'bundle-migration'],
+ });
+ expect(projectConfig.targets['bundle-migration']).toEqual({
+ cache: true,
+ executor: 'nx:run-commands',
+ outputs: ['{workspaceRoot}/dist/{projectRoot}/bundle/migration'],
+ options: {
+ commands: [
+ 'rimraf ../../dist/{projectRoot}/bundle/migration',
+ 'make-dir ../../dist/{projectRoot}/bundle/migration',
+ 'ncp prisma ../../dist/{projectRoot}/bundle/migration/prisma',
+ 'ncp prisma.config.ts ../../dist/{projectRoot}/bundle/migration/prisma.config.ts',
+ 'ncp Dockerfile ../../dist/{projectRoot}/bundle/migration/Dockerfile',
+ ],
+ cwd: '{projectRoot}',
+ parallel: false,
+ },
+ });
+ expect(projectConfig.targets.generate).toEqual({
+ executor: 'nx:run-commands',
+ outputs: ['{projectRoot}/generated/prisma'],
+ options: {
+ command: 'prisma generate',
+ cwd: '{projectRoot}',
+ },
+ });
+ expect(projectConfig.targets.build.dependsOn).toContain('bundle');
+ expect(projectConfig.targets.build.dependsOn).not.toContain(
+ 'bundle-migration',
+ );
+ expect(projectConfig.targets.compile.dependsOn).toContain('generate');
+ expect(packageJson.dependencies['@prisma/adapter-pg']).toBe('7.6.0');
+ expect(packageJson.dependencies['@prisma/client']).toBe('7.6.0');
+ expect(packageJson.dependencies.pg).toBe('8.20.0');
+ expect(packageJson.dependencies['@prisma/adapter-mariadb']).toBeUndefined();
+ expect(packageJson.devDependencies.prisma).toBe('7.6.0');
+ expect(packageJson.devDependencies.ncp).toBe('2.0.0');
+ expect(packageJson.devDependencies.rimraf).toBe('6.1.3');
+ expect(packageJson.devDependencies['make-dir-cli']).toBe('4.0.0');
+ });
+
+ it('should add mysql prisma dependencies when engine is MySQL', async () => {
+ await tsRdbGenerator(tree, {
+ ...defaultOptions,
+ engine: 'MySQL',
+ });
+ const packageJson = JSON.parse(tree.read('package.json', 'utf-8') ?? '{}');
+ const prismaFile = tree.read('packages/db/lib/prisma.ts', 'utf-8');
+ const prismaSchema = tree.read('packages/db/prisma/schema.prisma', 'utf-8');
+
+ expect(packageJson.dependencies['@prisma/adapter-mariadb']).toBe('7.6.0');
+ expect(packageJson.dependencies['@prisma/client']).toBe('7.6.0');
+ expect(packageJson.dependencies['@prisma/adapter-pg']).toBeUndefined();
+ expect(packageJson.dependencies.pg).toBeUndefined();
+ expect(packageJson.devDependencies.prisma).toBe('7.6.0');
+ expect(packageJson.devDependencies.ncp).toBe('2.0.0');
+ expect(packageJson.devDependencies.rimraf).toBe('6.1.3');
+ expect(packageJson.devDependencies['make-dir-cli']).toBe('4.0.0');
+ expect(prismaFile).toContain(
+ "import { PrismaMariaDb } from '@prisma/adapter-mariadb';",
+ );
+ expect(prismaFile).toContain('host: process.env.DATABASE_HOST');
+ expect(prismaFile).toContain('user: process.env.DATABASE_USER');
+ expect(prismaFile).toContain('password: process.env.DATABASE_PASSWORD');
+ expect(prismaFile).toContain('database: process.env.DATABASE_NAME');
+ expect(prismaFile).toContain('connectionLimit: 5');
+ expect(prismaSchema).toContain('provider = "mysql"');
+ });
+
+ it('should generate terraform modules when iacProvider is Terraform', async () => {
+ await tsRdbGenerator(tree, {
+ ...defaultOptions,
+ iacProvider: 'Terraform',
+ });
+
+ expect(
+ tree.exists('packages/common/terraform/src/core/rdb/aurora/aurora.tf'),
+ ).toBeTruthy();
+ expect(
+ tree.exists('packages/common/terraform/src/app/dbs/db/db.tf'),
+ ).toBeTruthy();
+ expect(
+ tree.read(
+ 'packages/common/terraform/src/app/dbs/db/db.tf',
+ 'utf-8',
+ ),
+ ).toContain('source = "../../../core/rdb/aurora"');
+ expect(
+ tree.read(
+ 'packages/common/terraform/src/app/dbs/db/db.tf',
+ 'utf-8',
+ ),
+ ).toContain('../../../../../../../dist/packages/db/bundle/migration');
+ });
+
+ it('should keep an existing aurora shared construct', async () => {
+ await sharedConstructsGenerator(tree, { iacProvider: 'CDK' });
+ tree.write(
+ 'packages/common/constructs/src/core/rdb/aurora.ts',
+ '// preserve custom aurora construct',
+ );
+
+ await tsRdbGenerator(tree, defaultOptions);
+
+ expect(
+ tree
+ .read('packages/common/constructs/src/core/rdb/aurora.ts', 'utf-8')
+ ?.trim(),
+ ).toBe('// preserve custom aurora construct');
+ });
+
+ it('should add generator metric to app.ts', async () => {
+ await sharedConstructsGenerator(tree, { iacProvider: 'CDK' });
+
+ await tsRdbGenerator(tree, defaultOptions);
+
+ expectHasMetricTags(tree, TS_RDB_GENERATOR_INFO.metric);
+ });
+});
diff --git a/packages/nx-plugin/src/ts/rdb/generator.ts b/packages/nx-plugin/src/ts/rdb/generator.ts
new file mode 100644
index 000000000..65432a9ad
--- /dev/null
+++ b/packages/nx-plugin/src/ts/rdb/generator.ts
@@ -0,0 +1,167 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import {
+ addDependenciesToPackageJson,
+ GeneratorCallback,
+ Tree,
+ generateFiles,
+ installPackagesTask,
+ joinPathFragments,
+ readProjectConfiguration,
+ updateJson,
+ updateProjectConfiguration,
+} from '@nx/devkit';
+import { TsRdbGeneratorSchema } from './schema';
+import {
+ addDependencyToTargetIfNotPresent,
+ NxGeneratorInfo,
+ getGeneratorInfo,
+} from '../../utils/nx';
+import { addGeneratorMetricsIfApplicable } from '../../utils/metrics';
+import { formatFilesInSubtree } from '../../utils/format';
+import { sharedConstructsGenerator } from '../../utils/shared-constructs';
+import { addRdbInfra } from '../../utils/rdb-constructs/rdb-constructs';
+import { toClassName, toKebabCase } from '../../utils/names';
+import { FsCommands } from '../../utils/fs';
+import { getRelativePathToRootByDirectory } from '../../utils/paths';
+import tsProjectGenerator, { getTsLibDetails } from '../lib/generator';
+import { addIgnoresToEslintConfig } from '../lib/eslint';
+import { addTypeScriptBundleTarget } from '../../utils/bundle/bundle';
+import { TS_VERSIONS, withVersions } from '../../utils/versions';
+import { resolveIacProvider } from '../../utils/iac';
+
+export const TS_RDB_GENERATOR_INFO: NxGeneratorInfo =
+ getGeneratorInfo(__filename);
+
+export const tsRdbGenerator = async (
+ tree: Tree,
+ options: TsRdbGeneratorSchema,
+): Promise<GeneratorCallback> => {
+ const nameKebabCase = toKebabCase(options.name) ?? options.name;
+ const iacProvider = await resolveIacProvider(tree, options.iacProvider);
+ const { fullyQualifiedName, dir } = getTsLibDetails(tree, {
+ name: options.name,
+ directory: options.directory,
+ });
+
+ await tsProjectGenerator(tree, {
+ name: options.name,
+ directory: options.directory,
+ });
+
+ updateJson(tree, joinPathFragments(dir, 'tsconfig.lib.json'), (tsConfig) => ({
+ ...tsConfig,
+ include: ['src/**/*.ts', 'lib/**/*.ts', 'generated/prisma/**/*.ts'],
+ }));
+ await addIgnoresToEslintConfig(
+ tree,
+ joinPathFragments(dir, 'eslint.config.mjs'),
+ ['**/generated/**', '**/out-tsc'],
+ );
+
+ const templateOptions = {
+ engine: options.engine,
+ prismaVersion: TS_VERSIONS.prisma,
+ prismaAdapterPackage:
+ options.engine === 'MySQL'
+ ? '@prisma/adapter-mariadb'
+ : '@prisma/adapter-pg',
+ prismaAdapterClassName:
+ options.engine === 'MySQL' ? 'PrismaMariaDb' : 'PrismaPg',
+ };
+
+ generateFiles(
+ tree,
+ joinPathFragments(__dirname, 'files'),
+ dir,
+ templateOptions,
+ );
+
+ const projectConfig = readProjectConfiguration(tree, fullyQualifiedName);
+ const relativePathToRoot = getRelativePathToRootByDirectory(
+ projectConfig.root,
+ );
+ const fs = new FsCommands(tree);
+ await addTypeScriptBundleTarget(tree, projectConfig, {
+ targetFilePath: 'src/migration-handler.ts',
+ bundleOutputDir: 'migration',
+ });
+ projectConfig.targets['bundle-migration'] = {
+ cache: true,
+ executor: 'nx:run-commands',
+ outputs: ['{workspaceRoot}/dist/{projectRoot}/bundle/migration'],
+ options: {
+ commands: [
+ fs.rm(`${relativePathToRoot}dist/{projectRoot}/bundle/migration`),
+ fs.mkdir(`${relativePathToRoot}dist/{projectRoot}/bundle/migration`),
+ fs.cp(
+ 'prisma',
+ `${relativePathToRoot}dist/{projectRoot}/bundle/migration/prisma`,
+ ),
+ fs.cp(
+ 'prisma.config.ts',
+ `${relativePathToRoot}dist/{projectRoot}/bundle/migration/prisma.config.ts`,
+ ),
+ fs.cp(
+ 'Dockerfile',
+ `${relativePathToRoot}dist/{projectRoot}/bundle/migration/Dockerfile`,
+ ),
+ ],
+ cwd: '{projectRoot}',
+ parallel: false,
+ },
+ };
+ addDependencyToTargetIfNotPresent(
+ projectConfig,
+ 'bundle',
+ 'bundle-migration',
+ );
+ projectConfig.targets.generate = {
+ executor: 'nx:run-commands',
+ outputs: ['{projectRoot}/generated/prisma'],
+ options: {
+ command: 'prisma generate',
+ cwd: '{projectRoot}',
+ },
+ };
+ addDependencyToTargetIfNotPresent(projectConfig, 'compile', 'generate');
+ updateProjectConfiguration(tree, fullyQualifiedName, projectConfig);
+
+ await sharedConstructsGenerator(tree, { iacProvider });
+ await addRdbInfra(tree, {
+ iacProvider,
+ nameClassName: toClassName(options.name),
+ nameKebabCase,
+ databaseName: options.databaseName,
+ databaseUser: options.databaseUser,
+ engine: options.engine === 'MySQL' ? 'mysql' : 'postgres',
+ migrationBundlePathFromRoot: joinPathFragments(
+ 'dist',
+ projectConfig.root,
+ 'bundle',
+ 'migration',
+ ),
+ });
+
+ const runtimeDependencies =
+ options.engine === 'MySQL'
+ ? withVersions(['@prisma/client', '@prisma/adapter-mariadb'])
+ : withVersions(['@prisma/client', '@prisma/adapter-pg', 'pg']);
+
+ addDependenciesToPackageJson(
+ tree,
+ runtimeDependencies,
+ withVersions(['prisma']),
+ );
+
+ await addGeneratorMetricsIfApplicable(tree, [TS_RDB_GENERATOR_INFO]);
+
+ await formatFilesInSubtree(tree);
+ return () => {
+ installPackagesTask(tree);
+ };
+};
+
+export default tsRdbGenerator;
diff --git a/packages/nx-plugin/src/ts/rdb/schema.d.ts b/packages/nx-plugin/src/ts/rdb/schema.d.ts
new file mode 100644
index 000000000..71b456fe0
--- /dev/null
+++ b/packages/nx-plugin/src/ts/rdb/schema.d.ts
@@ -0,0 +1,16 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { IacProviderOption } from '../../utils/iac';
+
+export interface TsRdbGeneratorSchema {
+ name: string;
+ directory?: string;
+ service: 'Aurora';
+ engine: 'Postgres' | 'MySQL';
+ databaseUser: string;
+ databaseName: string;
+ ormFramework: 'Prisma';
+ iacProvider: IacProviderOption;
+}
diff --git a/packages/nx-plugin/src/ts/rdb/schema.json b/packages/nx-plugin/src/ts/rdb/schema.json
new file mode 100644
index 000000000..3edd496fe
--- /dev/null
+++ b/packages/nx-plugin/src/ts/rdb/schema.json
@@ -0,0 +1,78 @@
+{
+ "$schema": "https://json-schema.org/schema",
+ "$id": "ts#rdb",
+ "title": "ts#rdb",
+ "description": "Create a relational database project",
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "Name of the database project to generate",
+ "$default": {
+ "$source": "argv",
+ "index": 0
+ },
+ "x-priority": "important",
+ "x-prompt": "What name would you like your database project to have? i.e: MyDb"
+ },
+ "directory": {
+ "type": "string",
+ "description": "The directory to store the application in.",
+ "default": "packages",
+ "x-priority": "important",
+ "x-prompt": "Which directory do you want to create the project in?"
+ },
+ "service": {
+ "type": "string",
+ "enum": ["Aurora"],
+ "default": "Aurora",
+ "description": "Relational database service to provision.",
+ "x-priority": "important",
+ "x-prompt": "Which database service would you like to use?"
+ },
+ "engine": {
+ "type": "string",
+ "enum": ["Postgres", "MySQL"],
+ "default": "Postgres",
+ "description": "Database engine to use with the selected service.",
+ "x-priority": "important",
+ "x-prompt": "Which database engine would you like to use?"
+ },
+ "databaseUser": {
+ "type": "string",
+ "description": "Database admin username.",
+ "x-priority": "important",
+ "x-prompt": "What database username would you like to use?"
+ },
+ "databaseName": {
+ "type": "string",
+ "description": "Initial database name.",
+ "x-priority": "important",
+ "x-prompt": "What database name would you like to use?"
+ },
+ "ormFramework": {
+ "type": "string",
+ "enum": ["Prisma"],
+ "default": "Prisma",
+ "description": "ORM framework to use for the generated project.",
+ "x-priority": "important",
+ "x-prompt": "Which ORM framework would you like to use?"
+ },
+ "iacProvider": {
+ "type": "string",
+ "description": "The preferred IaC provider. By default this is inherited from your initial selection.",
+ "enum": ["Inherit", "CDK", "Terraform"],
+ "x-priority": "important",
+ "default": "Inherit",
+ "x-prompt": "Which provider would you like to manage your infrastructure? (default: Inherit)"
+ }
+ },
+ "required": [
+ "name",
+ "service",
+ "engine",
+ "databaseUser",
+ "databaseName",
+ "ormFramework"
+ ]
+}
diff --git a/packages/nx-plugin/src/utils/rdb-constructs/files/cdk/app/dbs/__nameKebabCase__.ts.template b/packages/nx-plugin/src/utils/rdb-constructs/files/cdk/app/dbs/__nameKebabCase__.ts.template
new file mode 100644
index 000000000..02ed1f288
--- /dev/null
+++ b/packages/nx-plugin/src/utils/rdb-constructs/files/cdk/app/dbs/__nameKebabCase__.ts.template
@@ -0,0 +1,77 @@
+import * as url from 'url';
+import { Duration } from 'aws-cdk-lib';
+import { Platform } from 'aws-cdk-lib/aws-ecr-assets';
+import {
+ Architecture,
+ DockerImageCode,
+ DockerImageFunction,
+ Tracing,
+} from 'aws-cdk-lib/aws-lambda';
+import { ClusterInstance } from 'aws-cdk-lib/aws-rds';
+import { Construct } from 'constructs';
+import {
+ AuroraDatabase,
+ AuroraDatabaseEngines,
+ AuroraDatabaseProps,
+} from '../../core/rdb/aurora.js';
+import { Trigger } from 'aws-cdk-lib/triggers';
+import { SubnetType } from 'aws-cdk-lib/aws-ec2';
+
+export type <%= nameClassName %>Props = Omit<
+ AuroraDatabaseProps,
+ 'databaseName' | 'databaseUser' | 'engine'
+>;
+
+/**
+ * CDK construct that provisions an Aurora Serverless v2 cluster.
+ */
+export class <%= nameClassName %> extends AuroraDatabase {
+ constructor(scope: Construct, id: string, props: <%= nameClassName %>Props) {
+ const { writer, vpc, ...restProps } = props;
+
+ super(scope, id, {
+ ...restProps,
+ vpc,
+ databaseName: '<%= databaseName %>',
+ databaseUser: '<%= databaseUser %>',
+ writer: writer ?? ClusterInstance.serverlessV2('writer'),
+ engine: AuroraDatabaseEngines.<%= engine %>({}),
+ });
+
+ const migrationHandler = new DockerImageFunction(this, 'MigrationHandler', {
+ code: DockerImageCode.fromImageAsset(
+ url.fileURLToPath(
+ new URL(
+ '../../../../../../<%- migrationBundlePathFromRoot %>',
+ import.meta.url,
+ ),
+ ),
+ {
+ platform: Platform.LINUX_ARM64,
+ },
+ ),
+ memorySize: 1024,
+ timeout: Duration.minutes(5),
+ tracing: Tracing.ACTIVE,
+ vpc,
+ vpcSubnets: {
+ subnetType: SubnetType.PRIVATE_ISOLATED,
+ },
+ environment: {
+ DATABASE_URL: this.databaseUrl,
+ },
+ architecture: Architecture.ARM_64,
+ });
+
+ this.allowDefaultPortFrom(
+ migrationHandler,
+ 'Allow the migration handler to connect to the database',
+ );
+
+ const trigger = new Trigger(this, 'MigrationTrigger', {
+ handler: migrationHandler,
+ });
+ // make sure migration is executed after the database cluster is available.
+ trigger.node.addDependency(this.cluster);
+ }
+}
diff --git a/packages/nx-plugin/src/utils/rdb-constructs/files/cdk/core/rdb/aurora.ts.template b/packages/nx-plugin/src/utils/rdb-constructs/files/cdk/core/rdb/aurora.ts.template
new file mode 100644
index 000000000..647c44fb4
--- /dev/null
+++ b/packages/nx-plugin/src/utils/rdb-constructs/files/cdk/core/rdb/aurora.ts.template
@@ -0,0 +1,199 @@
+import { Duration } from 'aws-cdk-lib';
+import { IConnectable, IVpc } from 'aws-cdk-lib/aws-ec2';
+import { IGrantable } from 'aws-cdk-lib/aws-iam';
+import { Key } from 'aws-cdk-lib/aws-kms';
+import {
+ AuroraMysqlEngineVersion,
+ AuroraPostgresEngineVersion,
+ Credentials,
+ DatabaseCluster,
+ DatabaseClusterEngine,
+ DatabaseClusterProps,
+ IClusterEngine,
+} from 'aws-cdk-lib/aws-rds';
+import { ISecret } from 'aws-cdk-lib/aws-secretsmanager';
+import { Construct } from 'constructs';
+import { suppressRules } from '../checkov.js';
+
+export type AuroraDatabaseEngineVersion =
+ | AuroraMysqlEngineVersion
+ | AuroraPostgresEngineVersion;
+
+export interface AuroraDatabaseEngine {
+ /**
+ * Default Aurora engine version used when one is not explicitly provided.
+ */
+ readonly defaultVersion: AuroraDatabaseEngineVersion;
+
+ /**
+ * Connection string scheme used when generating the database URL.
+ */
+ readonly databaseUrlScheme: string;
+
+ /**
+ * Default query parameters appended to the generated database URL.
+ */
+ readonly defaultDatabaseUrlParameters?: Readonly<Record<string, string>>;
+
+ /**
+ * Builds the CDK cluster engine for the provided engine version.
+ */
+ clusterEngine(version: AuroraDatabaseEngineVersion): IClusterEngine;
+}
+
+export class AuroraDatabaseEngines {
+ public static mysql({
+ defaultVersion = AuroraMysqlEngineVersion.VER_3_12_0,
+ }: {
+ defaultVersion?: AuroraMysqlEngineVersion;
+ }): AuroraDatabaseEngine {
+ return {
+ defaultVersion,
+ databaseUrlScheme: 'mysql',
+ clusterEngine: (version) =>
+ DatabaseClusterEngine.auroraMysql({
+ version: version as AuroraMysqlEngineVersion,
+ }),
+ };
+ }
+
+ public static postgres({
+ defaultVersion = AuroraPostgresEngineVersion.VER_17_7,
+ }: {
+ defaultVersion?: AuroraPostgresEngineVersion;
+ }): AuroraDatabaseEngine {
+ return {
+ defaultVersion,
+ databaseUrlScheme: 'postgresql',
+ defaultDatabaseUrlParameters: {
+ pool_timeout: '20',
+ connect_timeout: '20',
+ sslmode: 'no-verify',
+ },
+ clusterEngine: (version) =>
+ DatabaseClusterEngine.auroraPostgres({
+ version: version as AuroraPostgresEngineVersion,
+ }),
+ };
+ }
+}
+
+export interface AuroraDatabaseProps extends Omit<
+ DatabaseClusterProps,
+ 'credentials' | 'defaultDatabaseName' | 'engine'
+> {
+ /**
+ * VPC where the Aurora cluster will be deployed.
+ */
+ readonly vpc: IVpc;
+
+ /**
+ * Aurora engine preset used to build the cluster.
+ */
+ readonly engine: AuroraDatabaseEngine;
+
+ /**
+ * The engine version to deploy.
+ *
+ * @default - engine.defaultVersion
+ */
+ readonly engineVersion?: AuroraDatabaseEngineVersion;
+
+ /**
+ * Admin username used when generating credentials.
+ */
+ readonly databaseUser: string;
+
+ /**
+ * The initial database created in the cluster.
+ */
+ readonly databaseName: string;
+
+ /**
+ * Additional query parameters appended to the generated database URL.
+ *
+ * Values override any engine defaults with the same key.
+ */
+ readonly databaseUrlQueryParameters?: Readonly<Record<string, string>>;
+}
+
+/**
+ * Reusable Aurora database construct that supports different Aurora engines
+ * through typed engine presets.
+ */
+export class AuroraDatabase extends Construct {
+ public readonly cluster: DatabaseCluster;
+ public readonly databaseHost: string;
+ public readonly databaseName: string;
+ public readonly databasePassword: string;
+ public readonly databaseUser: string;
+ public readonly databaseUrl: string;
+ public readonly secret: ISecret;
+
+ constructor(
+ scope: Construct,
+ id: string,
+ {
+ databaseUser,
+ databaseName,
+ databaseUrlQueryParameters,
+ engine,
+ engineVersion,
+ ...clusterProps
+ }: AuroraDatabaseProps,
+ ) {
+ super(scope, id);
+
+ const key = new Key(this, 'EncryptionKey', {
+ enableKeyRotation: true,
+ });
+ this.cluster = new DatabaseCluster(this, 'DatabaseCluster', {
+ ...clusterProps,
+ engine: engine.clusterEngine(engineVersion ?? engine.defaultVersion),
+ credentials: Credentials.fromGeneratedSecret(databaseUser, {
+ encryptionKey: key,
+ }),
+ monitoringInterval: Duration.seconds(5),
+ defaultDatabaseName: databaseName,
+ storageEncrypted: true,
+ storageEncryptionKey: key,
+ });
+
+ suppressRules(
+ this.cluster,
+ ['CKV_AWS_162'],
+ 'IAM authentication is disabled because ORM frameworks use Secrets Manager credentials',
+ );
+
+ // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+ this.secret = this.cluster.secret!;
+ this.databaseHost = this.cluster.clusterEndpoint.hostname;
+ this.databaseName = databaseName;
+ this.databasePassword =
+ this.secret.secretValueFromJson('password').unsafeUnwrap();
+ this.databaseUser = this.secret.secretValueFromJson('username').unsafeUnwrap();
+
+ const queryParameters = new URLSearchParams({
+ ...engine.defaultDatabaseUrlParameters,
+ ...databaseUrlQueryParameters,
+ });
+ const queryString = queryParameters.toString();
+
+ this.databaseUrl =
+ `${engine.databaseUrlScheme}://` +
+ `${this.databaseUser}` +
+ `:${this.databasePassword}` +
+ `@${this.databaseHost}` +
+ `:${this.cluster.clusterEndpoint.port}` +
+ `/${this.databaseName}` +
+ (queryString ? `?${queryString}` : '');
+ }
+
+ public allowDefaultPortFrom(other: IConnectable, description?: string): void {
+ this.cluster.connections.allowDefaultPortFrom(other, description);
+ }
+
+ public grantSecretRead(grantee: IGrantable) {
+ return this.secret.grantRead(grantee);
+ }
+}
diff --git a/packages/nx-plugin/src/utils/rdb-constructs/files/terraform/app/dbs/__nameKebabCase__/__nameKebabCase__.tf.template b/packages/nx-plugin/src/utils/rdb-constructs/files/terraform/app/dbs/__nameKebabCase__/__nameKebabCase__.tf.template
new file mode 100644
index 000000000..66adbc3d5
--- /dev/null
+++ b/packages/nx-plugin/src/utils/rdb-constructs/files/terraform/app/dbs/__nameKebabCase__/__nameKebabCase__.tf.template
@@ -0,0 +1,346 @@
+terraform {
+ required_version = ">= 1.0"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = "~> 6.33"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = "~> 3.6"
+ }
+ }
+}
+
+variable "vpc_id" {
+ description = "VPC where the Aurora cluster will be deployed."
+ type = string
+}
+
+variable "subnet_ids" {
+ description = "Private subnet IDs used by the Aurora DB subnet group and migration Lambda."
+ type = list(string)
+}
+
+variable "engine_version" {
+ description = "Aurora engine version."
+ type = string
+ default = null
+}
+
+variable "database_url_query_parameters" {
+ description = "Additional query parameters appended to the generated database URL."
+ type = map(string)
+ default = {}
+}
+
+variable "port" {
+ description = "Database port for the selected Aurora engine."
+ type = number
+ default = null
+}
+
+variable "serverless_min_capacity" {
+ description = "Minimum Aurora Serverless v2 ACUs."
+ type = number
+ default = 0.5
+}
+
+variable "serverless_max_capacity" {
+ description = "Maximum Aurora Serverless v2 ACUs."
+ type = number
+ default = 4
+}
+
+variable "instance_count" {
+ description = "Number of Aurora instances to create."
+ type = number
+ default = 1
+}
+
+variable "deletion_protection" {
+ description = "Whether deletion protection is enabled."
+ type = bool
+ default = true
+}
+
+variable "skip_final_snapshot" {
+ description = "Whether to skip the final snapshot on deletion."
+ type = bool
+ default = false
+}
+
+variable "tags" {
+ description = "Tags to apply to all resources."
+ type = map(string)
+ default = {}
+}
+
+data "aws_region" "current" {}
+data "aws_caller_identity" "current" {}
+
+resource "random_string" "suffix" {
+ length = 8
+ special = false
+ upper = false
+}
+
+locals {
+ migration_bundle_path = "${path.module}/../../../../../../../<%- migrationBundlePathFromRoot %>"
+ migration_bundle_hash = sha256(join("", [
+ for file in sort(fileset(local.migration_bundle_path, "**")) :
+ "${file}:${filesha256("${local.migration_bundle_path}/${file}")}"
+ ]))
+ migration_function_name = "<%= nameKebabCase %>-migration-${random_string.suffix.result}"
+}
+
+module "aurora" {
+ source = "../../../core/rdb/aurora"
+
+ name = "<%= nameKebabCase %>"
+ vpc_id = var.vpc_id
+ subnet_ids = var.subnet_ids
+ engine = "aurora-<%= engine %>"
+ engine_version = var.engine_version
+ database_name = "<%= databaseName %>"
+ database_user = "<%= databaseUser %>"
+ database_url_query_parameters = var.database_url_query_parameters
+ port = var.port
+ serverless_min_capacity = var.serverless_min_capacity
+ serverless_max_capacity = var.serverless_max_capacity
+ instance_count = var.instance_count
+ deletion_protection = var.deletion_protection
+ skip_final_snapshot = var.skip_final_snapshot
+ tags = var.tags
+}
+
+resource "aws_ecr_repository" "migration_handler" {
+ #checkov:skip=CKV_AWS_136:AES256 encryption is sufficient for ECR repositories
+ name = "<%= nameKebabCase %>-migration-${random_string.suffix.result}"
+ image_tag_mutability = "MUTABLE"
+ force_delete = true
+
+ image_scanning_configuration {
+ scan_on_push = true
+ }
+
+ tags = var.tags
+}
+
+resource "aws_ecr_repository_policy" "migration_handler" {
+ repository = aws_ecr_repository.migration_handler.name
+
+ policy = jsonencode({
+ Version = "2012-10-17"
+ Statement = [
+ {
+ Sid = "AllowPushPull"
+ Effect = "Allow"
+ Principal = {
+ AWS = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"
+ }
+ Action = [
+ "ecr:BatchCheckLayerAvailability",
+ "ecr:CompleteLayerUpload",
+ "ecr:GetDownloadUrlForLayer",
+ "ecr:InitiateLayerUpload",
+ "ecr:PutImage",
+ "ecr:UploadLayerPart"
+ ]
+ },
+ {
+ Sid = "AllowLambdaPull"
+ Effect = "Allow"
+ Principal = {
+ Service = "lambda.amazonaws.com"
+ }
+ Action = [
+ "ecr:BatchCheckLayerAvailability",
+ "ecr:BatchGetImage",
+ "ecr:GetDownloadUrlForLayer"
+ ]
+ }
+ ]
+ })
+}
+
+resource "null_resource" "docker_publish" {
+ triggers = {
+ bundle_hash = local.migration_bundle_hash
+ repository_url = aws_ecr_repository.migration_handler.repository_url
+ }
+
+ provisioner "local-exec" {
+ command = <<-EOT
+ aws ecr get-login-password --region ${data.aws_region.current.name} | docker login --username AWS --password-stdin ${self.triggers.repository_url}
+ docker build --platform linux/arm64 -t ${self.triggers.repository_url}:${self.triggers.bundle_hash} ${local.migration_bundle_path}
+ docker push ${self.triggers.repository_url}:${self.triggers.bundle_hash}
+ EOT
+ }
+
+ depends_on = [aws_ecr_repository_policy.migration_handler]
+}
+
+resource "aws_iam_role" "migration_handler" {
+ name = "<%= nameClassName %>MigrationHandlerRole-${random_string.suffix.result}"
+
+ assume_role_policy = jsonencode({
+ Version = "2012-10-17"
+ Statement = [
+ {
+ Effect = "Allow"
+ Principal = {
+ Service = "lambda.amazonaws.com"
+ }
+ Action = "sts:AssumeRole"
+ }
+ ]
+ })
+
+ tags = var.tags
+}
+
+resource "aws_iam_role_policy_attachment" "migration_handler_basic_execution" {
+ role = aws_iam_role.migration_handler.name
+ policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
+}
+
+resource "aws_iam_role_policy_attachment" "migration_handler_vpc_access" {
+ role = aws_iam_role.migration_handler.name
+ policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole"
+}
+
+resource "aws_iam_role_policy_attachment" "migration_handler_xray" {
+ role = aws_iam_role.migration_handler.name
+ policy_arn = "arn:aws:iam::aws:policy/AWSXRayDaemonWriteAccess"
+}
+
+resource "aws_cloudwatch_log_group" "migration_handler" {
+ #checkov:skip=CKV_AWS_158:Using default CloudWatch log encryption
+ #checkov:skip=CKV_AWS_338:Log retention set to forever
+ #checkov:skip=CKV_AWS_66:Log retention set to forever
+ name = "/aws/lambda/${local.migration_function_name}"
+ tags = var.tags
+}
+
+resource "aws_security_group" "migration_handler" {
+ name_prefix = "<%= nameKebabCase %>-migration-"
+ description = "Security group for the migration Lambda function"
+ vpc_id = var.vpc_id
+
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ tags = var.tags
+}
+
+resource "aws_vpc_security_group_ingress_rule" "migration_handler_to_database" {
+ security_group_id = module.aurora.security_group_id
+ referenced_security_group_id = aws_security_group.migration_handler.id
+ from_port = module.aurora.cluster_port
+ to_port = module.aurora.cluster_port
+ ip_protocol = "tcp"
+}
+
+resource "aws_lambda_function" "migration_handler" {
+ #checkov:skip=CKV_AWS_116:Dead Letter Queue not required for the migration handler
+ #checkov:skip=CKV_AWS_115:Concurrent execution limit not required for the migration handler
+ #checkov:skip=CKV_AWS_173:Lambda environment variables encrypted by managed key
+ package_type = "Image"
+ function_name = local.migration_function_name
+ role = aws_iam_role.migration_handler.arn
+ image_uri = "${aws_ecr_repository.migration_handler.repository_url}:${local.migration_bundle_hash}"
+ memory_size = 1024
+ timeout = 300
+ architectures = ["arm64"]
+
+ tracing_config {
+ mode = "Active"
+ }
+
+ vpc_config {
+ subnet_ids = var.subnet_ids
+ security_group_ids = [aws_security_group.migration_handler.id]
+ }
+
+ environment {
+ variables = {
+ DATABASE_URL = module.aurora.database_url
+ }
+ }
+
+ tags = var.tags
+
+ depends_on = [
+ null_resource.docker_publish,
+ aws_iam_role_policy_attachment.migration_handler_basic_execution,
+ aws_iam_role_policy_attachment.migration_handler_vpc_access,
+ aws_iam_role_policy_attachment.migration_handler_xray,
+ aws_cloudwatch_log_group.migration_handler,
+ aws_vpc_security_group_ingress_rule.migration_handler_to_database
+ ]
+}
+
+resource "null_resource" "migration_trigger" {
+ triggers = {
+ bundle_hash = local.migration_bundle_hash
+ cluster_arn = module.aurora.cluster_arn
+ function_name = aws_lambda_function.migration_handler.function_name
+ }
+
+ provisioner "local-exec" {
+ command = <<-EOT
+ output_file=$(mktemp)
+ response=$(aws lambda invoke \
+ --region ${data.aws_region.current.name} \
+ --function-name ${self.triggers.function_name} \
+ --cli-binary-format raw-in-base64-out \
+ "$output_file")
+ cat "$output_file"
+ echo "$response"
+ if echo "$response" | grep -q '"FunctionError"'; then
+ rm -f "$output_file"
+ exit 1
+ fi
+ rm -f "$output_file"
+ EOT
+ }
+
+ depends_on = [aws_lambda_function.migration_handler, module.aurora]
+}
+
+output "cluster_arn" {
+ description = "ARN of the Aurora cluster."
+ value = module.aurora.cluster_arn
+}
+
+output "cluster_endpoint" {
+ description = "Writer endpoint of the Aurora cluster."
+ value = module.aurora.cluster_endpoint
+}
+
+output "reader_endpoint" {
+ description = "Reader endpoint of the Aurora cluster."
+ value = module.aurora.reader_endpoint
+}
+
+output "cluster_port" {
+ description = "Port exposed by the Aurora cluster."
+ value = module.aurora.cluster_port
+}
+
+output "secret_arn" {
+ description = "ARN of the generated admin credentials secret."
+ value = module.aurora.secret_arn
+}
+
+output "database_url" {
+ description = "Database connection URL composed from the generated secret."
+ value = module.aurora.database_url
+ sensitive = true
+}
diff --git a/packages/nx-plugin/src/utils/rdb-constructs/files/terraform/core/rdb/aurora/aurora.tf.template b/packages/nx-plugin/src/utils/rdb-constructs/files/terraform/core/rdb/aurora/aurora.tf.template
new file mode 100644
index 000000000..98e6ce098
--- /dev/null
+++ b/packages/nx-plugin/src/utils/rdb-constructs/files/terraform/core/rdb/aurora/aurora.tf.template
@@ -0,0 +1,323 @@
+# Core Aurora module
+# This module creates an Aurora cluster and a generated admin secret.
+
+terraform {
+ required_version = ">= 1.0"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = "~> 6.33"
+ }
+ random = {
+ source = "hashicorp/random"
+ version = "~> 3.6"
+ }
+ }
+}
+
+data "aws_iam_policy" "enhanced_monitoring" {
+ arn = "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole"
+}
+
+variable "name" {
+ description = "Base name applied to Aurora resources."
+ type = string
+}
+
+variable "vpc_id" {
+ description = "VPC where the Aurora cluster will be deployed."
+ type = string
+}
+
+variable "subnet_ids" {
+ description = "Private subnet IDs used by the Aurora DB subnet group."
+ type = list(string)
+}
+
+variable "engine" {
+ description = "Aurora engine to use."
+ type = string
+ default = "aurora-postgresql"
+
+ validation {
+ condition = contains(["aurora-postgresql", "aurora-mysql"], var.engine)
+ error_message = "engine must be aurora-postgresql or aurora-mysql."
+ }
+}
+
+variable "engine_version" {
+ description = "Aurora engine version."
+ type = string
+ default = null
+}
+
+variable "database_name" {
+ description = "Initial database created in the cluster."
+ type = string
+}
+
+variable "database_user" {
+ description = "Admin username stored in the generated Secrets Manager secret."
+ type = string
+}
+
+variable "database_url_query_parameters" {
+ description = "Additional query parameters appended to the generated database URL."
+ type = map(string)
+ default = {}
+}
+
+variable "port" {
+ description = "Database port for the selected Aurora engine."
+ type = number
+ default = null
+}
+
+variable "serverless_min_capacity" {
+ description = "Minimum Aurora Serverless v2 ACUs."
+ type = number
+ default = 0.5
+}
+
+variable "serverless_max_capacity" {
+ description = "Maximum Aurora Serverless v2 ACUs."
+ type = number
+ default = 4
+}
+
+variable "instance_count" {
+ description = "Number of Aurora instances to create."
+ type = number
+ default = 1
+}
+
+variable "deletion_protection" {
+ description = "Whether deletion protection is enabled."
+ type = bool
+ default = true
+}
+
+variable "skip_final_snapshot" {
+ description = "Whether to skip the final snapshot on deletion."
+ type = bool
+ default = false
+}
+
+variable "tags" {
+ description = "Tags to apply to all resources."
+ type = map(string)
+ default = {}
+}
+
+locals {
+ default_port = var.engine == "aurora-mysql" ? 3306 : 5432
+ default_engine_version = var.engine == "aurora-mysql" ? "8.0.mysql_aurora.3.12.0" : "17.7"
+ database_url_scheme = var.engine == "aurora-mysql" ? "mysql" : "postgresql"
+ default_database_url_parameters = var.engine == "aurora-postgresql" ? {
+ pool_timeout = "20"
+ connect_timeout = "20"
+ sslmode = "no-verify"
+ } : {}
+ database_url_parameters = merge(
+ local.default_database_url_parameters,
+ var.database_url_query_parameters,
+ )
+ database_url_query_string = join("&", [
+ for key in sort(keys(local.database_url_parameters)) :
+ "${urlencode(key)}=${urlencode(local.database_url_parameters[key])}"
+ ])
+}
+
+resource "aws_kms_key" "database" {
+ description = "KMS key for Aurora cluster ${var.name}"
+ enable_key_rotation = true
+
+ tags = merge(var.tags, {
+ Name = "${var.name}-aurora"
+ })
+}
+
+resource "aws_iam_role" "enhanced_monitoring" {
+ name_prefix = "${var.name}-aurora-monitoring-"
+
+ assume_role_policy = jsonencode({
+ Version = "2012-10-17"
+ Statement = [
+ {
+ Effect = "Allow"
+ Principal = {
+ Service = "monitoring.rds.amazonaws.com"
+ }
+ Action = "sts:AssumeRole"
+ }
+ ]
+ })
+
+ tags = merge(var.tags, {
+ Name = "${var.name}-aurora-monitoring"
+ })
+}
+
+resource "aws_iam_role_policy_attachment" "enhanced_monitoring" {
+ role = aws_iam_role.enhanced_monitoring.name
+ policy_arn = data.aws_iam_policy.enhanced_monitoring.arn
+}
+
+resource "aws_security_group" "database" {
+ name_prefix = "${var.name}-aurora-"
+ description = "Security group for Aurora cluster ${var.name}"
+ vpc_id = var.vpc_id
+
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ tags = merge(var.tags, {
+ Name = "${var.name}-aurora"
+ })
+}
+
+resource "aws_db_subnet_group" "database" {
+ name = "${var.name}-aurora"
+ subnet_ids = var.subnet_ids
+
+ tags = merge(var.tags, {
+ Name = "${var.name}-aurora"
+ })
+}
+
+resource "random_password" "master_password" {
+ length = 32
+ special = true
+ override_special = "!#$%&*()-_=+[]{}<>:?"
+}
+
+resource "aws_secretsmanager_secret" "credentials" {
+ name_prefix = "${var.name}-aurora-credentials-"
+ kms_key_id = aws_kms_key.database.arn
+
+ tags = merge(var.tags, {
+ Name = "${var.name}-aurora-credentials"
+ })
+}
+
+resource "aws_secretsmanager_secret_version" "credentials" {
+ secret_id = aws_secretsmanager_secret.credentials.id
+ secret_string = jsonencode({
+ username = var.database_user
+ password = random_password.master_password.result
+ })
+}
+
+resource "aws_rds_cluster" "database" {
+ cluster_identifier = "${var.name}-aurora"
+ engine = var.engine
+ engine_version = coalesce(var.engine_version, local.default_engine_version)
+ database_name = var.database_name
+ master_username = var.database_user
+ master_password = random_password.master_password.result
+ db_subnet_group_name = aws_db_subnet_group.database.name
+ vpc_security_group_ids = [aws_security_group.database.id]
+ port = coalesce(var.port, local.default_port)
+ storage_encrypted = true
+ kms_key_id = aws_kms_key.database.arn
+ deletion_protection = var.deletion_protection
+ skip_final_snapshot = var.skip_final_snapshot
+ iam_database_authentication_enabled = false
+ monitoring_interval = 5
+ monitoring_role_arn = aws_iam_role.enhanced_monitoring.arn
+
+ serverlessv2_scaling_configuration {
+ min_capacity = var.serverless_min_capacity
+ max_capacity = var.serverless_max_capacity
+ }
+
+ tags = merge(var.tags, {
+ Name = "${var.name}-aurora"
+ })
+}
+
+resource "aws_rds_cluster_instance" "database" {
+ count = var.instance_count
+
+ identifier = "${var.name}-aurora-${count.index + 1}"
+ cluster_identifier = aws_rds_cluster.database.id
+ instance_class = "db.serverless"
+ engine = aws_rds_cluster.database.engine
+ engine_version = aws_rds_cluster.database.engine_version
+ db_subnet_group_name = aws_db_subnet_group.database.name
+
+ tags = merge(var.tags, {
+ Name = "${var.name}-aurora-${count.index + 1}"
+ })
+}
+
+output "cluster_arn" {
+ description = "ARN of the Aurora cluster."
+ value = aws_rds_cluster.database.arn
+}
+
+output "cluster_endpoint" {
+ description = "Writer endpoint of the Aurora cluster."
+ value = aws_rds_cluster.database.endpoint
+}
+
+output "reader_endpoint" {
+ description = "Reader endpoint of the Aurora cluster."
+ value = aws_rds_cluster.database.reader_endpoint
+}
+
+output "cluster_port" {
+ description = "Port exposed by the Aurora cluster."
+ value = aws_rds_cluster.database.port
+}
+
+output "security_group_id" {
+ description = "Security group protecting the Aurora cluster."
+ value = aws_security_group.database.id
+}
+
+output "secret_arn" {
+ description = "ARN of the generated admin credentials secret."
+ value = aws_secretsmanager_secret.credentials.arn
+}
+
+output "database_host" {
+ description = "Hostname of the Aurora cluster writer endpoint."
+ value = aws_rds_cluster.database.endpoint
+}
+
+output "database_name" {
+ description = "Initial database created in the cluster."
+ value = var.database_name
+}
+
+output "database_password" {
+ description = "Database password stored in the generated secret."
+ value = jsondecode(aws_secretsmanager_secret_version.credentials.secret_string).password
+ sensitive = true
+}
+
+output "database_user" {
+ description = "Database username stored in the generated secret."
+ value = jsondecode(aws_secretsmanager_secret_version.credentials.secret_string).username
+}
+
+output "database_url" {
+ description = "Database connection URL composed from the generated secret."
+ value = format(
+ "%s://%s:%s@%s:%s%s%s",
+ local.database_url_scheme,
+ jsondecode(aws_secretsmanager_secret_version.credentials.secret_string).username,
+ jsondecode(aws_secretsmanager_secret_version.credentials.secret_string).password,
+ aws_rds_cluster.database.endpoint,
+ aws_rds_cluster.database.port,
+ "/${var.database_name}",
+ local.database_url_query_string != "" ? "?${local.database_url_query_string}" : "",
+ )
+ sensitive = true
+}
diff --git a/packages/nx-plugin/src/utils/rdb-constructs/rdb-constructs.ts b/packages/nx-plugin/src/utils/rdb-constructs/rdb-constructs.ts
new file mode 100644
index 000000000..decefbc31
--- /dev/null
+++ b/packages/nx-plugin/src/utils/rdb-constructs/rdb-constructs.ts
@@ -0,0 +1,130 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import {
+ generateFiles,
+ joinPathFragments,
+ OverwriteStrategy,
+ Tree,
+} from '@nx/devkit';
+import {
+ PACKAGES_DIR,
+ SHARED_CONSTRUCTS_DIR,
+ SHARED_TERRAFORM_DIR,
+} from '../shared-constructs-constants';
+import { addStarExport } from '../ast';
+import { IacProvider } from '../iac';
+
+/**
+ * Options shared by the CDK and Terraform RDB infrastructure generators.
+ * All fields are passed as template substitutions to the generated files.
+ */
+export interface AddRdbConstructOptions {
+  // PascalCase project name — presumably used as the generated construct's
+  // class name; confirm against the templates under files/.
+  nameClassName: string;
+  // kebab-case project name; also used as the app-level construct file name
+  // (see the `./${nameKebabCase}.js` barrel export below).
+  nameKebabCase: string;
+  // Name of the initial database created in the cluster.
+  databaseName: string;
+  // Admin username for the database.
+  databaseUser: string;
+  // Aurora engine flavour the templates are rendered for.
+  engine: 'postgres' | 'mysql';
+  // Workspace-root-relative path to the bundled migration assets.
+  migrationBundlePathFromRoot: string;
+}
+
+export const addRdbInfra = async (
+ tree: Tree,
+ options: AddRdbConstructOptions & { iacProvider: IacProvider },
+) => {
+ if (options.iacProvider === 'CDK') {
+ await addRdbCdkConstructs(tree, options);
+ } else if (options.iacProvider === 'Terraform') {
+ addRdbTerraformModules(tree, options);
+ } else {
+ throw new Error(`Unsupported iacProvider ${options.iacProvider}`);
+ }
+};
+
+/**
+ * Generates the CDK constructs for a relational database: the shared core
+ * Aurora construct, the templated app-level construct, and the barrel
+ * exports that make both reachable from the shared-constructs package.
+ */
+export const addRdbCdkConstructs = async (
+  tree: Tree,
+  options: AddRdbConstructOptions,
+) => {
+  const constructsSrc = joinPathFragments(
+    PACKAGES_DIR,
+    SHARED_CONSTRUCTS_DIR,
+    'src',
+  );
+
+  // Core Aurora construct — shared, so no template substitutions.
+  generateFiles(
+    tree,
+    joinPathFragments(__dirname, 'files', 'cdk', 'core', 'rdb'),
+    joinPathFragments(constructsSrc, 'core', 'rdb'),
+    {},
+    { overwriteStrategy: OverwriteStrategy.KeepExisting },
+  );
+
+  // App-level construct — templated with the database options.
+  generateFiles(
+    tree,
+    joinPathFragments(__dirname, 'files', 'cdk', 'app', 'dbs'),
+    joinPathFragments(constructsSrc, 'app', 'dbs'),
+    options,
+    { overwriteStrategy: OverwriteStrategy.KeepExisting },
+  );
+
+  // Wire up the barrel files so the new constructs are re-exported
+  // (core index -> aurora, dbs index -> this db, app index -> dbs).
+  const starExports: Array<[indexFile: string, exportPath: string]> = [
+    [joinPathFragments(constructsSrc, 'core', 'index.ts'), './rdb/aurora.js'],
+    [
+      joinPathFragments(constructsSrc, 'app', 'dbs', 'index.ts'),
+      `./${options.nameKebabCase}.js`,
+    ],
+    [joinPathFragments(constructsSrc, 'app', 'index.ts'), './dbs/index.js'],
+  ];
+  for (const [indexFile, exportPath] of starExports) {
+    await addStarExport(tree, indexFile, exportPath);
+  }
+};
+
+/**
+ * Generates the Terraform modules for a relational database: the shared
+ * core Aurora module plus the per-database module rendered from the
+ * caller's options. Existing files are never overwritten.
+ */
+export const addRdbTerraformModules = (
+  tree: Tree,
+  options: AddRdbConstructOptions,
+) => {
+  const terraformSrc = joinPathFragments(
+    PACKAGES_DIR,
+    SHARED_TERRAFORM_DIR,
+    'src',
+  );
+  const keepExisting = { overwriteStrategy: OverwriteStrategy.KeepExisting };
+
+  // Shared core Aurora module — no template substitutions required.
+  generateFiles(
+    tree,
+    joinPathFragments(__dirname, 'files', 'terraform', 'core', 'rdb'),
+    joinPathFragments(terraformSrc, 'core', 'rdb'),
+    {},
+    keepExisting,
+  );
+
+  // Per-database module — templated with the caller's options.
+  generateFiles(
+    tree,
+    joinPathFragments(__dirname, 'files', 'terraform', 'app', 'dbs'),
+    joinPathFragments(terraformSrc, 'app', 'dbs'),
+    options,
+    keepExisting,
+  );
+};
diff --git a/packages/nx-plugin/src/utils/versions.ts b/packages/nx-plugin/src/utils/versions.ts
index 359593ed3..505adcdb9 100644
--- a/packages/nx-plugin/src/utils/versions.ts
+++ b/packages/nx-plugin/src/utils/versions.ts
@@ -62,6 +62,9 @@ export const TS_VERSIONS = {
esbuild: '0.27.4',
'event-source-polyfill': '1.0.31',
'@types/event-source-polyfill': '1.0.5',
+ '@prisma/adapter-mariadb': '7.6.0',
+ '@prisma/adapter-pg': '7.6.0',
+ '@prisma/client': '7.6.0',
'@typescript-eslint/eslint-plugin': '8.58.0',
'@typescript-eslint/parser': '8.58.0',
'eslint-plugin-prettier': '5.5.5',
@@ -73,6 +76,8 @@ export const TS_VERSIONS = {
'npm-check-updates': '19.6.5',
'oidc-client-ts': '3.5.0',
prettier: '3.8.1',
+ pg: '8.20.0',
+ prisma: '7.6.0',
'react-oidc-context': '3.3.1',
react: '19.2.4',
'react-dom': '19.2.4',
diff --git a/packages/nx-plugin/tsconfig.json b/packages/nx-plugin/tsconfig.json
index bc1767bca..54ceceb2e 100644
--- a/packages/nx-plugin/tsconfig.json
+++ b/packages/nx-plugin/tsconfig.json
@@ -2,7 +2,8 @@
"extends": "../../tsconfig.base.json",
"compilerOptions": {
"module": "commonjs",
- "resolveJsonModule": true
+ "resolveJsonModule": true,
+ "moduleResolution": "bundler"
},
"files": [],
"include": [],