From e06f582eec7e067c38edba76bda0846188fbdb34 Mon Sep 17 00:00:00 2001 From: Aleksandar Maksimovic Date: Mon, 22 Dec 2025 16:25:57 -0800 Subject: [PATCH] Standardize folder structure and format Restructures Python, JavaScript, and Java examples with example_preferred as the golden path (pool + connector). - Python/JS: Copied pool examples from connector repos, restored SDK-only from pre-#221 - Java: Merged pgjdbc_hikaricp into pgjdbc, restored SDK-only from pre-#198 - Added README in each alternatives/ folder - Added smoke tests for all examples --- .../java-pgjdbc-hikaricp-integ-tests.yml | 61 ------- .../workflows/python-psycopg2-integ-tests.yml | 2 +- .../workflows/python-psycopg3-integ-tests.yml | 2 +- java/pgjdbc/pom.xml | 20 ++- .../java/org/example/ExamplePreferred.java} | 6 +- .../java/org/example/alternatives/README.md | 22 +++ .../ExampleWithNoConnectionPool.java} | 6 +- .../ExampleWithNoConnector.java | 105 ++++++++++++ .../java/org/example/DsqlExampleTest.java | 12 -- .../org/example/ExamplePreferredTest.java | 12 ++ .../ExampleWithNoConnectionPoolTest.java | 12 ++ .../ExampleWithNoConnectorTest.java | 12 ++ java/pgjdbc_hikaricp/README.md | 152 ------------------ java/pgjdbc_hikaricp/build.gradle.kts | 45 ------ java/pgjdbc_hikaricp/pom.xml | 88 ---------- java/pgjdbc_hikaricp/settings.gradle.kts | 1 - .../org/example/DsqlHikariCPExampleTest.java | 12 -- javascript/node-postgres/package.json | 2 +- .../node-postgres/src/alternatives/README.md | 26 +++ .../example_with_no_connection_pool.js} | 15 +- .../example_with_no_connector.js | 86 ++++++++++ ...mple_with_nonconcurrent_connection_pool.js | 74 +++++++++ .../node-postgres/src/example_preferred.js | 54 +++++++ .../example_with_no_connection_pool.test.js | 5 + .../example_with_no_connector.test.js | 5 + ...with_nonconcurrent_connection_pool.test.js | 5 + .../test/example_preferred.test.js | 5 + javascript/node-postgres/test/smoke.test.js | 6 - javascript/postgres-js/package.json | 2 +- 
.../postgres-js/src/alternatives/README.md | 22 +++ .../example_with_no_connection_pool.js | 74 +++++++++ .../example_with_no_connector.js} | 56 +++++-- .../postgres-js/src/example_preferred.js | 54 +++++++ .../example_with_no_connection_pool.test.js | 5 + .../example_with_no_connector.test.js | 5 + .../test/example_preferred.test.js | 5 + javascript/postgres-js/test/smoke.test.js | 5 - python/psycopg/README.md | 2 +- python/psycopg/requirements.txt | 1 + python/psycopg/src/alternatives/README.md | 28 ++++ .../example_async_with_no_connection_pool.py | 112 +++++++++++++ .../example_with_no_connection_pool.py | 111 +++++++++++++ .../example_with_no_connector.py} | 45 +++++- .../example_with_async_connection_pool.py | 69 ++++++++ ...mple_with_nonconcurrent_connection_pool.py | 70 ++++++++ python/psycopg/src/example_preferred.py | 95 +++++++++++ ...t_example_async_with_no_connection_pool.py | 17 ++ .../test_example_with_no_connection_pool.py | 16 ++ .../test_example_with_no_connector.py | 16 ++ ...test_example_with_async_connection_pool.py | 17 ++ ...mple_with_nonconcurrent_connection_pool.py | 16 ++ python/psycopg/test/test_example.py | 11 -- python/psycopg/test/test_example_preferred.py | 16 ++ python/psycopg2/README.md | 2 +- python/psycopg2/src/alternatives/README.md | 26 +++ .../example_with_no_connection_pool.py | 110 +++++++++++++ .../example_with_no_connector.py} | 41 ++++- ...mple_with_nonconcurrent_connection_pool.py | 69 ++++++++ python/psycopg2/src/example_preferred.py | 91 +++++++++++ .../test_example_with_no_connection_pool.py | 16 ++ .../test_example_with_no_connector.py | 16 ++ ...mple_with_nonconcurrent_connection_pool.py | 16 ++ python/psycopg2/test/test_example.py | 11 -- .../psycopg2/test/test_example_preferred.py | 16 ++ 64 files changed, 1687 insertions(+), 450 deletions(-) delete mode 100644 .github/workflows/java-pgjdbc-hikaricp-integ-tests.yml rename java/{pgjdbc_hikaricp/src/main/java/org/example/Example.java => 
pgjdbc/src/main/java/org/example/ExamplePreferred.java} (97%) create mode 100644 java/pgjdbc/src/main/java/org/example/alternatives/README.md rename java/pgjdbc/src/main/java/org/example/{Example.java => alternatives/no_connection_pool/ExampleWithNoConnectionPool.java} (93%) create mode 100644 java/pgjdbc/src/main/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnector.java delete mode 100644 java/pgjdbc/src/test/java/org/example/DsqlExampleTest.java create mode 100644 java/pgjdbc/src/test/java/org/example/ExamplePreferredTest.java create mode 100644 java/pgjdbc/src/test/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnectionPoolTest.java create mode 100644 java/pgjdbc/src/test/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnectorTest.java delete mode 100644 java/pgjdbc_hikaricp/README.md delete mode 100644 java/pgjdbc_hikaricp/build.gradle.kts delete mode 100644 java/pgjdbc_hikaricp/pom.xml delete mode 100644 java/pgjdbc_hikaricp/settings.gradle.kts delete mode 100644 java/pgjdbc_hikaricp/src/test/java/org/example/DsqlHikariCPExampleTest.java create mode 100644 javascript/node-postgres/src/alternatives/README.md rename javascript/node-postgres/src/{index.js => alternatives/no_connection_pool/example_with_no_connection_pool.js} (82%) create mode 100644 javascript/node-postgres/src/alternatives/no_connection_pool/example_with_no_connector.js create mode 100644 javascript/node-postgres/src/alternatives/pool/example_with_nonconcurrent_connection_pool.js create mode 100644 javascript/node-postgres/src/example_preferred.js create mode 100644 javascript/node-postgres/test/alternatives/no_connection_pool/example_with_no_connection_pool.test.js create mode 100644 javascript/node-postgres/test/alternatives/no_connection_pool/example_with_no_connector.test.js create mode 100644 javascript/node-postgres/test/alternatives/pool/example_with_nonconcurrent_connection_pool.test.js create mode 100644 
javascript/node-postgres/test/example_preferred.test.js delete mode 100644 javascript/node-postgres/test/smoke.test.js create mode 100644 javascript/postgres-js/src/alternatives/README.md create mode 100644 javascript/postgres-js/src/alternatives/no_connection_pool/example_with_no_connection_pool.js rename javascript/postgres-js/src/{index.js => alternatives/no_connection_pool/example_with_no_connector.js} (50%) create mode 100644 javascript/postgres-js/src/example_preferred.js create mode 100644 javascript/postgres-js/test/alternatives/no_connection_pool/example_with_no_connection_pool.test.js create mode 100644 javascript/postgres-js/test/alternatives/no_connection_pool/example_with_no_connector.test.js create mode 100644 javascript/postgres-js/test/example_preferred.test.js delete mode 100644 javascript/postgres-js/test/smoke.test.js create mode 100644 python/psycopg/src/alternatives/README.md create mode 100644 python/psycopg/src/alternatives/no_connection_pool/example_async_with_no_connection_pool.py create mode 100644 python/psycopg/src/alternatives/no_connection_pool/example_with_no_connection_pool.py rename python/psycopg/src/{example.py => alternatives/no_connection_pool/example_with_no_connector.py} (57%) create mode 100644 python/psycopg/src/alternatives/pool/example_with_async_connection_pool.py create mode 100644 python/psycopg/src/alternatives/pool/example_with_nonconcurrent_connection_pool.py create mode 100644 python/psycopg/src/example_preferred.py create mode 100644 python/psycopg/test/alternatives/no_connection_pool/test_example_async_with_no_connection_pool.py create mode 100644 python/psycopg/test/alternatives/no_connection_pool/test_example_with_no_connection_pool.py create mode 100644 python/psycopg/test/alternatives/no_connection_pool/test_example_with_no_connector.py create mode 100644 python/psycopg/test/alternatives/pool/test_example_with_async_connection_pool.py create mode 100644 
python/psycopg/test/alternatives/pool/test_example_with_nonconcurrent_connection_pool.py delete mode 100644 python/psycopg/test/test_example.py create mode 100644 python/psycopg/test/test_example_preferred.py create mode 100644 python/psycopg2/src/alternatives/README.md create mode 100644 python/psycopg2/src/alternatives/no_connection_pool/example_with_no_connection_pool.py rename python/psycopg2/src/{example.py => alternatives/no_connection_pool/example_with_no_connector.py} (59%) create mode 100644 python/psycopg2/src/alternatives/pool/example_with_nonconcurrent_connection_pool.py create mode 100644 python/psycopg2/src/example_preferred.py create mode 100644 python/psycopg2/test/alternatives/no_connection_pool/test_example_with_no_connection_pool.py create mode 100644 python/psycopg2/test/alternatives/no_connection_pool/test_example_with_no_connector.py create mode 100644 python/psycopg2/test/alternatives/pool/test_example_with_nonconcurrent_connection_pool.py delete mode 100644 python/psycopg2/test/test_example.py create mode 100644 python/psycopg2/test/test_example_preferred.py diff --git a/.github/workflows/java-pgjdbc-hikaricp-integ-tests.yml b/.github/workflows/java-pgjdbc-hikaricp-integ-tests.yml deleted file mode 100644 index 0f718799..00000000 --- a/.github/workflows/java-pgjdbc-hikaricp-integ-tests.yml +++ /dev/null @@ -1,61 +0,0 @@ -name: Java pgJDBC integration tests - -permissions: {} - -on: - push: - branches: [main] - paths: - - "java/pgjdbc_hikaricp/**" - - ".github/workflows/java-pgjdbc-hikari-integcp-tests.yml" - pull_request: - branches: [main] - paths: - - "java/pgjdbc_hikaricp/**" - - ".github/workflows/java-pgjdbc-hikari-integcp-tests.yml" - # Give us a button to allow running the workflow on demand for testing. 
- workflow_dispatch: - inputs: - tags: - description: "Manual Workflow Run" - required: false - type: string - -jobs: - test: - runs-on: ubuntu-latest - permissions: - id-token: write # required by aws-actions/configure-aws-credentials - concurrency: - # Ensure only 1 job uses the workflow cluster at a time. - group: ${{ github.workflow }} - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Set up JDK 17 - uses: actions/setup-java@v4 - with: - java-version: "17" - distribution: "corretto" - architecture: x64 - cache: maven - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v4 - with: - role-to-assume: ${{ secrets.JAVA_IAM_ROLE }} - aws-region: us-east-1 - - - name: Configure and run integration for pgjdbc - admin - working-directory: ./java/pgjdbc_hikaricp - env: - CLUSTER_ENDPOINT: ${{ secrets.JAVA_PGJDBC_HIKARI_CLUSTER_ENDPOINT }} - CLUSTER_USER: admin - REGION: ${{ secrets.JAVA_PGJDBC_HIKARI_CLUSTER_REGION }} - run: | - mvn validate - mvn initialize - mvn compile - mvn test diff --git a/.github/workflows/python-psycopg2-integ-tests.yml b/.github/workflows/python-psycopg2-integ-tests.yml index 39e24d4f..2dfa9f79 100644 --- a/.github/workflows/python-psycopg2-integ-tests.yml +++ b/.github/workflows/python-psycopg2-integ-tests.yml @@ -68,4 +68,4 @@ jobs: pip list echo "$GITHUB_WORKSPACE" >> $GITHUB_PATH wget https://www.amazontrust.com/repository/AmazonRootCA1.pem -O root.pem - python src/example.py + pytest diff --git a/.github/workflows/python-psycopg3-integ-tests.yml b/.github/workflows/python-psycopg3-integ-tests.yml index f8a7f27a..c2b6ed06 100644 --- a/.github/workflows/python-psycopg3-integ-tests.yml +++ b/.github/workflows/python-psycopg3-integ-tests.yml @@ -68,5 +68,5 @@ jobs: pip list echo "$GITHUB_WORKSPACE" >> $GITHUB_PATH wget https://www.amazontrust.com/repository/AmazonRootCA1.pem -O root.pem - python src/example.py + pytest diff --git a/java/pgjdbc/pom.xml b/java/pgjdbc/pom.xml index 
2a1eb244..7cd6d277 100644 --- a/java/pgjdbc/pom.xml +++ b/java/pgjdbc/pom.xml @@ -11,18 +11,36 @@ 17 UTF-8 - org.example.Example + org.example.ExamplePreferred 1.2.0 + 5.1.0 + 2.31.32 5.10.0 false + + com.zaxxer + HikariCP + ${hikaricp.version} + software.amazon.dsql aurora-dsql-jdbc-connector ${connector.version} + + + software.amazon.awssdk + dsql + ${aws.sdk.version} + + + org.postgresql + postgresql + 42.7.7 + org.junit.jupiter junit-jupiter diff --git a/java/pgjdbc_hikaricp/src/main/java/org/example/Example.java b/java/pgjdbc/src/main/java/org/example/ExamplePreferred.java similarity index 97% rename from java/pgjdbc_hikaricp/src/main/java/org/example/Example.java rename to java/pgjdbc/src/main/java/org/example/ExamplePreferred.java index 778dfa57..07af01a4 100644 --- a/java/pgjdbc_hikaricp/src/main/java/org/example/Example.java +++ b/java/pgjdbc/src/main/java/org/example/ExamplePreferred.java @@ -21,11 +21,11 @@ *
  • Production-ready configuration with connection validation
  • * */ -public class Example { +public class ExamplePreferred { private final HikariDataSource dataSource; - public Example(String endpoint, String user) { + public ExamplePreferred(String endpoint, String user) { this.dataSource = initializeConnectionPool(endpoint, user); } @@ -122,7 +122,7 @@ public static void main(String[] args) throws SQLException { assert clusterUser != null : "CLUSTER_USER environment variable is not set"; System.out.println("Initializing Aurora DSQL example..."); - Example example = new Example(clusterEndpoint, clusterUser); + ExamplePreferred example = new ExamplePreferred(clusterEndpoint, clusterUser); System.out.println("Example initialized!"); System.out.println(); diff --git a/java/pgjdbc/src/main/java/org/example/alternatives/README.md b/java/pgjdbc/src/main/java/org/example/alternatives/README.md new file mode 100644 index 00000000..9d365e3c --- /dev/null +++ b/java/pgjdbc/src/main/java/org/example/alternatives/README.md @@ -0,0 +1,22 @@ +# Alternative Examples + +The recommended approach is `ExamplePreferred.java` in the parent directory, which uses HikariCP connection pool with the Aurora DSQL JDBC Connector. + +## Why Connection Pooling with the Connector? 
+ +Aurora DSQL has specific connection characteristics: +- **60-minute max connection lifetime** - connections are terminated after 1 hour +- **15-minute token expiry** - IAM auth tokens must be refreshed +- **Optimized for concurrency** - more concurrent connections with smaller batches yields better throughput + +The connector + pool combination handles this automatically: +- Generates fresh IAM tokens per connection +- Recycles connections before the 60-minute limit (via `maxLifetime < 3600000`) +- Reuses warmed connections for better performance + +## Alternatives + +### `no_connection_pool/` +Examples without pooling: +- `ExampleWithNoConnectionPool.java` - Single connection with connector +- `ExampleWithNoConnector.java` - SDK-only, for environments where the connector cannot be used (requires manual token management) diff --git a/java/pgjdbc/src/main/java/org/example/Example.java b/java/pgjdbc/src/main/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnectionPool.java similarity index 93% rename from java/pgjdbc/src/main/java/org/example/Example.java rename to java/pgjdbc/src/main/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnectionPool.java index 00eebe80..7aee8da5 100644 --- a/java/pgjdbc/src/main/java/org/example/Example.java +++ b/java/pgjdbc/src/main/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnectionPool.java @@ -1,4 +1,4 @@ -package org.example; +package org.example.alternatives.no_connection_pool; import java.sql.Connection; @@ -8,7 +8,7 @@ import java.sql.Statement; import java.util.Properties; -public class Example { +public class ExampleWithNoConnectionPool { // Get a connection to Aurora DSQL. 
public static Connection getConnection(String endpoint, String user) throws SQLException { @@ -33,7 +33,7 @@ public static void main(String[] args) throws SQLException { String clusterUser = System.getenv("CLUSTER_USER"); assert clusterUser != null : "CLUSTER_USER environment variable is not set"; - try (Connection conn = Example.getConnection(clusterEndpoint, clusterUser)) { + try (Connection conn = ExampleWithNoConnectionPool.getConnection(clusterEndpoint, clusterUser)) { if (!clusterUser.equals("admin")) { Statement setSchema = conn.createStatement(); setSchema.execute("SET search_path=myschema"); diff --git a/java/pgjdbc/src/main/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnector.java b/java/pgjdbc/src/main/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnector.java new file mode 100644 index 00000000..50f6edfb --- /dev/null +++ b/java/pgjdbc/src/main/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnector.java @@ -0,0 +1,105 @@ +package org.example.alternatives.no_connection_pool; + +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.services.dsql.DsqlUtilities; +import software.amazon.awssdk.services.dsql.model.GenerateAuthTokenRequest; +import software.amazon.awssdk.regions.Region; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +public class ExampleWithNoConnector { + + // Get a connection to Aurora DSQL. 
+ public static Connection getConnection(String endpoint, String user, String region) throws SQLException { + DsqlUtilities utilities = DsqlUtilities.builder() + .region(Region.of(region)) + .credentialsProvider(DefaultCredentialsProvider.create()) + .build(); + + // The token expiration time is optional, and the default value is 900 seconds + GenerateAuthTokenRequest tokenGenerator = GenerateAuthTokenRequest.builder() + .hostname(endpoint) + .region(Region.of(region)) + .build(); + + // Generate a fresh password token for each connection, to ensure the token is + // not expired when the connection is established + String password; + if (user.equals("admin")) { + password = utilities.generateDbConnectAdminAuthToken(tokenGenerator); + } else { + password = utilities.generateDbConnectAuthToken(tokenGenerator); + } + + Properties props = new Properties(); + props.setProperty("user", user); + props.setProperty("password", password); + // Use the DefaultJavaSSLFactory so that Java's default trust store can be used + // to verify the server's root cert. 
+ props.setProperty("sslmode", "verify-full"); + props.setProperty("sslfactory", "org.postgresql.ssl.DefaultJavaSSLFactory"); + props.setProperty("sslNegotiation", "direct"); + + String url = "jdbc:postgresql://" + endpoint + ":5432/postgres"; + + return DriverManager.getConnection(url, props); + } + + public static void main(String[] args) throws SQLException { + String clusterEndpoint = System.getenv("CLUSTER_ENDPOINT"); + assert clusterEndpoint != null : "CLUSTER_ENDPOINT environment variable is not set"; + + String clusterUser = System.getenv("CLUSTER_USER"); + assert clusterUser != null : "CLUSTER_USER environment variable is not set"; + + String region = System.getenv("REGION"); + assert region != null : "REGION environment variable is not set"; + + try (Connection conn = ExampleWithNoConnector.getConnection(clusterEndpoint, clusterUser, region)) { + if (!clusterUser.equals("admin")) { + Statement setSchema = conn.createStatement(); + setSchema.execute("SET search_path=myschema"); + setSchema.close(); + } + // Create a new table named owner + Statement create = conn.createStatement(); + create.executeUpdate(""" + CREATE TABLE IF NOT EXISTS owner( + id uuid NOT NULL DEFAULT gen_random_uuid(), + name varchar(30) NOT NULL, + city varchar(80) NOT NULL, + telephone varchar(20) DEFAULT NULL, + PRIMARY KEY (id))"""); + create.close(); + + // Insert some data + Statement insert = conn.createStatement(); + insert.executeUpdate( + "INSERT INTO owner (name, city, telephone) VALUES ('John Doe', 'Anytown', '555-555-1999')"); + insert.close(); + + // Read back the data and assert they are present + String selectSQL = "SELECT * FROM owner"; + Statement read = conn.createStatement(); + ResultSet rs = read.executeQuery(selectSQL); + while (rs.next()) { + assert rs.getString("id") != null; + assert rs.getString("name").equals("John Doe"); + assert rs.getString("city").equals("Anytown"); + assert rs.getString("telephone").equals("555-555-1999"); + } + + // Delete some data + 
String deleteSql = String.format("DELETE FROM owner where name='John Doe'"); + Statement delete = conn.createStatement(); + delete.executeUpdate(deleteSql); + delete.close(); + } + System.out.println("Connection exercised successfully"); + } +} diff --git a/java/pgjdbc/src/test/java/org/example/DsqlExampleTest.java b/java/pgjdbc/src/test/java/org/example/DsqlExampleTest.java deleted file mode 100644 index 839deb0a..00000000 --- a/java/pgjdbc/src/test/java/org/example/DsqlExampleTest.java +++ /dev/null @@ -1,12 +0,0 @@ -package org.example; - -import static org.junit.jupiter.api.Assertions.*; - -import org.junit.jupiter.api.Test; - -public class DsqlExampleTest { - @Test - public void testExample() { - assertAll(() -> Example.main(new String[]{})); - } -} \ No newline at end of file diff --git a/java/pgjdbc/src/test/java/org/example/ExamplePreferredTest.java b/java/pgjdbc/src/test/java/org/example/ExamplePreferredTest.java new file mode 100644 index 00000000..3ae0461b --- /dev/null +++ b/java/pgjdbc/src/test/java/org/example/ExamplePreferredTest.java @@ -0,0 +1,12 @@ +package org.example; + +import static org.junit.jupiter.api.Assertions.*; + +import org.junit.jupiter.api.Test; + +public class ExamplePreferredTest { + @Test + public void testExamplePreferred() { + assertAll(() -> ExamplePreferred.main(new String[]{})); + } +} \ No newline at end of file diff --git a/java/pgjdbc/src/test/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnectionPoolTest.java b/java/pgjdbc/src/test/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnectionPoolTest.java new file mode 100644 index 00000000..73ff6028 --- /dev/null +++ b/java/pgjdbc/src/test/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnectionPoolTest.java @@ -0,0 +1,12 @@ +package org.example.alternatives.no_connection_pool; + +import static org.junit.jupiter.api.Assertions.*; + +import org.junit.jupiter.api.Test; + +public class ExampleWithNoConnectionPoolTest { + @Test + 
public void testExampleWithNoConnectionPool() { + assertAll(() -> ExampleWithNoConnectionPool.main(new String[]{})); + } +} diff --git a/java/pgjdbc/src/test/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnectorTest.java b/java/pgjdbc/src/test/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnectorTest.java new file mode 100644 index 00000000..6a641ae4 --- /dev/null +++ b/java/pgjdbc/src/test/java/org/example/alternatives/no_connection_pool/ExampleWithNoConnectorTest.java @@ -0,0 +1,12 @@ +package org.example.alternatives.no_connection_pool; + +import static org.junit.jupiter.api.Assertions.*; + +import org.junit.jupiter.api.Test; + +public class ExampleWithNoConnectorTest { + @Test + public void testExampleWithNoConnector() { + assertAll(() -> ExampleWithNoConnector.main(new String[]{})); + } +} diff --git a/java/pgjdbc_hikaricp/README.md b/java/pgjdbc_hikaricp/README.md deleted file mode 100644 index 76eb7ec9..00000000 --- a/java/pgjdbc_hikaricp/README.md +++ /dev/null @@ -1,152 +0,0 @@ -# Aurora DSQL with HikariCP Connection Pool Example - -## Overview - -This example demonstrates how to connect to Aurora DSQL using HikariCP connection pooling with `pgJDBC`. HikariCP provides: - -- **Connection Pooling**: Reuses database connections to improve performance -- **Connection Management**: Automatically handles connection lifecycle -- **Monitoring**: Built-in metrics and leak detection -- **Configuration**: tuning options to align with token refresh - -This example uses the Aurora DSQL JDBC Connector to handle IAM authentication automatically. - -## About the code example - -The example maintains the flexible connection approach as maintained in the standalone `pgJDBC` and Amazon Aurora DSQL example and continues to work for both admin and non-admin users. It introduces connection pooling via the HikariCP library. - -### IAM Authentication - -Aurora DSQL uses IAM-based authentication with short-lived tokens (15 minutes by default). 
The Aurora DSQL JDBC Connector handles token generation and refresh automatically, creating admin or non-admin tokens based on the user type. - -## ⚠️ Important - -- Running this code might result in charges to your AWS account. -- We recommend that you grant your code the least privilege. At most, grant only the - minimum permissions required to perform the task. For more information, see - [Grant least privilege](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege). -- This code is not tested in every AWS Region. For more information, see - [AWS Regional Services](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services). - -## Run the example - -### Prerequisites - -- You must have an AWS account, and have your default credentials and AWS Region - configured as described in the - [Globally configuring AWS SDKs and tools](https://docs.aws.amazon.com/credref/latest/refdocs/creds-config-files.html) - guide. -- Java Development Kit (JDK): Ensure you have JDK 17+ installed. - _To verify that Java is installed, you can run:_ - ```bash - java -version - ``` -- Build Tool (Maven or Gradle) - - _Maven_: Ensure Maven is installed if that is your preferred option. You can download it from the [official website](https://maven.apache.org/download.cgi). - - _Gradle_: Ensure Gradle is installed if that is your preferred option. You can download it from the [official website](https://gradle.org/install/). -- AWS SDK: Ensure that you set up the latest version of the AWS Java SDK from the [official website](https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/setup.html) -- You must have an Aurora DSQL cluster. For information about creating an Aurora DSQL cluster, see the - [Getting started with Aurora DSQL](https://docs.aws.amazon.com/aurora-dsql/latest/userguide/getting-started.html) - guide. 
-- If connecting as a non-admin user, ensure the user is linked to an IAM role and is granted access to the `myschema` - schema. See the - [Using database roles with IAM roles](https://docs.aws.amazon.com/aurora-dsql/latest/userguide/using-database-and-iam-roles.html) - guide. - -The example demonstrates the following operations: - -- Creating a pool of connections to an Aurora DSQL cluster -- Creating a table -- Inserting and querying data from different connections from the pool - The example is designed to work with both admin and non-admin users: -- When run as an admin user, it uses the `public` schema -- When run as a non-admin user, it uses the `myschema` schema - -### Run the code - -Set the following environment variables or update the hardcoded values in the code: - -```bash -# e.g. "admin" -export CLUSTER_USER="" - -# e.g. "foo0bar1baz2quux3quuux4.dsql.us-east-1.on.aws" -export CLUSTER_ENDPOINT="" -``` - -_Maven_: - -```bash -mvn compile -mvn test -``` - -_Gradle_: - -```bash -gradle run -``` - -The example demonstrates successful connection pooling with Aurora DSQL, showing multiple connections being obtained from the pool and database operations being performed successfully. - -## HikariCP Configuration - -This example uses HikariCP with configuration settings optimized for Aurora DSQL. 
The key configuration parameters include: - -### Pool Size Settings - -- **Maximum Pool Size**: 20 connections - Production-ready pool size that balances resource usage with performance -- **Minimum Idle**: 5 connections - Keeps connections ready for immediate use, reducing connection acquisition latency - -### Connection Lifecycle Management - -- **Connection Timeout**: 30 seconds - Maximum time to wait for a connection from the pool -- **Idle Timeout**: 5 minutes (300,000ms) - Connections idle longer than this are removed from the pool -- **Max Lifetime**: 10 minutes (600,000ms) - Maximum lifetime of connections in the pool - -⚠️ **Aurora DSQL Connection Limitation**: Aurora DSQL has a maximum duration limit See the See the `Maximum connection duration` time limit in the [Cluster quotas and database limits in Amazon Aurora DSQL](https://docs.aws.amazon.com/aurora-dsql/latest/userguide/CHAP_quotas.html) page. . The `maxLifetime` setting must be configured to be significantly shorter than this limit to prevent connection failures. The example uses 10 minutes to provide a safe margin. 
- -### Connection Validation and Monitoring - -- **Connection Test Query**: `SELECT 1` - Simple query to validate connection health -- **Validation Timeout**: 5 seconds - Maximum time for connection validation -- **Leak Detection Threshold**: 60 seconds - Detects potential connection leaks for debugging - -### Performance Optimizations - -- **Auto Commit**: Enabled by default for optimal performance -- **Schema Setting**: Automatically sets `myschema` for non-admin users -- **MBean Registration**: Enabled for JMX monitoring and metrics - -### SSL Configuration - -The example configures SSL settings required for Aurora DSQL: - -- **SSL Mode**: `verify-full` - Ensures secure connections with certificate verification -- **SSL Factory**: Uses PostgreSQL's default Java SSL factory -- **SSL Negotiation**: Direct SSL negotiation for optimal performance - -### Dynamic Token Management - -The example handles Aurora DSQL's unique authentication requirements: - -1. **Fresh Token Generation**: Each connection request generates a new IAM token using `DsqlUtilities` -2. **Token Type Selection**: Automatically selects admin or non-admin token based on user type -3. **Connection Lifecycle Alignment**: HikariCP timeouts are configured to work within DSQL's connection limit and 15-minute token expiry -4. **Automatic Refresh**: No manual token refresh needed - new tokens are generated per connection - -These settings provide a production-ready configuration that handles Aurora DSQL's unique requirements including the connection limit, dynamic token refresh, and secure connections. 
- -## Additional resources - -- [Amazon Aurora DSQL Documentation](https://docs.aws.amazon.com/aurora-dsql/latest/userguide/what-is-aurora-dsql.html) -- [Amazon Aurora DSQL JDBC Connector](https://github.com/awslabs/aurora-dsql-jdbc-connector) -- [HikariCP](https://github.com/brettwooldridge/HikariCP) -- [pgJDBC Documentation](https://jdbc.postgresql.org/documentation/) -- [AWS SDK for Java Documentation](https://docs.aws.amazon.com/sdk-for-java/) - ---- - -Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -SPDX-License-Identifier: MIT-0 - diff --git a/java/pgjdbc_hikaricp/build.gradle.kts b/java/pgjdbc_hikaricp/build.gradle.kts deleted file mode 100644 index a04c52e6..00000000 --- a/java/pgjdbc_hikaricp/build.gradle.kts +++ /dev/null @@ -1,45 +0,0 @@ -import org.gradle.api.tasks.testing.logging.TestExceptionFormat - -plugins { - id("java") - id("application") -} - -application { - mainClass = "org.example.Example" -} - -group = "org.example" -version = "1.0-SNAPSHOT" - -repositories { - mavenCentral() -} - -dependencies { - implementation("com.zaxxer:HikariCP:5.1.0") - implementation("software.amazon.dsql:aurora-dsql-jdbc-connector:1.2.0") - - testImplementation(platform("org.junit:junit-bom:5.10.0")) - testImplementation("org.junit.jupiter:junit-jupiter") - testRuntimeOnly("org.junit.platform:junit-platform-launcher") -} - -tasks.test { - useJUnitPlatform() - - testLogging { - events("passed", "skipped", "failed", "standardOut", "standardError") - exceptionFormat = TestExceptionFormat.FULL - } -} - -tasks.withType { - this.testLogging { - this.showStandardStreams = true - } -} - -tasks.withType { - this.enableAssertions = true -} diff --git a/java/pgjdbc_hikaricp/pom.xml b/java/pgjdbc_hikaricp/pom.xml deleted file mode 100644 index 47583b74..00000000 --- a/java/pgjdbc_hikaricp/pom.xml +++ /dev/null @@ -1,88 +0,0 @@ - - - 4.0.0 - - org.example - AuroraDSQLHikariCPExample - 1.0-SNAPSHOT - - - 17 - UTF-8 - org.example.Example - 1.2.0 - 5.1.0 - 
5.10.0 - false - - - - - com.zaxxer - HikariCP - ${hikaricp.version} - - - software.amazon.dsql - aurora-dsql-jdbc-connector - ${connector.version} - - - org.junit.jupiter - junit-jupiter - ${junit.version} - test - - - - - - - org.apache.maven.plugins - maven-surefire-plugin - 3.0.0-M5 - - - org.apache.maven.plugins - maven-jar-plugin - 3.2.0 - - - - true - ${mainClass} - - - - - - - maven-assembly-plugin - - - - ${mainClass} - - - - jar-with-dependencies - - - - - - - - local-maven-repository - file://${local.repository.folder} - - true - - - true - - - - diff --git a/java/pgjdbc_hikaricp/settings.gradle.kts b/java/pgjdbc_hikaricp/settings.gradle.kts deleted file mode 100644 index df34d239..00000000 --- a/java/pgjdbc_hikaricp/settings.gradle.kts +++ /dev/null @@ -1 +0,0 @@ -rootProject.name = "DSQLPGJDBCHikariCPExample" diff --git a/java/pgjdbc_hikaricp/src/test/java/org/example/DsqlHikariCPExampleTest.java b/java/pgjdbc_hikaricp/src/test/java/org/example/DsqlHikariCPExampleTest.java deleted file mode 100644 index f0dc9c3a..00000000 --- a/java/pgjdbc_hikaricp/src/test/java/org/example/DsqlHikariCPExampleTest.java +++ /dev/null @@ -1,12 +0,0 @@ -package org.example; - -import static org.junit.jupiter.api.Assertions.*; - -import org.junit.jupiter.api.Test; - -public class DsqlHikariCPExampleTest { - @Test - public void testHikariCPExample() { - assertAll(() -> Example.main(new String[]{})); - } -} diff --git a/javascript/node-postgres/package.json b/javascript/node-postgres/package.json index 61fb8649..652bf819 100644 --- a/javascript/node-postgres/package.json +++ b/javascript/node-postgres/package.json @@ -5,7 +5,7 @@ "main": "index.js", "type": "module", "scripts": { - "test": "node --experimental-vm-modules node_modules/.bin/jest --testPathPattern='test/smoke.test.js' --runInBand --detectOpenHandles --forceExit" + "test": "node --experimental-vm-modules node_modules/.bin/jest --runInBand --detectOpenHandles --forceExit" }, "author": "", "license": "ISC", diff 
--git a/javascript/node-postgres/src/alternatives/README.md b/javascript/node-postgres/src/alternatives/README.md new file mode 100644 index 00000000..eaf56faa --- /dev/null +++ b/javascript/node-postgres/src/alternatives/README.md @@ -0,0 +1,26 @@ +# Alternative Examples + +The recommended approach is `example_preferred.js` in the parent directory, which uses a concurrent connection pool with the Aurora DSQL connector. + +## Why Connection Pooling with the Connector? + +Aurora DSQL has specific connection characteristics: +- **60-minute max connection lifetime** - connections are terminated after 1 hour +- **15-minute token expiry** - IAM auth tokens must be refreshed +- **Optimized for concurrency** - more concurrent connections with smaller batches yield better throughput + +The connector + pool combination handles this automatically: +- Generates fresh IAM tokens per connection +- Recycles connections before the 60-minute limit +- Reuses warmed connections (2-24ms faster than new connections) + +## Alternatives + +### `pool/` +Other pool configurations using the connector: +- `example_with_nonconcurrent_connection_pool.js` - Simple pool without concurrency + +### `no_connection_pool/` +Examples without pooling: +- `example_with_no_connection_pool.js` - Single connection with connector +- `example_with_no_connector.js` - SDK-only, for environments where the connector cannot be used (requires manual token management) diff --git a/javascript/node-postgres/src/index.js b/javascript/node-postgres/src/alternatives/no_connection_pool/example_with_no_connection_pool.js similarity index 82% rename from javascript/node-postgres/src/index.js rename to javascript/node-postgres/src/alternatives/no_connection_pool/example_with_no_connection_pool.js index e613df1f..7ba3f94d 100644 --- a/javascript/node-postgres/src/index.js +++ b/javascript/node-postgres/src/alternatives/no_connection_pool/example_with_no_connection_pool.js @@ -1,5 +1,10 @@ -import { AuroraDSQLClient } from
"@aws/aurora-dsql-node-postgres-connector"; +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + import assert from "node:assert"; +import { AuroraDSQLClient } from "@aws/aurora-dsql-node-postgres-connector"; const ADMIN = "admin"; const NON_ADMIN_SCHEMA = "myschema"; @@ -11,15 +16,14 @@ async function getConnection(clusterEndpoint, user) { }); await client.connect(); - console.log("Successfully opened connection"); return client; } async function example() { const clusterEndpoint = process.env.CLUSTER_ENDPOINT; - assert(clusterEndpoint); + assert(clusterEndpoint, "CLUSTER_ENDPOINT environment variable is not set"); const user = process.env.CLUSTER_USER; - assert(user); + assert(user, "CLUSTER_USER environment variable is not set"); let client; try { @@ -51,11 +55,12 @@ async function example() { assert.notEqual(result.rows[0].id, null); await client.query("DELETE FROM owner where name='John Doe'"); + console.log("Completed successfully"); } catch (error) { console.error(error); throw error; } finally { - client?.end(); + await client?.end(); } } diff --git a/javascript/node-postgres/src/alternatives/no_connection_pool/example_with_no_connector.js b/javascript/node-postgres/src/alternatives/no_connection_pool/example_with_no_connector.js new file mode 100644 index 00000000..91e89381 --- /dev/null +++ b/javascript/node-postgres/src/alternatives/no_connection_pool/example_with_no_connector.js @@ -0,0 +1,86 @@ +import { DsqlSigner } from "@aws-sdk/dsql-signer"; +import pg from "pg"; +import assert from "node:assert"; +const { Client } = pg; + +const ADMIN = "admin"; +const NON_ADMIN_SCHEMA = "myschema"; + +async function getConnection(clusterEndpoint, user, region) { + const signer = new DsqlSigner({ + hostname: clusterEndpoint, + region, + }); + let token; + // Generate a fresh password token for each connection, to ensure the token is + // not expired when the connection is established + if (user 
=== ADMIN) { + token = await signer.getDbConnectAdminAuthToken(); + } + else { + signer.user = user; + token = await signer.getDbConnectAuthToken() + } + let client = new Client({ + host: clusterEndpoint, + user: user, + password: token, + database: "postgres", + port: 5432, + ssl: { + rejectUnauthorized: true, + } + }); + + // Connect + await client.connect(); + console.log("Successfully opened connection"); + return client; +} + +async function example() { + + const clusterEndpoint = process.env.CLUSTER_ENDPOINT; + assert(clusterEndpoint); + const user = process.env.CLUSTER_USER; + assert(user); + const region = process.env.REGION; + assert(region); + + let client; + try { + client = await getConnection(clusterEndpoint, user, region); + + if (user !== ADMIN) { + await client.query("SET search_path=" + NON_ADMIN_SCHEMA) + } + + // Create a new table + await client.query(`CREATE TABLE IF NOT EXISTS owner ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + name VARCHAR(30) NOT NULL, + city VARCHAR(80) NOT NULL, + telephone VARCHAR(20) + )`); + + // Insert some data + await client.query("INSERT INTO owner(name, city, telephone) VALUES($1, $2, $3)", + ["John Doe", "Anytown", "555-555-1900"] + ); + + // Check that data is inserted by reading it back + const result = await client.query("SELECT id, city FROM owner where name='John Doe'"); + assert.deepEqual(result.rows[0].city, "Anytown") + assert.notEqual(result.rows[0].id, null) + + await client.query("DELETE FROM owner where name='John Doe'"); + + } catch (error) { + console.error(error); + throw error; + } finally { + client?.end() + } +} + +export { example } diff --git a/javascript/node-postgres/src/alternatives/pool/example_with_nonconcurrent_connection_pool.js b/javascript/node-postgres/src/alternatives/pool/example_with_nonconcurrent_connection_pool.js new file mode 100644 index 00000000..5b5a393b --- /dev/null +++ b/javascript/node-postgres/src/alternatives/pool/example_with_nonconcurrent_connection_pool.js @@ 
-0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +import assert from "node:assert"; +import { AuroraDSQLPool } from "@aws/aurora-dsql-node-postgres-connector"; + +const ADMIN = "admin"; +const NON_ADMIN_SCHEMA = "myschema"; + +function createPool(clusterEndpoint, user) { + return new AuroraDSQLPool({ + host: clusterEndpoint, + user: user, + max: 10, + idleTimeoutMillis: 30000, + connectionTimeoutMillis: 10000, + }); +} + +async function example() { + const clusterEndpoint = process.env.CLUSTER_ENDPOINT; + assert(clusterEndpoint, "CLUSTER_ENDPOINT environment variable is not set"); + const user = process.env.CLUSTER_USER; + assert(user, "CLUSTER_USER environment variable is not set"); + + const pool = createPool(clusterEndpoint, user); + + try { + // Get a client from the pool + const client = await pool.connect(); + + try { + if (user !== ADMIN) { + await client.query("SET search_path=" + NON_ADMIN_SCHEMA); + } + + // Create a new table + await client.query(`CREATE TABLE IF NOT EXISTS owner ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + name VARCHAR(30) NOT NULL, + city VARCHAR(80) NOT NULL, + telephone VARCHAR(20) + )`); + + // Insert some data + await client.query( + "INSERT INTO owner(name, city, telephone) VALUES($1, $2, $3)", + ["John Doe", "Anytown", "555-555-1900"] + ); + + // Check that data is inserted by reading it back + const result = await client.query( + "SELECT id, city FROM owner where name='John Doe'" + ); + assert.deepEqual(result.rows[0].city, "Anytown"); + assert.notEqual(result.rows[0].id, null); + + await client.query("DELETE FROM owner where name='John Doe'"); + console.log("Completed successfully"); + } finally { + // Release the client back to the pool + client.release(); + } + } catch (error) { + console.error(error); + throw error; + } finally { + await pool.end(); + } +} + +export { example }; diff --git 
a/javascript/node-postgres/src/example_preferred.js b/javascript/node-postgres/src/example_preferred.js new file mode 100644 index 00000000..74428617 --- /dev/null +++ b/javascript/node-postgres/src/example_preferred.js @@ -0,0 +1,54 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +import assert from "node:assert"; +import { AuroraDSQLPool } from "@aws/aurora-dsql-node-postgres-connector"; + +const NUM_CONCURRENT_QUERIES = 8; + +function createPool(clusterEndpoint, user) { + return new AuroraDSQLPool({ + host: clusterEndpoint, + user: user, + max: 10, + idleTimeoutMillis: 30000, + connectionTimeoutMillis: 10000, + }); +} + +async function worker(pool, workerId) { + const result = await pool.query("SELECT $1::int as worker_id", [workerId]); + console.log(`Worker ${workerId} result: ${result.rows[0].worker_id}`); + assert.strictEqual(result.rows[0].worker_id, workerId); +} + +async function example() { + const clusterEndpoint = process.env.CLUSTER_ENDPOINT; + assert(clusterEndpoint, "CLUSTER_ENDPOINT environment variable is not set"); + const user = process.env.CLUSTER_USER; + assert(user, "CLUSTER_USER environment variable is not set"); + + const pool = createPool(clusterEndpoint, user); + + try { + // Run concurrent queries using the connection pool + const workers = []; + for (let i = 1; i <= NUM_CONCURRENT_QUERIES; i++) { + workers.push(worker(pool, i)); + } + + // Wait for all workers to complete + await Promise.all(workers); + + console.log("Connection pool with concurrent connections exercised successfully"); + } catch (error) { + console.error(error); + throw error; + } finally { + await pool.end(); + } +} + +export { example }; diff --git a/javascript/node-postgres/test/alternatives/no_connection_pool/example_with_no_connection_pool.test.js b/javascript/node-postgres/test/alternatives/no_connection_pool/example_with_no_connection_pool.test.js new file mode 100644 index 
00000000..fcc1b6a8 --- /dev/null +++ b/javascript/node-postgres/test/alternatives/no_connection_pool/example_with_no_connection_pool.test.js @@ -0,0 +1,5 @@ +import { example } from '../../../src/alternatives/no_connection_pool/example_with_no_connection_pool.js'; + +test('Smoke test - example_with_no_connection_pool', async () => { + await example(); +}, 30000); diff --git a/javascript/node-postgres/test/alternatives/no_connection_pool/example_with_no_connector.test.js b/javascript/node-postgres/test/alternatives/no_connection_pool/example_with_no_connector.test.js new file mode 100644 index 00000000..e8287b33 --- /dev/null +++ b/javascript/node-postgres/test/alternatives/no_connection_pool/example_with_no_connector.test.js @@ -0,0 +1,5 @@ +import { example } from '../../../src/alternatives/no_connection_pool/example_with_no_connector.js'; + +test('Smoke test - example_with_no_connector', async () => { + await example(); +}, 30000); diff --git a/javascript/node-postgres/test/alternatives/pool/example_with_nonconcurrent_connection_pool.test.js b/javascript/node-postgres/test/alternatives/pool/example_with_nonconcurrent_connection_pool.test.js new file mode 100644 index 00000000..770112b9 --- /dev/null +++ b/javascript/node-postgres/test/alternatives/pool/example_with_nonconcurrent_connection_pool.test.js @@ -0,0 +1,5 @@ +import { example } from '../../../src/alternatives/pool/example_with_nonconcurrent_connection_pool.js'; + +test('Smoke test - example_with_nonconcurrent_connection_pool', async () => { + await example(); +}, 30000); diff --git a/javascript/node-postgres/test/example_preferred.test.js b/javascript/node-postgres/test/example_preferred.test.js new file mode 100644 index 00000000..ff3580e7 --- /dev/null +++ b/javascript/node-postgres/test/example_preferred.test.js @@ -0,0 +1,5 @@ +import { example } from '../src/example_preferred.js'; + +test('Smoke test - example_preferred', async () => { + await example(); +}, 30000); diff --git 
a/javascript/node-postgres/test/smoke.test.js b/javascript/node-postgres/test/smoke.test.js deleted file mode 100644 index 66f955db..00000000 --- a/javascript/node-postgres/test/smoke.test.js +++ /dev/null @@ -1,6 +0,0 @@ -import { example } from '../src/index.js'; - -test('Smoke test', async () => { - await example(); - return Promise.resolve(); -}); diff --git a/javascript/postgres-js/package.json b/javascript/postgres-js/package.json index e1510686..626e085e 100644 --- a/javascript/postgres-js/package.json +++ b/javascript/postgres-js/package.json @@ -5,7 +5,7 @@ "main": "index.js", "type": "module", "scripts": { - "test": "node --experimental-vm-modules node_modules/.bin/jest --testPathPattern='test/smoke.test.js' --runInBand --detectOpenHandles --forceExit" + "test": "node --experimental-vm-modules node_modules/.bin/jest --runInBand --detectOpenHandles --forceExit" }, "author": "", "license": "ISC", diff --git a/javascript/postgres-js/src/alternatives/README.md b/javascript/postgres-js/src/alternatives/README.md new file mode 100644 index 00000000..2aa151ba --- /dev/null +++ b/javascript/postgres-js/src/alternatives/README.md @@ -0,0 +1,22 @@ +# Alternative Examples + +The recommended approach is `example_preferred.js` in the parent directory, which uses a concurrent connection pool with the Aurora DSQL connector. + +## Why Connection Pooling with the Connector? 
+ +Aurora DSQL has specific connection characteristics: +- **60-minute max connection lifetime** - connections are terminated after 1 hour +- **15-minute token expiry** - IAM auth tokens must be refreshed +- **Optimized for concurrency** - more concurrent connections with smaller batches yield better throughput + +The connector + pool combination handles this automatically: +- Generates fresh IAM tokens per connection +- Recycles connections before the 60-minute limit +- Reuses warmed connections (2-24ms faster than new connections) + +## Alternatives + +### `no_connection_pool/` +Examples without pooling: +- `example_with_no_connection_pool.js` - Single connection with connector +- `example_with_no_connector.js` - SDK-only, for environments where the connector cannot be used (requires manual token management) diff --git a/javascript/postgres-js/src/alternatives/no_connection_pool/example_with_no_connection_pool.js b/javascript/postgres-js/src/alternatives/no_connection_pool/example_with_no_connection_pool.js new file mode 100644 index 00000000..a2d0c1db --- /dev/null +++ b/javascript/postgres-js/src/alternatives/no_connection_pool/example_with_no_connection_pool.js @@ -0,0 +1,74 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0 + */ + +import assert from "node:assert"; +import { auroraDSQLPostgres } from "@aws/aurora-dsql-postgresjs-connector"; + +const ADMIN = "admin"; +const PUBLIC = "public"; +const NON_ADMIN_SCHEMA = "myschema"; + +function getConnection(clusterEndpoint, user) { + return auroraDSQLPostgres({ + host: clusterEndpoint, + user: user, + // Other DSQL options: + // region: 'us-east-1', + // profile: awsProfile, + // tokenDurationSecs: 30, + // customCredentialsProvider: credentialsProvider, + // + // Other Postgres.js settings are also valid here, see Postgres.js documentation for more information + // https://github.com/porsager/postgres#all-postgres-options + }); +} + +async function example() { + const clusterEndpoint = process.env.CLUSTER_ENDPOINT; + assert(clusterEndpoint, "CLUSTER_ENDPOINT environment variable is not set"); + const user = process.env.CLUSTER_USER; + assert(user, "CLUSTER_USER environment variable is not set"); + + let client; + try { + client = getConnection(clusterEndpoint, user); + const schema = user === ADMIN ? PUBLIC : NON_ADMIN_SCHEMA; + + // Note that due to connection pooling, we cannot execute 'set search_path=myschema' + // because we cannot assume the same connection will be used. 
+ await client`CREATE TABLE IF NOT EXISTS ${client(schema)}.owner + ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + name VARCHAR(30) NOT NULL, + city VARCHAR(80) NOT NULL, + telephone VARCHAR(20) + )`; + + // Insert some data + await client`INSERT INTO ${client(schema)}.owner(name, city, telephone) + VALUES ('John Doe', 'Anytown', '555-555-0150')`; + + // Check that data is inserted by reading it back + const result = await client`SELECT id, city + FROM ${client(schema)}.owner + where name = 'John Doe'`; + assert.deepEqual(result[0].city, "Anytown"); + assert.notEqual(result[0].id, null); + + // Delete data we just inserted + await client`DELETE + FROM ${client(schema)}.owner + where name = 'John Doe'`; + + console.log("Completed successfully"); + } catch (error) { + console.error(error); + throw error; + } finally { + await client?.end(); + } +} + +export { example }; diff --git a/javascript/postgres-js/src/index.js b/javascript/postgres-js/src/alternatives/no_connection_pool/example_with_no_connector.js similarity index 50% rename from javascript/postgres-js/src/index.js rename to javascript/postgres-js/src/alternatives/no_connection_pool/example_with_no_connector.js index cfb6f77b..cc3ab5a8 100644 --- a/javascript/postgres-js/src/index.js +++ b/javascript/postgres-js/src/alternatives/no_connection_pool/example_with_no_connector.js @@ -1,21 +1,46 @@ -import { auroraDSQLPostgres } from "@aws/aurora-dsql-postgresjs-connector"; +import { DsqlSigner } from "@aws-sdk/dsql-signer"; +import postgres from "postgres" + import assert from "node:assert"; const ADMIN = "admin"; const PUBLIC = "public"; const NON_ADMIN_SCHEMA = "myschema"; -async function getConnection(clusterEndpoint, user) { - const client = auroraDSQLPostgres({ +async function getConnection(clusterEndpoint, user, region) { + + let client = postgres({ host: clusterEndpoint, user: user, + // We can pass a function to password instead of a value, which will be triggered whenever + // connections are 
opened. + password: async () => await getPasswordToken(clusterEndpoint, user, region), + database: "postgres", + port: 5432, idle_timeout: 2, + ssl: { + rejectUnauthorized: true, + } // max: 1, // Optionally set maximum connection pool size - }); + }) return client; } +async function getPasswordToken(clusterEndpoint, user, region) { + const signer = new DsqlSigner({ + hostname: clusterEndpoint, + region, + }); + if (user === ADMIN) { + return await signer.getDbConnectAdminAuthToken(); + } + else { + signer.user = user; + return await signer.getDbConnectAuthToken() + } +} + async function example() { let client; @@ -23,9 +48,12 @@ async function example() { assert(clusterEndpoint); const user = process.env.CLUSTER_USER; assert(user); - + const region = process.env.REGION; + assert(region); + try { - client = await getConnection(clusterEndpoint, user); + + client = await getConnection(clusterEndpoint, user, region) let schema = user === ADMIN ? PUBLIC : NON_ADMIN_SCHEMA; // Note that due to connection pooling, we cannot execute 'set search_path=myschema' @@ -38,22 +66,22 @@ async function example() { )`; // Insert some data - await client`INSERT INTO ${client(schema)}.owner(name, city, telephone) VALUES('John Doe', 'Anytown', '555-555-0150')`; + await client`INSERT INTO ${client(schema)}.owner(name, city, telephone) VALUES('John Doe', 'Anytown', '555-555-0150')` // Check that data is inserted by reading it back - const result = - await client`SELECT id, city FROM ${client(schema)}.owner where name='John Doe'`; - assert.deepEqual(result[0].city, "Anytown"); - assert.notEqual(result[0].id, null); + const result = await client`SELECT id, city FROM ${client(schema)}.owner where name='John Doe'`; + assert.deepEqual(result[0].city, "Anytown") + assert.notEqual(result[0].id, null) // Delete data we just inserted - await client`DELETE FROM ${client(schema)}.owner where name='John Doe'`; + await client`DELETE FROM ${client(schema)}.owner where name='John Doe'` + } catch 
(error) { console.error(error); throw error; - } finally { + } finally { await client?.end(); } } -export { example }; +export { example } diff --git a/javascript/postgres-js/src/example_preferred.js b/javascript/postgres-js/src/example_preferred.js new file mode 100644 index 00000000..9de1c0d6 --- /dev/null +++ b/javascript/postgres-js/src/example_preferred.js @@ -0,0 +1,54 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +import assert from "node:assert"; +import { auroraDSQLPostgres } from "@aws/aurora-dsql-postgresjs-connector"; + +const NUM_CONCURRENT_QUERIES = 8; + +function createPooledConnection(clusterEndpoint, user) { + return auroraDSQLPostgres({ + host: clusterEndpoint, + user: user, + max: 10, // Connection pool size + idle_timeout: 30, // Idle connection timeout in seconds + connect_timeout: 10, // Connection timeout in seconds + }); +} + +async function worker(sql, workerId) { + const result = await sql`SELECT ${workerId}::int as worker_id`; + console.log(`Worker ${workerId} result: ${result[0].worker_id}`); + assert.strictEqual(result[0].worker_id, workerId); +} + +async function example() { + const clusterEndpoint = process.env.CLUSTER_ENDPOINT; + assert(clusterEndpoint, "CLUSTER_ENDPOINT environment variable is not set"); + const user = process.env.CLUSTER_USER; + assert(user, "CLUSTER_USER environment variable is not set"); + + const sql = createPooledConnection(clusterEndpoint, user); + + try { + // Run concurrent queries using the connection pool + const workers = []; + for (let i = 1; i <= NUM_CONCURRENT_QUERIES; i++) { + workers.push(worker(sql, i)); + } + + // Wait for all workers to complete + await Promise.all(workers); + + console.log("Connection pool with concurrent connections exercised successfully"); + } catch (error) { + console.error(error); + throw error; + } finally { + await sql.end(); + } +} + +export { example }; diff --git 
a/javascript/postgres-js/test/alternatives/no_connection_pool/example_with_no_connection_pool.test.js b/javascript/postgres-js/test/alternatives/no_connection_pool/example_with_no_connection_pool.test.js new file mode 100644 index 00000000..fcc1b6a8 --- /dev/null +++ b/javascript/postgres-js/test/alternatives/no_connection_pool/example_with_no_connection_pool.test.js @@ -0,0 +1,5 @@ +import { example } from '../../../src/alternatives/no_connection_pool/example_with_no_connection_pool.js'; + +test('Smoke test - example_with_no_connection_pool', async () => { + await example(); +}, 30000); diff --git a/javascript/postgres-js/test/alternatives/no_connection_pool/example_with_no_connector.test.js b/javascript/postgres-js/test/alternatives/no_connection_pool/example_with_no_connector.test.js new file mode 100644 index 00000000..e8287b33 --- /dev/null +++ b/javascript/postgres-js/test/alternatives/no_connection_pool/example_with_no_connector.test.js @@ -0,0 +1,5 @@ +import { example } from '../../../src/alternatives/no_connection_pool/example_with_no_connector.js'; + +test('Smoke test - example_with_no_connector', async () => { + await example(); +}, 30000); diff --git a/javascript/postgres-js/test/example_preferred.test.js b/javascript/postgres-js/test/example_preferred.test.js new file mode 100644 index 00000000..ff3580e7 --- /dev/null +++ b/javascript/postgres-js/test/example_preferred.test.js @@ -0,0 +1,5 @@ +import { example } from '../src/example_preferred.js'; + +test('Smoke test - example_preferred', async () => { + await example(); +}, 30000); diff --git a/javascript/postgres-js/test/smoke.test.js b/javascript/postgres-js/test/smoke.test.js deleted file mode 100644 index 84ed920d..00000000 --- a/javascript/postgres-js/test/smoke.test.js +++ /dev/null @@ -1,5 +0,0 @@ -import { example } from '../src/index.js'; - -test('Smoke test', async () => { - await example(); -}, 20000); diff --git a/python/psycopg/README.md b/python/psycopg/README.md index 
85986c42..ffd53a11 100644 --- a/python/psycopg/README.md +++ b/python/psycopg/README.md @@ -91,7 +91,7 @@ export CLUSTER_ENDPOINT="" Run the example: ```bash -python src/example.py +python src/example_preferred.py ``` The example contains comments explaining the code and the operations being performed. diff --git a/python/psycopg/requirements.txt b/python/psycopg/requirements.txt index b6724ef3..bccf8e31 100644 --- a/python/psycopg/requirements.txt +++ b/python/psycopg/requirements.txt @@ -1,3 +1,4 @@ aurora-dsql-python-connector psycopg[binary]>=3 +psycopg-pool>=3 pytest>=8 diff --git a/python/psycopg/src/alternatives/README.md b/python/psycopg/src/alternatives/README.md new file mode 100644 index 00000000..4dd77fb9 --- /dev/null +++ b/python/psycopg/src/alternatives/README.md @@ -0,0 +1,28 @@ +# Alternative Examples + +The recommended approach is `example_preferred.py` in the parent directory, which uses a concurrent connection pool with the Aurora DSQL connector. + +## Why Connection Pooling with the Connector? 
+ +Aurora DSQL has specific connection characteristics: +- **60-minute max connection lifetime** - connections are terminated after 1 hour +- **15-minute token expiry** - IAM auth tokens must be refreshed +- **Optimized for concurrency** - more concurrent connections with smaller batches yield better throughput + +The connector + pool combination handles this automatically: +- Generates fresh IAM tokens per connection +- Recycles connections before the 60-minute limit (via `max_lifetime < 3600`) +- Reuses warmed connections (2-24ms faster than new connections) + +## Alternatives + +### `pool/` +Other pool configurations using the connector: +- `example_with_async_connection_pool.py` - Async pool for asyncio applications +- `example_with_nonconcurrent_connection_pool.py` - Simple pool without concurrency + +### `no_connection_pool/` +Examples without pooling: +- `example_with_no_connection_pool.py` - Single connection with connector +- `example_async_with_no_connection_pool.py` - Async single connection with connector +- `example_with_no_connector.py` - SDK-only, for environments where the connector cannot be used (requires manual token management) diff --git a/python/psycopg/src/alternatives/no_connection_pool/example_async_with_no_connection_pool.py b/python/psycopg/src/alternatives/no_connection_pool/example_async_with_no_connection_pool.py new file mode 100644 index 00000000..19e1541b --- /dev/null +++ b/python/psycopg/src/alternatives/no_connection_pool/example_async_with_no_connection_pool.py @@ -0,0 +1,112 @@ +""" +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+SPDX-License-Identifier: Apache-2.0 +""" + +import os + +from psycopg import pq + +import aurora_dsql_psycopg as dsql + + +async def create_connection(cluster_user, cluster_endpoint, region): + + ssl_cert_path = "./root.pem" + if not os.path.isfile(ssl_cert_path): + raise FileNotFoundError(f"SSL certificate file not found: {ssl_cert_path}") + + conn_params = { + "dbname": "postgres", + "user": cluster_user, + "host": cluster_endpoint, + "port": "5432", + "region": region, + "sslmode": "verify-full", + "sslrootcert": ssl_cert_path, + } + + # Use the more efficient connection method if it's supported. + if pq.version() >= 170000: + conn_params["sslnegotiation"] = "direct" + + # Make a connection to the cluster + conn = await dsql.DSQLAsyncConnection.connect(**conn_params) + + if cluster_user == "admin": + schema = "public" + else: + schema = "myschema" + + try: + async with conn.cursor() as cur: + await cur.execute(f"SET search_path = {schema};") + await conn.commit() + except Exception as e: + await conn.close() + raise e + + return conn + + +async def exercise_connection(conn): + await conn.set_autocommit(True) + + async with conn.cursor() as cur: + await cur.execute( + """ + CREATE TABLE IF NOT EXISTS owner( + id uuid NOT NULL DEFAULT gen_random_uuid(), + name varchar(30) NOT NULL, + city varchar(80) NOT NULL, + telephone varchar(20) DEFAULT NULL, + PRIMARY KEY (id)) + """ + ) + + # Insert some rows + await cur.execute( + "INSERT INTO owner(name, city, telephone) VALUES('John Doe', 'Anytown', '555-555-1999')" + ) + + await cur.execute("SELECT * FROM owner WHERE name='John Doe'") + row = await cur.fetchone() + + # Verify the result we got is what we inserted before + assert row[0] is not None + assert row[1] == "John Doe" + assert row[2] == "Anytown" + assert row[3] == "555-555-1999" + + # Clean up the table after the example. 
If we run the example again + # we do not have to worry about data inserted by previous runs + await cur.execute("DELETE FROM owner where name = 'John Doe'") + + +async def main(): + conn = None + try: + cluster_user = os.environ.get("CLUSTER_USER", None) + assert cluster_user is not None, "CLUSTER_USER environment variable is not set" + + cluster_endpoint = os.environ.get("CLUSTER_ENDPOINT", None) + assert ( + cluster_endpoint is not None + ), "CLUSTER_ENDPOINT environment variable is not set" + + region = os.environ.get("REGION", None) + assert region is not None, "REGION environment variable is not set" + + conn = await create_connection(cluster_user, cluster_endpoint, region) + await exercise_connection(conn) + finally: + if conn is not None: + await conn.close() + + print("Connection exercised successfully") + + +if __name__ == "__main__": + import asyncio + + asyncio.run(main()) diff --git a/python/psycopg/src/alternatives/no_connection_pool/example_with_no_connection_pool.py b/python/psycopg/src/alternatives/no_connection_pool/example_with_no_connection_pool.py new file mode 100644 index 00000000..7a325030 --- /dev/null +++ b/python/psycopg/src/alternatives/no_connection_pool/example_with_no_connection_pool.py @@ -0,0 +1,111 @@ +""" +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +SPDX-License-Identifier: Apache-2.0 +""" + +import os + +from psycopg import pq + +import aurora_dsql_psycopg as dsql + + +def create_connection(cluster_user, cluster_endpoint, region): + + ssl_cert_path = "./root.pem" + if not os.path.isfile(ssl_cert_path): + raise FileNotFoundError(f"SSL certificate file not found: {ssl_cert_path}") + + conn_params = { + "dbname": "postgres", + "user": cluster_user, + "host": cluster_endpoint, + "port": "5432", + "region": region, + "sslmode": "verify-full", + "sslrootcert": ssl_cert_path, + } + + # Use the more efficient connection method if it's supported. 
+ if pq.version() >= 170000: + conn_params["sslnegotiation"] = "direct" + + # Make a connection to the cluster + conn = dsql.connect(**conn_params) + + if cluster_user == "admin": + schema = "public" + else: + schema = "myschema" + + try: + with conn.cursor() as cur: + cur.execute(f"SET search_path = {schema};") + conn.commit() + except Exception as e: + conn.close() + raise e + + return conn + + +def exercise_connection(conn): + conn.set_autocommit(True) + + cur = conn.cursor() + + cur.execute( + """ + CREATE TABLE IF NOT EXISTS owner( + id uuid NOT NULL DEFAULT gen_random_uuid(), + name varchar(30) NOT NULL, + city varchar(80) NOT NULL, + telephone varchar(20) DEFAULT NULL, + PRIMARY KEY (id)) + """ + ) + + # Insert some rows + cur.execute( + "INSERT INTO owner(name, city, telephone) VALUES('John Doe', 'Anytown', '555-555-1999')" + ) + + cur.execute("SELECT * FROM owner WHERE name='John Doe'") + row = cur.fetchone() + + # Verify the result we got is what we inserted before + assert row[0] is not None + assert row[1] == "John Doe" + assert row[2] == "Anytown" + assert row[3] == "555-555-1999" + + # Clean up the table after the example. 
If we run the example again + # we do not have to worry about data inserted by previous runs + cur.execute("DELETE FROM owner where name = 'John Doe'") + + +def main(): + conn = None + try: + cluster_user = os.environ.get("CLUSTER_USER", None) + assert cluster_user is not None, "CLUSTER_USER environment variable is not set" + + cluster_endpoint = os.environ.get("CLUSTER_ENDPOINT", None) + assert ( + cluster_endpoint is not None + ), "CLUSTER_ENDPOINT environment variable is not set" + + region = os.environ.get("REGION", None) + assert region is not None, "REGION environment variable is not set" + + conn = create_connection(cluster_user, cluster_endpoint, region) + exercise_connection(conn) + finally: + if conn is not None: + conn.close() + + print("Connection exercised successfully") + + +if __name__ == "__main__": + main() diff --git a/python/psycopg/src/example.py b/python/psycopg/src/alternatives/no_connection_pool/example_with_no_connector.py similarity index 57% rename from python/psycopg/src/example.py rename to python/psycopg/src/alternatives/no_connection_pool/example_with_no_connector.py index 487fac57..51b65022 100644 --- a/python/psycopg/src/example.py +++ b/python/psycopg/src/alternatives/no_connection_pool/example_with_no_connector.py @@ -1,12 +1,40 @@ -import aurora_dsql_psycopg as dsql +import boto3 +import psycopg import os +import sys +from psycopg import pq -def create_connection(cluster_user, cluster_endpoint): - conn = dsql.connect( - host=cluster_endpoint, - user=cluster_user, - ) +def create_connection(cluster_user, cluster_endpoint, region): + # Generate a fresh password token for each connection, to ensure the token is not expired + # when the connection is established + client = boto3.client("dsql", region_name=region) + + if cluster_user == "admin": + password_token = client.generate_db_connect_admin_auth_token(cluster_endpoint, region) + else: + password_token = client.generate_db_connect_auth_token(cluster_endpoint, region) + + 
ssl_cert_path = "./root.pem" + if not os.path.isfile(ssl_cert_path): + raise FileNotFoundError(f"SSL certificate file not found: {ssl_cert_path}") + + conn_params = { + "dbname": "postgres", + "user": cluster_user, + "host": cluster_endpoint, + "port": "5432", + "sslmode": "verify-full", + "sslrootcert": ssl_cert_path, + "password": password_token + } + + # Use the more efficient connection method if it's supported. + if pq.version() >= 170000: + conn_params["sslnegotiation"] = "direct" + + # Make a connection to the cluster + conn = psycopg.connect(**conn_params) if cluster_user == "admin": schema = "public" @@ -64,7 +92,10 @@ def main(): cluster_endpoint = os.environ.get("CLUSTER_ENDPOINT", None) assert cluster_endpoint is not None, "CLUSTER_ENDPOINT environment variable is not set" - conn = create_connection(cluster_user, cluster_endpoint) + region = os.environ.get("REGION", None) + assert region is not None, "REGION environment variable is not set" + + conn = create_connection(cluster_user, cluster_endpoint, region) exercise_connection(conn) finally: if conn is not None: diff --git a/python/psycopg/src/alternatives/pool/example_with_async_connection_pool.py b/python/psycopg/src/alternatives/pool/example_with_async_connection_pool.py new file mode 100644 index 00000000..47a66ecb --- /dev/null +++ b/python/psycopg/src/alternatives/pool/example_with_async_connection_pool.py @@ -0,0 +1,69 @@ +""" +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+SPDX-License-Identifier: Apache-2.0 +""" + +import os + +from psycopg_pool import AsyncConnectionPool as PsycopgPoolAsync + +import aurora_dsql_psycopg as dsql + + +async def connect_with_pool(cluster_user, cluster_endpoint, region): + + ssl_cert_path = "./root.pem" + if not os.path.isfile(ssl_cert_path): + raise FileNotFoundError(f"SSL certificate file not found: {ssl_cert_path}") + + conn_params = { + "dbname": "postgres", + "user": cluster_user, + "host": cluster_endpoint, + "port": "5432", + "region": region, + "sslmode": "verify-full", + "sslrootcert": ssl_cert_path, + } + + async with PsycopgPoolAsync( + "", + connection_class=dsql.DSQLAsyncConnection, + kwargs=conn_params, # Pass params as kwargs + min_size=2, + max_size=10, + max_lifetime=3300, + ) as pool: + + async with pool.connection() as conn: + async with conn.cursor() as cur: + await cur.execute("SELECT 1") + result = await cur.fetchone() + print(f"Query result: {result}") + assert result[0] == 1 + + +async def main(): + + try: + cluster_user = os.environ.get("CLUSTER_USER", None) + assert cluster_user is not None, "CLUSTER_USER environment variable is not set" + + cluster_endpoint = os.environ.get("CLUSTER_ENDPOINT", None) + assert ( + cluster_endpoint is not None + ), "CLUSTER_ENDPOINT environment variable is not set" + + region = os.environ.get("REGION", None) + assert region is not None, "REGION environment variable is not set" + await connect_with_pool(cluster_user, cluster_endpoint, region) + finally: + pass + + print("Async connection pool exercised successfully") + + +if __name__ == "__main__": + import asyncio + + asyncio.run(main()) diff --git a/python/psycopg/src/alternatives/pool/example_with_nonconcurrent_connection_pool.py b/python/psycopg/src/alternatives/pool/example_with_nonconcurrent_connection_pool.py new file mode 100644 index 00000000..c052726f --- /dev/null +++ b/python/psycopg/src/alternatives/pool/example_with_nonconcurrent_connection_pool.py @@ -0,0 +1,70 @@ +""" +Copyright 
Amazon.com, Inc. or its affiliates. All Rights Reserved. +SPDX-License-Identifier: Apache-2.0 +""" + +import os + +from psycopg_pool import ConnectionPool as PsycopgPool + +import aurora_dsql_psycopg as dsql + + +def connect_with_pool(cluster_user, cluster_endpoint, region): + + ssl_cert_path = "./root.pem" + if not os.path.isfile(ssl_cert_path): + raise FileNotFoundError(f"SSL certificate file not found: {ssl_cert_path}") + + conn_params = { + "dbname": "postgres", + "user": cluster_user, + "host": cluster_endpoint, + "port": "5432", + "region": region, + "sslmode": "verify-full", + "sslrootcert": ssl_cert_path, + } + + pool = PsycopgPool( + "", # Empty conninfo + connection_class=dsql.DSQLConnection, + kwargs=conn_params, # Pass params as kwargs + min_size=2, + max_size=8, + max_lifetime=3300, + ) + + # Use the pool as a context manager + with pool as p: + # Request a connection from the pool + with p.connection() as conn: + # Execute a query + with conn.cursor() as cur: + cur.execute("SELECT 1") + result = cur.fetchone() + print(f"Query result: {result}") + assert result[0] == 1 + + +def main(): + try: + cluster_user = os.environ.get("CLUSTER_USER", None) + assert cluster_user is not None, "CLUSTER_USER environment variable is not set" + + cluster_endpoint = os.environ.get("CLUSTER_ENDPOINT", None) + assert ( + cluster_endpoint is not None + ), "CLUSTER_ENDPOINT environment variable is not set" + + region = os.environ.get("REGION", None) + assert region is not None, "REGION environment variable is not set" + connect_with_pool(cluster_user, cluster_endpoint, region) + finally: + pass + + print("Connection pool exercised successfully") + + +if __name__ == "__main__": + main() diff --git a/python/psycopg/src/example_preferred.py b/python/psycopg/src/example_preferred.py new file mode 100644 index 00000000..0e4efefd --- /dev/null +++ b/python/psycopg/src/example_preferred.py @@ -0,0 +1,95 @@ +""" +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+SPDX-License-Identifier: Apache-2.0 +""" + +import os +import threading + +from psycopg_pool import ConnectionPool as PsycopgPool + +import aurora_dsql_psycopg as dsql + + +def connect_with_pool_concurrent_connections(cluster_user, cluster_endpoint, region): + + ssl_cert_path = "./root.pem" + if not os.path.isfile(ssl_cert_path): + raise FileNotFoundError(f"SSL certificate file not found: {ssl_cert_path}") + + conn_params = { + "dbname": "postgres", + "user": cluster_user, + "host": cluster_endpoint, + "port": "5432", + "region": region, + "sslmode": "verify-full", + "sslrootcert": ssl_cert_path, + } + + pool = PsycopgPool( + "", # Empty conninfo + connection_class=dsql.DSQLConnection, + kwargs=conn_params, # Pass params as kwargs + min_size=2, + max_size=8, + max_lifetime=3300, + open=True, + ) + + # Shared list to collect exceptions from worker threads + exceptions = [] + + def worker(thread_id): + try: + with pool.connection() as conn: + with conn.cursor() as cur: + cur.execute("SELECT %s", (thread_id,)) + result = cur.fetchone() + print(f"Thread {thread_id} result: {result}") + assert result[0] == thread_id + + except Exception as e: + print(f"Thread {thread_id} failed: {e}") + exceptions.append((thread_id, e)) # Store exception with thread ID + + NUM_THREADS = 8 + threads = [] + for i in range(NUM_THREADS): + thread = threading.Thread(target=worker, args=(i + 1,)) + threads.append(thread) + thread.start() + + # Wait for all threads + for thread in threads: + thread.join() + + # Check if any threads had exceptions + if exceptions: + print(f"Errors occurred in {len(exceptions)} threads:") + for thread_id, exc in exceptions: + print(f" Thread {thread_id}: {exc}") + raise RuntimeError(f"One or more worker threads failed: {exceptions}") + + +def main(): + try: + cluster_user = os.environ.get("CLUSTER_USER", None) + assert cluster_user is not None, "CLUSTER_USER environment variable is not set" + + cluster_endpoint = os.environ.get("CLUSTER_ENDPOINT", None) + 
assert ( + cluster_endpoint is not None + ), "CLUSTER_ENDPOINT environment variable is not set" + + region = os.environ.get("REGION", None) + assert region is not None, "REGION environment variable is not set" + connect_with_pool_concurrent_connections(cluster_user, cluster_endpoint, region) + finally: + pass + + print("Connection pool with concurrent connections exercised successfully") + + +if __name__ == "__main__": + main() diff --git a/python/psycopg/test/alternatives/no_connection_pool/test_example_async_with_no_connection_pool.py b/python/psycopg/test/alternatives/no_connection_pool/test_example_async_with_no_connection_pool.py new file mode 100644 index 00000000..b8383630 --- /dev/null +++ b/python/psycopg/test/alternatives/no_connection_pool/test_example_async_with_no_connection_pool.py @@ -0,0 +1,17 @@ +import sys +import os +import asyncio + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src', 'alternatives', 'no_connection_pool')) + +from example_async_with_no_connection_pool import main + +import pytest + + +def test_example_async_with_no_connection_pool(): + try: + asyncio.run(main()) + except Exception as e: + pytest.fail(f"Unexpected exception: {e}") diff --git a/python/psycopg/test/alternatives/no_connection_pool/test_example_with_no_connection_pool.py b/python/psycopg/test/alternatives/no_connection_pool/test_example_with_no_connection_pool.py new file mode 100644 index 00000000..60511dca --- /dev/null +++ b/python/psycopg/test/alternatives/no_connection_pool/test_example_with_no_connection_pool.py @@ -0,0 +1,16 @@ +import sys +import os + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src', 'alternatives', 'no_connection_pool')) + +from example_with_no_connection_pool import main + +import pytest + + +def test_example_with_no_connection_pool(): + try: + main() + except Exception as e: + 
pytest.fail(f"Unexpected exception: {e}") diff --git a/python/psycopg/test/alternatives/no_connection_pool/test_example_with_no_connector.py b/python/psycopg/test/alternatives/no_connection_pool/test_example_with_no_connector.py new file mode 100644 index 00000000..68b1019c --- /dev/null +++ b/python/psycopg/test/alternatives/no_connection_pool/test_example_with_no_connector.py @@ -0,0 +1,16 @@ +import sys +import os + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src', 'alternatives', 'no_connection_pool')) + +from example_with_no_connector import main + +import pytest + + +def test_example_with_no_connector(): + try: + main() + except Exception as e: + pytest.fail(f"Unexpected exception: {e}") diff --git a/python/psycopg/test/alternatives/pool/test_example_with_async_connection_pool.py b/python/psycopg/test/alternatives/pool/test_example_with_async_connection_pool.py new file mode 100644 index 00000000..1bbce5a3 --- /dev/null +++ b/python/psycopg/test/alternatives/pool/test_example_with_async_connection_pool.py @@ -0,0 +1,17 @@ +import sys +import os +import asyncio + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src', 'alternatives', 'pool')) + +from example_with_async_connection_pool import main + +import pytest + + +def test_example_with_async_connection_pool(): + try: + asyncio.run(main()) + except Exception as e: + pytest.fail(f"Unexpected exception: {e}") diff --git a/python/psycopg/test/alternatives/pool/test_example_with_nonconcurrent_connection_pool.py b/python/psycopg/test/alternatives/pool/test_example_with_nonconcurrent_connection_pool.py new file mode 100644 index 00000000..5d745c08 --- /dev/null +++ b/python/psycopg/test/alternatives/pool/test_example_with_nonconcurrent_connection_pool.py @@ -0,0 +1,16 @@ +import sys +import os + +# Add src directory to path for imports +sys.path.insert(0, 
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src', 'alternatives', 'pool')) + +from example_with_nonconcurrent_connection_pool import main + +import pytest + + +def test_example_with_nonconcurrent_connection_pool(): + try: + main() + except Exception as e: + pytest.fail(f"Unexpected exception: {e}") diff --git a/python/psycopg/test/test_example.py b/python/psycopg/test/test_example.py deleted file mode 100644 index c1ff694d..00000000 --- a/python/psycopg/test/test_example.py +++ /dev/null @@ -1,11 +0,0 @@ -from example import main - -import pytest - - -# Smoke tests that our example works fine -def test_example(): - try: - main() - except Exception as e: - pytest.fail(f"Unexpected exception: {e}") diff --git a/python/psycopg/test/test_example_preferred.py b/python/psycopg/test/test_example_preferred.py new file mode 100644 index 00000000..dd848ea6 --- /dev/null +++ b/python/psycopg/test/test_example_preferred.py @@ -0,0 +1,16 @@ +import sys +import os + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src')) + +from example_preferred import main + +import pytest + + +def test_example_preferred(): + try: + main() + except Exception as e: + pytest.fail(f"Unexpected exception: {e}") diff --git a/python/psycopg2/README.md b/python/psycopg2/README.md index c17cb159..173ab75f 100644 --- a/python/psycopg2/README.md +++ b/python/psycopg2/README.md @@ -91,7 +91,7 @@ export CLUSTER_ENDPOINT="" Run the example: ```bash -python src/example.py +python src/example_preferred.py ``` The example contains comments explaining the code and the operations being performed. 
diff --git a/python/psycopg2/src/alternatives/README.md b/python/psycopg2/src/alternatives/README.md new file mode 100644 index 00000000..16022d0d --- /dev/null +++ b/python/psycopg2/src/alternatives/README.md @@ -0,0 +1,26 @@ +# Alternative Examples + +The recommended approach is `example_preferred.py` in the parent directory, which uses a concurrent connection pool with the Aurora DSQL connector. + +## Why Connection Pooling with the Connector? + +Aurora DSQL has specific connection characteristics: +- **60-minute max connection lifetime** - connections are terminated after 1 hour +- **15-minute token expiry** - IAM auth tokens must be refreshed +- **Optimized for concurrency** - more concurrent connections with smaller batches yields better throughput + +The connector + pool combination handles this automatically: +- Generates fresh IAM tokens per connection +- Recycles connections before the 60-minute limit +- Reuses warmed connections (2-24ms faster than new connections) + +## Alternatives + +### `pool/` +Other pool configurations using the connector: +- `example_with_nonconcurrent_connection_pool.py` - Simple pool without concurrency + +### `no_connection_pool/` +Examples without pooling: +- `example_with_no_connection_pool.py` - Single connection with connector +- `example_with_no_connector.py` - SDK-only, for environments where the connector cannot be used (requires manual token management) diff --git a/python/psycopg2/src/alternatives/no_connection_pool/example_with_no_connection_pool.py b/python/psycopg2/src/alternatives/no_connection_pool/example_with_no_connection_pool.py new file mode 100644 index 00000000..d2f7a1c5 --- /dev/null +++ b/python/psycopg2/src/alternatives/no_connection_pool/example_with_no_connection_pool.py @@ -0,0 +1,110 @@ +""" +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+SPDX-License-Identifier: Apache-2.0 +""" + +import os + +import psycopg2 +import psycopg2.extensions + +import aurora_dsql_psycopg2 as dsql + + +def create_connection(cluster_user, cluster_endpoint, region): + + conn_params = { + "dbname": "postgres", + "user": cluster_user, + "host": cluster_endpoint, + "port": "5432", + "region": region, + "sslmode": "verify-full", + "sslrootcert": "./root.pem", + } + + # Use the more efficient connection method if it's supported. + if psycopg2.extensions.libpq_version() >= 170000: + conn_params["sslnegotiation"] = "direct" + + # Make a connection to the cluster + conn = dsql.connect(**conn_params) + + if cluster_user == "admin": + schema = "public" + else: + schema = "myschema" + + try: + with conn.cursor() as cur: + cur.execute(f"SET search_path = {schema};") + conn.commit() + except Exception as e: + conn.close() + raise e + + return conn + + +def exercise_connection(conn): + conn.set_session(autocommit=True) + + cur = conn.cursor() + + cur.execute("DROP TABLE IF EXISTS owner") + + cur.execute( + """ + CREATE TABLE IF NOT EXISTS owner( + id uuid NOT NULL DEFAULT gen_random_uuid(), + name varchar(30) NOT NULL, + city varchar(80) NOT NULL, + telephone varchar(20) DEFAULT NULL, + PRIMARY KEY (id)) + """ + ) + + # Insert some rows + cur.execute( + "INSERT INTO owner(name, city, telephone) VALUES('John Doe', 'Anytown', '555-555-1999')" + ) + + cur.execute("SELECT * FROM owner WHERE name='John Doe'") + row = cur.fetchone() + + # Verify the result we got is what we inserted before + assert row[0] is not None + assert row[1] == "John Doe" + assert row[2] == "Anytown" + assert row[3] == "555-555-1999" + + # Clean up the table after the example. 
If we run the example again + # we do not have to worry about data inserted by previous runs + cur.execute("DELETE FROM owner where name = 'John Doe'") + + +def main(): + conn = None + try: + cluster_user = os.environ.get("CLUSTER_USER", None) + assert cluster_user is not None, "CLUSTER_USER environment variable is not set" + + cluster_endpoint = os.environ.get("CLUSTER_ENDPOINT", None) + assert ( + cluster_endpoint is not None + ), "CLUSTER_ENDPOINT environment variable is not set" + + region = os.environ.get("REGION", None) + assert region is not None, "REGION environment variable is not set" + + conn = create_connection(cluster_user, cluster_endpoint, region) + exercise_connection(conn) + finally: + if conn is not None: + conn.close() + + print("Connection exercised successfully") + + +if __name__ == "__main__": + main() diff --git a/python/psycopg2/src/example.py b/python/psycopg2/src/alternatives/no_connection_pool/example_with_no_connector.py similarity index 59% rename from python/psycopg2/src/example.py rename to python/psycopg2/src/alternatives/no_connection_pool/example_with_no_connector.py index d36ddf13..d2069852 100644 --- a/python/psycopg2/src/example.py +++ b/python/psycopg2/src/alternatives/no_connection_pool/example_with_no_connector.py @@ -1,12 +1,36 @@ -import aurora_dsql_psycopg2 as dsql +import boto3 +import psycopg2 +import psycopg2.extensions import os +import sys -def create_connection(cluster_user, cluster_endpoint): - conn = dsql.connect( - host=cluster_endpoint, - user=cluster_user, - ) +def create_connection(cluster_user, cluster_endpoint, region): + # Generate a fresh password token for each connection, to ensure the token is not expired + # when the connection is established + client = boto3.client("dsql", region_name=region) + + if cluster_user == "admin": + password_token = client.generate_db_connect_admin_auth_token(cluster_endpoint, region) + else: + password_token = client.generate_db_connect_auth_token(cluster_endpoint, region) + 
+ conn_params = { + "dbname": "postgres", + "user": cluster_user, + "host": cluster_endpoint, + "port": "5432", + "sslmode": "verify-full", + "sslrootcert": "./root.pem", + "password": password_token + } + + # Use the more efficient connection method if it's supported. + if psycopg2.extensions.libpq_version() >= 170000: + conn_params["sslnegotiation"] = "direct" + + # Make a connection to the cluster + conn = psycopg2.connect(**conn_params) if cluster_user == "admin": schema = "public" @@ -64,7 +88,10 @@ def main(): cluster_endpoint = os.environ.get("CLUSTER_ENDPOINT", None) assert cluster_endpoint is not None, "CLUSTER_ENDPOINT environment variable is not set" - conn = create_connection(cluster_user, cluster_endpoint) + region = os.environ.get("REGION", None) + assert region is not None, "REGION environment variable is not set" + + conn = create_connection(cluster_user, cluster_endpoint, region) exercise_connection(conn) finally: if conn is not None: diff --git a/python/psycopg2/src/alternatives/pool/example_with_nonconcurrent_connection_pool.py b/python/psycopg2/src/alternatives/pool/example_with_nonconcurrent_connection_pool.py new file mode 100644 index 00000000..f396552d --- /dev/null +++ b/python/psycopg2/src/alternatives/pool/example_with_nonconcurrent_connection_pool.py @@ -0,0 +1,69 @@ +""" +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+SPDX-License-Identifier: Apache-2.0 +""" + +import os + +import aurora_dsql_psycopg2 as dsql + + +def connect_with_pool(cluster_user, cluster_endpoint, region): + + ssl_cert_path = "./root.pem" + if not os.path.isfile(ssl_cert_path): + raise FileNotFoundError(f"SSL certificate file not found: {ssl_cert_path}") + + conn_params = { + "dbname": "postgres", + "user": cluster_user, + "host": cluster_endpoint, + "port": "5432", + "region": region, + "sslmode": "verify-full", + "sslrootcert": ssl_cert_path, + } + + pool = dsql.AuroraDSQLThreadedConnectionPool( + minconn=2, + maxconn=8, + **conn_params, + ) + + # Use the pool as a context manager + with pool as p: + # Request a connection from the pool + conn = p.getconn() + try: + # Execute a query + with conn.cursor() as cur: + cur.execute("SELECT 1") + result = cur.fetchone() + print(f"Query result: {result}") + assert result[0] == 1 + finally: + # Return connection to pool + p.putconn(conn) + + +def main(): + try: + cluster_user = os.environ.get("CLUSTER_USER", None) + assert cluster_user is not None, "CLUSTER_USER environment variable is not set" + + cluster_endpoint = os.environ.get("CLUSTER_ENDPOINT", None) + assert ( + cluster_endpoint is not None + ), "CLUSTER_ENDPOINT environment variable is not set" + + region = os.environ.get("REGION", None) + assert region is not None, "REGION environment variable is not set" + connect_with_pool(cluster_user, cluster_endpoint, region) + finally: + pass + + print("Connection pool exercised successfully") + + +if __name__ == "__main__": + main() diff --git a/python/psycopg2/src/example_preferred.py b/python/psycopg2/src/example_preferred.py new file mode 100644 index 00000000..70e0b222 --- /dev/null +++ b/python/psycopg2/src/example_preferred.py @@ -0,0 +1,91 @@ +""" +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+SPDX-License-Identifier: Apache-2.0 +""" + +import os +import threading + +import aurora_dsql_psycopg2 as dsql + + +def connect_with_pool_concurrent_connections(cluster_user, cluster_endpoint, region): + + ssl_cert_path = "./root.pem" + if not os.path.isfile(ssl_cert_path): + raise FileNotFoundError(f"SSL certificate file not found: {ssl_cert_path}") + + conn_params = { + "dbname": "postgres", + "user": cluster_user, + "host": cluster_endpoint, + "port": "5432", + "region": region, + "sslmode": "verify-full", + "sslrootcert": ssl_cert_path, + } + + pool = dsql.AuroraDSQLThreadedConnectionPool( + minconn=2, + maxconn=8, + **conn_params, + ) + + # Shared list to collect exceptions from worker threads + exceptions = [] + + def worker(thread_id): + try: + conn = pool.getconn() + try: + with conn.cursor() as cur: + cur.execute("SELECT %s", (thread_id,)) + result = cur.fetchone() + print(f"Thread {thread_id} result: {result}") + assert result[0] == thread_id + finally: + pool.putconn(conn) + except Exception as e: + print(f"Thread {thread_id} failed: {e}") + exceptions.append((thread_id, e)) # Store exception with thread ID + + NUM_THREADS = 8 + threads = [] + for i in range(NUM_THREADS): + thread = threading.Thread(target=worker, args=(i + 1,)) + threads.append(thread) + thread.start() + + # Wait for all threads + for thread in threads: + thread.join() + + # Check if any threads had exceptions + if exceptions: + print(f"Errors occurred in {len(exceptions)} threads:") + for thread_id, exc in exceptions: + print(f" Thread {thread_id}: {exc}") + raise RuntimeError(f"One or more worker threads failed: {exceptions}") + + +def main(): + try: + cluster_user = os.environ.get("CLUSTER_USER", None) + assert cluster_user is not None, "CLUSTER_USER environment variable is not set" + + cluster_endpoint = os.environ.get("CLUSTER_ENDPOINT", None) + assert ( + cluster_endpoint is not None + ), "CLUSTER_ENDPOINT environment variable is not set" + + region = os.environ.get("REGION", 
None) + assert region is not None, "REGION environment variable is not set" + connect_with_pool_concurrent_connections(cluster_user, cluster_endpoint, region) + finally: + pass + + print("Connection pool with concurrent connections exercised successfully") + + +if __name__ == "__main__": + main() diff --git a/python/psycopg2/test/alternatives/no_connection_pool/test_example_with_no_connection_pool.py b/python/psycopg2/test/alternatives/no_connection_pool/test_example_with_no_connection_pool.py new file mode 100644 index 00000000..60511dca --- /dev/null +++ b/python/psycopg2/test/alternatives/no_connection_pool/test_example_with_no_connection_pool.py @@ -0,0 +1,16 @@ +import sys +import os + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src', 'alternatives', 'no_connection_pool')) + +from example_with_no_connection_pool import main + +import pytest + + +def test_example_with_no_connection_pool(): + try: + main() + except Exception as e: + pytest.fail(f"Unexpected exception: {e}") diff --git a/python/psycopg2/test/alternatives/no_connection_pool/test_example_with_no_connector.py b/python/psycopg2/test/alternatives/no_connection_pool/test_example_with_no_connector.py new file mode 100644 index 00000000..68b1019c --- /dev/null +++ b/python/psycopg2/test/alternatives/no_connection_pool/test_example_with_no_connector.py @@ -0,0 +1,16 @@ +import sys +import os + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src', 'alternatives', 'no_connection_pool')) + +from example_with_no_connector import main + +import pytest + + +def test_example_with_no_connector(): + try: + main() + except Exception as e: + pytest.fail(f"Unexpected exception: {e}") diff --git a/python/psycopg2/test/alternatives/pool/test_example_with_nonconcurrent_connection_pool.py 
b/python/psycopg2/test/alternatives/pool/test_example_with_nonconcurrent_connection_pool.py new file mode 100644 index 00000000..5d745c08 --- /dev/null +++ b/python/psycopg2/test/alternatives/pool/test_example_with_nonconcurrent_connection_pool.py @@ -0,0 +1,16 @@ +import sys +import os + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src', 'alternatives', 'pool')) + +from example_with_nonconcurrent_connection_pool import main + +import pytest + + +def test_example_with_nonconcurrent_connection_pool(): + try: + main() + except Exception as e: + pytest.fail(f"Unexpected exception: {e}") diff --git a/python/psycopg2/test/test_example.py b/python/psycopg2/test/test_example.py deleted file mode 100644 index c1ff694d..00000000 --- a/python/psycopg2/test/test_example.py +++ /dev/null @@ -1,11 +0,0 @@ -from example import main - -import pytest - - -# Smoke tests that our example works fine -def test_example(): - try: - main() - except Exception as e: - pytest.fail(f"Unexpected exception: {e}") diff --git a/python/psycopg2/test/test_example_preferred.py b/python/psycopg2/test/test_example_preferred.py new file mode 100644 index 00000000..dd848ea6 --- /dev/null +++ b/python/psycopg2/test/test_example_preferred.py @@ -0,0 +1,16 @@ +import sys +import os + +# Add src directory to path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src')) + +from example_preferred import main + +import pytest + + +def test_example_preferred(): + try: + main() + except Exception as e: + pytest.fail(f"Unexpected exception: {e}")