diff --git a/.github/workflows/flow-lcp-ab-test.yml b/.github/workflows/flow-lcp-ab-test.yml
new file mode 100644
index 0000000..0643581
--- /dev/null
+++ b/.github/workflows/flow-lcp-ab-test.yml
@@ -0,0 +1,132 @@
+name: Flow - LCP A/B Test Scenario
+
+on:
+ schedule:
+ # Run every 6 hours
+ - cron: '0 */6 * * *'
+ workflow_dispatch: # Allow manual trigger
+
+jobs:
+ lcp-ab-test:
+ runs-on: ubuntu-latest
+ environment: events
+
+ steps:
+ - name: Enable LCP slowness for 50% of users
+ run: |
+ echo "Enabling LCP slowness A/B test (50% of users, 3000ms delay)..."
+ curl -X POST "${{ vars.BASE_URL }}/scenario-runner/api/ab-testing/lcp-slowness?enabled=true&percentage=50.0&delay_ms=3000" \
+ --fail-with-body || {
+ echo "Failed to enable LCP slowness scenario"
+ exit 1
+ }
+
+ - name: Verify scenario is enabled
+ run: |
+ echo "Verifying LCP slowness scenario configuration..."
+ response=$(curl -s "${{ vars.BASE_URL }}/scenario-runner/api/ab-testing/config")
+ echo "Response: $response"
+
+          enabled=$(echo "$response" | jq -r '.config.lcp_slowness_percentage_enabled')
+          percentage=$(echo "$response" | jq -r '.config.lcp_slowness_percentage')
+          delay=$(echo "$response" | jq -r '.config.lcp_slowness_percentage_delay_ms')
+
+ if [ "$enabled" != "true" ]; then
+ echo "ERROR: LCP slowness not enabled"
+ exit 1
+ fi
+
+ echo "✓ LCP slowness enabled: $percentage% of users with ${delay}ms delay"
+
+ - name: Create deployment marker (A/B test enabled)
+ env:
+ NEW_RELIC_API_KEY: ${{ secrets.NEW_RELIC_API_KEY }}
+ NEW_RELIC_APP_ID: ${{ secrets.NEW_RELIC_BROWSER_APP_ID }}
+ run: |
+ echo "Creating deployment marker in New Relic..."
+ curl -X POST "https://api.newrelic.com/v2/applications/${NEW_RELIC_APP_ID}/deployments.json" \
+ -H "Api-Key: ${NEW_RELIC_API_KEY}" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "deployment": {
+ "revision": "LCP-AB-Test-Enabled-50%",
+ "description": "Feature flag: LCP slowness enabled for 50% of users (3000ms delay)",
+ "user": "GitHub Actions",
+ "changelog": "A/B test started to measure LCP impact"
+ }
+ }' || echo "Warning: Could not create deployment marker"
+
+ - name: Wait for data collection (20 minutes)
+ run: |
+ echo "Collecting data for 20 minutes..."
+ echo "Monitor LCP metrics at: https://one.newrelic.com/"
+ echo ""
+ echo "NRQL Query to see A/B test impact:"
+ echo "SELECT percentile(largestContentfulPaint, 50, 75, 95) as 'LCP (ms)', count(*) as 'Sessions'"
+ echo "FROM PageViewTiming"
+ echo "WHERE appName = 'ReliBank Frontend' AND pageUrl LIKE '%/dashboard%'"
+          echo "FACET lcp_treatment TIMESERIES"
+ echo ""
+ sleep 1200 # 20 minutes
+
+ - name: Disable LCP slowness
+ run: |
+ echo "Disabling LCP slowness A/B test..."
+ curl -X POST "${{ vars.BASE_URL }}/scenario-runner/api/ab-testing/lcp-slowness?enabled=false" \
+ --fail-with-body || {
+ echo "Warning: Failed to disable LCP slowness scenario"
+ }
+
+ - name: Verify scenario is disabled
+ run: |
+ echo "Verifying LCP slowness scenario is disabled..."
+ response=$(curl -s "${{ vars.BASE_URL }}/scenario-runner/api/ab-testing/config")
+          enabled=$(echo "$response" | jq -r '.config.lcp_slowness_percentage_enabled')
+
+ if [ "$enabled" == "true" ]; then
+ echo "WARNING: LCP slowness still enabled!"
+ exit 1
+ fi
+
+ echo "✓ LCP slowness disabled successfully"
+
+ - name: Create rollback marker (A/B test disabled)
+ env:
+ NEW_RELIC_API_KEY: ${{ secrets.NEW_RELIC_API_KEY }}
+ NEW_RELIC_APP_ID: ${{ secrets.NEW_RELIC_BROWSER_APP_ID }}
+ run: |
+ echo "Creating rollback marker in New Relic..."
+ curl -X POST "https://api.newrelic.com/v2/applications/${NEW_RELIC_APP_ID}/deployments.json" \
+ -H "Api-Key: ${NEW_RELIC_API_KEY}" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "deployment": {
+ "revision": "LCP-AB-Test-Disabled",
+ "description": "Rollback: LCP slowness disabled, all users back to normal",
+ "user": "GitHub Actions",
+ "changelog": "A/B test completed"
+ }
+ }' || echo "Warning: Could not create rollback marker"
+
+ - name: Test Summary
+ if: always()
+ run: |
+ echo "## LCP A/B Test Summary" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "- **Duration**: 20 minutes" >> $GITHUB_STEP_SUMMARY
+ echo "- **Cohort Split**: 50% slow / 50% normal" >> $GITHUB_STEP_SUMMARY
+ echo "- **LCP Delay**: 3000ms for slow cohort" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### Analysis" >> $GITHUB_STEP_SUMMARY
+ echo "Review LCP metrics in New Relic to compare:" >> $GITHUB_STEP_SUMMARY
+ echo "- \`lcp_treatment = normal\` (control group)" >> $GITHUB_STEP_SUMMARY
+ echo "- \`lcp_treatment = slow\` (test group with 3s delay)" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "### NRQL Query" >> $GITHUB_STEP_SUMMARY
+ echo '```sql' >> $GITHUB_STEP_SUMMARY
+ echo 'SELECT percentile(largestContentfulPaint, 50, 75, 95) as "LCP (ms)", count(*) as "Sessions"' >> $GITHUB_STEP_SUMMARY
+ echo 'FROM PageViewTiming' >> $GITHUB_STEP_SUMMARY
+ echo "WHERE appName = 'ReliBank Frontend' AND pageUrl LIKE '%/dashboard%'" >> $GITHUB_STEP_SUMMARY
+          echo 'FACET lcp_treatment' >> $GITHUB_STEP_SUMMARY
+ echo 'SINCE 30 minutes ago' >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml
index d1c6c70..5a5feee 100644
--- a/.github/workflows/test-suite.yml
+++ b/.github/workflows/test-suite.yml
@@ -59,9 +59,9 @@ jobs:
run: |
cd tests
# Run scenario tests sequentially to avoid race conditions
- pytest test_scenario_service.py test_payment_scenarios.py --tb=line --timeout=300
+ pytest test_scenario_service.py test_payment_scenarios.py test_ab_testing_scenarios.py --tb=line --timeout=300
# Run other tests in parallel
- pytest . --ignore=test_scenario_service.py --ignore=test_payment_scenarios.py -n auto --tb=line --timeout=300
+ pytest . --ignore=test_scenario_service.py --ignore=test_payment_scenarios.py --ignore=test_ab_testing_scenarios.py -n auto --tb=line --timeout=300
- name: Run end-to-end tests only
if: ${{ github.event.inputs.test_suite == 'e2e' }}
@@ -135,6 +135,7 @@ jobs:
test-summary:
runs-on: ubuntu-latest
+ environment: events
needs: [python-tests, frontend-tests]
if: always()
diff --git a/accounts_service/accounts_service.py b/accounts_service/accounts_service.py
index f259e4f..6c90c11 100644
--- a/accounts_service/accounts_service.py
+++ b/accounts_service/accounts_service.py
@@ -3,6 +3,7 @@
import sys
import json
import logging
+import hashlib
import psycopg2
from psycopg2 import extras, pool
from pydantic import BaseModel, Field
@@ -42,6 +43,29 @@ def get_propagation_headers(request: Request) -> dict:
return headers_to_propagate
+async def get_ab_test_config():
+ """Fetch A/B test configuration from scenario service"""
+ try:
+ async with httpx.AsyncClient() as client:
+ response = await client.get(
+ f"{SCENARIO_SERVICE_URL}/scenario-runner/api/ab-testing/config",
+ timeout=2.0
+ )
+ if response.status_code == 200:
+ data = response.json()
+ return data.get("config", {})
+ except Exception as e:
+ logging.debug(f"Could not fetch A/B test config: {e}")
+
+ # Return defaults if scenario service unavailable
+ return {
+ "lcp_slowness_percentage_enabled": False,
+ "lcp_slowness_percentage": 0.0,
+ "lcp_slowness_percentage_delay_ms": 0,
+ "lcp_slowness_cohort_enabled": False,
+ "lcp_slowness_cohort_delay_ms": 0
+ }
+
# Database connection details from environment variables
DB_HOST = os.getenv("DB_HOST", "accounts-db")
DB_NAME = os.getenv("DB_NAME", "accountsdb")
@@ -54,6 +78,25 @@ def get_propagation_headers(request: Request) -> dict:
# if not, default to local development variables
TRANSACTION_SERVICE_URL = f"http://{os.getenv("TRANSACTION_SERVICE_SERVICE_HOST", "transaction-service")}:{os.getenv("TRANSACTION_SERVICE_SERVICE_PORT", "5001")}"
+# Scenario service API URL
+SCENARIO_SERVICE_URL = f"http://{os.getenv("SCENARIO_RUNNER_SERVICE_SERVICE_HOST", "scenario-runner-service")}:{os.getenv("SCENARIO_RUNNER_SERVICE_SERVICE_PORT", "8000")}"
+
+# Hardcoded list of 11 test users for LCP slowness A/B testing
+# These users will experience LCP delays when the scenario is enabled
+LCP_SLOW_USERS = {
+ 'b2a5c9f1-3d7f-4b0d-9a8c-9c7b5a1f2e4d', # Alice Johnson
+ 'f5e8d1c6-2a9b-4c3e-8f1a-6e5b0d2c9f1a', # Bob Williams
+ 'e1f2b3c4-5d6a-7e8f-9a0b-1c2d3e4f5a6b', # Charlie Brown
+ 'f47ac10b-58cc-4372-a567-0e02b2c3d471', # Solaire Astora
+ 'd9b1e2a3-f4c5-4d6e-8f7a-9b0c1d2e3f4a', # Malenia Miquella
+ '8c7d6e5f-4a3b-2c1d-0e9f-8a7b6c5d4e3f', # Artorias Abyss
+ '7f6e5d4c-3b2a-1c0d-9e8f-7a6b5c4d3e2f', # Priscilla Painted
+ '6e5d4c3b-2a1c-0d9e-8f7a-6b5c4d3e2f1a', # Gwyn Cinder
+ '5d4c3b2a-1c0d-9e8f-7a6b-5c4d3e2f1a0b', # Siegmeyer Catarina
+ '4c3b2a1c-0d9e-8f7a-6b5c-4d3e2f1a0b9c', # Ornstein Dragon
+ '3b2a1c0d-9e8f-7a6b-5c4d-3e2f1a0b9c8d', # Smough Executioner
+}
+
# Global connection pool
connection_pool = None
@@ -463,21 +506,46 @@ async def get_browser_user(request: Request):
with conn.cursor(cursor_factory=extras.RealDictCursor) as cursor:
if browser_user_id:
- # Validate UUID format before querying database
+ # Validate UUID format
try:
import uuid
uuid.UUID(browser_user_id) # Validate UUID format
- # Query database for this user ID
- cursor.execute("SELECT id FROM user_account WHERE id = %s", (browser_user_id,))
- user = cursor.fetchone()
- if user:
- logging.info(f"[Browser User] Using header-provided user ID: {browser_user_id}")
- return {"user_id": browser_user_id, "source": "header"}
+ # Accept the header user ID for A/B testing even if not in database
+ # This allows deterministic cohort assignment for testing and analytics
+ logging.info(f"[Browser User] Using header-provided user ID: {browser_user_id}")
+
+ # Fetch A/B test config and assign LCP slowness cohort
+ ab_config = await get_ab_test_config()
+ lcp_delay_ms = 0
+
+ # Check percentage-based scenario first
+ if ab_config.get("lcp_slowness_percentage_enabled"):
+ percentage = ab_config.get("lcp_slowness_percentage", 0.0)
+ # Deterministically assign cohort based on user_id hash
+ user_hash = int(hashlib.md5(browser_user_id.encode()).hexdigest(), 16)
+ if (user_hash % 100) < percentage:
+ lcp_delay_ms = ab_config.get("lcp_slowness_percentage_delay_ms", 0)
+ logging.info(f"[Browser User] User {browser_user_id} assigned to SLOW LCP cohort via PERCENTAGE ({lcp_delay_ms}ms delay)")
+
+ # Check cohort-based scenario (can override percentage if both enabled)
+ elif ab_config.get("lcp_slowness_cohort_enabled"):
+ # Check if user is in the hardcoded slow cohort (11 test users)
+ if browser_user_id in LCP_SLOW_USERS:
+ lcp_delay_ms = ab_config.get("lcp_slowness_cohort_delay_ms", 0)
+ logging.info(f"[Browser User] User {browser_user_id} assigned to SLOW LCP cohort via COHORT ({lcp_delay_ms}ms delay)")
+ else:
+ logging.info(f"[Browser User] User {browser_user_id} assigned to NORMAL LCP cohort")
else:
- logging.warning(f"[Browser User] Header-provided ID {browser_user_id} not found in database, falling back to random")
+ logging.info(f"[Browser User] User {browser_user_id} assigned to NORMAL LCP cohort (no scenarios enabled)")
+
+ return {
+ "user_id": browser_user_id,
+ "source": "header",
+ "lcp_delay_ms": lcp_delay_ms
+ }
except (ValueError, Exception) as e:
- logging.warning(f"[Browser User] Invalid UUID format or database error: {e}, falling back to random")
+ logging.warning(f"[Browser User] Invalid UUID format: {e}, falling back to random")
# Fall back to random selection
cursor.execute("SELECT id FROM user_account ORDER BY RANDOM() LIMIT 1")
@@ -491,7 +559,36 @@ async def get_browser_user(request: Request):
user_id = random_user["id"]
logging.info(f"[Browser User] Randomly selected user ID: {user_id}")
- return {"user_id": user_id, "source": "random"}
+
+ # Fetch A/B test config and assign LCP slowness cohort
+ ab_config = await get_ab_test_config()
+ lcp_delay_ms = 0
+
+ # Check percentage-based scenario first
+ if ab_config.get("lcp_slowness_percentage_enabled"):
+ percentage = ab_config.get("lcp_slowness_percentage", 0.0)
+ # Deterministically assign cohort based on user_id hash
+ user_hash = int(hashlib.md5(user_id.encode()).hexdigest(), 16)
+ if (user_hash % 100) < percentage:
+ lcp_delay_ms = ab_config.get("lcp_slowness_percentage_delay_ms", 0)
+ logging.info(f"[Browser User] User {user_id} assigned to SLOW LCP cohort via PERCENTAGE ({lcp_delay_ms}ms delay)")
+
+ # Check cohort-based scenario (can override percentage if both enabled)
+ elif ab_config.get("lcp_slowness_cohort_enabled"):
+ # Check if user is in the hardcoded slow cohort (11 test users)
+ if user_id in LCP_SLOW_USERS:
+ lcp_delay_ms = ab_config.get("lcp_slowness_cohort_delay_ms", 0)
+ logging.info(f"[Browser User] User {user_id} assigned to SLOW LCP cohort via COHORT ({lcp_delay_ms}ms delay)")
+ else:
+ logging.info(f"[Browser User] User {user_id} assigned to NORMAL LCP cohort")
+ else:
+ logging.info(f"[Browser User] User {user_id} assigned to NORMAL LCP cohort (no scenarios enabled)")
+
+ return {
+ "user_id": user_id,
+ "source": "random",
+ "lcp_delay_ms": lcp_delay_ms
+ }
except HTTPException:
raise
diff --git a/frontend_service/app/root.tsx b/frontend_service/app/root.tsx
index ce05906..5db66d6 100644
--- a/frontend_service/app/root.tsx
+++ b/frontend_service/app/root.tsx
@@ -119,13 +119,16 @@ export function Layout({ children }: { children: React.ReactNode }) {
if (typeof window !== 'undefined' && isHydrated) {
// Check sessionStorage first
const storedUserId = sessionStorage.getItem('browserUserId');
- if (storedUserId) {
+ const storedLcpDelay = sessionStorage.getItem('lcpDelayMs');
+
+ if (storedUserId && storedLcpDelay !== null) {
console.log('[Browser User] Loaded from sessionStorage:', storedUserId);
+ console.log('[Browser User] LCP Delay from sessionStorage:', storedLcpDelay + 'ms');
setBrowserUserId(storedUserId);
return;
}
- // Fetch from API
+ // Fetch from API (either no user ID or no LCP delay stored)
const fetchBrowserUserId = async () => {
try {
const response = await fetch('/accounts-service/browser-user');
@@ -134,6 +137,17 @@ export function Layout({ children }: { children: React.ReactNode }) {
console.log(`[Browser User] Received: ${data.user_id} Source: ${data.source}`);
setBrowserUserId(data.user_id);
sessionStorage.setItem('browserUserId', data.user_id);
+
+ // Store LCP delay for A/B testing
+ const lcpDelay = data.lcp_delay_ms || 0;
+ sessionStorage.setItem('lcpDelayMs', lcpDelay.toString());
+ console.log(`[Browser User] LCP Delay: ${lcpDelay}ms`);
+
+ // Set New Relic custom attribute for A/B test cohort tracking
+ if (window.newrelic && typeof window.newrelic.setCustomAttribute === 'function') {
+ window.newrelic.setCustomAttribute('lcp_delay_ms', lcpDelay);
+ window.newrelic.setCustomAttribute('lcp_treatment', lcpDelay > 0 ? 'slow' : 'normal');
+ }
} else {
console.error('[Browser User] Failed to fetch user ID:', response.status);
}
diff --git a/frontend_service/app/routes/dashboard.tsx b/frontend_service/app/routes/dashboard.tsx
index 71c0ac4..5cfc1ec 100644
--- a/frontend_service/app/routes/dashboard.tsx
+++ b/frontend_service/app/routes/dashboard.tsx
@@ -224,6 +224,24 @@ const DashboardPage = () => {
const [transactions, setTransactions] = useState(mockTransactions);
// NEW: Loading state for the additional, client-side fetch
const [isLoadingDetails, setIsLoadingDetails] = useState(false);
+ // A/B Test: Loading state for LCP delay (delay content, not entire page)
+ const [isLcpContentReady, setIsLcpContentReady] = useState(false);
+
+ // A/B Test: Apply LCP delay if user is in slow cohort
+ useEffect(() => {
+ const applyLcpDelay = async () => {
+      const lcpDelay = parseInt(sessionStorage.getItem('lcpDelayMs') || '0', 10);
+
+ if (lcpDelay > 0) {
+ console.log(`[A/B Test] Applying ${lcpDelay}ms LCP delay for slow cohort`);
+ await new Promise(resolve => setTimeout(resolve, lcpDelay));
+ }
+
+ setIsLcpContentReady(true);
+ };
+
+ applyLcpDelay();
+ }, []);
// 2. Secondary Fetch: Get additional account details after initial data is set
useEffect(() => {
@@ -276,7 +294,7 @@ const DashboardPage = () => {
stackedBarData: mockStackedBarData,
};
- if (!userData) {
+ if (!userData) {
return
Trigger chaos and load testing scenarios.
-Toggle payment failure scenarios for card transactions.
-