Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
132 changes: 132 additions & 0 deletions .github/workflows/flow-lcp-ab-test.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
name: Flow - LCP A/B Test Scenario

# Least-privilege token: this workflow only calls external HTTP APIs and
# writes to $GITHUB_STEP_SUMMARY, so the GITHUB_TOKEN needs no write scopes.
# (Fixes CodeQL "Workflow does not contain permissions".)
permissions:
  contents: read

on:
  schedule:
    # Run every 6 hours
    - cron: '0 */6 * * *'
  workflow_dispatch: # Allow manual trigger

jobs:
  lcp-ab-test:
    runs-on: ubuntu-latest
    environment: events

    steps:
      - name: Enable LCP slowness for 50% of users
        run: |
          echo "Enabling LCP slowness A/B test (50% of users, 3000ms delay)..."
          curl -X POST "${{ vars.BASE_URL }}/scenario-runner/api/ab-testing/lcp-slowness?enabled=true&percentage=50.0&delay_ms=3000" \
            --fail-with-body || {
            echo "Failed to enable LCP slowness scenario"
            exit 1
          }

      - name: Verify scenario is enabled
        run: |
          echo "Verifying LCP slowness scenario configuration..."
          response=$(curl -s "${{ vars.BASE_URL }}/scenario-runner/api/ab-testing/config")
          echo "Response: $response"

          # Quote "$response" so jq receives the JSON intact (no word splitting/globbing).
          enabled=$(echo "$response" | jq -r '.config.lcp_slowness_enabled')
          percentage=$(echo "$response" | jq -r '.config.lcp_slowness_percentage')
          delay=$(echo "$response" | jq -r '.config.lcp_slowness_delay_ms')

          if [ "$enabled" != "true" ]; then
            echo "ERROR: LCP slowness not enabled"
            exit 1
          fi

          echo "✓ LCP slowness enabled: $percentage% of users with ${delay}ms delay"

      - name: Create deployment marker (A/B test enabled)
        env:
          NEW_RELIC_API_KEY: ${{ secrets.NEW_RELIC_API_KEY }}
          NEW_RELIC_APP_ID: ${{ secrets.NEW_RELIC_BROWSER_APP_ID }}
        run: |
          echo "Creating deployment marker in New Relic..."
          curl -X POST "https://api.newrelic.com/v2/applications/${NEW_RELIC_APP_ID}/deployments.json" \
            -H "Api-Key: ${NEW_RELIC_API_KEY}" \
            -H "Content-Type: application/json" \
            -d '{
              "deployment": {
                "revision": "LCP-AB-Test-Enabled-50%",
                "description": "Feature flag: LCP slowness enabled for 50% of users (3000ms delay)",
                "user": "GitHub Actions",
                "changelog": "A/B test started to measure LCP impact"
              }
            }' || echo "Warning: Could not create deployment marker"

      - name: Wait for data collection (20 minutes)
        run: |
          echo "Collecting data for 20 minutes..."
          echo "Monitor LCP metrics at: https://one.newrelic.com/"
          echo ""
          echo "NRQL Query to see A/B test impact:"
          echo "SELECT percentile(largestContentfulPaint, 50, 75, 95) as 'LCP (ms)', count(*) as 'Sessions'"
          echo "FROM PageViewTiming"
          echo "WHERE appName = 'ReliBank Frontend' AND pageUrl LIKE '%/dashboard%'"
          echo "FACET custom.lcp_treatment TIMESERIES"
          echo ""
          sleep 1200 # 20 minutes

      - name: Disable LCP slowness
        run: |
          echo "Disabling LCP slowness A/B test..."
          # Intentionally non-fatal: the verify step below is the authoritative check.
          curl -X POST "${{ vars.BASE_URL }}/scenario-runner/api/ab-testing/lcp-slowness?enabled=false" \
            --fail-with-body || {
            echo "Warning: Failed to disable LCP slowness scenario"
          }

      - name: Verify scenario is disabled
        run: |
          echo "Verifying LCP slowness scenario is disabled..."
          response=$(curl -s "${{ vars.BASE_URL }}/scenario-runner/api/ab-testing/config")
          enabled=$(echo "$response" | jq -r '.config.lcp_slowness_enabled')

          if [ "$enabled" == "true" ]; then
            echo "WARNING: LCP slowness still enabled!"
            exit 1
          fi

          echo "✓ LCP slowness disabled successfully"

      - name: Create rollback marker (A/B test disabled)
        env:
          NEW_RELIC_API_KEY: ${{ secrets.NEW_RELIC_API_KEY }}
          NEW_RELIC_APP_ID: ${{ secrets.NEW_RELIC_BROWSER_APP_ID }}
        run: |
          echo "Creating rollback marker in New Relic..."
          curl -X POST "https://api.newrelic.com/v2/applications/${NEW_RELIC_APP_ID}/deployments.json" \
            -H "Api-Key: ${NEW_RELIC_API_KEY}" \
            -H "Content-Type: application/json" \
            -d '{
              "deployment": {
                "revision": "LCP-AB-Test-Disabled",
                "description": "Rollback: LCP slowness disabled, all users back to normal",
                "user": "GitHub Actions",
                "changelog": "A/B test completed"
              }
            }' || echo "Warning: Could not create rollback marker"

      - name: Test Summary
        if: always()
        run: |
          echo "## LCP A/B Test Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "- **Duration**: 20 minutes" >> $GITHUB_STEP_SUMMARY
          echo "- **Cohort Split**: 50% slow / 50% normal" >> $GITHUB_STEP_SUMMARY
          echo "- **LCP Delay**: 3000ms for slow cohort" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Analysis" >> $GITHUB_STEP_SUMMARY
          echo "Review LCP metrics in New Relic to compare:" >> $GITHUB_STEP_SUMMARY
          echo "- \`lcp_treatment = normal\` (control group)" >> $GITHUB_STEP_SUMMARY
          echo "- \`lcp_treatment = slow\` (test group with 3s delay)" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### NRQL Query" >> $GITHUB_STEP_SUMMARY
          echo '```sql' >> $GITHUB_STEP_SUMMARY
          echo 'SELECT percentile(largestContentfulPaint, 50, 75, 95) as "LCP (ms)", count(*) as "Sessions"' >> $GITHUB_STEP_SUMMARY
          echo 'FROM PageViewTiming' >> $GITHUB_STEP_SUMMARY
          echo "WHERE appName = 'ReliBank Frontend' AND pageUrl LIKE '%/dashboard%'" >> $GITHUB_STEP_SUMMARY
          echo 'FACET custom.lcp_treatment' >> $GITHUB_STEP_SUMMARY
          echo 'SINCE 30 minutes ago' >> $GITHUB_STEP_SUMMARY
          echo '```' >> $GITHUB_STEP_SUMMARY
Comment on lines +11 to +132

Check warning

Code scanning / CodeQL

Workflow does not contain permissions Medium

Actions job or workflow does not limit the permissions of the GITHUB_TOKEN. Consider setting an explicit permissions block, using the following as a minimal starting point: {}

Copilot Autofix

AI about 16 hours ago

In general, to fix this class of issue, you add a permissions block either at the root of the workflow (to apply to all jobs) or inside specific jobs, and set the minimal scopes necessary. For a workflow that only calls external APIs and writes to $GITHUB_STEP_SUMMARY, you typically only need contents: read (the minimal default for most workflows) and no write permissions.

For this specific workflow in .github/workflows/flow-lcp-ab-test.yml, the simplest and least invasive fix is to add a top-level permissions block right under the name: (and before on:). The job does not perform any GitHub write actions (no actions/checkout, no pushes, no issue/PR operations), so we can safely lock the token down to contents: read. No additional imports or steps are required; this change only affects token scopes. Concretely, in the region around lines 1–8, add:

permissions:
  contents: read

and keep the rest of the workflow unchanged.

Suggested changeset 1
.github/workflows/flow-lcp-ab-test.yml

Autofix patch

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/.github/workflows/flow-lcp-ab-test.yml b/.github/workflows/flow-lcp-ab-test.yml
--- a/.github/workflows/flow-lcp-ab-test.yml
+++ b/.github/workflows/flow-lcp-ab-test.yml
@@ -1,5 +1,8 @@
 name: Flow - LCP A/B Test Scenario
 
+permissions:
+  contents: read
+
 on:
   schedule:
     # Run every 6 hours
EOF
@@ -1,5 +1,8 @@
name: Flow - LCP A/B Test Scenario

permissions:
contents: read

on:
schedule:
# Run every 6 hours
Copilot is powered by AI and may make mistakes. Always verify output.
5 changes: 3 additions & 2 deletions .github/workflows/test-suite.yml
Original file line number Diff line number Diff line change
Expand Up @@ -59,9 +59,9 @@ jobs:
run: |
cd tests
# Run scenario tests sequentially to avoid race conditions
pytest test_scenario_service.py test_payment_scenarios.py --tb=line --timeout=300
pytest test_scenario_service.py test_payment_scenarios.py test_ab_testing_scenarios.py --tb=line --timeout=300
# Run other tests in parallel
pytest . --ignore=test_scenario_service.py --ignore=test_payment_scenarios.py -n auto --tb=line --timeout=300
pytest . --ignore=test_scenario_service.py --ignore=test_payment_scenarios.py --ignore=test_ab_testing_scenarios.py -n auto --tb=line --timeout=300

- name: Run end-to-end tests only
if: ${{ github.event.inputs.test_suite == 'e2e' }}
Expand Down Expand Up @@ -135,6 +135,7 @@ jobs:

test-summary:
runs-on: ubuntu-latest
environment: events
needs: [python-tests, frontend-tests]
if: always()

Expand Down
117 changes: 107 additions & 10 deletions accounts_service/accounts_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import sys
import json
import logging
import hashlib
import psycopg2
from psycopg2 import extras, pool
from pydantic import BaseModel, Field
Expand Down Expand Up @@ -42,6 +43,29 @@

return headers_to_propagate

async def get_ab_test_config():
    """Fetch the A/B test configuration from the scenario-runner service.

    Performs a GET against the scenario service's /ab-testing/config endpoint
    with a short 2s timeout so a slow/unavailable scenario service cannot
    stall the request path that calls this.

    Returns:
        dict: The "config" mapping from the service's JSON response on
        HTTP 200; otherwise a defaults dict with every scenario disabled
        (percentage/cohort flags False, percentage 0.0, delays 0 ms).
        NOTE(review): the default keys here (lcp_slowness_percentage_*,
        lcp_slowness_cohort_*) are assumed to match the live endpoint's
        schema — confirm against the scenario service.
    """
    try:
        async with httpx.AsyncClient() as client:
            response = await client.get(
                f"{SCENARIO_SERVICE_URL}/scenario-runner/api/ab-testing/config",
                timeout=2.0
            )
            if response.status_code == 200:
                data = response.json()
                # Missing "config" key degrades gracefully to an empty dict.
                return data.get("config", {})
    except Exception as e:
        # Best-effort fetch: any network/parse error falls through to defaults.
        logging.debug(f"Could not fetch A/B test config: {e}")

    # Return defaults if scenario service unavailable
    return {
        "lcp_slowness_percentage_enabled": False,
        "lcp_slowness_percentage": 0.0,
        "lcp_slowness_percentage_delay_ms": 0,
        "lcp_slowness_cohort_enabled": False,
        "lcp_slowness_cohort_delay_ms": 0
    }

# Database connection details from environment variables
DB_HOST = os.getenv("DB_HOST", "accounts-db")
DB_NAME = os.getenv("DB_NAME", "accountsdb")
Expand All @@ -54,6 +78,25 @@
# if not, default to local development variables
TRANSACTION_SERVICE_URL = f"http://{os.getenv("TRANSACTION_SERVICE_SERVICE_HOST", "transaction-service")}:{os.getenv("TRANSACTION_SERVICE_SERVICE_PORT", "5001")}"

# Scenario service API URL
SCENARIO_SERVICE_URL = f"http://{os.getenv("SCENARIO_RUNNER_SERVICE_SERVICE_HOST", "scenario-runner-service")}:{os.getenv("SCENARIO_RUNNER_SERVICE_SERVICE_PORT", "8000")}"

# Hardcoded list of 11 test users for LCP slowness A/B testing
# These users will experience LCP delays when the scenario is enabled
LCP_SLOW_USERS = {
'b2a5c9f1-3d7f-4b0d-9a8c-9c7b5a1f2e4d', # Alice Johnson
'f5e8d1c6-2a9b-4c3e-8f1a-6e5b0d2c9f1a', # Bob Williams
'e1f2b3c4-5d6a-7e8f-9a0b-1c2d3e4f5a6b', # Charlie Brown
'f47ac10b-58cc-4372-a567-0e02b2c3d471', # Solaire Astora
'd9b1e2a3-f4c5-4d6e-8f7a-9b0c1d2e3f4a', # Malenia Miquella
'8c7d6e5f-4a3b-2c1d-0e9f-8a7b6c5d4e3f', # Artorias Abyss
'7f6e5d4c-3b2a-1c0d-9e8f-7a6b5c4d3e2f', # Priscilla Painted
'6e5d4c3b-2a1c-0d9e-8f7a-6b5c4d3e2f1a', # Gwyn Cinder
'5d4c3b2a-1c0d-9e8f-7a6b-5c4d3e2f1a0b', # Siegmeyer Catarina
'4c3b2a1c-0d9e-8f7a-6b5c-4d3e2f1a0b9c', # Ornstein Dragon
'3b2a1c0d-9e8f-7a6b-5c4d-3e2f1a0b9c8d', # Smough Executioner
}

# Global connection pool
connection_pool = None

Expand Down Expand Up @@ -463,21 +506,46 @@

with conn.cursor(cursor_factory=extras.RealDictCursor) as cursor:
if browser_user_id:
# Validate UUID format before querying database
# Validate UUID format
try:
import uuid
uuid.UUID(browser_user_id) # Validate UUID format

# Query database for this user ID
cursor.execute("SELECT id FROM user_account WHERE id = %s", (browser_user_id,))
user = cursor.fetchone()
if user:
logging.info(f"[Browser User] Using header-provided user ID: {browser_user_id}")
return {"user_id": browser_user_id, "source": "header"}
# Accept the header user ID for A/B testing even if not in database
# This allows deterministic cohort assignment for testing and analytics
logging.info(f"[Browser User] Using header-provided user ID: {browser_user_id}")

# Fetch A/B test config and assign LCP slowness cohort
ab_config = await get_ab_test_config()
lcp_delay_ms = 0

# Check percentage-based scenario first
if ab_config.get("lcp_slowness_percentage_enabled"):
percentage = ab_config.get("lcp_slowness_percentage", 0.0)
# Deterministically assign cohort based on user_id hash
user_hash = int(hashlib.md5(browser_user_id.encode()).hexdigest(), 16)

Check failure

Code scanning / CodeQL

Use of a broken or weak cryptographic hashing algorithm on sensitive data High

Sensitive data (id)
is used in a hashing algorithm (MD5) that is insecure.

Copilot Autofix

AI about 16 hours ago

In general, the fix is to avoid MD5 (and other weak hashes like SHA‑1) for hashing identifiers or other sensitive data. For this specific use—deterministic bucketing of a user ID—you can replace MD5 with a stronger, standard hash from the SHA‑2 family such as SHA‑256. The numeric mapping logic (int(hash, 16) % 100) can remain the same, so functionality (deterministic but pseudo-random bucket assignment) is preserved.

Concretely, in accounts_service/accounts_service.py around line 526, change:

user_hash = int(hashlib.md5(browser_user_id.encode()).hexdigest(), 16)

to:

user_hash = int(hashlib.sha256(browser_user_id.encode()).hexdigest(), 16)

No additional imports are required since hashlib is already imported at the top of the file. This is the only necessary code change; all other logic (reading the header, using modulo 100, logging, and returning lcp_delay_ms) should remain as-is to avoid altering existing behavior apart from the internal distribution of users across buckets, which is acceptable for an A/B test.

Suggested changeset 1
accounts_service/accounts_service.py

Autofix patch

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/accounts_service/accounts_service.py b/accounts_service/accounts_service.py
--- a/accounts_service/accounts_service.py
+++ b/accounts_service/accounts_service.py
@@ -522,8 +522,8 @@
                     # Check percentage-based scenario first
                     if ab_config.get("lcp_slowness_percentage_enabled"):
                         percentage = ab_config.get("lcp_slowness_percentage", 0.0)
-                        # Deterministically assign cohort based on user_id hash
-                        user_hash = int(hashlib.md5(browser_user_id.encode()).hexdigest(), 16)
+                        # Deterministically assign cohort based on user_id hash using a strong hash function
+                        user_hash = int(hashlib.sha256(browser_user_id.encode()).hexdigest(), 16)
                         if (user_hash % 100) < percentage:
                             lcp_delay_ms = ab_config.get("lcp_slowness_percentage_delay_ms", 0)
                             logging.info(f"[Browser User] User {browser_user_id} assigned to SLOW LCP cohort via PERCENTAGE ({lcp_delay_ms}ms delay)")
EOF
@@ -522,8 +522,8 @@
# Check percentage-based scenario first
if ab_config.get("lcp_slowness_percentage_enabled"):
percentage = ab_config.get("lcp_slowness_percentage", 0.0)
# Deterministically assign cohort based on user_id hash
user_hash = int(hashlib.md5(browser_user_id.encode()).hexdigest(), 16)
# Deterministically assign cohort based on user_id hash using a strong hash function
user_hash = int(hashlib.sha256(browser_user_id.encode()).hexdigest(), 16)
if (user_hash % 100) < percentage:
lcp_delay_ms = ab_config.get("lcp_slowness_percentage_delay_ms", 0)
logging.info(f"[Browser User] User {browser_user_id} assigned to SLOW LCP cohort via PERCENTAGE ({lcp_delay_ms}ms delay)")
Copilot is powered by AI and may make mistakes. Always verify output.
if (user_hash % 100) < percentage:
lcp_delay_ms = ab_config.get("lcp_slowness_percentage_delay_ms", 0)
logging.info(f"[Browser User] User {browser_user_id} assigned to SLOW LCP cohort via PERCENTAGE ({lcp_delay_ms}ms delay)")

# Check cohort-based scenario (can override percentage if both enabled)
elif ab_config.get("lcp_slowness_cohort_enabled"):
# Check if user is in the hardcoded slow cohort (11 test users)
if browser_user_id in LCP_SLOW_USERS:
lcp_delay_ms = ab_config.get("lcp_slowness_cohort_delay_ms", 0)
logging.info(f"[Browser User] User {browser_user_id} assigned to SLOW LCP cohort via COHORT ({lcp_delay_ms}ms delay)")
else:
logging.info(f"[Browser User] User {browser_user_id} assigned to NORMAL LCP cohort")
else:
logging.warning(f"[Browser User] Header-provided ID {browser_user_id} not found in database, falling back to random")
logging.info(f"[Browser User] User {browser_user_id} assigned to NORMAL LCP cohort (no scenarios enabled)")

return {
"user_id": browser_user_id,
"source": "header",
"lcp_delay_ms": lcp_delay_ms
}
except (ValueError, Exception) as e:
logging.warning(f"[Browser User] Invalid UUID format or database error: {e}, falling back to random")
logging.warning(f"[Browser User] Invalid UUID format: {e}, falling back to random")

# Fall back to random selection
cursor.execute("SELECT id FROM user_account ORDER BY RANDOM() LIMIT 1")
Expand All @@ -491,7 +559,36 @@

user_id = random_user["id"]
logging.info(f"[Browser User] Randomly selected user ID: {user_id}")
return {"user_id": user_id, "source": "random"}

# Fetch A/B test config and assign LCP slowness cohort
ab_config = await get_ab_test_config()
lcp_delay_ms = 0

# Check percentage-based scenario first
if ab_config.get("lcp_slowness_percentage_enabled"):
percentage = ab_config.get("lcp_slowness_percentage", 0.0)
# Deterministically assign cohort based on user_id hash
user_hash = int(hashlib.md5(user_id.encode()).hexdigest(), 16)

Check failure

Code scanning / CodeQL

Use of a broken or weak cryptographic hashing algorithm on sensitive data High

Sensitive data (id)
is used in a hashing algorithm (MD5) that is insecure.

Copilot Autofix

AI about 16 hours ago

General fix: Replace the use of the broken MD5 hash with a strong, modern hash function. Since this hashing is not for passwords but for deterministic bucketing, using hashlib.sha256 (or any SHA-2/SHA-3 function) is appropriate and requires minimal code change.

Best way in this code: In accounts_service/accounts_service.py, at the line where user_hash is computed, change hashlib.md5(user_id.encode()).hexdigest() to hashlib.sha256(user_id.encode()).hexdigest(). This preserves the same overall “hash-to-int-then-mod-100” pattern and keeps functionality (deterministic mapping of each user_id to a 0–99 bucket) unchanged in spirit, while avoiding the insecure MD5 algorithm. No new imports are needed because hashlib is already imported at the top of the file.

Concrete change:

  • File: accounts_service/accounts_service.py
  • Around line 571, replace:
    • user_hash = int(hashlib.md5(user_id.encode()).hexdigest(), 16)
  • With:
    • user_hash = int(hashlib.sha256(user_id.encode()).hexdigest(), 16)

No additional methods or definitions are required; the standard library already provides hashlib.sha256.


Suggested changeset 1
accounts_service/accounts_service.py

Autofix patch

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/accounts_service/accounts_service.py b/accounts_service/accounts_service.py
--- a/accounts_service/accounts_service.py
+++ b/accounts_service/accounts_service.py
@@ -567,8 +567,8 @@
             # Check percentage-based scenario first
             if ab_config.get("lcp_slowness_percentage_enabled"):
                 percentage = ab_config.get("lcp_slowness_percentage", 0.0)
-                # Deterministically assign cohort based on user_id hash
-                user_hash = int(hashlib.md5(user_id.encode()).hexdigest(), 16)
+                # Deterministically assign cohort based on user_id hash using a strong hash function
+                user_hash = int(hashlib.sha256(user_id.encode()).hexdigest(), 16)
                 if (user_hash % 100) < percentage:
                     lcp_delay_ms = ab_config.get("lcp_slowness_percentage_delay_ms", 0)
                     logging.info(f"[Browser User] User {user_id} assigned to SLOW LCP cohort via PERCENTAGE ({lcp_delay_ms}ms delay)")
EOF
@@ -567,8 +567,8 @@
# Check percentage-based scenario first
if ab_config.get("lcp_slowness_percentage_enabled"):
percentage = ab_config.get("lcp_slowness_percentage", 0.0)
# Deterministically assign cohort based on user_id hash
user_hash = int(hashlib.md5(user_id.encode()).hexdigest(), 16)
# Deterministically assign cohort based on user_id hash using a strong hash function
user_hash = int(hashlib.sha256(user_id.encode()).hexdigest(), 16)
if (user_hash % 100) < percentage:
lcp_delay_ms = ab_config.get("lcp_slowness_percentage_delay_ms", 0)
logging.info(f"[Browser User] User {user_id} assigned to SLOW LCP cohort via PERCENTAGE ({lcp_delay_ms}ms delay)")
Copilot is powered by AI and may make mistakes. Always verify output.
if (user_hash % 100) < percentage:
lcp_delay_ms = ab_config.get("lcp_slowness_percentage_delay_ms", 0)
logging.info(f"[Browser User] User {user_id} assigned to SLOW LCP cohort via PERCENTAGE ({lcp_delay_ms}ms delay)")

# Check cohort-based scenario (can override percentage if both enabled)
elif ab_config.get("lcp_slowness_cohort_enabled"):
# Check if user is in the hardcoded slow cohort (11 test users)
if user_id in LCP_SLOW_USERS:
lcp_delay_ms = ab_config.get("lcp_slowness_cohort_delay_ms", 0)
logging.info(f"[Browser User] User {user_id} assigned to SLOW LCP cohort via COHORT ({lcp_delay_ms}ms delay)")
else:
logging.info(f"[Browser User] User {user_id} assigned to NORMAL LCP cohort")
else:
logging.info(f"[Browser User] User {user_id} assigned to NORMAL LCP cohort (no scenarios enabled)")

return {
"user_id": user_id,
"source": "random",
"lcp_delay_ms": lcp_delay_ms
}

except HTTPException:
raise
Expand Down
18 changes: 16 additions & 2 deletions frontend_service/app/root.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -119,13 +119,16 @@ export function Layout({ children }: { children: React.ReactNode }) {
if (typeof window !== 'undefined' && isHydrated) {
// Check sessionStorage first
const storedUserId = sessionStorage.getItem('browserUserId');
if (storedUserId) {
const storedLcpDelay = sessionStorage.getItem('lcpDelayMs');

if (storedUserId && storedLcpDelay !== null) {
console.log('[Browser User] Loaded from sessionStorage:', storedUserId);
console.log('[Browser User] LCP Delay from sessionStorage:', storedLcpDelay + 'ms');
setBrowserUserId(storedUserId);
return;
}

// Fetch from API
// Fetch from API (either no user ID or no LCP delay stored)
const fetchBrowserUserId = async () => {
try {
const response = await fetch('/accounts-service/browser-user');
Expand All @@ -134,6 +137,17 @@ export function Layout({ children }: { children: React.ReactNode }) {
console.log(`[Browser User] Received: ${data.user_id} Source: ${data.source}`);
setBrowserUserId(data.user_id);
sessionStorage.setItem('browserUserId', data.user_id);

// Store LCP delay for A/B testing
const lcpDelay = data.lcp_delay_ms || 0;
sessionStorage.setItem('lcpDelayMs', lcpDelay.toString());
console.log(`[Browser User] LCP Delay: ${lcpDelay}ms`);

// Set New Relic custom attribute for A/B test cohort tracking
if (window.newrelic && typeof window.newrelic.setCustomAttribute === 'function') {
window.newrelic.setCustomAttribute('lcp_delay_ms', lcpDelay);
window.newrelic.setCustomAttribute('lcp_treatment', lcpDelay > 0 ? 'slow' : 'normal');
}
} else {
console.error('[Browser User] Failed to fetch user ID:', response.status);
}
Expand Down
Loading
Loading