-
Notifications
You must be signed in to change notification settings - Fork 21
Expand file tree
/
Copy pathentrypoint.sh
More file actions
941 lines (875 loc) · 44.5 KB
/
entrypoint.sh
File metadata and controls
941 lines (875 loc) · 44.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
#!/bin/bash
set -e

echo "[entrypoint] Agentic Workflow Firewall - Agent Container"
echo "[entrypoint] =================================="

# --- Align awfuser's UID/GID with the host user at runtime -------------------
# Keeps file ownership correct whether the image came from GHCR or a local build.
HOST_UID=${AWF_USER_UID:-$(id -u awfuser)}
HOST_GID=${AWF_USER_GID:-$(id -g awfuser)}
CURRENT_UID=$(id -u awfuser)
CURRENT_GID=$(id -g awfuser)

# Reject anything that is not a plain decimal number.
# $1 - candidate value, $2 - env var name for the error message
check_numeric() {
  case "$1" in
    ''|*[!0-9]*)
      echo "[entrypoint][ERROR] Invalid $2: must be numeric"
      exit 1
      ;;
  esac
}
# Refuse ID 0: mapping awfuser onto root would defeat the privilege drop.
# $1 - candidate value, $2 - env var name for the error message
check_nonroot() {
  if [ "$1" -eq 0 ]; then
    echo "[entrypoint][ERROR] Invalid $2: cannot be 0 (root)"
    exit 1
  fi
}
check_numeric "$HOST_UID" "AWF_USER_UID"
check_numeric "$HOST_GID" "AWF_USER_GID"
check_nonroot "$HOST_UID" "AWF_USER_UID"
check_nonroot "$HOST_GID" "AWF_USER_GID"

if [ "$CURRENT_UID" != "$HOST_UID" ] || [ "$CURRENT_GID" != "$HOST_GID" ]; then
  echo "[entrypoint] Adjusting awfuser UID:GID from $CURRENT_UID:$CURRENT_GID to $HOST_UID:$HOST_GID"

  # GID change goes first (must happen before the UID change).
  # Skip when another group already owns the target GID.
  EXISTING_GROUP=$(getent group "$HOST_GID" 2>/dev/null | cut -d: -f1 || true)
  if [ -n "$EXISTING_GROUP" ] && [ "$EXISTING_GROUP" != "awfuser" ]; then
    echo "[entrypoint][WARN] Target GID $HOST_GID is already used by group '$EXISTING_GROUP'. Skipping GID change."
  elif ! groupmod -g "$HOST_GID" awfuser 2>/dev/null; then
    echo "[entrypoint][ERROR] Failed to change GID of awfuser to $HOST_GID"
    exit 1
  fi

  # Same idea for the UID: skip when another user already owns it.
  EXISTING_USER=$(getent passwd "$HOST_UID" 2>/dev/null | cut -d: -f1 || true)
  if [ -n "$EXISTING_USER" ] && [ "$EXISTING_USER" != "awfuser" ]; then
    echo "[entrypoint][WARN] Target UID $HOST_UID is already used by user '$EXISTING_USER'. Skipping UID change."
  elif ! usermod -u "$HOST_UID" awfuser 2>/dev/null; then
    echo "[entrypoint][ERROR] Failed to change UID of awfuser to $HOST_UID"
    exit 1
  fi

  # Re-own the home directory under the (possibly new) IDs; best-effort.
  chown -R awfuser:awfuser /home/awfuser 2>/dev/null || true
  echo "[entrypoint] UID/GID adjustment complete"
fi
# --- DNS configuration -------------------------------------------------------
# Only Docker's embedded DNS (127.0.0.11) is listed so that:
#   - container names (e.g. squid-proxy) resolve directly
#   - external domains are forwarded to the upstream servers configured via
#     the docker-compose dns: field
# No external resolvers appear in resolv.conf, which prevents DNS-based data
# exfiltration.
echo "[entrypoint] Configuring DNS..."
if [ -f /etc/resolv.conf ]; then
  # Keep the original around for debugging / later restoration.
  cp /etc/resolv.conf /etc/resolv.conf.orig
  if [ "$AWF_DOH_ENABLED" = "true" ] && [ -n "$AWF_DOH_PROXY_IP" ]; then
    # DNS-over-HTTPS mode: the DoH proxy is added as a second resolver.
    cat > /etc/resolv.conf << RESOLV_EOF
# Generated by awf entrypoint (DNS-over-HTTPS mode)
# Docker embedded DNS for service name resolution (squid-proxy, etc.)
nameserver 127.0.0.11
# DNS-over-HTTPS proxy for encrypted internet domain resolution
nameserver $AWF_DOH_PROXY_IP
options ndots:0
RESOLV_EOF
    echo "[entrypoint] DNS configured with Docker embedded DNS (127.0.0.11) and DoH proxy ($AWF_DOH_PROXY_IP)"
  else
    # Simplified security model: Docker embedded DNS only. 127.0.0.11 handles
    # all resolution and forwards upstream per the docker-compose dns: config.
    cat > /etc/resolv.conf << RESOLV_EOF
# Generated by awf entrypoint
# Docker embedded DNS handles all resolution (forwards to upstream via docker-compose dns: config)
nameserver 127.0.0.11
options ndots:0
RESOLV_EOF
    echo "[entrypoint] DNS configured with Docker embedded DNS (127.0.0.11) only"
  fi
fi
# --- CA trust for SSL Bump ---------------------------------------------------
# When SSL Bump is on, the AWF CA is mounted at
# /usr/local/share/ca-certificates/awf-ca.crt and must be trusted by both the
# system store and the language-specific bundles below.
if [ "${AWF_SSL_BUMP_ENABLED}" = "true" ]; then
  echo "[entrypoint] SSL Bump mode detected - updating CA certificates..."
  if [ -f /usr/local/share/ca-certificates/awf-ca.crt ]; then
    update-ca-certificates 2>/dev/null
    AWF_CA_PATH="/usr/local/share/ca-certificates/awf-ca.crt"
    # Node.js (Yarn 4, Corepack, npm) keeps its own CA bundle and ignores the
    # system store updated by update-ca-certificates.
    export NODE_EXTRA_CA_CERTS="$AWF_CA_PATH"
    # SSL_CERT_FILE covers curl, git, Python requests, Ruby, and most other
    # OpenSSL-based tools that bypass the system CA store.
    export SSL_CERT_FILE="$AWF_CA_PATH"
    export REQUESTS_CA_BUNDLE="$AWF_CA_PATH"
    echo "[entrypoint] CA certificates updated for SSL Bump"
    echo "[entrypoint] NODE_EXTRA_CA_CERTS set to $NODE_EXTRA_CA_CERTS"
    echo "[entrypoint] ⚠️ WARNING: HTTPS traffic will be intercepted for URL inspection"
  else
    echo "[entrypoint][WARN] SSL Bump enabled but CA certificate not found"
  fi
fi
# --- iptables readiness gate -------------------------------------------------
# The awf-iptables-init sidecar shares this network namespace, runs
# setup-iptables.sh, then drops a ready file. Gating on that file means the
# agent container itself NEVER needs the NET_ADMIN capability.
echo "[entrypoint] Waiting for iptables initialization from init container..."
INIT_TIMEOUT=300 # 300 polls of 0.1s each = 30 seconds
INIT_ELAPSED=0
until [ -f /tmp/awf-init/ready ]; do
  if [ "$INIT_ELAPSED" -ge "$INIT_TIMEOUT" ]; then
    echo "[entrypoint][ERROR] Timed out waiting for iptables init container after 30s"
    # Surface the sidecar's log (if any) to make the failure diagnosable.
    if [ -f /tmp/awf-init/output.log ]; then
      echo "[entrypoint] Init container output:"
      cat /tmp/awf-init/output.log
    else
      echo "[entrypoint] No init container output log found"
    fi
    exit 1
  fi
  sleep 0.1
  INIT_ELAPSED=$((INIT_ELAPSED + 1))
done
echo "[entrypoint] iptables initialization complete"

# --- API proxy health checks -------------------------------------------------
# Verifies credential isolation and connectivity. Must run AFTER iptables
# setup (which allows api-proxy traffic) and BEFORE the user command; any
# failure aborts the container so the agent never runs.
/usr/local/bin/api-proxy-health-check.sh || exit 1
# --- Claude Code API key helper configuration --------------------------------
# Ensure apiKeyHelper is present in Claude Code's config files BEFORE Claude
# Code starts, otherwise authentication will not work. Both paths are written
# for compatibility:
#   - ~/.claude.json           (legacy path, used by older Claude Code versions)
#   - ~/.claude/settings.json  (used by Claude Code v2.1.81+)
# In chroot mode we write under /host$HOME/... so the files remain accessible
# after the chroot transition.
if [ -n "$CLAUDE_CODE_API_KEY_HELPER" ]; then
  echo "[entrypoint] Claude Code API key helper configured: $CLAUDE_CODE_API_KEY_HELPER"
  # Chroot mode: target the /host bind mount so files survive the transition.
  if [ "${AWF_CHROOT_ENABLED}" = "true" ]; then
    LEGACY_CONFIG_FILE="/host$HOME/.claude.json"
    SETTINGS_DIR="/host$HOME/.claude"
  else
    LEGACY_CONFIG_FILE="$HOME/.claude.json"
    SETTINGS_DIR="$HOME/.claude"
  fi
  SETTINGS_FILE="$SETTINGS_DIR/settings.json"
  # Helper: write or validate apiKeyHelper in a config file
# Write or validate apiKeyHelper in a Claude Code JSON config file.
#   $1 - path to the config file
#   $2 - label used in log messages
# Reads $CLAUDE_CODE_API_KEY_HELPER. Creates the file when missing; merges
# into existing JSON via node (preserving unrelated fields such as
# hasCompletedOnboarding and session tokens); falls back to overwriting with a
# minimal JSON object when node is unavailable or the file is not valid JSON.
write_api_key_helper() {
  local config_file="$1"
  local label="$2"
  local configured_helper
  if [ -f "$config_file" ]; then
    if grep -q '"apiKeyHelper"' "$config_file"; then
      # Extract the configured value. Tolerates optional whitespace around the
      # colon (the previous grep pattern missed hand-edited configs like
      # '"apiKeyHelper": "x"', yielding an empty value).
      configured_helper=$(sed -n 's/.*"apiKeyHelper"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/p' "$config_file" | head -n1)
      if [ "$configured_helper" != "$CLAUDE_CODE_API_KEY_HELPER" ]; then
        # Overwrite stale path (e.g. a previous run's chroot-adjusted path)
        echo "[entrypoint] Updating apiKeyHelper in $label (was: $configured_helper)"
        if AWF_CONFIG_FILE="$config_file" AWF_KEY_HELPER="$CLAUDE_CODE_API_KEY_HELPER" \
          node -e "
            const fs = require('fs');
            const f = process.env.AWF_CONFIG_FILE;
            const obj = JSON.parse(fs.readFileSync(f, 'utf8'));
            obj.apiKeyHelper = process.env.AWF_KEY_HELPER;
            fs.writeFileSync(f, JSON.stringify(obj) + '\n');
          " 2>/dev/null; then
          # chmod 666 is deliberate: the file must stay writable across the
          # UID switch to awfuser (and inside the chroot).
          chmod 666 "$config_file"
          echo "[entrypoint] ✓ Updated apiKeyHelper in $label"
        else
          echo "{\"apiKeyHelper\":\"$CLAUDE_CODE_API_KEY_HELPER\"}" > "$config_file"
          chmod 666 "$config_file"
          echo "[entrypoint] ✓ Wrote apiKeyHelper to $label (overwrite fallback)"
        fi
      else
        echo "[entrypoint] ✓ $label apiKeyHelper validated"
      fi
    else
      echo "[entrypoint] $label exists but missing apiKeyHelper, merging..."
      # node merge keeps unrelated fields intact (onboarding flags, session
      # tokens, user preferences).
      if AWF_CONFIG_FILE="$config_file" AWF_KEY_HELPER="$CLAUDE_CODE_API_KEY_HELPER" \
        node -e "
          const fs = require('fs');
          const file = process.env.AWF_CONFIG_FILE;
          const helper = process.env.AWF_KEY_HELPER;
          let obj = {};
          try { obj = JSON.parse(fs.readFileSync(file, 'utf8')); } catch(e) {}
          obj.apiKeyHelper = helper;
          fs.writeFileSync(file, JSON.stringify(obj) + '\n');
        " 2>/dev/null; then
        chmod 666 "$config_file"
        echo "[entrypoint] ✓ Merged apiKeyHelper into $label"
      else
        # Fallback if node is unavailable or the file is not valid JSON
        echo "{\"apiKeyHelper\":\"$CLAUDE_CODE_API_KEY_HELPER\"}" > "$config_file"
        chmod 666 "$config_file"
        echo "[entrypoint] ✓ Wrote apiKeyHelper to $label (overwrite fallback)"
      fi
    fi
  else
    echo "[entrypoint] Creating $label with apiKeyHelper..."
    echo "{\"apiKeyHelper\":\"$CLAUDE_CODE_API_KEY_HELPER\"}" > "$config_file"
    chmod 666 "$config_file"
    echo "[entrypoint] ✓ Created $label with apiKeyHelper: $CLAUDE_CODE_API_KEY_HELPER"
  fi
}
  # Write to legacy path (~/.claude.json)
  write_api_key_helper "$LEGACY_CONFIG_FILE" "$LEGACY_CONFIG_FILE"
  # Write to settings path (~/.claude/settings.json) for Claude Code v2.1.81+
  mkdir -p "$SETTINGS_DIR"
  # chmod 777: presumably so the directory stays writable after the UID
  # switch to awfuser / inside the chroot — NOTE(review): confirm intent
  chmod 777 "$SETTINGS_DIR" 2>/dev/null || true
  write_api_key_helper "$SETTINGS_FILE" "$SETTINGS_FILE"
fi
# --- JVM build tool proxy configuration --------------------------------------
# Maven, Gradle and sbt do not honor HTTP_PROXY/HTTPS_PROXY env vars and need
# explicit proxy configuration files / JVM system properties.

# Derive PROXY_HOST / PROXY_PORT (globals) from a proxy URL.
# Handles http:// and https:// schemes, embedded credentials (user:pass@),
# trailing slashes/paths, bracketed IPv6, and a missing port (defaults to
# 3128). Fixes the previous inline parsing, which produced
# PROXY_PORT="<hostname>" for port-less URLs like http://proxyhost.
parse_proxy_url() {
  local hostport="$1"
  hostport="${hostport#http://}"
  hostport="${hostport#https://}"
  hostport="${hostport#*@}"  # strip credentials if present
  hostport="${hostport%%/*}" # strip trailing slash or path
  if [[ "$hostport" == *:* && "$hostport" != *\] ]]; then
    PROXY_PORT="${hostport##*:}" # port after the last colon
    PROXY_HOST="${hostport%:*}"
  else
    # Bare hostname, or bracketed IPv6 without a port
    PROXY_HOST="$hostport"
    PROXY_PORT=""
  fi
  PROXY_PORT="${PROXY_PORT:-3128}" # default Squid port
}

if [ -n "$HTTP_PROXY" ]; then
  # Prefer SQUID_PROXY_HOST/SQUID_PROXY_PORT (set by docker-manager.ts) over
  # parsing HTTP_PROXY, since URL parsing is inherently brittle.
  if [ -n "$SQUID_PROXY_HOST" ] && [ -n "$SQUID_PROXY_PORT" ]; then
    PROXY_HOST="$SQUID_PROXY_HOST"
    PROXY_PORT="$SQUID_PROXY_PORT"
  else
    parse_proxy_url "$HTTP_PROXY"
  fi

  # Path prefix for config files (chroot-aware, same pattern as .claude.json)
  if [ "${AWF_CHROOT_ENABLED}" = "true" ]; then
    JVM_HOME_PREFIX="/host${HOME}"
  else
    JVM_HOME_PREFIX="${HOME}"
  fi

  echo "[entrypoint] Pre-seeding JVM build tool proxy configuration (${PROXY_HOST}:${PROXY_PORT})..."

  # Maven proxy config (~/.m2/settings.xml).
  # Only created when absent, to avoid clobbering user-provided settings.
  mkdir -p "${JVM_HOME_PREFIX}/.m2"
  if [ ! -f "${JVM_HOME_PREFIX}/.m2/settings.xml" ]; then
    cat > "${JVM_HOME_PREFIX}/.m2/settings.xml" << MAVEN_EOF
<settings>
  <proxies>
    <proxy>
      <id>awf-http</id>
      <active>true</active>
      <protocol>http</protocol>
      <host>${PROXY_HOST}</host>
      <port>${PROXY_PORT}</port>
    </proxy>
    <proxy>
      <id>awf-https</id>
      <active>true</active>
      <protocol>https</protocol>
      <host>${PROXY_HOST}</host>
      <port>${PROXY_PORT}</port>
    </proxy>
  </proxies>
</settings>
MAVEN_EOF
    chown awfuser:awfuser "${JVM_HOME_PREFIX}/.m2/settings.xml" 2>/dev/null || true
    echo "[entrypoint] ✓ Created Maven proxy config (${JVM_HOME_PREFIX}/.m2/settings.xml)"
  else
    echo "[entrypoint] ✓ Maven settings.xml already exists, skipping proxy config creation"
  fi

  # Gradle proxy config (~/.gradle/gradle.properties).
  # Only created when absent, to avoid clobbering user settings
  # (org.gradle.jvmargs, build cache settings, private repo credentials).
  mkdir -p "${JVM_HOME_PREFIX}/.gradle"
  if [ ! -f "${JVM_HOME_PREFIX}/.gradle/gradle.properties" ]; then
    cat > "${JVM_HOME_PREFIX}/.gradle/gradle.properties" << GRADLE_EOF
systemProp.http.proxyHost=${PROXY_HOST}
systemProp.http.proxyPort=${PROXY_PORT}
systemProp.https.proxyHost=${PROXY_HOST}
systemProp.https.proxyPort=${PROXY_PORT}
GRADLE_EOF
    chown awfuser:awfuser "${JVM_HOME_PREFIX}/.gradle/gradle.properties" 2>/dev/null || true
    echo "[entrypoint] ✓ Created Gradle proxy config (${JVM_HOME_PREFIX}/.gradle/gradle.properties)"
  else
    echo "[entrypoint] ✓ Gradle gradle.properties already exists, skipping proxy config creation"
  fi

  # sbt/JVM proxy config via JAVA_TOOL_OPTIONS (covers any JVM tool that reads
  # standard system properties). nonProxyHosts mirrors NO_PROXY so JVM tools
  # never proxy localhost/internal traffic.
  NON_PROXY_RAW="${NO_PROXY:-${no_proxy:-}}"
  JVM_PROXY_FLAGS="-Dhttp.proxyHost=${PROXY_HOST} -Dhttp.proxyPort=${PROXY_PORT} -Dhttps.proxyHost=${PROXY_HOST} -Dhttps.proxyPort=${PROXY_PORT}"
  if [ -n "$NON_PROXY_RAW" ]; then
    # Java expects pipe-separated nonProxyHosts; NO_PROXY is comma-separated
    NON_PROXY_HOSTS=$(printf '%s' "$NON_PROXY_RAW" | tr ',' '|' | tr -d ' ')
    JVM_PROXY_FLAGS="${JVM_PROXY_FLAGS} -Dhttp.nonProxyHosts=${NON_PROXY_HOSTS} -Dhttps.nonProxyHosts=${NON_PROXY_HOSTS}"
    echo "[entrypoint] ✓ Set JAVA_TOOL_OPTIONS with proxy and nonProxyHosts flags"
  else
    echo "[entrypoint] ✓ Set JAVA_TOOL_OPTIONS with proxy flags"
  fi
  # ${VAR:+...} avoids a stray leading space when JAVA_TOOL_OPTIONS was unset
  export JAVA_TOOL_OPTIONS="${JAVA_TOOL_OPTIONS:+${JAVA_TOOL_OPTIONS} }${JVM_PROXY_FLAGS}"
fi
# --- Diagnostics & capability planning ---------------------------------------
echo "[entrypoint] Proxy configuration:"
echo "[entrypoint] HTTP_PROXY=$HTTP_PROXY"
echo "[entrypoint] HTTPS_PROXY=$HTTPS_PROXY"

echo "[entrypoint] Network information:"
echo "[entrypoint] IP address: $(hostname -I)"
echo "[entrypoint] Hostname: $(hostname)"

# Mark every directory as a git safe.directory for awfuser.
# runuser (rather than su) avoids PAM session issues; best-effort.
runuser -u awfuser -- git config --global --add safe.directory '*' 2>/dev/null || true
echo "[entrypoint] =================================="

# Decide which capabilities to drop before user code runs:
# - CAP_NET_ADMIN is never granted to this container (the separate
#   awf-iptables-init container performs the iptables setup)
# - CAP_SYS_CHROOT is dropped in chroot mode (prevents user code chroot-ing)
# - CAP_SYS_ADMIN is dropped in chroot mode (was needed for mounting procfs)
case "${AWF_CHROOT_ENABLED}" in
  true)
    CAPS_TO_DROP="cap_sys_chroot,cap_sys_admin"
    echo "[entrypoint] Chroot mode enabled - dropping CAP_SYS_CHROOT and CAP_SYS_ADMIN"
    ;;
  *)
    # Non-chroot mode: nothing to drop (NET_ADMIN was never granted, and
    # SYS_CHROOT/SYS_ADMIN only exist in chroot mode).
    CAPS_TO_DROP=""
    echo "[entrypoint] No capabilities to drop (NET_ADMIN never granted to agent)"
    ;;
esac
# Scrub sensitive token variables from this (PID 1) shell's environment so
# they cannot be recovered via /proc/1/environ once the agent is running.
unset_sensitive_tokens() {
  local token
  echo "[entrypoint] Unsetting sensitive tokens from parent shell environment..." >&2
  # Variable list mirrors the one-shot-token library defaults:
  # GitHub, OpenAI, Anthropic/Claude, and Codex credentials.
  for token in \
    COPILOT_GITHUB_TOKEN GITHUB_TOKEN GH_TOKEN GITHUB_API_TOKEN \
    GITHUB_PAT GH_ACCESS_TOKEN GITHUB_PERSONAL_ACCESS_TOKEN \
    OPENAI_API_KEY OPENAI_KEY \
    ANTHROPIC_API_KEY CLAUDE_API_KEY CLAUDE_CODE_OAUTH_TOKEN \
    CODEX_API_KEY; do
    # ${!token} is indirect expansion: the value of the variable named $token
    if [ -n "${!token}" ]; then
      unset "$token"
      echo "[entrypoint] Unset $token from /proc/1/environ" >&2
    fi
  done
}
echo "[entrypoint] Switching to awfuser (UID: $(id -u awfuser), GID: $(id -g awfuser))"
# $* joins all arguments into one space-separated string for display.
# The previous "...: $@" mixed an array expansion into a quoted string
# (ShellCheck SC2145); the printed result is the same, the form is correct.
echo "[entrypoint] Executing command: $*"
echo ""
if [ "${AWF_CHROOT_ENABLED}" = "true" ]; then
echo "[entrypoint] Chroot mode: running command inside host filesystem (/host)"
# Mount a container-scoped procfs at /host/proc
# This provides dynamic /proc/self/exe resolution (required by .NET CLR, JVM, and other
# runtimes that read /proc/self/exe to find themselves). A static bind mount of /proc/self
# always resolves to the parent shell's exe, causing runtime failures.
# Security: This procfs is container-scoped (only shows container processes, not host).
# SYS_ADMIN capability (required for mount) is dropped before user code runs.
mkdir -p /host/proc
if mount -t proc -o nosuid,nodev,noexec proc /host/proc; then
echo "[entrypoint] Mounted procfs at /host/proc (nosuid,nodev,noexec)"
else
echo "[entrypoint][ERROR] Failed to mount procfs at /host/proc"
echo "[entrypoint][ERROR] This is required for Java, .NET, and other runtimes that read /proc/self/exe"
echo "[entrypoint][ERROR] Ensure the container has SYS_ADMIN capability (it will be dropped before user code runs)"
exit 1
fi
# Copy one-shot-token library to host filesystem for LD_PRELOAD in chroot
# This prevents tokens from being read multiple times by malicious code
# Note: /tmp is always writable in chroot mode (mounted from host /tmp as rw)
ONE_SHOT_TOKEN_LIB=""
if [ -f /usr/local/lib/one-shot-token.so ]; then
# Create the library directory in /tmp (always writable)
if mkdir -p /host/tmp/awf-lib 2>/dev/null; then
# Copy the library and verify it exists after copying
if cp /usr/local/lib/one-shot-token.so /host/tmp/awf-lib/one-shot-token.so 2>/dev/null && \
[ -f /host/tmp/awf-lib/one-shot-token.so ]; then
ONE_SHOT_TOKEN_LIB="/tmp/awf-lib/one-shot-token.so"
echo "[entrypoint] One-shot token library copied to chroot at ${ONE_SHOT_TOKEN_LIB}"
else
echo "[entrypoint][WARN] Could not copy one-shot-token library to /tmp/awf-lib"
echo "[entrypoint][WARN] Token protection will be disabled (tokens may be readable multiple times)"
fi
else
echo "[entrypoint][ERROR] Could not create /tmp/awf-lib directory"
echo "[entrypoint][ERROR] This should not happen - /tmp is mounted read-write in chroot mode"
echo "[entrypoint][WARN] Token protection will be disabled (tokens may be readable multiple times)"
fi
fi
# Copy get-claude-key.sh to chroot-accessible path
# The script is baked into the Docker image at /usr/local/bin/, but the chroot
# bind-mounts the host's /usr (read-only), shadowing the container's copy.
# We must copy it to /tmp/awf-lib/ (writable) before the chroot activates.
if [ -n "$CLAUDE_CODE_API_KEY_HELPER" ] && [ -f "$CLAUDE_CODE_API_KEY_HELPER" ]; then
if mkdir -p /host/tmp/awf-lib 2>/dev/null; then
CHROOT_KEY_HELPER="/tmp/awf-lib/$(basename "$CLAUDE_CODE_API_KEY_HELPER")"
if cp "$CLAUDE_CODE_API_KEY_HELPER" "/host${CHROOT_KEY_HELPER}" 2>/dev/null && \
chmod +x "/host${CHROOT_KEY_HELPER}" 2>/dev/null; then
echo "[entrypoint] Claude key helper copied to chroot at ${CHROOT_KEY_HELPER}"
# Update apiKeyHelper in config files to use the chroot-accessible path
for cfg in "/host$HOME/.claude.json" "/host$HOME/.claude/settings.json"; do
if [ -f "$cfg" ] && grep -q '"apiKeyHelper"' "$cfg" 2>/dev/null; then
if AWF_CFG="$cfg" AWF_NEW="$CHROOT_KEY_HELPER" \
node -e "
const fs = require('fs');
const f = process.env.AWF_CFG;
const obj = JSON.parse(fs.readFileSync(f, 'utf8'));
obj.apiKeyHelper = process.env.AWF_NEW;
fs.writeFileSync(f, JSON.stringify(obj) + '\n');
" 2>/dev/null; then
echo "[entrypoint] ✓ Updated apiKeyHelper path in $cfg"
fi
fi
done
CLAUDE_CODE_API_KEY_HELPER="$CHROOT_KEY_HELPER"
else
echo "[entrypoint][WARN] Could not copy get-claude-key.sh to chroot"
echo "[entrypoint][WARN] Claude Code API key helper may not work in chroot mode"
fi
fi
fi
# Activate gh CLI proxy wrapper when CLI proxy sidecar is enabled.
# The wrapper at /usr/local/bin/gh-cli-proxy-wrapper.sh (baked into the image)
# is copied to /tmp/awf-lib/gh so it is accessible inside the chroot at a
# location that takes precedence over the host's /usr/bin/gh mount.
if [ -n "$AWF_CLI_PROXY_URL" ] && [ -f /usr/local/bin/gh-cli-proxy-wrapper.sh ]; then
if mkdir -p /host/tmp/awf-lib 2>/dev/null; then
if cp /usr/local/bin/gh-cli-proxy-wrapper.sh /host/tmp/awf-lib/gh 2>/dev/null && \
chmod +x /host/tmp/awf-lib/gh 2>/dev/null; then
# The chroot will see this as /tmp/awf-lib/gh (the /host prefix is the bind mount)
echo "[entrypoint] gh CLI proxy wrapper installed at /tmp/awf-lib/gh (inside chroot)"
# Prepend /tmp/awf-lib to PATH so the wrapper takes precedence over host gh
export AWF_HOST_PATH="/tmp/awf-lib:${AWF_HOST_PATH:-$PATH}"
else
echo "[entrypoint][WARN] Could not install gh CLI proxy wrapper"
fi
fi
fi
# Copy AWF CA certificate to chroot-accessible path for ssl-bump TLS trust.
# NODE_EXTRA_CA_CERTS points to /usr/local/share/ca-certificates/awf-ca.crt which
# is a Docker volume mount on the container's overlay filesystem. After chroot /host,
# this path is inaccessible. Copy to /tmp/awf-lib/ (always writable) and update the
# env var so Node.js (Claude Code), curl, git, Python, etc. trust the Squid CA.
AWF_CA_CHROOT=""
if [ "${AWF_SSL_BUMP_ENABLED}" = "true" ] && [ -f /usr/local/share/ca-certificates/awf-ca.crt ]; then
if mkdir -p /host/tmp/awf-lib 2>/dev/null; then
if cp /usr/local/share/ca-certificates/awf-ca.crt /host/tmp/awf-lib/awf-ca.crt 2>/dev/null && \
[ -f /host/tmp/awf-lib/awf-ca.crt ]; then
AWF_CA_CHROOT="/tmp/awf-lib/awf-ca.crt"
export NODE_EXTRA_CA_CERTS="$AWF_CA_CHROOT"
# SSL_CERT_FILE is respected by curl, git, Python requests, Ruby, and most
# OpenSSL-based tools. This ensures non-Node.js tools also trust the AWF CA.
export SSL_CERT_FILE="$AWF_CA_CHROOT"
export REQUESTS_CA_BUNDLE="$AWF_CA_CHROOT"
echo "[entrypoint] AWF CA certificate copied to chroot at $AWF_CA_CHROOT"
echo "[entrypoint] NODE_EXTRA_CA_CERTS updated to $AWF_CA_CHROOT"
echo "[entrypoint] SSL_CERT_FILE updated to $AWF_CA_CHROOT"
else
echo "[entrypoint][WARN] Could not copy AWF CA certificate to chroot — ssl-bump TLS may fail"
fi
else
echo "[entrypoint][WARN] Could not create /host/tmp/awf-lib for CA cert — ssl-bump TLS may fail in chroot"
fi
fi
# Verify capsh is available on the host (required for privilege drop)
if ! chroot /host which capsh >/dev/null 2>&1; then
echo "[entrypoint][ERROR] capsh not found on host system"
echo "[entrypoint][ERROR] Install libcap2-bin package: apt-get install libcap2-bin"
exit 1
fi
# Backup and copy container's resolv.conf to host (preserves AWF DNS configuration)
# This ensures DNS queries inside the chroot use the configured DNS servers
# NOTE: We backup the host's original resolv.conf and set up a trap to restore it
RESOLV_BACKUP="/host/etc/resolv.conf.awf-backup-$$"
RESOLV_MODIFIED=false
RESOLV_CREATED=false
if [ -f /host/etc/resolv.conf ]; then
# File exists: backup original and replace with AWF DNS configuration
if cp /host/etc/resolv.conf "$RESOLV_BACKUP" 2>/dev/null; then
if cp /etc/resolv.conf /host/etc/resolv.conf.awf 2>/dev/null; then
mv /host/etc/resolv.conf.awf /host/etc/resolv.conf 2>/dev/null && RESOLV_MODIFIED=true
echo "[entrypoint] DNS configuration copied to chroot (backup at $RESOLV_BACKUP)"
else
echo "[entrypoint][WARN] Could not copy DNS configuration to chroot"
fi
else
echo "[entrypoint][WARN] Could not backup host resolv.conf, skipping DNS override"
fi
else
# File doesn't exist: selective /etc mounts don't include resolv.conf
# Create it from the container's resolv.conf (which has AWF DNS config)
if cp /etc/resolv.conf /host/etc/resolv.conf 2>/dev/null; then
RESOLV_CREATED=true
echo "[entrypoint] DNS configuration created in chroot (/host/etc/resolv.conf)"
else
echo "[entrypoint][WARN] Could not create DNS configuration in chroot"
fi
fi
# Inject host.docker.internal into chroot's /etc/hosts when host access is enabled
# Docker adds this to the container's /etc/hosts via extra_hosts, but the chroot
# uses a separate hosts file that doesn't have it. The container's /etc/hosts has
# the correct mapping, so we copy it to the chroot's /etc/hosts.
HOSTS_MODIFIED=false
if [ "${AWF_ENABLE_HOST_ACCESS}" = "1" ]; then
HOST_DOCKER_ENTRY=$(grep "host.docker.internal" /etc/hosts 2>/dev/null | head -1 || true)
if [ -n "$HOST_DOCKER_ENTRY" ] && ! grep -q "host.docker.internal" /host/etc/hosts 2>/dev/null; then
if echo "$HOST_DOCKER_ENTRY" >> /host/etc/hosts 2>/dev/null; then
HOSTS_MODIFIED=true
echo "[entrypoint] Added host.docker.internal to chroot /etc/hosts"
else
echo "[entrypoint][WARN] Could not add host.docker.internal to chroot /etc/hosts"
fi
fi
fi
# Determine working directory inside the chroot
# AWF_WORKDIR is set by docker-manager.ts (containerWorkDir or HOME)
# For chroot mode, paths like /home/user stay the same (no /host prefix)
CONTAINER_WORKDIR="${AWF_WORKDIR:-${HOME:-/}}"
if [ -n "${CONTAINER_WORKDIR}" ] && [ "${CONTAINER_WORKDIR#/host}" != "${CONTAINER_WORKDIR}" ]; then
# Strip /host prefix if present (for paths that include it)
CHROOT_WORKDIR="${CONTAINER_WORKDIR#/host}"
[ -z "${CHROOT_WORKDIR}" ] && CHROOT_WORKDIR="/"
else
# Use the path as-is (normal paths like /home/user, /tmp, etc.)
CHROOT_WORKDIR="${CONTAINER_WORKDIR}"
fi
echo "[entrypoint] Chroot working directory: ${CHROOT_WORKDIR}"
# Validate working directory exists in chroot
if [ ! -d "/host${CHROOT_WORKDIR}" ]; then
echo "[entrypoint][WARN] Working directory ${CHROOT_WORKDIR} does not exist on host, will use /"
fi
# Find the user name on the host system by UID
# This allows us to run as the same user inside the chroot
HOST_USER_UID="${AWF_USER_UID:-1000}"
HOST_USER=$(chroot /host getent passwd "${HOST_USER_UID}" 2>/dev/null | cut -d: -f1 || echo "")
if [ -z "${HOST_USER}" ]; then
# Fall back to 'nobody' if user not found by UID
HOST_USER="nobody"
echo "[entrypoint][WARN] Could not find user with UID ${HOST_USER_UID} on host, using ${HOST_USER}"
else
echo "[entrypoint] Running as host user: ${HOST_USER} (UID: ${HOST_USER_UID})"
fi
# Write the command to a temporary script file in the chroot
# This avoids complex quoting issues with nested shells
SCRIPT_FILE="/tmp/awf-cmd-$$.sh"
# Use host's actual PATH if provided, otherwise construct a default
# This ensures we use the same Python/Node/Go versions as the host
# Generate the PATH-setup preamble of the command script. Two modes:
#   * AWF_HOST_PATH set   -> bake the host's exact PATH into the script
#     (unquoted heredoc below: ${AWF_HOST_PATH} expands NOW, in this shell)
#   * AWF_HOST_PATH unset -> emit a self-contained scanner that rebuilds a
#     reasonable PATH at runtime (quoted 'AWFEOF' heredoc: copied verbatim)
# Heredoc terminators (AWFEOF) must remain at column 0.
if [ -n "${AWF_HOST_PATH}" ]; then
echo "[entrypoint] Using host PATH for chroot"
cat > "/host${SCRIPT_FILE}" << AWFEOF
#!/bin/bash
# Use the host's actual PATH (passed via AWF_HOST_PATH)
export PATH="${AWF_HOST_PATH}"
AWFEOF
# Add CARGO_HOME/bin to PATH if provided (for Rust/Cargo on GitHub Actions)
# Note: \$PATH is backslash-escaped in the emitted lines so it expands when
# the generated script runs inside the chroot, not here.
if [ -n "${AWF_CARGO_HOME}" ]; then
echo "[entrypoint] Adding CARGO_HOME/bin to PATH: ${AWF_CARGO_HOME}/bin"
echo "export PATH=\"${AWF_CARGO_HOME}/bin:\$PATH\"" >> "/host${SCRIPT_FILE}"
echo "export CARGO_HOME=\"${AWF_CARGO_HOME}\"" >> "/host${SCRIPT_FILE}"
# Also set RUSTUP_HOME if provided (needed for rustc to find toolchain)
if [ -n "${AWF_RUSTUP_HOME}" ]; then
echo "[entrypoint] Setting RUSTUP_HOME: ${AWF_RUSTUP_HOME}"
echo "export RUSTUP_HOME=\"${AWF_RUSTUP_HOME}\"" >> "/host${SCRIPT_FILE}"
fi
else
# Fallback: detect Cargo from default location if CARGO_HOME not provided
# This ensures Rust binaries work even when CARGO_HOME env var is not set
# (the [ -d ] test is escaped into the script, so it evaluates at runtime
# against the chroot user's $HOME)
echo "# Add Cargo bin for Rust if it exists (fallback when CARGO_HOME not provided)" >> "/host${SCRIPT_FILE}"
echo "[ -d \"\$HOME/.cargo/bin\" ] && export PATH=\"\$HOME/.cargo/bin:\$PATH\"" >> "/host${SCRIPT_FILE}"
fi
# Add JAVA_HOME/bin to PATH if provided (for Java on GitHub Actions)
# Also set LD_LIBRARY_PATH to include Java's lib directory for libjli.so
if [ -n "${AWF_JAVA_HOME}" ]; then
echo "[entrypoint] Adding JAVA_HOME/bin to PATH: ${AWF_JAVA_HOME}/bin"
echo "export PATH=\"${AWF_JAVA_HOME}/bin:\$PATH\"" >> "/host${SCRIPT_FILE}"
echo "export JAVA_HOME=\"${AWF_JAVA_HOME}\"" >> "/host${SCRIPT_FILE}"
# Java needs LD_LIBRARY_PATH to find libjli.so and other shared libs
echo "export LD_LIBRARY_PATH=\"${AWF_JAVA_HOME}/lib:${AWF_JAVA_HOME}/lib/server:\$LD_LIBRARY_PATH\"" >> "/host${SCRIPT_FILE}"
fi
# Add DOTNET_ROOT to PATH if provided (for .NET on GitHub Actions)
if [ -n "${AWF_DOTNET_ROOT}" ]; then
echo "[entrypoint] Adding DOTNET_ROOT to PATH: ${AWF_DOTNET_ROOT}"
echo "export PATH=\"${AWF_DOTNET_ROOT}:\$PATH\"" >> "/host${SCRIPT_FILE}"
echo "export DOTNET_ROOT=\"${AWF_DOTNET_ROOT}\"" >> "/host${SCRIPT_FILE}"
fi
# Add GOROOT/bin to PATH if provided (required for Go on GitHub Actions with trimmed binaries)
# This ensures the correct Go version is found even if AWF_HOST_PATH has wrong ordering
# (prepending wins because later exports put GOROOT/bin ahead of the baked PATH)
if [ -n "${AWF_GOROOT}" ]; then
echo "[entrypoint] Adding GOROOT/bin to PATH: ${AWF_GOROOT}/bin"
echo "export PATH=\"${AWF_GOROOT}/bin:\$PATH\"" >> "/host${SCRIPT_FILE}"
echo "export GOROOT=\"${AWF_GOROOT}\"" >> "/host${SCRIPT_FILE}"
fi
# Add BUN_INSTALL/bin to PATH if provided (for Bun on GitHub Actions)
# Bun must be pre-installed on the host because it crashes inside chroot (restricted /proc)
if [ -n "${AWF_BUN_INSTALL}" ]; then
echo "[entrypoint] Adding BUN_INSTALL/bin to PATH: ${AWF_BUN_INSTALL}/bin"
echo "export PATH=\"${AWF_BUN_INSTALL}/bin:\$PATH\"" >> "/host${SCRIPT_FILE}"
echo "export BUN_INSTALL=\"${AWF_BUN_INSTALL}\"" >> "/host${SCRIPT_FILE}"
fi
else
echo "[entrypoint] Constructing default PATH for chroot"
# Quoted 'AWFEOF': the body below is written to the script verbatim; every
# $var and [ -d ] test in it runs later, inside the chroot.
cat > "/host${SCRIPT_FILE}" << 'AWFEOF'
#!/bin/bash
# Set comprehensive PATH for host binaries
# Include standard paths plus tool cache locations (GitHub Actions)
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# Dynamically scan /opt/hostedtoolcache for all installed tool bin directories
# This covers tools installed by any setup-* action (setup-ruby, setup-dart,
# setup-python, setup-node, setup-go, setup-java, etc.)
if [ -d "/opt/hostedtoolcache" ]; then
for tool_dir in /opt/hostedtoolcache/*/; do
for version_dir in "$tool_dir"*/; do
for arch_dir in "$version_dir"*/; do
if [ -d "${arch_dir}bin" ]; then
export PATH="${arch_dir}bin:$PATH"
fi
done
done
done
fi
# Add user's local bin if it exists
[ -d "$HOME/.local/bin" ] && export PATH="$HOME/.local/bin:$PATH"
# Add Cargo bin for Rust (common in development)
[ -d "$HOME/.cargo/bin" ] && export PATH="$HOME/.cargo/bin:$PATH"
AWFEOF
# Add GOROOT/bin to PATH if provided (required for Go on GitHub Actions with trimmed binaries)
# This ensures the correct Go version is found even if PATH has wrong ordering
if [ -n "${AWF_GOROOT}" ]; then
echo "[entrypoint] Adding GOROOT/bin to PATH: ${AWF_GOROOT}/bin"
echo "export PATH=\"${AWF_GOROOT}/bin:\$PATH\"" >> "/host${SCRIPT_FILE}"
echo "export GOROOT=\"${AWF_GOROOT}\"" >> "/host${SCRIPT_FILE}"
fi
fi
# Configure npm global prefix to a writable directory (since /usr is read-only)
# Preserve user-provided NPM_CONFIG_PREFIX if already set
# (the emitted lines are single-quoted here so $NPM_CONFIG_PREFIX and $HOME
# expand inside the generated script at runtime, not in this shell)
echo 'if [ -z "${NPM_CONFIG_PREFIX:-}" ]; then' >> "/host${SCRIPT_FILE}"
echo ' export NPM_CONFIG_PREFIX="$HOME/.npm-global"' >> "/host${SCRIPT_FILE}"
echo 'fi' >> "/host${SCRIPT_FILE}"
echo 'mkdir -p "$NPM_CONFIG_PREFIX/bin" 2>/dev/null' >> "/host${SCRIPT_FILE}"
echo 'export PATH="$NPM_CONFIG_PREFIX/bin:$PATH"' >> "/host${SCRIPT_FILE}"
# When the caller declares a hard Node.js dependency (AWF_REQUIRE_NODE=1,
# e.g. for Copilot CLI), append a fail-fast probe so a missing 'node' yields
# a clear exit 127 with guidance instead of an opaque downstream failure.
# Quoted 'AWFEOF' heredoc: appended to the script verbatim, evaluated at runtime.
if [ "${AWF_REQUIRE_NODE:-}" = "1" ]; then
cat >> "/host${SCRIPT_FILE}" << 'AWFEOF'
if ! command -v node >/dev/null 2>&1; then
echo "[entrypoint][ERROR] Copilot CLI requires Node.js, but 'node' is not available inside AWF chroot." >&2
echo "[entrypoint][ERROR] Ensure Node.js is installed on the runner and reachable from PATH inside the chroot." >&2
echo "[entrypoint][ERROR] If using setup-node or nvm, verify the install path is present and bind-mounted into /host." >&2
echo "[entrypoint][ERROR] Example locations include /opt/hostedtoolcache/... and $HOME/.nvm/..." >&2
exit 127
fi
AWFEOF
fi
# If AWF_PREFLIGHT_BINARY is set, verify the named binary is reachable inside the
# chroot before exec'ing the user command. This provides a fast, human-readable
# diagnostic when the runner slot lacks the binary (e.g. codex not installed).
if [ -n "${AWF_PREFLIGHT_BINARY:-}" ]; then
# Validate the binary name: must start with an alphanumeric char or underscore,
# then contain only [a-zA-Z0-9_.-] — prevents option injection (e.g. "-v")
# and shell meta-characters in the generated chroot startup script.
if [[ "${AWF_PREFLIGHT_BINARY}" =~ ^[a-zA-Z0-9_][a-zA-Z0-9_.-]*$ ]]; then
# Use "command -v -- <binary>" so the name is never parsed as an option.
# Quoting note: the '"'"' sequences below close the single-quoted printf
# format, emit a literal single quote, and reopen it — so the generated
# error messages show the binary name wrapped in single quotes.
printf 'if ! command -v -- %s >/dev/null 2>&1; then\n' "${AWF_PREFLIGHT_BINARY}" >> "/host${SCRIPT_FILE}"
printf ' echo "[entrypoint][ERROR] Required binary '"'"'%s'"'"' is not available inside AWF chroot." >&2\n' "${AWF_PREFLIGHT_BINARY}" >> "/host${SCRIPT_FILE}"
printf ' echo "[entrypoint][ERROR] Ensure '"'"'%s'"'"' is installed on the runner and present in a PATH directory bind-mounted into /host." >&2\n' "${AWF_PREFLIGHT_BINARY}" >> "/host${SCRIPT_FILE}"
printf ' echo "[entrypoint][ERROR] Standard bind-mounted PATH directories: /usr/local/bin, /usr/bin, /bin, /opt." >&2\n' >> "/host${SCRIPT_FILE}"
printf ' exit 127\n' >> "/host${SCRIPT_FILE}"
printf 'fi\n' >> "/host${SCRIPT_FILE}"
else
# Name failed validation: warn and skip the check rather than abort the run.
echo "[entrypoint][WARN] AWF_PREFLIGHT_BINARY='${AWF_PREFLIGHT_BINARY}' contains unsafe characters; skipping preflight check." >&2
fi
fi
# Append the user command itself to the generated script.
# Docker CMD normally arrives as ['/bin/bash', '-c', 'command_string']; in
# that exact shape we unwrap and write the command string directly. Writing
# the raw string (instead of re-quoting the full argv) avoids an extra
# 'bash -c' layer, which matters for runtimes (Java, .NET) that inspect
# /proc/self/exe and would otherwise see /bin/bash.
# Trust assumption: the command string comes from Docker CMD set by
# docker-manager.ts from the user's own invocation — no extra sanitization.
# We require exactly 3 args (-eq, not -ge): anything else is a non-standard
# invocation and falls through to the fully-quoted form.
if [ $# -eq 3 ] && [ "$1" = "/bin/bash" ] && [ "$2" = "-c" ] && [ -n "$3" ]; then
  printf '%s\n' "$3" >> "/host${SCRIPT_FILE}"
else
  # Non-standard shape: write every argument shell-quoted on one line.
  printf '%q ' "$@" >> "/host${SCRIPT_FILE}"
  printf '\n' >> "/host${SCRIPT_FILE}"
fi
chmod +x "/host${SCRIPT_FILE}"
# Execution plan (performed further below):
#   1. chroot /host            - filesystem root becomes the host's /
#   2. cd to the working dir   - falls back to / if missing
#   3. capsh --drop/--user     - atomically drops NET_ADMIN/SYS_CHROOT and
#                                switches to the mapped user (host needs capsh)
#   4. an EXIT trap runs CLEANUP_CMD to remove the script and undo DNS/hosts edits
# Assemble CLEANUP_CMD here. All paths are from the chroot's perspective
# (no /host prefix) because the trap runs inside the chroot.
_append_cleanup() {
  # Append one ';'-separated step to CLEANUP_CMD.
  CLEANUP_CMD="${CLEANUP_CMD}; $1"
}
CLEANUP_CMD="rm -f ${SCRIPT_FILE}"
if [ "$RESOLV_MODIFIED" = "true" ]; then
  # The backup path was recorded with the container's /host prefix; strip it.
  CHROOT_RESOLV_BACKUP="${RESOLV_BACKUP#/host}"
  _append_cleanup "mv '${CHROOT_RESOLV_BACKUP}' /etc/resolv.conf 2>/dev/null || true"
  echo "[entrypoint] DNS configuration will be restored on exit"
elif [ "$RESOLV_CREATED" = "true" ]; then
  # We created resolv.conf ourselves; remove it to leave no trace.
  _append_cleanup "rm -f /etc/resolv.conf 2>/dev/null || true"
  echo "[entrypoint] DNS configuration will be removed on exit"
fi
if [ "$HOSTS_MODIFIED" = "true" ]; then
  # Delete only the exact host.docker.internal line we appended — the precise
  # pattern avoids touching unrelated /etc/hosts entries.
  _append_cleanup "sed -i '/^[0-9.]\\+[[:space:]]\\+host\\.docker\\.internal\$/d' /etc/hosts 2>/dev/null || true"
  echo "[entrypoint] host.docker.internal will be removed from /etc/hosts on exit"
fi
if [ -n "${ONE_SHOT_TOKEN_LIB}" ] || [ -n "${AWF_CA_CHROOT}" ] || [ -n "${CHROOT_KEY_HELPER}" ]; then
  # One or more helpers were staged into /tmp/awf-lib; purge the whole dir.
  _append_cleanup "rm -rf /tmp/awf-lib 2>/dev/null || true"
fi
# Hand gh-aw config directories over to the chroot user. On self-hosted
# runners the host-side gh-aw tooling creates them as root, locking out the
# unprivileged chroot user. chown (not chmod a+rwX) keeps them from becoming
# world-writable — important on multi-user self-hosted runners.
_grant_dir_to_chroot_user() {
  # $1 = directory; recursively chown to the chroot user's UID:GID.
  if chown -R "${HOST_UID}:${HOST_GID}" "$1" 2>/dev/null; then
    echo "[entrypoint] Transferred $1 ownership to chroot user (${HOST_UID}:${HOST_GID})"
  else
    echo "[entrypoint][WARN] Failed to transfer $1 ownership to chroot user"
  fi
}
if [ -d /host/tmp/gh-aw ]; then
  _grant_dir_to_chroot_user /host/tmp/gh-aw
fi
# Safe-outputs directory — its location varies by gh-aw version, so derive it
# from the GH_AW_SAFE_OUTPUTS file path when provided.
if [ -n "${GH_AW_SAFE_OUTPUTS:-}" ]; then
  _so_dir="/host$(dirname "$GH_AW_SAFE_OUTPUTS")"
  if [ -d "$_so_dir" ]; then
    _grant_dir_to_chroot_user "$_so_dir"
  fi
fi
# Make sure ~/.gemini exists and belongs to the chroot user. If Docker created
# it root:root (it didn't exist on the host at container start), the Gemini CLI
# cannot write its project registry (the atomic rename of projects.json.tmp ->
# projects.json fails with ENOENT). writeConfigs() pre-creates it host-side,
# but on a first run or after a failed run it may still be root-owned, so we
# fix ownership here (as root, before the privilege drop) as defense in depth.
GEMINI_DIR="/host${HOME}/.gemini"
mkdir -p "${GEMINI_DIR}" 2>/dev/null || true
if chown "${HOST_UID}:${HOST_GID}" "${GEMINI_DIR}" 2>/dev/null; then
echo "[entrypoint] Ensured ~/.gemini ownership for chroot user (${HOST_UID}:${HOST_GID})"
else
echo "[entrypoint][WARN] Could not set ~/.gemini ownership to chroot user (${HOST_UID}:${HOST_GID})"
fi
# One-shot token protection: when the library was staged into the chroot,
# prepend an LD_PRELOAD export to the inner command line; otherwise inject
# nothing (empty string expands to a blank line in the chroot script).
if [ -n "${ONE_SHOT_TOKEN_LIB}" ]; then
  LD_PRELOAD_CMD="export LD_PRELOAD=${ONE_SHOT_TOKEN_LIB};"
else
  LD_PRELOAD_CMD=""
fi
# Signal handler: forward TERM/INT to the agent process, reap it, then exit
# with the conventional SIGTERM status (128 + 15 = 143).
cleanup_and_exit() {
  [ -z "$AGENT_PID" ] || {
    kill -TERM "$AGENT_PID" 2>/dev/null || true
    wait "$AGENT_PID" 2>/dev/null || true
  }
  exit 143
}
trap cleanup_and_exit TERM INT
# SECURITY: Run agent command in background, then unset tokens from parent shell
# This prevents tokens from being accessible via /proc/1/environ after agent starts
# The one-shot-token library caches tokens in the agent process, so agent can still read them
#
# The double-quoted inner script below is expanded by THIS shell before chroot
# runs: CHROOT_WORKDIR, CLEANUP_CMD, LD_PRELOAD_CMD, CAPS_TO_DROP, HOST_USER
# and SCRIPT_FILE are baked in as literal text. The EXIT trap inside the
# chroot shell runs CLEANUP_CMD (script removal, resolv.conf/hosts restore)
# on every exit path of the inner shell.
# NOTE(review): CHROOT_WORKDIR and CLEANUP_CMD land inside single quotes in
# the inner script; a value containing a single quote would break the quoting.
# Assumed safe because these values come from docker-manager.ts — confirm.
chroot /host /bin/bash -c "
cd '${CHROOT_WORKDIR}' 2>/dev/null || cd /
trap '${CLEANUP_CMD}' EXIT
${LD_PRELOAD_CMD}
exec capsh --drop=${CAPS_TO_DROP} --user=${HOST_USER} -- -c 'exec ${SCRIPT_FILE}'
" &
AGENT_PID=$!
# Wait for agent to initialize and cache tokens (up to 1 second)
# The one-shot-token LD_PRELOAD library caches tokens in ~100ms during process init.
# Poll every 100ms so fast commands (e.g. 'echo ok') don't pay the full wait.
for _i in 1 2 3 4 5 6 7 8 9 10; do
kill -0 "$AGENT_PID" 2>/dev/null || break
sleep 0.1
done
# Unset all sensitive tokens from parent shell environment
# (unset_sensitive_tokens is defined earlier in this file)
unset_sensitive_tokens
# Wait for agent command to complete and capture its exit code
# (wait returns the agent's exit status; 128+N if it died from signal N)
wait $AGENT_PID
EXIT_CODE=$?
trap - TERM INT
exit $EXIT_CODE
else
# Original behavior - run in container filesystem
# Drop capabilities and privileges, then execute the user command
# Non-chroot mode: stage the gh CLI proxy wrapper as /tmp/awf-lib/gh.
# Prepending /tmp/awf-lib to PATH makes the wrapper shadow the system gh
# at /usr/bin/gh.
_gh_wrapper=/usr/local/bin/gh-cli-proxy-wrapper.sh
if [ -n "$AWF_CLI_PROXY_URL" ] && [ -f "$_gh_wrapper" ]; then
  mkdir -p /tmp/awf-lib
  if cp "$_gh_wrapper" /tmp/awf-lib/gh 2>/dev/null && chmod +x /tmp/awf-lib/gh 2>/dev/null; then
    export PATH="/tmp/awf-lib:${PATH}"
    echo "[entrypoint] gh CLI proxy wrapper installed at /tmp/awf-lib/gh"
  else
    echo "[entrypoint][WARN] Could not install gh CLI proxy wrapper"
  fi
fi
# Defense in depth for non-chroot mode, in order:
#   1. capsh drops NET_ADMIN/SYS_CHROOT from the bounding set — irreversible,
#      so even code that later escalates to root cannot touch iptables rules
#      or use chroot.
#   2. gosu switches to awfuser (drops root privileges).
#   3. The user command runs WITHOUT exec, so this shell survives to scrub
#      tokens from its own environment afterwards.
# one-shot-token.so caches secrets in the agent's memory during process init
# and unsets them from its environment, clearing /proc/self/environ.
export LD_PRELOAD=/usr/local/lib/one-shot-token.so
# Signal handler: forward TERM/INT to the agent process, reap it, then exit
# with the conventional SIGTERM status (128 + 15 = 143).
cleanup_and_exit() {
  [ -z "$AGENT_PID" ] || {
    kill -TERM "$AGENT_PID" 2>/dev/null || true
    wait "$AGENT_PID" 2>/dev/null || true
  }
  exit 143
}
trap cleanup_and_exit TERM INT
# SECURITY: Run agent command in background, then unset tokens from parent shell
# This prevents tokens from being accessible via /proc/1/environ after agent starts
# The one-shot-token library caches tokens in the agent process, so agent can still read them
if [ -n "$CAPS_TO_DROP" ]; then
# printf '%q ' re-quotes each user argument so the shell spawned by capsh
# sees exactly the original argv. CAPS_TO_DROP is expanded unquoted;
# assumed to be a comma-separated capability list with no whitespace — confirm.
capsh --drop=$CAPS_TO_DROP -- -c "exec gosu awfuser $(printf '%q ' "$@")" &
else
# No capabilities to drop - just switch to unprivileged user
gosu awfuser "$@" &
fi
AGENT_PID=$!
# Wait for agent to initialize and cache tokens (up to 1 second)
# The one-shot-token LD_PRELOAD library caches tokens in ~100ms during process init.
# Poll every 100ms so fast commands (e.g. 'echo ok') don't pay the full wait.
for _i in 1 2 3 4 5 6 7 8 9 10; do
kill -0 "$AGENT_PID" 2>/dev/null || break
sleep 0.1
done
# Unset all sensitive tokens from parent shell environment
# (unset_sensitive_tokens is defined earlier in this file)
unset_sensitive_tokens
# Wait for agent command to complete and capture its exit code
# (wait returns the agent's exit status; 128+N if it died from signal N)
wait $AGENT_PID
EXIT_CODE=$?
trap - TERM INT
exit $EXIT_CODE
fi