Skip to content
Merged
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "@anthropic-ai/sandbox-runtime",
"version": "0.0.44",
"version": "0.0.45",
"description": "Anthropic Sandbox Runtime (ASRT) - A general-purpose tool for wrapping security boundaries around arbitrary processes",
"type": "module",
"main": "./dist/index.js",
Expand Down
49 changes: 45 additions & 4 deletions src/sandbox/linux-sandbox-utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -290,6 +290,13 @@ const generatedSeccompFilters: Set<string> = new Set()
// be cleaned up explicitly.
const bwrapMountPoints: Set<string> = new Set()

// Number of wrapped commands that have been generated but whose cleanup has
// not yet run. cleanupBwrapMountPoints() defers file deletion while this is
// positive, because deleting a mount point file on the host while another
// bwrap instance is still running detaches that instance's bind mount and
// the deny rule stops applying inside it.
let activeSandboxCount = 0

let exitHandlerRegistered = false

/**
Expand All @@ -308,7 +315,7 @@ function registerExitCleanupHandler(): void {
// Ignore cleanup errors during exit
}
}
cleanupBwrapMountPoints()
cleanupBwrapMountPoints({ force: true })
})

exitHandlerRegistered = true
Expand All @@ -325,10 +332,31 @@ function registerExitCleanupHandler(): void {
* ghost dotfiles (e.g. .bashrc, .gitconfig) from appearing in the working
* directory. It is also called automatically on process exit as a safety net.
*
* Safe to call at any time — it only removes files that were tracked during
* generateFilesystemArgs() and skips any that no longer exist.
* Each call decrements the active-sandbox counter that was incremented by
* wrapCommandWithSandboxLinux(). File deletion is deferred until the counter
* reaches zero. Deleting a mount point file on the host while another bwrap
* instance is still running detaches that instance's bind mount (the dentry
* is unhashed, so path lookup no longer finds the mount) and the deny rule
* stops applying inside that sandbox.
*
* Pass `{ force: true }` to delete unconditionally — used by the process-exit
* handler and reset() where deferral is not meaningful.
*/
export function cleanupBwrapMountPoints(): void {
export function cleanupBwrapMountPoints(opts?: { force?: boolean }): void {
if (!opts?.force) {
if (activeSandboxCount > 0) {
activeSandboxCount--
}
if (activeSandboxCount > 0) {
logForDebugging(
`[Sandbox Linux] Deferring mount point cleanup — ${activeSandboxCount} sandbox(es) still active`,
)
return
}
} else {
activeSandboxCount = 0
}

for (const mountPoint of bwrapMountPoints) {
try {
// Only remove if it's still the empty file/directory bwrap created.
Expand Down Expand Up @@ -1030,6 +1058,14 @@ export async function wrapCommandWithSandboxLinux(
return command
}

// Mark this sandbox invocation as active. cleanupBwrapMountPoints() will
// defer file deletion until this (and every other concurrent) invocation
// has been cleaned up. The matching decrement happens in
// cleanupBwrapMountPoints(), which the caller must invoke after the
// spawned command exits. If wrapping fails below, the catch block
// decrements so the count does not leak.
activeSandboxCount++

const bwrapArgs: string[] = ['--new-session', '--die-with-parent']
let seccompFilterPath: string | undefined = undefined

Expand Down Expand Up @@ -1234,6 +1270,11 @@ export async function wrapCommandWithSandboxLinux(

return wrappedCommand
} catch (error) {
// Undo the activeSandboxCount increment — the caller won't call
// cleanupBwrapMountPoints() for a wrap that threw.
if (activeSandboxCount > 0) {
activeSandboxCount--
}
// Clean up seccomp filter on error
if (seccompFilterPath && !seccompFilterPath.includes('/vendor/seccomp/')) {
generatedSeccompFilters.delete(seccompFilterPath)
Expand Down
5 changes: 3 additions & 2 deletions src/sandbox/sandbox-manager.ts
Original file line number Diff line number Diff line change
Expand Up @@ -755,8 +755,9 @@ function cleanupAfterCommand(): void {
}

async function reset(): Promise<void> {
// Clean up any leftover bwrap mount points
cleanupAfterCommand()
// Clean up any leftover bwrap mount points. Force past the
// active-sandbox counter — reset() means the session is over.
cleanupBwrapMountPoints({ force: true })

// Stop log monitor
if (logMonitorShutdown) {
Expand Down
220 changes: 218 additions & 2 deletions test/sandbox/mandatory-deny-paths.test.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,13 @@
import { describe, it, expect, beforeAll, afterAll, beforeEach } from 'bun:test'
import { spawnSync } from 'node:child_process'
import {
describe,
it,
expect,
beforeAll,
afterAll,
beforeEach,
afterEach,
} from 'bun:test'
import { spawn, spawnSync } from 'node:child_process'
import {
mkdirSync,
rmSync,
Expand Down Expand Up @@ -124,6 +132,14 @@ describe('Mandatory Deny Paths - Integration Tests', () => {
process.chdir(TEST_DIR)
})

// Per-test teardown: restore global sandbox-cleanup state between tests.
afterEach(() => {
  if (skipIfUnsupportedPlatform()) return
  // Force-reset the active-sandbox counter and scrub any leftover mount
  // point files so each test starts clean. Tests that return early or throw
  // without calling cleanupBwrapMountPoints() would otherwise leave the
  // counter positive, making later (non-forced) cleanup calls defer file
  // deletion and leak mount points into subsequent tests.
  cleanupBwrapMountPoints({ force: true })
})

async function runSandboxedWrite(
filePath: string,
content: string,
Expand Down Expand Up @@ -675,6 +691,206 @@ describe('Mandatory Deny Paths - Integration Tests', () => {
cleanupBwrapMountPoints()
})

// --- Concurrent sandbox mount point cleanup ---
//
// When two sandboxed commands run concurrently and one finishes first,
// cleanupBwrapMountPoints() must NOT delete mount point files that the
// still-running sandbox depends on. Deleting a mountpoint's dentry on the
// host detaches the bind mount in the child namespace, so the deny rule
// stops applying inside the still-running sandbox.

it('defers mount point cleanup while another sandbox is still running', async () => {
  if (getPlatform() !== 'linux') return

  // recursive:true creates both the work dir and its .claude subdir.
  const workDir = join(TEST_DIR, 'race-test')
  mkdirSync(join(workDir, '.claude'), { recursive: true })

  const previousCwd = process.cwd()
  process.chdir(workDir)

  try {
    const denyTarget = join(workDir, '.claude', 'settings.json')
    const writeConfig = {
      allowOnly: ['.'],
      denyWithinAllow: [denyTarget],
    }

    // Sandbox A: sleeps, then attempts a write to the denied path; the
    // deny rule must keep blocking that write for A's whole lifetime.
    // allowAllUnixSockets skips environment-dependent seccomp while the
    // filesystem isolation under test stays in place.
    const longRunner = await wrapCommandWithSandboxLinux({
      command: `sleep 2; echo '{"hooks":{}}' > .claude/settings.json`,
      needsNetworkRestriction: false,
      readConfig: undefined,
      writeConfig,
      enableWeakerNestedSandbox: true,
      allowAllUnixSockets: true,
    })

    const procA = spawn(longRunner, { shell: true })
    const procADone = new Promise<number | null>(resolve =>
      procA.on('exit', resolve),
    )

    // Give bwrap A time to start and materialize its mount point on the host.
    await new Promise(resolve => setTimeout(resolve, 500))
    expect(existsSync(denyTarget)).toBe(true)

    // Sandbox B: finishes immediately. Its caller then runs cleanup,
    // reproducing the real-world race against the still-running A.
    const quickRunner = await wrapCommandWithSandboxLinux({
      command: 'true',
      needsNetworkRestriction: false,
      readConfig: undefined,
      writeConfig,
      enableWeakerNestedSandbox: true,
      allowAllUnixSockets: true,
    })
    spawnSync(quickRunner, { shell: true, encoding: 'utf8', timeout: 10000 })

    // Caller-side cleanup after every command. Without deferral this would
    // also rip out sandbox A's mount point.
    cleanupBwrapMountPoints()

    // Let sandbox A attempt its (denied) write.
    await procADone

    // If cleanup had deleted the mount point early, A's bind mount would
    // have detached and the write would have landed on the host file.
    let contents = ''
    if (existsSync(denyTarget)) {
      contents = readFileSync(denyTarget, 'utf8')
    }
    expect(contents).not.toContain('hooks')

    cleanupBwrapMountPoints()
  } finally {
    process.chdir(previousCwd)
    rmSync(workDir, { recursive: true, force: true })
  }
}, 15000)

it('defers cleanup when two sandboxes share the same non-existent deny path', async () => {
  if (getPlatform() !== 'linux') return

  // recursive:true creates both the work dir and its .claude subdir.
  const workDir = join(TEST_DIR, 'race-test-2')
  mkdirSync(join(workDir, '.claude'), { recursive: true })

  const previousCwd = process.cwd()
  process.chdir(workDir)

  try {
    const denyTarget = join(workDir, '.claude', 'settings.json')
    const writeConfig = {
      allowOnly: ['.'],
      denyWithinAllow: [denyTarget],
    }

    // Wrap both commands BEFORE spawning anything, so each wrap observes
    // the deny path as non-existent and registers it as a mount point.
    const slowWrapped = await wrapCommandWithSandboxLinux({
      command: `sleep 2; echo WRITTEN > .claude/settings.json`,
      needsNetworkRestriction: false,
      readConfig: undefined,
      writeConfig,
      enableWeakerNestedSandbox: true,
      allowAllUnixSockets: true,
    })
    const fastWrapped = await wrapCommandWithSandboxLinux({
      command: 'sleep 0.5',
      needsNetworkRestriction: false,
      readConfig: undefined,
      writeConfig,
      enableWeakerNestedSandbox: true,
      allowAllUnixSockets: true,
    })

    const procA = spawn(slowWrapped, { shell: true })
    const procADone = new Promise<number | null>(resolve =>
      procA.on('exit', resolve),
    )

    // Sandbox B completes first; its cleanup must defer while A is alive.
    spawnSync(fastWrapped, { shell: true, encoding: 'utf8', timeout: 10000 })
    cleanupBwrapMountPoints()

    await procADone

    // The deny rule must have held inside sandbox A for its whole run.
    let contents = ''
    if (existsSync(denyTarget)) {
      contents = readFileSync(denyTarget, 'utf8')
    }
    expect(contents).not.toContain('WRITTEN')

    cleanupBwrapMountPoints()
  } finally {
    process.chdir(previousCwd)
    rmSync(workDir, { recursive: true, force: true })
  }
}, 15000)

it('deferred cleanup runs once all concurrent sandboxes finish', async () => {
  if (getPlatform() !== 'linux') return

  // recursive:true creates both the work dir and its .claude subdir.
  const workDir = join(TEST_DIR, 'race-test-3')
  mkdirSync(join(workDir, '.claude'), { recursive: true })

  const previousCwd = process.cwd()
  process.chdir(workDir)

  try {
    const denyTarget = join(workDir, '.claude', 'settings.json')
    const writeConfig = {
      allowOnly: ['.'],
      denyWithinAllow: [denyTarget],
    }

    const slowWrapped = await wrapCommandWithSandboxLinux({
      command: 'sleep 1',
      needsNetworkRestriction: false,
      readConfig: undefined,
      writeConfig,
      enableWeakerNestedSandbox: true,
      allowAllUnixSockets: true,
    })
    const fastWrapped = await wrapCommandWithSandboxLinux({
      command: 'true',
      needsNetworkRestriction: false,
      readConfig: undefined,
      writeConfig,
      enableWeakerNestedSandbox: true,
      allowAllUnixSockets: true,
    })

    const procA = spawn(slowWrapped, { shell: true })
    const procADone = new Promise<void>(resolve =>
      procA.on('exit', () => resolve()),
    )

    // Wait for bwrap A to start and create the mount point file.
    await new Promise(resolve => setTimeout(resolve, 300))
    expect(existsSync(denyTarget)).toBe(true)

    spawnSync(fastWrapped, { shell: true, encoding: 'utf8', timeout: 10000 })
    cleanupBwrapMountPoints()

    // Cleanup deferred: A is still running, so the mount point survives.
    expect(existsSync(denyTarget)).toBe(true)

    await procADone
    cleanupBwrapMountPoints()

    // Last active sandbox finished — the mount point file is now removed.
    expect(existsSync(denyTarget)).toBe(false)
  } finally {
    process.chdir(previousCwd)
    rmSync(workDir, { recursive: true, force: true })
  }
}, 15000)

it('non-existent .git/hooks deny does not turn .git into a file, breaking git', async () => {
if (getPlatform() !== 'linux') return

Expand Down
Loading