rlm/environments/daytona_repl.py — 17 additions & 7 deletions
@@ -152,7 +152,7 @@ def respond():
 # =============================================================================
 
 
-def _build_exec_script(code: str, broker_port: int = 8080) -> str:
+def _build_exec_script(code: str, broker_port: int = 8080, depth: int = 1) -> str:
     """
     Build a script that executes code with state persistence.
     LLM queries go through the local broker server.
@@ -185,7 +185,7 @@ def llm_query(prompt, model=None):
     try:
         response = requests.post(
             f"{{BROKER_URL}}/enqueue",
-            json={{"type": "single", "prompt": prompt, "model": model}},
+            json={{"type": "single", "prompt": prompt, "model": model, "depth": {depth}}},
             timeout=300,
         )
         data = response.json()
@@ -201,7 +201,7 @@ def llm_query_batched(prompts, model=None):
     try:
         response = requests.post(
             f"{{BROKER_URL}}/enqueue",
-            json={{"type": "batched", "prompts": prompts, "model": model}},
+            json={{"type": "batched", "prompts": prompts, "model": model, "depth": {depth}}},
             timeout=300,
        )
         data = response.json()
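
Note that the doubled braces in these two hunks are f-string escapes: `{{` and `}}` render as literal braces in the generated sandbox script, while `{depth}` is interpolated once, when `_build_exec_script` builds the script. A minimal, self-contained sketch of that rendering (the helper name is illustrative, not part of the PR):

```python
# Doubled braces survive f-string interpolation as literal braces, so only
# {depth} is substituted at script-build time.
def render_fragment(depth: int = 1) -> str:
    return f'json={{"type": "single", "prompt": prompt, "model": model, "depth": {depth}}},'

print(render_fragment(depth=2))
# json={"type": "single", "prompt": prompt, "model": model, "depth": 2},
```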
@@ -329,6 +329,8 @@ def __init__(
         lm_handler_address: tuple[str, int] | None = None,
         context_payload: dict | list | str | None = None,
         setup_code: str | None = None,
+        persistent: bool = False,
+        depth: int = 1,
         **kwargs,
     ):
         """
@@ -347,9 +349,15 @@
             lm_handler_address: (host, port) tuple for LM Handler server.
             context_payload: Initial context to load into the environment.
             setup_code: Optional code to run during setup.
+            persistent: Whether to persist state across calls (not yet supported).
+            depth: Depth level for LLM request routing (used by LMHandler).
             **kwargs: Additional arguments passed to base class.
         """
-        super().__init__(**kwargs)
+        if persistent:
+            raise NotImplementedError(
+                "Persistent REPLs are currently not supported for environment: DaytonaREPL"
+            )
+        super().__init__(persistent=persistent, depth=depth, **kwargs)
 
         self.api_key = api_key or os.getenv("DAYTONA_API_KEY")
         self.target = target
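
As a quick usage sketch of the new constructor behavior: `depth` is forwarded to the base class and later tags outgoing LM requests, while `persistent=True` fails fast. The import path is inferred from the file location in this PR, and any constructor arguments beyond the two added here are assumptions:

```python
# Hypothetical usage; DaytonaREPL is named in the error message above,
# but the import path is inferred, not confirmed by this diff.
from rlm.environments.daytona_repl import DaytonaREPL

repl = DaytonaREPL(depth=2)  # LM requests from this environment carry depth=2

try:
    DaytonaREPL(persistent=True)  # persistence is explicitly unsupported here
except NotImplementedError as exc:
    print(exc)
```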
@@ -489,7 +497,7 @@ def _handle_llm_request(self, req_data: dict) -> dict:
 
         if req_type == "single":
             prompt = req_data.get("prompt")
-            request = LMRequest(prompt=prompt, model=model)
+            request = LMRequest(prompt=prompt, model=model, depth=self.depth)
             response = send_lm_request(self.lm_handler_address, request)
 
             if not response.success:
@@ -503,7 +511,9 @@
 
         elif req_type == "batched":
             prompts = req_data.get("prompts", [])
-            responses = send_lm_request_batched(self.lm_handler_address, prompts, model=model)
+            responses = send_lm_request_batched(
+                self.lm_handler_address, prompts, model=model, depth=self.depth
+            )
 
             results = []
             for resp in responses:
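
For reference, the broker payload that reaches this batched branch has the shape below, with field names taken directly from the generated `llm_query_batched` helper; the concrete values are made up for illustration. Note that the sandbox bakes `depth` into the script at build time while the host side re-applies `self.depth` here, so both sides originate from the same environment setting:

```python
# Example req_data for the batched branch; values are illustrative only.
req_data = {
    "type": "batched",
    "prompts": ["summarize chunk A", "summarize chunk B"],
    "model": None,  # None presumably defers to the handler's default (assumed)
    "depth": 2,     # fixed when _build_exec_script rendered the script
}
```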
@@ -539,7 +549,7 @@ def execute_code(self, code: str) -> REPLResult:
         self.pending_llm_calls.clear()
 
         # Build and execute the script
-        script = _build_exec_script(code, self.BROKER_PORT)
+        script = _build_exec_script(code, self.BROKER_PORT, self.depth)
 
         # Upload the script as a temporary file
         script_path = "/tmp/rlm_exec_script.py"
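
Taken together, `depth` now flows from the constructor through `execute_code` into the generated sandbox script, and from the broker back into every `LMRequest` the handler forwards. A rough end-to-end check of the script-building half (a sketch; assumes `_build_exec_script` is importable from the module shown in this diff):

```python
from rlm.environments.daytona_repl import _build_exec_script

script = _build_exec_script("result = llm_query('hello')", broker_port=8080, depth=3)
# Both injected helpers (llm_query and llm_query_batched) should now tag
# their broker payloads with the chosen depth.
assert script.count('"depth": 3') >= 2
```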