Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
112 changes: 112 additions & 0 deletions examples/daytona_repl_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
"""
Example usage of Daytona REPL with code execution and LLM queries.

Run with: python -m examples.daytona_repl_example
"""

from rlm.clients.base_lm import BaseLM
from rlm.core.lm_handler import LMHandler
from rlm.core.types import ModelUsageSummary, UsageSummary
from rlm.environments.daytona_repl import DaytonaREPL


class MockLM(BaseLM):
    """Mock language model for the example: echoes back a prompt prefix.

    Implements the minimal BaseLM surface (completion, acompletion, usage
    reporting) without making any network calls.
    """

    def __init__(self):
        # Register under a fixed model name so usage summaries have a stable key.
        super().__init__(model_name="mock-model")

    def completion(self, prompt):
        """Return a canned reply containing the first 50 chars of the prompt."""
        head = prompt[:50]
        return f"Mock response to: {head}"

    async def acompletion(self, prompt):
        """Async variant; simply delegates to the synchronous implementation."""
        return self.completion(prompt)

    def get_usage_summary(self):
        """Report a fixed, single-call usage summary for the mock model."""
        mock_usage = ModelUsageSummary(
            total_calls=1, total_input_tokens=10, total_output_tokens=10
        )
        return UsageSummary(model_usage_summaries={"mock-model": mock_usage})

    def get_last_usage(self):
        """The mock keeps no per-call state; reuse the overall summary."""
        return self.get_usage_summary()


def main():
    """Walk through the Daytona REPL: plain execution, then LLM-backed execution.

    NOTE(review): requires the DAYTONA_API_KEY environment variable to be set
    (or an api_key passed explicitly to DaytonaREPL) and network access to a
    configured Daytona instance.
    """
    heavy_rule = "=" * 60
    light_rule = "-" * 40

    print(heavy_rule)
    print("Daytona REPL Example")
    print(heavy_rule)

    # Example 1: run code in a remote sandbox with no LLM handler attached.
    print("\n[1] Basic code execution (no LLM handler)")
    print(light_rule)

    try:
        with DaytonaREPL(name="rlm-example") as repl:
            print(f"Daytona sandbox started with ID: {repl.sandbox.id}")

            res = repl.execute_code("x = 1 + 2")
            print("Executed: x = 1 + 2")
            print(f"Locals: {res.locals}")

            # State persists across execute_code calls within one sandbox.
            res = repl.execute_code("print(x * 2)")
            print("Executed: print(x * 2)")
            print(f"Stdout: {res.stdout.strip()}")

            repl.execute_code("answer = 42")
            res = repl.execute_code('print(FINAL_VAR("answer"))')
            print(f"FINAL_VAR('answer'): {res.stdout.strip()}")

        # Example 2: attach an LMHandler so sandboxed code can call llm_query.
        print("\n[2] Code execution with LLM handler")
        print(light_rule)

        mock_client = MockLM()

        with LMHandler(client=mock_client) as handler:
            print(f"LM Handler started at {handler.address}")

            with DaytonaREPL(
                name="rlm-example-handler",
                lm_handler_address=handler.address,
            ) as repl:
                print(f"Daytona sandbox started with ID: {repl.sandbox.id}")
                print(f"Broker URL: {repl.broker_url}")

                # Single LLM query issued from inside the sandbox.
                res = repl.execute_code('response = llm_query("What is 2+2?")')
                print("Executed: response = llm_query('What is 2+2?')")
                if res.stderr:
                    print(f"Stderr: {res.stderr}")

                res = repl.execute_code("print(response)")
                print(f"Response: {res.stdout.strip()}")

                # Batched LLM queries fan out through the same handler.
                res = repl.execute_code(
                    'responses = llm_query_batched(["Question 1", "Question 2", "Question 3"])'
                )
                print("\nExecuted: responses = llm_query_batched([...])")

                res = repl.execute_code("print(f'Got {len(responses)} responses')")
                print(f"Result: {res.stdout.strip()}")

                res = repl.execute_code("print(responses[0])")
                print(f"First response: {res.stdout.strip()}")

    except Exception as e:
        # Best-effort example: surface the failure and a setup hint, then finish.
        print(f"Error: {e}")
        print("\nMake sure Daytona is configured correctly and DAYTONA_API_KEY is set.")

    print("\n" + heavy_rule)
    print("Done!")
    print(heavy_rule)


# Script entry point: python -m examples.daytona_repl_example
if __name__ == "__main__":
    main()
4 changes: 2 additions & 2 deletions examples/quickstart.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,13 @@
"model_name": "gpt-5-nano",
"api_key": os.getenv("OPENAI_API_KEY"),
},
environment="local",
environment="docker",
environment_kwargs={},
max_depth=1,
logger=logger,
verbose=True, # For printing to console with rich, disabled by default.
)

result = rlm.completion("Print me the first 100 powers of two, each on a newline.")
result = rlm.completion("Print me the first 5 powers of two, each on a newline.")

print(result)
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ dependencies = [

[project.optional-dependencies]
modal = ["modal>=0.73.0", "dill>=0.3.7"]
daytona = ["daytona>=0.128.1", "dill>=0.3.7"]
prime = ["prime-sandboxes>=0.2.0", "dill>=0.3.7"]

[build-system]
Expand Down
2 changes: 1 addition & 1 deletion rlm/core/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
"azure_openai",
"gemini",
]
EnvironmentType = Literal["local", "docker", "modal", "prime"]
EnvironmentType = Literal["local", "docker", "modal", "prime", "daytona"]


def _serialize_value(value: Any) -> Any:
Expand Down
10 changes: 7 additions & 3 deletions rlm/environments/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,12 @@


def get_environment(
environment: Literal["local", "modal", "docker", "prime"],
environment: Literal["local", "modal", "docker", "daytona", "prime"],
environment_kwargs: dict[str, Any],
) -> BaseEnv:
"""
Routes a specific environment and the args (as a dict) to the appropriate environment if supported.
Currently supported environments: ['local', 'modal', 'docker', 'prime']
Currently supported environments: ['local', 'modal', 'docker', 'daytona', 'prime']
"""
if environment == "local":
return LocalREPL(**environment_kwargs)
Expand All @@ -24,11 +24,15 @@ def get_environment(
from rlm.environments.docker_repl import DockerREPL

return DockerREPL(**environment_kwargs)
elif environment == "daytona":
from rlm.environments.daytona_repl import DaytonaREPL

return DaytonaREPL(**environment_kwargs)
elif environment == "prime":
from rlm.environments.prime_repl import PrimeREPL

return PrimeREPL(**environment_kwargs)
else:
raise ValueError(
f"Unknown environment: {environment}. Supported: ['local', 'modal', 'docker', 'prime']"
f"Unknown environment: {environment}. Supported: ['local', 'modal', 'docker', 'daytona', 'prime']"
)
Loading