|
1 | | -""" |
2 | | -Example usage of E2B REPL with code execution and LLM queries. |
3 | | -
|
4 | | -Run with: python -m examples.e2b_repl_example |
5 | | -
|
6 | | -Requires E2B API key set in environment: |
7 | | - export E2B_API_KEY=your_api_key |
8 | | -""" |
9 | | - |
10 | | -from rlm.clients.base_lm import BaseLM |
11 | | -from rlm.core.lm_handler import LMHandler |
12 | | -from rlm.core.types import ModelUsageSummary, UsageSummary |
13 | | -from rlm.environments.e2b_repl import E2BREPL |
14 | | - |
15 | | - |
class MockLM(BaseLM):
    """Stub language model that answers every prompt with a canned echo.

    Used by the example so the E2B REPL can be exercised without any real
    LLM credentials or network calls to a model provider.
    """

    def __init__(self):
        super().__init__(model_name="mock-model")

    def completion(self, prompt):
        # Echo a truncated view of the prompt so callers can see routing works.
        return f"Mock response to: {prompt[:50]}"

    async def acompletion(self, prompt):
        # The async path simply delegates to the synchronous implementation.
        return self.completion(prompt)

    def get_usage_summary(self):
        # Fixed numbers: this mock never tracks real token usage.
        mock_usage = ModelUsageSummary(
            total_calls=1, total_input_tokens=10, total_output_tokens=10
        )
        return UsageSummary(model_usage_summaries={"mock-model": mock_usage})

    def get_last_usage(self):
        # The mock's "last" usage is indistinguishable from its total usage.
        return self.get_usage_summary()
39 | | - |
40 | | - |
def main():
    """Demonstrate the E2B REPL: bare code execution, then LLM-backed execution."""
    banner = "=" * 60
    divider = "-" * 40

    print(banner)
    print("E2B REPL Example")
    print(banner)

    # --- Example 1: run code in the sandbox with no LM handler attached ---
    print("\n[1] Basic code execution (no LLM handler)")
    print(divider)

    with E2BREPL(timeout=300) as sandbox:
        print(f"E2B sandbox created, ID: {sandbox.sandbox_id}")

        out = sandbox.execute_code("x = 1 + 2")
        print("Executed: x = 1 + 2")
        print(f"Locals: {out.locals}")

        out = sandbox.execute_code("print(x * 2)")
        print("Executed: print(x * 2)")
        print(f"Stdout: {out.stdout.strip()}")

        sandbox.execute_code("answer = 42")
        out = sandbox.execute_code('print(FINAL_VAR("answer"))')
        print(f"FINAL_VAR('answer'): {out.stdout.strip()}")

    # --- Example 2: wire a mock LM into the sandbox through an LMHandler ---
    print("\n[2] Code execution with LLM handler")
    print(divider)

    with LMHandler(client=MockLM()) as handler:
        print(f"LM Handler started at {handler.address}")

        with E2BREPL(
            timeout=300,
            lm_handler_address=handler.address,
        ) as sandbox:
            print(f"E2B sandbox created, ID: {sandbox.sandbox_id}")
            print(f"Broker URL: {sandbox.broker_url}")

            # A single LLM query routed from sandbox code back to the handler.
            out = sandbox.execute_code('response = llm_query("What is 2+2?")')
            print("Executed: response = llm_query('What is 2+2?')")
            print(f"Stderr: {out.stderr or '(none)'}")

            out = sandbox.execute_code("print(response)")
            print(f"Response: {out.stdout.strip()}")

            # A batched LLM query: several prompts in one call.
            out = sandbox.execute_code(
                'responses = llm_query_batched(["Question 1", "Question 2", "Question 3"])'
            )
            print("\nExecuted: responses = llm_query_batched([...])")

            out = sandbox.execute_code("print(f'Got {len(responses)} responses')")
            print(f"Result: {out.stdout.strip()}")

            out = sandbox.execute_code("print(responses[0])")
            print(f"First response: {out.stdout.strip()}")

    print("\n" + banner)
    print("Done!")
    print(banner)


if __name__ == "__main__":
    main()
"""Example: run an RLM completion inside an E2B sandbox environment.

Requires OPENAI_API_KEY and E2B credentials in the environment (or a .env file).
"""

import os

from dotenv import load_dotenv

from rlm import RLM
from rlm.logger import RLMLogger


def main() -> None:
    """Build an OpenAI-backed RLM with an E2B environment and run one completion."""
    load_dotenv()

    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        # Fail fast with a clear message instead of passing None to the backend
        # and getting a confusing authentication error much later.
        raise RuntimeError(
            "OPENAI_API_KEY is not set; export it or add it to your .env file"
        )

    logger = RLMLogger(log_dir="./logs")

    rlm = RLM(
        backend="openai",
        backend_kwargs={
            "api_key": api_key,
            "model_name": "gpt-5-nano",
        },
        environment="e2b",
        environment_kwargs={
            "timeout": 300,  # sandbox lifetime in seconds — TODO confirm unit
        },
        max_depth=1,
        logger=logger,
        verbose=True,
    )

    result = rlm.completion("Using your code, solve 2^(2^(2^(2))). Show your work in Python.")
    print(result.response)


# Guard so importing this module never triggers network calls or sandbox creation.
if __name__ == "__main__":
    main()
0 commit comments