Skip to content

Commit e031082

Browse files
zilto
zilto
authored and
zilto
committed
code generation chain added
1 parent 144b6db commit e031082

File tree

6 files changed

+131
-0
lines changed

6 files changed

+131
-0
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
# Purpose of this module
2+
3+
This module uses the OpenAI completion API to generate code.
4+
5+
For any language, you can request `generated_code` to get the generated response. If you are generating Python code, you can execute it in a subprocess by requesting `execution_output` and `execution_error`.
6+
7+
# Configuration Options
8+
## Config.when
9+
This module doesn't receive configurations.
10+
11+
## Inputs
12+
- `query`: The query for which you want code generated.
13+
- `api_key`: Set the OpenAI API key to use. If None, read the environment variable `OPENAI_API_KEY`
14+
- `code_language`: Set the code language to generate the response in. Defaults to `python`
15+
16+
## Overrides
17+
- `prompt_template_to_generate_code`: Create a new prompt template with the fields `query` and `code_language`.
18+
- `prompt_to_generate_code`: Manually provide a prompt to generate Python code
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,107 @@
1+
import logging
import os
import subprocess
import sys
from typing import Optional
5+
6+
from hamilton.function_modifiers import extract_fields
7+
8+
logger = logging.getLogger(__name__)
9+
10+
from hamilton import contrib
11+
12+
with contrib.catch_import_errors(__name__, __file__, logger):
13+
import openai
14+
15+
16+
def llm_client(api_key: Optional[str] = None) -> openai.OpenAI:
    """Instantiate the OpenAI client used by the dataflow.

    :param api_key: OpenAI API key. When None, falls back to the
        ``OPENAI_API_KEY`` environment variable.
    :return: a configured ``openai.OpenAI`` client.
    """
    key = os.environ.get("OPENAI_API_KEY") if api_key is None else api_key
    return openai.OpenAI(api_key=key)
22+
23+
24+
def prompt_template_to_generate_code() -> str:
    """Return the default prompt template asking the LLM for code.

    The template exposes two fields to be filled by ``.format()``:
    ``query`` (the user's problem) and ``code_language`` (the target
    language, e.g. "python").

    Fix: the instruction line previously hardcoded "python" even though the
    rest of the template is parameterized with ``{code_language}``; it now
    uses the field consistently so non-python requests are coherent.
    """
    return """Write some {code_language} code to solve the user's problem.

Return only {code_language} code in Markdown format, e.g.:

```{code_language}
....
```

user problem
{query}

{code_language} code
"""
38+
39+
40+
def prompt_to_generate_code(
    prompt_template_to_generate_code: str, query: str, code_language: str = "python"
) -> str:
    """Fill the prompt template with the user query and the target language.

    :param prompt_template_to_generate_code: template with ``{query}`` and
        ``{code_language}`` fields.
    :param query: the problem the user wants code for.
    :param code_language: language to generate; defaults to "python".
    :return: the fully rendered prompt string.
    """
    fields = dict(query=query, code_language=code_language)
    return prompt_template_to_generate_code.format(**fields)
47+
48+
49+
def response_generated_code(llm_client: openai.OpenAI, prompt_to_generate_code: str) -> str:
    """Send the code-generation prompt to the OpenAI completions API.

    :param llm_client: configured OpenAI client.
    :param prompt_to_generate_code: the rendered prompt.
    :return: raw text of the first completion choice (may include a
        Markdown code fence; see ``generated_code`` for extraction).
    """
    completion = llm_client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt_to_generate_code,
    )
    first_choice = completion.choices[0]
    return first_choice.text
55+
56+
57+
def generated_code(response_generated_code: str, code_language: str = "python") -> str:
    """Extract the code between the Markdown fences of the LLM response.

    Fix: the opening fence was previously hardcoded to ```` ```python ````
    even though the prompt asks the model for a ``{code_language}`` fence,
    so any non-python response yielded an empty string. The fence language
    is now parameterized; the default keeps the original python behavior.

    :param response_generated_code: raw LLM response text.
    :param code_language: language tag expected on the opening fence.
    :return: the fenced code (empty string when no matching fence is found).
    """
    _, _, lower_part = response_generated_code.partition(f"```{code_language}")
    code_part, _, _ = lower_part.partition("```")
    return code_part
61+
62+
63+
def code_prepared_for_execution(generated_code: str, code_language: str = "python") -> str:
    """Append an introspection snippet so the executed code prints its locals.

    Only python code can be executed downstream; any other language raises.

    :param generated_code: the extracted code to execute.
    :param code_language: must be "python".
    :return: the code plus a trailing snippet that prints the local
        variables the generated code defined.
    :raises ValueError: when ``code_language`` is not "python".
    """
    if code_language == "python":
        locals_printer = """
excluded_vars = { 'excluded_vars', '__builtins__', '__annotations__'} | set(dir(__builtins__))
local_vars = {k:v for k,v in locals().items() if k not in excluded_vars}
print(local_vars)
"""
        return generated_code + locals_printer
    raise ValueError("Can only execute the generated code if `code_language` = 'python'")
73+
74+
75+
@extract_fields(
    dict(
        execution_output=str,
        execution_error=str,
    )
)
def execute_output(code_prepared_for_execution: str) -> dict:
    """Execute the prepared Python code in a subprocess and capture streams.

    Fix: uses ``sys.executable`` instead of a bare ``"python"`` so the code
    runs under the same interpreter as the current process — a bare
    "python" may be missing from PATH or resolve to a different version.
    Also replaces Popen/communicate with the higher-level ``subprocess.run``.

    :param code_prepared_for_execution: python source to run via ``-c``.
    :return: dict with ``execution_output`` (stdout) and
        ``execution_error`` (stderr); no exception is raised on a non-zero
        exit code — errors surface through ``execution_error``.
    """
    completed = subprocess.run(
        [sys.executable, "-c", code_prepared_for_execution],
        capture_output=True,
        text=True,
    )
    return dict(execution_output=completed.stdout, execution_error=completed.stderr)
90+
91+
92+
# run as a script to test dataflow
if __name__ == "__main__":
    import __init__ as llm_generate_code

    from hamilton import driver

    dataflow_driver = driver.Builder().with_modules(llm_generate_code).build()

    # render the DAG to an image for visual inspection
    dataflow_driver.display_all_functions("dag.png", orient="TB")

    # override `generated_code` so no OpenAI call is needed for the smoke test
    results = dataflow_driver.execute(
        ["execution_output", "execution_error"],
        overrides=dict(generated_code="s = 'hello world'"),
    )
    print(results)
Loading
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
openai
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
{
2+
"schema": "1.0",
3+
"use_case_tags": ["LLM", "OpenAI"],
4+
"secondary_tags": {}
5+
}

contrib/hamilton/contrib/user/zilto/llm_generate_code/valid_configs.jsonl

Whitespace-only changes.

0 commit comments

Comments
 (0)