
Commit c3ea42d

feat: upgrade openai client to v1.0+
1 parent 1c92dbe commit c3ea42d

File tree: 5 files changed (+74 lines, -80 lines)

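
The pattern applied across all five files is the standard openai 0.x to 1.x migration: module-level configuration (openai.api_key, openai.api_base) becomes an explicit client object, openai.ChatCompletion.create / .acreate become chat.completions.create on a sync or async client, and responses come back as typed objects rather than plain dicts. A minimal before/after sketch of that pattern, not taken from this commit (the model name and prompt are placeholders):

# Before (openai==0.27.x): module-level key, dict-style response access.
# import openai
# openai.api_key = os.environ["OPENAI_API_KEY"]
# resp = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=msgs)
# text = resp["choices"][0]["message"]["content"]

# After (openai>=1.0): explicit client, attribute-style response access.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment by default

msgs = [{"role": "user", "content": "Say hello."}]
resp = client.chat.completions.create(model="gpt-3.5-turbo", messages=msgs)
text = resp.choices[0].message.content  # v1 responses are pydantic models, not dicts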

agentverse/environments/tasksolving_env/rules/executor/tool_using.py

+34 -34

@@ -1,6 +1,8 @@
 import json
 import ast
-import openai
+from openai import AsyncOpenAI
+
+aclient = AsyncOpenAI()
 from string import Template
 from colorama import Fore
 from aiohttp import ClientSession
@@ -219,43 +221,41 @@ async def _summarize_webpage(webpage, question):
     )
     for _ in range(3):
         try:
-            response = await openai.ChatCompletion.acreate(
-                messages=[{"role": "user", "content": summarize_prompt}],
-                model="gpt-3.5-turbo-16k",
-                functions=[
-                    {
-                        "name": "parse_web_text",
-                        "description": "Parse the text of the webpage based on tthe question. Extract all related infomation about `Question` from the webpage. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!",
-                        "parameters": {
-                            "type": "object",
-                            "properties": {
-                                "summary": {
-                                    "type": "string",
-                                    "description": "Summary of the webpage with 50 words. Make sure all important information about `Question` is included. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!",
-                                },
-                                "related_details": {
+            response = await aclient.chat.completions.create(messages=[{"role": "user", "content": summarize_prompt}],
+                model="gpt-3.5-turbo-16k",
+                functions=[
+                    {
+                        "name": "parse_web_text",
+                        "description": "Parse the text of the webpage based on tthe question. Extract all related infomation about `Question` from the webpage. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!",
+                        "parameters": {
+                            "type": "object",
+                            "properties": {
+                                "summary": {
+                                    "type": "string",
+                                    "description": "Summary of the webpage with 50 words. Make sure all important information about `Question` is included. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!",
+                                },
+                                "related_details": {
+                                    "type": "string",
+                                    "description": "List all webpage details related to the question. Maximum 400 words. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!",
+                                },
+                                "useful_hyperlinks": {
+                                    "type": "array",
+                                    "description": "Maximum 3 items. Select useful hyperlinks in the webpage that related to the question. Make sure the url is useful for further browse. Don't provide repeated hyperlinks.",
+                                    "items": {
                                     "type": "string",
-                                    "description": "List all webpage details related to the question. Maximum 400 words. ! Don't provide information that is not shown in the webpage! ! Don't provide your own opinion!",
-                                },
-                                "useful_hyperlinks": {
-                                    "type": "array",
-                                    "description": "Maximum 3 items. Select useful hyperlinks in the webpage that related to the question. Make sure the url is useful for further browse. Don't provide repeated hyperlinks.",
-                                    "items": {
-                                        "type": "string",
-                                        "description": "! Don't provide hyperlinks that is not shown in the webpage! ! Don't provide your own opinion!",
-                                    },
+                                        "description": "! Don't provide hyperlinks that is not shown in the webpage! ! Don't provide your own opinion!",
                                 },
                             },
-                            "required": [
-                                "summary",
-                                "related_details",
-                                "useful_hyperlinks",
-                            ],
                         },
-                    }
-                ],
-                function_call={"name": "parse_web_text"},
-            )
+                            "required": [
+                                "summary",
+                                "related_details",
+                                "useful_hyperlinks",
+                            ],
+                        },
+                    }
+                ],
+                function_call={"name": "parse_web_text"})
         except Exception as e:
             logger.error("Failed to call the tool. Exception: " + str(e))
             continue
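
Note on this hunk, offered as a hedged aside rather than part of the commit: in openai>=1.0 the value returned by chat.completions.create is a pydantic ChatCompletion object, so any code later in _summarize_webpage that still subscripts the response as a dict (for example response["choices"][0]["message"]["function_call"]["arguments"], not shown in this diff) would need attribute access or an explicit model_dump(). A small sketch of reading the parse_web_text function-call arguments in the v1 style; parse_web_text_result is an illustrative name, not from the repository:

import json

from openai.types.chat import ChatCompletion


def parse_web_text_result(response: ChatCompletion) -> dict:
    """Return the parse_web_text arguments from a v1 response object, or {} if absent."""
    message = response.choices[0].message
    if message.function_call is None:
        return {}
    # The arguments still arrive as a JSON string, exactly as in 0.x.
    return json.loads(message.function_call.arguments)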

agentverse/llms/openai.py

+26 -32

@@ -18,7 +18,20 @@
 from .utils.jsonrepair import JsonRepair

 try:
-    import openai
+    from openai import AzureOpenAI, AsyncAzureOpenAI
+
+    client = AzureOpenAI(api_key=os.environ.get("OPENAI_API_KEY"),
+                         api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
+                         azure_endpoint=os.environ.get("AZURE_OPENAI_API_BASE"),
+                         api_version="2023-05-15",
+                         azure_endpoint="http://localhost:5000/v1",
+                         api_key="EMPTY")
+    aclient = AsyncAzureOpenAI(api_key=os.environ.get("OPENAI_API_KEY"),
+                               api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
+                               azure_endpoint=os.environ.get("AZURE_OPENAI_API_BASE"),
+                               api_version="2023-05-15",
+                               azure_endpoint="http://localhost:5000/v1",
+                               api_key="EMPTY")
     from openai.error import OpenAIError
 except ImportError:
     is_openai_available = False
@@ -28,13 +41,8 @@
 # if openai.proxy is None:
 #     openai.proxy = os.environ.get("HTTP_PROXY")
 if os.environ.get("OPENAI_API_KEY") != None:
-    openai.api_key = os.environ.get("OPENAI_API_KEY")
     is_openai_available = True
 elif os.environ.get("AZURE_OPENAI_API_KEY") != None:
-    openai.api_type = "azure"
-    openai.api_key = os.environ.get("AZURE_OPENAI_API_KEY")
-    openai.api_base = os.environ.get("AZURE_OPENAI_API_BASE")
-    openai.api_version = "2023-05-15"
     is_openai_available = True
 else:
     logger.warn(
@@ -112,8 +120,6 @@ def __init__(self, max_retry: int = 3, **kwargs):
         if len(kwargs) > 0:
             logger.warn(f"Unused arguments: {kwargs}")
         if args["model"] in LOCAL_LLMS:
-            openai.api_base = "http://localhost:5000/v1"
-            openai.api_key = "EMPTY"
         super().__init__(args=args, max_retry=max_retry)

     @classmethod
@@ -148,11 +154,9 @@ def generate_response(
         try:
             # Execute function call
             if functions != []:
-                response = openai.ChatCompletion.create(
-                    messages=messages,
-                    functions=functions,
-                    **self.args.dict(),
-                )
+                response = client.chat.completions.create(messages=messages,
+                    functions=functions,
+                    **self.args.dict())
                 if response["choices"][0]["message"].get("function_call") is not None:
                     self.collect_metrics(response)
                     return LLMResult(
@@ -179,10 +183,8 @@ def generate_response(
                     )

             else:
-                response = openai.ChatCompletion.create(
-                    messages=messages,
-                    **self.args.dict(),
-                )
+                response = client.chat.completions.create(messages=messages,
+                    **self.args.dict())
                 self.collect_metrics(response)
                 return LLMResult(
                     content=response["choices"][0]["message"]["content"],
@@ -212,11 +214,9 @@ async def agenerate_response(
             if functions != []:
                 async with ClientSession(trust_env=True) as session:
                     openai.aiosession.set(session)
-                    response = await openai.ChatCompletion.acreate(
-                        messages=messages,
-                        functions=functions,
-                        **self.args.dict(),
-                    )
+                    response = await aclient.chat.completions.create(messages=messages,
+                        functions=functions,
+                        **self.args.dict())
                     if response["choices"][0]["message"].get("function_call") is not None:
                         function_name = response["choices"][0]["message"]["function_call"][
                             "name"
@@ -280,10 +280,8 @@ async def agenerate_response(
             else:
                 async with ClientSession(trust_env=True) as session:
                     openai.aiosession.set(session)
-                    response = await openai.ChatCompletion.acreate(
-                        messages=messages,
-                        **self.args.dict(),
-                    )
+                    response = await aclient.chat.completions.create(messages=messages,
+                        **self.args.dict())
                     self.collect_metrics(response)
                     return LLMResult(
                         content=response["choices"][0]["message"]["content"],
@@ -352,13 +350,9 @@ def get_embedding(text: str, attempts=3) -> np.array:
     try:
         text = text.replace("\n", " ")
         if openai.api_type == "azure":
-            embedding = openai.Embedding.create(
-                input=[text], deployment_id="text-embedding-ada-002"
-            )["data"][0]["embedding"]
+            embedding = client.embeddings.create(input=[text], deployment_id="text-embedding-ada-002")["data"][0]["embedding"]
         else:
-            embedding = openai.Embedding.create(
-                input=[text], model="text-embedding-ada-002"
-            )["data"][0]["embedding"]
+            embedding = client.embeddings.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
         return tuple(embedding)
     except Exception as e:
         attempt += 1
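
A few observations on this file, flagged here rather than silently edited into the diff. As rendered above, the AzureOpenAI(...) and AsyncAzureOpenAI(...) calls repeat api_key and azure_endpoint, which Python rejects at compile time ("keyword argument repeated"); openai.error no longer exists in 1.x (the base exception is now openai.OpenAIError), so the import inside the try block would fail; and unchanged context lines still reference the removed module (openai.aiosession.set(session), openai.api_type), while the if args["model"] in LOCAL_LLMS: branch is left without a body. A minimal sketch of one way to build the clients with a single value per keyword, assuming Azure, standard OpenAI, and the local server stay the three supported backends; build_clients is an illustrative name, not the repository's API:

import os

from openai import OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI


def build_clients():
    """Illustrative helper: return (sync, async) clients for whichever backend is configured."""
    if os.environ.get("AZURE_OPENAI_API_KEY"):
        kwargs = dict(
            api_key=os.environ["AZURE_OPENAI_API_KEY"],
            azure_endpoint=os.environ["AZURE_OPENAI_API_BASE"],
            api_version="2023-05-15",
        )
        return AzureOpenAI(**kwargs), AsyncAzureOpenAI(**kwargs)
    if os.environ.get("OPENAI_API_KEY"):
        # Both clients read OPENAI_API_KEY from the environment themselves.
        return OpenAI(), AsyncOpenAI()
    # Local OpenAI-compatible server (the old openai.api_base = "http://localhost:5000/v1" case).
    return (
        OpenAI(base_url="http://localhost:5000/v1", api_key="EMPTY"),
        AsyncOpenAI(base_url="http://localhost:5000/v1", api_key="EMPTY"),
    )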

agentverse/memory/chat_history.py

+7 -7

@@ -1,7 +1,9 @@
 import json
 import logging
 import os
-import openai
+from openai import AsyncOpenAI
+
+aclient = AsyncOpenAI()
 import copy
 from typing import List, Optional, Tuple, Dict

@@ -206,12 +208,10 @@ async def _update_summary_with_batch(
             summary=self.summary, new_events=new_events_batch
         )

-        self.summary = await openai.ChatCompletion.acreate(
-            messages=[{"role": "user", "content": prompt}],
-            model=model,
-            max_tokens=max_summary_length,
-            temperature=0.5,
-        )["choices"][0]["message"]["content"]
+        self.summary = await aclient.chat.completions.create(messages=[{"role": "user", "content": prompt}],
+            model=model,
+            max_tokens=max_summary_length,
+            temperature=0.5)["choices"][0]["message"]["content"]

     def summary_message(self) -> dict:
         return {
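
One hedged note on this hunk: as written, the ["choices"][0]... subscripts bind before the await (subscription has higher precedence than await), and a 1.x ChatCompletion object does not support dict-style indexing anyway. A sketch of the same call as a standalone async helper in the v1 style; summarize_events is an illustrative name, not from the repository:

from openai import AsyncOpenAI

aclient = AsyncOpenAI()


async def summarize_events(prompt: str, model: str, max_summary_length: int) -> str:
    """Run the summary prompt and return the message text (openai>=1.0 style)."""
    response = await aclient.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model=model,
        max_tokens=max_summary_length,
        temperature=0.5,
    )
    # Await completes first; the result is read by attribute, not by subscript.
    return response.choices[0].message.content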

requirements.txt

+1 -1

@@ -3,7 +3,7 @@ fastapi==0.95.1
 uvicorn
 py3langid
 setuptools-scm
-openai==0.27.8
+openai==1.6.1
 opencv-python==4.8.0.76
 gradio
 httpx[socks]==0.25.0
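
Because this pin crosses the 1.0 major-version boundary, existing environments need the package upgraded in place, and any module that still imports openai.ChatCompletion or openai.error will fail at import time. A small optional guard, offered as a sketch rather than as part of the commit:

import openai

# Fail fast if an old 0.x client is still installed alongside code that expects the 1.x interface.
major_version = int(openai.__version__.split(".")[0])
if major_version < 1:
    raise RuntimeError(
        f"openai {openai.__version__} found; this code expects openai>=1.0 (pinned to 1.6.1)"
    )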

scripts/evaluate_responsegen.py

+6 -6

@@ -2,7 +2,9 @@
 import json
 from string import Template
 import time
-import openai
+from openai import OpenAI
+
+client = OpenAI()
 from tqdm import tqdm

 with open("./results.jsonl", "r") as f:
@@ -50,11 +52,9 @@ def write_eval_to_file(file, skip=0):
     )
     for i in range(100):
         try:
-            eval_response = openai.ChatCompletion.create(
-                model="gpt-4",
-                messages=[{"role": "user", "content": prompt}],
-                temperature=0.0,
-            )
+            eval_response = client.chat.completions.create(model="gpt-4",
+                messages=[{"role": "user", "content": prompt}],
+                temperature=0.0)
         except:
             time.sleep(min(i**2, 60))
             continue
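
The call itself migrates cleanly here; two follow-on points, offered as hedged notes rather than as part of the commit. Whatever the loop does with eval_response further down (not shown in this diff) would need v1 attribute access (eval_response.choices[0].message.content), and the bare except: can now catch the 1.x exception classes, which are exposed at the top level of the package. A sketch of the retry in that style; call_with_retry is an illustrative name, not from the script:

import time

import openai
from openai import OpenAI

client = OpenAI()


def call_with_retry(prompt: str, attempts: int = 100):
    """Retry the GPT-4 evaluation call with the same quadratic backoff as the script."""
    for i in range(attempts):
        try:
            return client.chat.completions.create(
                model="gpt-4",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.0,
            )
        except openai.APIError:  # base class for rate limits, timeouts, and server errors in 1.x
            time.sleep(min(i**2, 60))
    return None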
