Skip to content

Commit 80151dd

Browse files
authored
Release v0.4.3 (#4802)
2 parents 3e697d5 + 4452283 commit 80151dd

File tree

108 files changed

+2422
-3059
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

108 files changed

+2422
-3059
lines changed

.env.template

+5-1
Original file line numberDiff line numberDiff line change
@@ -25,10 +25,14 @@ OPENAI_API_KEY=your-openai-api-key
2525
## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml)
2626
# PROMPT_SETTINGS_FILE=prompt_settings.yaml
2727

28-
## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
28+
## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
2929
# the following is an example:
3030
# OPENAI_API_BASE_URL=http://localhost:443/v1
3131

32+
## OPENAI_FUNCTIONS - Enables OpenAI functions: https://platform.openai.com/docs/guides/gpt/function-calling
33+
## WARNING: this feature is only supported by OpenAI's newest models. Until these models become the default on 27 June, add a '-0613' suffix to the model of your choosing.
34+
# OPENAI_FUNCTIONS=False
35+
3236
## AUTHORISE COMMAND KEY - Key to authorise commands
3337
# AUTHORISE_COMMAND_KEY=y
3438

.gitignore

+1-1
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ __pycache__/
3131
build/
3232
develop-eggs/
3333
dist/
34-
plugins/
34+
/plugins/
3535
plugins_config.yaml
3636
downloads/
3737
eggs/

BULLETIN.md

+9-14
Original file line numberDiff line numberDiff line change
@@ -8,20 +8,15 @@ Since releasing v0.3.0, we have been working on re-architecting the Auto-GPT core
88
Check out the contribution guide on our wiki:
99
https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
1010

11-
# 🚀 v0.4.1 Release 🚀
12-
Two weeks and 50+ pull requests have passed since v0.4.0, and we are happy to announce the release of v0.4.1!
13-
14-
Highlights and notable changes since v0.4.0:
15-
- The .env.template is more readable and better explains the purpose of each environment variable.
16-
- More dependable search
17-
- The CUSTOM_SEARCH_ENGINE_ID variable has been replaced by GOOGLE_CUSTOM_SEARCH_ENGINE_ID; make sure you update it.
18-
- Better read_file
19-
- More reliable python code execution
20-
- Lots of JSON error fixes
21-
- Directory-based plugins
22-
23-
## Further fixes and changes 🛠️
24-
Under the hood, we've done a bunch of work improving architectures and streamlining code. Most of that won't be user-visible
11+
# 🚀 v0.4.3 Release 🚀
12+
We're happy to announce the 0.4.3 maintenance release, which primarily focuses on refining the LLM command execution,
13+
extending support for OpenAI's latest models (including the powerful GPT-3 16k model), and laying the groundwork
14+
for future compatibility with OpenAI's function calling feature.
2515

16+
Key Highlights:
17+
- OpenAI API Key Prompt: Auto-GPT will now courteously prompt users for their OpenAI API key, if it's not already provided.
18+
- Summarization Enhancements: We've optimized Auto-GPT's use of the LLM context window even further.
19+
- JSON Memory Reading: Support for reading memories from JSON files has been improved, resulting in enhanced task execution.
20+
- Deprecated commands, removed for a leaner, more performant LLM: analyze_code, write_tests, improve_code, audio_text, web_playwright, web_requests
2621
## Take a look at the Release Notes on Github for the full changelog!
2722
https://github.com/Significant-Gravitas/Auto-GPT/releases

autogpt/agent/agent.py

+16-75
Original file line numberDiff line numberDiff line change
@@ -5,25 +5,22 @@
55

66
from colorama import Fore, Style
77

8-
from autogpt.commands.command import CommandRegistry
98
from autogpt.config import Config
109
from autogpt.config.ai_config import AIConfig
1110
from autogpt.json_utils.utilities import extract_json_from_response, validate_json
12-
from autogpt.llm.base import ChatSequence
13-
from autogpt.llm.chat import chat_with_ai, create_chat_completion
11+
from autogpt.llm.chat import chat_with_ai
1412
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
1513
from autogpt.llm.utils import count_string_tokens
1614
from autogpt.log_cycle.log_cycle import (
1715
FULL_MESSAGE_HISTORY_FILE_NAME,
1816
NEXT_ACTION_FILE_NAME,
19-
PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
20-
SUPERVISOR_FEEDBACK_FILE_NAME,
2117
USER_INPUT_FILE_NAME,
2218
LogCycleHandler,
2319
)
24-
from autogpt.logs import logger, print_assistant_thoughts
20+
from autogpt.logs import logger, print_assistant_thoughts, remove_ansi_escape
2521
from autogpt.memory.message_history import MessageHistory
2622
from autogpt.memory.vector import VectorMemory
23+
from autogpt.models.command_registry import CommandRegistry
2724
from autogpt.speech import say_text
2825
from autogpt.spinner import Spinner
2926
from autogpt.utils import clean_input
@@ -145,8 +142,10 @@ def signal_handler(signum, frame):
145142
)
146143

147144
try:
148-
assistant_reply_json = extract_json_from_response(assistant_reply)
149-
validate_json(assistant_reply_json)
145+
assistant_reply_json = extract_json_from_response(
146+
assistant_reply.content
147+
)
148+
validate_json(assistant_reply_json, self.config)
150149
except json.JSONDecodeError as e:
151150
logger.error(f"Exception while validating assistant reply JSON: {e}")
152151
assistant_reply_json = {}
@@ -161,9 +160,11 @@ def signal_handler(signum, frame):
161160
# Get command name and arguments
162161
try:
163162
print_assistant_thoughts(
164-
self.ai_name, assistant_reply_json, self.config.speak_mode
163+
self.ai_name, assistant_reply_json, self.config
164+
)
165+
command_name, arguments = get_command(
166+
assistant_reply_json, assistant_reply, self.config
165167
)
166-
command_name, arguments = get_command(assistant_reply_json)
167168
if self.config.speak_mode:
168169
say_text(f"I want to execute {command_name}")
169170

@@ -184,7 +185,7 @@ def signal_handler(signum, frame):
184185
logger.typewriter_log(
185186
"NEXT ACTION: ",
186187
Fore.CYAN,
187-
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
188+
f"COMMAND = {Fore.CYAN}{remove_ansi_escape(command_name)}{Style.RESET_ALL} "
188189
f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
189190
)
190191

@@ -200,32 +201,16 @@ def signal_handler(signum, frame):
200201
)
201202
while True:
202203
if self.config.chat_messages_enabled:
203-
console_input = clean_input("Waiting for your response...")
204+
console_input = clean_input(
205+
self.config, "Waiting for your response..."
206+
)
204207
else:
205208
console_input = clean_input(
206-
Fore.MAGENTA + "Input:" + Style.RESET_ALL
209+
self.config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
207210
)
208211
if console_input.lower().strip() == self.config.authorise_key:
209212
user_input = "GENERATE NEXT COMMAND JSON"
210213
break
211-
elif console_input.lower().strip() == "s":
212-
logger.typewriter_log(
213-
"-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=",
214-
Fore.GREEN,
215-
"",
216-
)
217-
thoughts = assistant_reply_json.get("thoughts", {})
218-
self_feedback_resp = self.get_self_feedback(
219-
thoughts, self.config.fast_llm_model
220-
)
221-
logger.typewriter_log(
222-
f"SELF FEEDBACK: {self_feedback_resp}",
223-
Fore.YELLOW,
224-
"",
225-
)
226-
user_input = self_feedback_resp
227-
command_name = "self_feedback"
228-
break
229214
elif console_input.lower().strip() == "":
230215
logger.warn("Invalid input format.")
231216
continue
@@ -281,8 +266,6 @@ def signal_handler(signum, frame):
281266
result = f"Could not execute command: {arguments}"
282267
elif command_name == "human_feedback":
283268
result = f"Human feedback: {user_input}"
284-
elif command_name == "self_feedback":
285-
result = f"Self feedback: {user_input}"
286269
else:
287270
for plugin in self.config.plugins:
288271
if not plugin.can_handle_pre_command():
@@ -335,45 +318,3 @@ def _resolve_pathlike_command_args(self, command_args):
335318
self.workspace.get_path(command_args[pathlike])
336319
)
337320
return command_args
338-
339-
def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
340-
"""Generates a feedback response based on the provided thoughts dictionary.
341-
This method takes in a dictionary of thoughts containing keys such as 'reasoning',
342-
'plan', 'thoughts', and 'criticism'. It combines these elements into a single
343-
feedback message and uses the create_chat_completion() function to generate a
344-
response based on the input message.
345-
Args:
346-
thoughts (dict): A dictionary containing thought elements like reasoning,
347-
plan, thoughts, and criticism.
348-
Returns:
349-
str: A feedback response generated using the provided thoughts dictionary.
350-
"""
351-
ai_role = self.ai_config.ai_role
352-
353-
feedback_prompt = f"Below is a message from me, an AI Agent, assuming the role of {ai_role}. whilst keeping knowledge of my slight limitations as an AI Agent Please evaluate my thought process, reasoning, and plan, and provide a concise paragraph outlining potential improvements. Consider adding or removing ideas that do not align with my role and explaining why, prioritizing thoughts based on their significance, or simply refining my overall thought process."
354-
reasoning = thoughts.get("reasoning", "")
355-
plan = thoughts.get("plan", "")
356-
thought = thoughts.get("thoughts", "")
357-
feedback_thoughts = thought + reasoning + plan
358-
359-
prompt = ChatSequence.for_model(llm_model)
360-
prompt.add("user", feedback_prompt + feedback_thoughts)
361-
362-
self.log_cycle_handler.log_cycle(
363-
self.ai_config.ai_name,
364-
self.created_at,
365-
self.cycle_count,
366-
prompt.raw(),
367-
PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
368-
)
369-
370-
feedback = create_chat_completion(prompt)
371-
372-
self.log_cycle_handler.log_cycle(
373-
self.ai_config.ai_name,
374-
self.created_at,
375-
self.cycle_count,
376-
feedback,
377-
SUPERVISOR_FEEDBACK_FILE_NAME,
378-
)
379-
return feedback

autogpt/agent/agent_manager.py

+14-10
Original file line numberDiff line numberDiff line change
@@ -10,12 +10,12 @@
1010
class AgentManager(metaclass=Singleton):
1111
"""Agent manager for managing GPT agents"""
1212

13-
def __init__(self):
13+
def __init__(self, config: Config):
1414
self.next_key = 0
1515
self.agents: dict[
1616
int, tuple[str, list[Message], str]
1717
] = {} # key, (task, full_message_history, model)
18-
self.cfg = Config()
18+
self.config = config
1919

2020
# Create new GPT agent
2121
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
@@ -35,18 +35,20 @@ def create_agent(
3535
"""
3636
messages = ChatSequence.for_model(model, [Message("user", creation_prompt)])
3737

38-
for plugin in self.cfg.plugins:
38+
for plugin in self.config.plugins:
3939
if not plugin.can_handle_pre_instruction():
4040
continue
4141
if plugin_messages := plugin.pre_instruction(messages.raw()):
4242
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
4343
# Start GPT instance
44-
agent_reply = create_chat_completion(prompt=messages)
44+
agent_reply = create_chat_completion(
45+
prompt=messages, config=self.config
46+
).content
4547

4648
messages.add("assistant", agent_reply)
4749

4850
plugins_reply = ""
49-
for i, plugin in enumerate(self.cfg.plugins):
51+
for i, plugin in enumerate(self.config.plugins):
5052
if not plugin.can_handle_on_instruction():
5153
continue
5254
if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
@@ -62,7 +64,7 @@ def create_agent(
6264

6365
self.agents[key] = (task, list(messages), model)
6466

65-
for plugin in self.cfg.plugins:
67+
for plugin in self.config.plugins:
6668
if not plugin.can_handle_post_instruction():
6769
continue
6870
agent_reply = plugin.post_instruction(agent_reply)
@@ -85,19 +87,21 @@ def message_agent(self, key: str | int, message: str) -> str:
8587
messages = ChatSequence.for_model(model, messages)
8688
messages.add("user", message)
8789

88-
for plugin in self.cfg.plugins:
90+
for plugin in self.config.plugins:
8991
if not plugin.can_handle_pre_instruction():
9092
continue
9193
if plugin_messages := plugin.pre_instruction([m.raw() for m in messages]):
9294
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
9395

9496
# Start GPT instance
95-
agent_reply = create_chat_completion(prompt=messages)
97+
agent_reply = create_chat_completion(
98+
prompt=messages, config=self.config
99+
).content
96100

97101
messages.add("assistant", agent_reply)
98102

99103
plugins_reply = agent_reply
100-
for i, plugin in enumerate(self.cfg.plugins):
104+
for i, plugin in enumerate(self.config.plugins):
101105
if not plugin.can_handle_on_instruction():
102106
continue
103107
if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
@@ -107,7 +111,7 @@ def message_agent(self, key: str | int, message: str) -> str:
107111
if plugins_reply and plugins_reply != "":
108112
messages.add("assistant", plugins_reply)
109113

110-
for plugin in self.cfg.plugins:
114+
for plugin in self.config.plugins:
111115
if not plugin.can_handle_post_instruction():
112116
continue
113117
agent_reply = plugin.post_instruction(agent_reply)

0 commit comments

Comments
 (0)