Skip to content

Commit d4c0236

Browse files
Merge pull request #448 from MervinPraison/develop
Develop
2 parents 248fcbd + dca01cd commit d4c0236

4 files changed

Lines changed: 13 additions & 11 deletions

File tree

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -770,7 +770,7 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
770770
display_error(f"Error in chat completion: {e}")
771771
return None
772772

773-
def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
773+
def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True):
774774
# Log all parameter values when in debug mode
775775
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
776776
param_info = {
@@ -912,7 +912,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
912912
agent_tools=agent_tools
913913
)
914914

915-
response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps)
915+
response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=stream)
916916
if not response:
917917
return None
918918

@@ -949,7 +949,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
949949
"content": "Function returned an empty output"
950950
})
951951

952-
response = self._chat_completion(messages, temperature=temperature)
952+
response = self._chat_completion(messages, temperature=temperature, stream=stream)
953953
if not response:
954954
return None
955955
response_text = response.choices[0].message.content.strip()
@@ -1019,7 +1019,7 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
10191019

10201020
logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
10211021
messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
1022-
response = self._chat_completion(messages, temperature=temperature, tools=None, stream=True)
1022+
response = self._chat_completion(messages, temperature=temperature, tools=None, stream=stream)
10231023
response_text = response.choices[0].message.content.strip()
10241024
reflection_count += 1
10251025
continue # Continue the loop for more reflections
@@ -1199,7 +1199,7 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
11991199
model=self.llm,
12001200
messages=messages,
12011201
temperature=temperature,
1202-
tools=formatted_tools
1202+
tools=formatted_tools,
12031203
)
12041204
result = await self._achat_completion(response, tools)
12051205
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:

src/praisonai-agents/praisonaiagents/agents/agents.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ def process_video(video_path: str, seconds_per_frame=2):
4545
return base64_frames
4646

4747
class PraisonAIAgents:
48-
def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None, max_iter=10):
48+
def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_retries=5, process="sequential", manager_llm=None, memory=False, memory_config=None, embedder=None, user_id=None, max_iter=10, stream=True):
4949
# Add check at the start if memory is requested
5050
if memory:
5151
try:
@@ -68,15 +68,16 @@ def __init__(self, agents, tasks=None, verbose=0, completion_checker=None, max_r
6868
for agent in agents:
6969
agent.user_id = self.user_id
7070

71-
self.agents = agents
72-
self.tasks = {}
71+
self.agents: List[Agent] = agents
72+
self.tasks: Dict[int, Task] = {}
7373
if max_retries < 3:
7474
max_retries = 3
7575
self.completion_checker = completion_checker if completion_checker else self.default_completion_checker
7676
self.task_id_counter = 0
7777
self.verbose = verbose
7878
self.max_retries = max_retries
7979
self.process = process
80+
self.stream = stream
8081

8182
# Check for manager_llm in environment variable if not provided
8283
self.manager_llm = manager_llm or os.getenv('OPENAI_MODEL_NAME', 'gpt-4o')
@@ -665,7 +666,8 @@ def _get_multimodal_message(text_prompt, images):
665666
task_prompt,
666667
tools=task.tools,
667668
output_json=task.output_json,
668-
output_pydantic=task.output_pydantic
669+
output_pydantic=task.output_pydantic,
670+
stream=self.stream,
669671
)
670672

671673
if agent_output:

src/praisonai-agents/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
44

55
[project]
66
name = "praisonaiagents"
7-
version = "0.0.72"
7+
version = "0.0.73"
88
description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
99
authors = [
1010
{ name="Mervin Praison" }

src/praisonai-agents/uv.lock

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments (0)