Skip to content

Commit ed32081

Browse files
authored
Merge pull request #44 from TauricResearch/dev
Merge dev into main branch
2 parents 570644d + 7eaf4d9 commit ed32081

File tree

11 files changed

+5642
-46
lines changed

11 files changed

+5642
-46
lines changed

.python-version

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
3.10

cli/main.py

Lines changed: 55 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -295,10 +295,27 @@ def update_display(layout, spinner_text=None):
295295

296296
# Add regular messages
297297
for timestamp, msg_type, content in message_buffer.messages:
298+
# Convert content to string if it's not already
299+
content_str = content
300+
if isinstance(content, list):
301+
# Handle list of content blocks (Anthropic format)
302+
text_parts = []
303+
for item in content:
304+
if isinstance(item, dict):
305+
if item.get('type') == 'text':
306+
text_parts.append(item.get('text', ''))
307+
elif item.get('type') == 'tool_use':
308+
text_parts.append(f"[Tool: {item.get('name', 'unknown')}]")
309+
else:
310+
text_parts.append(str(item))
311+
content_str = ' '.join(text_parts)
312+
elif not isinstance(content_str, str):
313+
content_str = str(content)
314+
298315
# Truncate message content if too long
299-
if isinstance(content, str) and len(content) > 200:
300-
content = content[:197] + "..."
301-
all_messages.append((timestamp, msg_type, content))
316+
if len(content_str) > 200:
317+
content_str = content_str[:197] + "..."
318+
all_messages.append((timestamp, msg_type, content_str))
302319

303320
# Sort by timestamp
304321
all_messages.sort(key=lambda x: x[0])
@@ -444,20 +461,30 @@ def create_question_box(title, prompt, default=None):
444461
)
445462
selected_research_depth = select_research_depth()
446463

447-
# Step 5: Thinking agents
464+
# Step 5: OpenAI backend
448465
console.print(
449466
create_question_box(
450-
"Step 5: Thinking Agents", "Select your thinking agents for analysis"
467+
"Step 5: OpenAI backend", "Select which service to talk to"
451468
)
452469
)
453-
selected_shallow_thinker = select_shallow_thinking_agent()
454-
selected_deep_thinker = select_deep_thinking_agent()
470+
selected_llm_provider, backend_url = select_llm_provider()
471+
472+
# Step 6: Thinking agents
473+
console.print(
474+
create_question_box(
475+
"Step 6: Thinking Agents", "Select your thinking agents for analysis"
476+
)
477+
)
478+
selected_shallow_thinker = select_shallow_thinking_agent(selected_llm_provider)
479+
selected_deep_thinker = select_deep_thinking_agent(selected_llm_provider)
455480

456481
return {
457482
"ticker": selected_ticker,
458483
"analysis_date": analysis_date,
459484
"analysts": selected_analysts,
460485
"research_depth": selected_research_depth,
486+
"llm_provider": selected_llm_provider.lower(),
487+
"backend_url": backend_url,
461488
"shallow_thinker": selected_shallow_thinker,
462489
"deep_thinker": selected_deep_thinker,
463490
}
@@ -683,6 +710,24 @@ def update_research_team_status(status):
683710
for agent in research_team:
684711
message_buffer.update_agent_status(agent, status)
685712

713+
def extract_content_string(content):
714+
"""Extract string content from various message formats."""
715+
if isinstance(content, str):
716+
return content
717+
elif isinstance(content, list):
718+
# Handle Anthropic's list format
719+
text_parts = []
720+
for item in content:
721+
if isinstance(item, dict):
722+
if item.get('type') == 'text':
723+
text_parts.append(item.get('text', ''))
724+
elif item.get('type') == 'tool_use':
725+
text_parts.append(f"[Tool: {item.get('name', 'unknown')}]")
726+
else:
727+
text_parts.append(str(item))
728+
return ' '.join(text_parts)
729+
else:
730+
return str(content)
686731

687732
def run_analysis():
688733
# First get all user selections
@@ -694,6 +739,8 @@ def run_analysis():
694739
config["max_risk_discuss_rounds"] = selections["research_depth"]
695740
config["quick_think_llm"] = selections["shallow_thinker"]
696741
config["deep_think_llm"] = selections["deep_thinker"]
742+
config["backend_url"] = selections["backend_url"]
743+
config["llm_provider"] = selections["llm_provider"].lower()
697744

698745
# Initialize the graph
699746
graph = TradingAgentsGraph(
@@ -754,7 +801,7 @@ def run_analysis():
754801

755802
# Extract message content and type
756803
if hasattr(last_message, "content"):
757-
content = last_message.content
804+
content = extract_content_string(last_message.content) # Use the helper function
758805
msg_type = "Reasoning"
759806
else:
760807
content = str(last_message)

cli/utils.py

Lines changed: 99 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -122,22 +122,43 @@ def select_research_depth() -> int:
122122
return choice
123123

124124

125-
def select_shallow_thinking_agent() -> str:
125+
def select_shallow_thinking_agent(provider) -> str:
126126
"""Select shallow thinking llm engine using an interactive selection."""
127127

128128
# Define shallow thinking llm engine options with their corresponding model names
129-
SHALLOW_AGENT_OPTIONS = [
130-
("GPT-4o-mini - Fast and efficient for quick tasks", "gpt-4o-mini"),
131-
("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
132-
("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
133-
("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
134-
]
129+
SHALLOW_AGENT_OPTIONS = {
130+
"openai": [
131+
("GPT-4o-mini - Fast and efficient for quick tasks", "gpt-4o-mini"),
132+
("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
133+
("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
134+
("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
135+
],
136+
"anthropic": [
137+
("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
138+
("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
139+
("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"),
140+
("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
141+
],
142+
"google": [
143+
("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
144+
("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
145+
("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
146+
],
147+
"openrouter": [
148+
("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
149+
("Meta: Llama 3.3 8B Instruct - A lightweight and ultra-fast variant of Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"),
150+
("google/gemini-2.0-flash-exp:free - Gemini Flash 2.0 offers a significantly faster time to first token", "google/gemini-2.0-flash-exp:free"),
151+
],
152+
"ollama": [
153+
("llama3.2 local", "llama3.2"),
154+
]
155+
}
135156

136157
choice = questionary.select(
137158
"Select Your [Quick-Thinking LLM Engine]:",
138159
choices=[
139160
questionary.Choice(display, value=value)
140-
for display, value in SHALLOW_AGENT_OPTIONS
161+
for display, value in SHALLOW_AGENT_OPTIONS[provider.lower()]
141162
],
142163
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
143164
style=questionary.Style(
@@ -158,25 +179,47 @@ def select_shallow_thinking_agent() -> str:
158179
return choice
159180

160181

161-
def select_deep_thinking_agent() -> str:
182+
def select_deep_thinking_agent(provider) -> str:
162183
"""Select deep thinking llm engine using an interactive selection."""
163184

164185
# Define deep thinking llm engine options with their corresponding model names
165-
DEEP_AGENT_OPTIONS = [
166-
("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
167-
("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
168-
("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
169-
("o4-mini - Specialized reasoning model (compact)", "o4-mini"),
170-
("o3-mini - Advanced reasoning model (lightweight)", "o3-mini"),
171-
("o3 - Full advanced reasoning model", "o3"),
172-
("o1 - Premier reasoning and problem-solving model", "o1"),
173-
]
174-
186+
DEEP_AGENT_OPTIONS = {
187+
"openai": [
188+
("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
189+
("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
190+
("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
191+
("o4-mini - Specialized reasoning model (compact)", "o4-mini"),
192+
("o3-mini - Advanced reasoning model (lightweight)", "o3-mini"),
193+
("o3 - Full advanced reasoning model", "o3"),
194+
("o1 - Premier reasoning and problem-solving model", "o1"),
195+
],
196+
"anthropic": [
197+
("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
198+
("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
199+
("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"),
200+
("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
201+
("Claude Opus 4 - Most powerful Anthropic model", "claude-opus-4-0"),
202+
],
203+
"google": [
204+
("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
205+
("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
206+
("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
207+
("Gemini 2.5 Pro", "gemini-2.5-pro-preview-06-05"),
208+
],
209+
"openrouter": [
210+
("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek/deepseek-chat-v3-0324:free"),
211+
("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"),
212+
],
213+
"ollama": [
214+
("qwen3", "qwen3"),
215+
]
216+
}
217+
175218
choice = questionary.select(
176219
"Select Your [Deep-Thinking LLM Engine]:",
177220
choices=[
178221
questionary.Choice(display, value=value)
179-
for display, value in DEEP_AGENT_OPTIONS
222+
for display, value in DEEP_AGENT_OPTIONS[provider.lower()]
180223
],
181224
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
182225
style=questionary.Style(
@@ -193,3 +236,39 @@ def select_deep_thinking_agent() -> str:
193236
exit(1)
194237

195238
return choice
239+
240+
def select_llm_provider() -> tuple[str, str]:
241+
"""Select the LLM provider and its backend URL using an interactive selection."""
242+
# Define OpenAI api options with their corresponding endpoints
243+
BASE_URLS = [
244+
("OpenAI", "https://api.openai.com/v1"),
245+
("Anthropic", "https://api.anthropic.com/"),
246+
("Google", "https://generativelanguage.googleapis.com/v1"),
247+
("Openrouter", "https://openrouter.ai/api/v1"),
248+
("Ollama", "http://localhost:11434/v1"),
249+
]
250+
251+
choice = questionary.select(
252+
"Select your LLM Provider:",
253+
choices=[
254+
questionary.Choice(display, value=(display, value))
255+
for display, value in BASE_URLS
256+
],
257+
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
258+
style=questionary.Style(
259+
[
260+
("selected", "fg:magenta noinherit"),
261+
("highlighted", "fg:magenta noinherit"),
262+
("pointer", "fg:magenta noinherit"),
263+
]
264+
),
265+
).ask()
266+
267+
if choice is None:
268+
console.print("\n[red]No LLM provider selected. Exiting...[/red]")
269+
exit(1)
270+
271+
display_name, url = choice
272+
print(f"You selected: {display_name}\tURL: {url}")
273+
274+
return display_name, url

main.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,10 @@
33

44
# Create a custom config
55
config = DEFAULT_CONFIG.copy()
6-
config["deep_think_llm"] = "gpt-4.1-nano" # Use a different model
7-
config["quick_think_llm"] = "gpt-4.1-nano" # Use a different model
6+
config["llm_provider"] = "google" # Use a different provider
7+
config["backend_url"] = "https://generativelanguage.googleapis.com/v1" # Use a different backend
8+
config["deep_think_llm"] = "gemini-2.0-flash" # Use a different model
9+
config["quick_think_llm"] = "gemini-2.0-flash" # Use a different model
810
config["max_debate_rounds"] = 1 # Increase debate rounds
911
config["online_tools"] = True # Increase debate rounds
1012

pyproject.toml

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
[project]
2+
name = "tradingagents"
3+
version = "0.1.0"
4+
description = "Add your description here"
5+
readme = "README.md"
6+
requires-python = ">=3.10"
7+
dependencies = [
8+
"akshare>=1.16.98",
9+
"backtrader>=1.9.78.123",
10+
"chainlit>=2.5.5",
11+
"chromadb>=1.0.12",
12+
"eodhd>=1.0.32",
13+
"feedparser>=6.0.11",
14+
"finnhub-python>=2.4.23",
15+
"langchain-anthropic>=0.3.15",
16+
"langchain-experimental>=0.3.4",
17+
"langchain-google-genai>=2.1.5",
18+
"langchain-openai>=0.3.23",
19+
"langgraph>=0.4.8",
20+
"pandas>=2.3.0",
21+
"parsel>=1.10.0",
22+
"praw>=7.8.1",
23+
"pytz>=2025.2",
24+
"questionary>=2.1.0",
25+
"redis>=6.2.0",
26+
"requests>=2.32.4",
27+
"rich>=14.0.0",
28+
"setuptools>=80.9.0",
29+
"stockstats>=0.6.5",
30+
"tqdm>=4.67.1",
31+
"tushare>=1.4.21",
32+
"typing-extensions>=4.14.0",
33+
"yfinance>=0.2.63",
34+
]

tradingagents/agents/utils/agent_utils.py

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,14 +12,22 @@
1212
from langchain_openai import ChatOpenAI
1313
import tradingagents.dataflows.interface as interface
1414
from tradingagents.default_config import DEFAULT_CONFIG
15+
from langchain_core.messages import HumanMessage
1516

1617

1718
def create_msg_delete():
1819
def delete_messages(state):
19-
"""To prevent message history from overflowing, regularly clear message history after a stage of the pipeline is done"""
20+
"""Clear messages and add placeholder for Anthropic compatibility"""
2021
messages = state["messages"]
21-
return {"messages": [RemoveMessage(id=m.id) for m in messages]}
22-
22+
23+
# Remove all messages
24+
removal_operations = [RemoveMessage(id=m.id) for m in messages]
25+
26+
# Add a minimal placeholder message
27+
placeholder = HumanMessage(content="Continue")
28+
29+
return {"messages": removal_operations + [placeholder]}
30+
2331
return delete_messages
2432

2533

tradingagents/agents/utils/memory.py

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,23 @@
11
import chromadb
22
from chromadb.config import Settings
33
from openai import OpenAI
4-
import numpy as np
54

65

76
class FinancialSituationMemory:
8-
def __init__(self, name):
9-
self.client = OpenAI()
7+
def __init__(self, name, config):
8+
if config["backend_url"] == "http://localhost:11434/v1":
9+
self.embedding = "nomic-embed-text"
10+
else:
11+
self.embedding = "text-embedding-3-small"
12+
self.client = OpenAI()
1013
self.chroma_client = chromadb.Client(Settings(allow_reset=True))
1114
self.situation_collection = self.chroma_client.create_collection(name=name)
1215

1316
def get_embedding(self, text):
1417
"""Get OpenAI embedding for a text"""
18+
1519
response = self.client.embeddings.create(
16-
model="text-embedding-ada-002", input=text
20+
model=self.embedding, input=text
1721
)
1822
return response.data[0].embedding
1923

tradingagents/dataflows/interface.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -703,6 +703,7 @@ def get_YFin_data(
703703

704704

705705
def get_stock_news_openai(ticker, curr_date):
706+
config = get_config()
706707
client = OpenAI()
707708

708709
response = client.responses.create(
@@ -737,6 +738,7 @@ def get_stock_news_openai(ticker, curr_date):
737738

738739

739740
def get_global_news_openai(curr_date):
741+
config = get_config()
740742
client = OpenAI()
741743

742744
response = client.responses.create(
@@ -771,6 +773,7 @@ def get_global_news_openai(curr_date):
771773

772774

773775
def get_fundamentals_openai(ticker, curr_date):
776+
config = get_config()
774777
client = OpenAI()
775778

776779
response = client.responses.create(

tradingagents/default_config.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,10 @@
88
"dataflows/data_cache",
99
),
1010
# LLM settings
11+
"llm_provider": "openai",
1112
"deep_think_llm": "o4-mini",
1213
"quick_think_llm": "gpt-4o-mini",
14+
"backend_url": "https://api.openai.com/v1",
1315
# Debate and discussion settings
1416
"max_debate_rounds": 1,
1517
"max_risk_discuss_rounds": 1,

0 commit comments

Comments
 (0)