Skip to content

Commit 2e96e51

Browse files
authored
Merge pull request #1 from shlomihod/add-anthropic-llm
Add support for Anthropic LLM
2 parents 8b9344c + 0353cac commit 2e96e51

File tree

2 files changed

+82
-0
lines changed

2 files changed

+82
-0
lines changed

tabmemcheck/__init__.py

+1
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
LLM_Interface,
44
openai_setup,
55
gemini_setup,
6+
claude_setup,
67
send_chat_completion,
78
send_completion,
89
)

tabmemcheck/llm.py

+81
Original file line numberDiff line numberDiff line change
@@ -286,6 +286,87 @@ def gemini_setup(model: str = None, api_key: str = None):
286286
return None
287287

288288

289+
#################################################################################################
290+
# Anthropic (requires pip install anthropic)
291+
#################################################################################################
292+
293+
294+
@dataclass
class ClaudeAnthropicLLM(LLM_Interface):
    """LLM_Interface backend for Anthropic's Claude models.

    Translates OpenAI-style chat messages into the Anthropic Messages API
    format and returns the text of the first content block of the reply.
    Requires `pip install anthropic` and the ANTHROPIC_API_KEY environment
    variable (the client created in __init__ reads it from the environment).
    """

    def __init__(self, model: str):
        # Imported lazily so the anthropic package is only required when
        # this backend is actually used.
        from anthropic import Anthropic

        self.model = model
        # Anthropic() picks up ANTHROPIC_API_KEY from the environment.
        self.anthropic = Anthropic()
        self.chat_mode = True

    def chat_completion(self, messages, temperature, max_tokens):
        """Send an OpenAI-format chat to Claude and return the reply text.

        Parameters
        ----------
        messages : list of {"role": ..., "content": ...} dicts. At most one
            "system" message is honored (the first one found).
        temperature : sampling temperature forwarded to the API.
        max_tokens : maximum tokens to generate.

        Returns the completion text, or "" on any API error (best-effort,
        matching the error handling of the other backends in this module).
        Raises ValueError for message roles other than system/user/assistant.
        """
        # Anthropic takes the system prompt as a separate `system` argument,
        # not as a chat message, so extract the first system message here.
        system_prompt = next(
            (m["content"] for m in messages if m["role"] == "system"), None
        )

        # Non-system messages keep the same {"role", "content"} shape; only
        # "user" and "assistant" are valid roles for the Messages API.
        anthropic_messages = []
        for message in messages:
            role = message["role"]
            if role == "system":
                continue  # handled above via the `system` argument
            if role not in ("user", "assistant"):
                raise ValueError(f"Unknown message role: {message['role']}")
            anthropic_messages.append(
                {"role": role, "content": message["content"]}
            )

        try:
            message_args = {
                "model": self.model,
                "messages": anthropic_messages,
                "temperature": temperature,
                "max_tokens": max_tokens,
            }
            # Only pass `system` when a non-empty system prompt was found.
            if system_prompt:
                message_args["system"] = system_prompt

            response = self.anthropic.messages.create(**message_args)
            return response.content[0].text
        except Exception as e:
            # Best-effort: report the error and return an empty completion
            # rather than propagating API failures to the caller.
            print(f"Claude: Error during completion: {str(e)}")
            return ""

    def __repr__(self) -> str:
        return f"{self.model}"
354+
355+
356+
def claude_setup(model: str = None, api_key: str = None):
    """Configure Anthropic API access and optionally build a Claude LLM.

    Parameters
    ----------
    model : Claude model name; when given, a ClaudeAnthropicLLM is returned.
    api_key : explicit Anthropic API key. When omitted, ANTHROPIC_API_KEY
        must already be set in the environment.

    Returns a ClaudeAnthropicLLM for `model`, or None when no model given.
    Raises ValueError when no API key can be found.
    """
    # Fail fast with an ImportError if the anthropic package is missing.
    from anthropic import Anthropic  # noqa: F401

    if api_key:
        # Export the key so the Anthropic() client constructed inside
        # ClaudeAnthropicLLM.__init__ (which reads the environment) uses it.
        # Previously the explicit key was passed to a throwaway client and
        # silently ignored by the returned LLM.
        os.environ["ANTHROPIC_API_KEY"] = api_key
    elif "ANTHROPIC_API_KEY" not in os.environ:
        raise ValueError(
            "No API key provided and ANTHROPIC_API_KEY not found in environment variables"
        )

    if model is not None:
        return ClaudeAnthropicLLM(model)
    return None
369+
289370
####################################################################################
290371
# dummy for testing
291372
####################################################################################

0 commit comments

Comments
 (0)