A Julia SDK for the Anthropic API, providing convenient access to Claude models with full streaming support.
- Complete Messages API implementation
- Server-Sent Events (SSE) streaming support
- Models API for listing available models
- Automatic retry logic with exponential backoff
- Comprehensive error handling
- Strong typing with Julia's type system
- Both synchronous and asynchronous clients
using Pkg
Pkg.add(url="https://github.com/yourusername/Anthropic.jl")

using Anthropic
# Initialize client (uses ANTHROPIC_API_KEY environment variable by default)
client = AnthropicClient()
# Create a simple message
response = create_message(client,
model = "claude-3-5-sonnet-20241022",
messages = [
Dict("role" => "user", "content" => "Hello, Claude!")
],
max_tokens = 100
)
println(response.content[1].text)

Set your API key as an environment variable:
export ANTHROPIC_API_KEY="your-api-key-here"

Or provide it directly:
client = AnthropicClient(api_key = "your-api-key-here")

using Anthropic
client = AnthropicClient()
response = create_message(client,
model = "claude-3-5-sonnet-20241022",
messages = [
Dict("role" => "user", "content" => "What is the capital of France?")
],
max_tokens = 100,
temperature = 0.7
)
for content_block in response.content
if content_block isa TextBlock
println(content_block.text)
end
end

using Anthropic
client = AnthropicClient()
# Stream message events
stream = stream_message(client,
model = "claude-3-5-sonnet-20241022",
messages = [
Dict("role" => "user", "content" => "Write a short story about a robot")
],
max_tokens = 500
)
# Process stream events
for event in stream
if event isa ContentBlockDeltaEvent && haskey(event.delta, "text")
print(event.delta["text"])
flush(stdout)
end
end

using Anthropic
client = AnthropicClient()
stream = stream_message(client,
model = "claude-3-5-sonnet-20241022",
messages = [
Dict("role" => "user", "content" => "Explain quantum computing")
],
max_tokens = 300
)
# Use the text_stream_buffered helper for simpler text streaming
for text in text_stream_buffered(stream)
print(text)
flush(stdout)
end

response = create_message(client,
model = "claude-3-5-sonnet-20241022",
system = "You are a helpful assistant who speaks like a pirate.",
messages = [
Dict("role" => "user", "content" => "Tell me about sailing")
],
max_tokens = 200
)

messages = [
Dict("role" => "user", "content" => "What is 2+2?"),
Dict("role" => "assistant", "content" => "2+2 equals 4."),
Dict("role" => "user", "content" => "What about 3+3?")
]
response = create_message(client,
model = "claude-3-5-sonnet-20241022",
messages = messages,
max_tokens = 100
)

tools = [
Dict(
"name" => "get_weather",
"description" => "Get the current weather in a given location",
"input_schema" => Dict(
"type" => "object",
"properties" => Dict(
"location" => Dict(
"type" => "string",
"description" => "The city and state, e.g. San Francisco, CA"
)
),
"required" => ["location"]
)
)
]
response = create_message(client,
model = "claude-3-5-sonnet-20241022",
messages = [
Dict("role" => "user", "content" => "What's the weather in Paris?")
],
tools = tools,
max_tokens = 200
)
for content_block in response.content
if content_block isa ToolUseBlock
println("Tool called: $(content_block.name)")
println("Input: $(content_block.input)")
end
end

using Base64
# Read and encode an image
image_data = base64encode(read("image.jpg"))
messages = [
Dict(
"role" => "user",
"content" => [
Dict(
"type" => "image",
"source" => Dict(
"type" => "base64",
"media_type" => "image/jpeg",
"data" => image_data
)
),
Dict(
"type" => "text",
"text" => "What's in this image?"
)
]
)
]
response = create_message(client,
model = "claude-3-5-sonnet-20241022",
messages = messages,
max_tokens = 200
)

token_count = count_tokens(client,
model = "claude-3-5-sonnet-20241022",
messages = [
Dict("role" => "user", "content" => "Hello, Claude!")
]
)
println("Input tokens: $(token_count["input_tokens"])")

models = list_models(client)
for model in models
println("$(model.id): $(model.display_name)")
end

try
response = create_message(client,
model = "claude-3-5-sonnet-20241022",
messages = [
Dict("role" => "user", "content" => "Hello!")
],
max_tokens = 100000 # Too many tokens
)
catch e
if e isa BadRequestError
println("Bad request: $(e.message)")
elseif e isa RateLimitError
println("Rate limited. Retry after: $(e.retry_after) seconds")
elseif e isa AuthenticationError
println("Authentication failed: $(e.message)")
else
rethrow(e)
end
end

using Anthropic
async_client = AsyncAnthropicClient()
# Create multiple async requests
task1 = create_message(async_client,
MessageRequest(
model = "claude-3-5-sonnet-20241022",
messages = [Dict("role" => "user", "content" => "What is 2+2?")],
max_tokens = 50
)
)
task2 = create_message(async_client,
MessageRequest(
model = "claude-3-5-sonnet-20241022",
messages = [Dict("role" => "user", "content" => "What is 3+3?")],
max_tokens = 50
)
)
# Wait for results
response1 = fetch(task1)
response2 = fetch(task2)

AnthropicClient(; api_key, base_url, api_version, timeout, max_retries) - Create a synchronous client
AsyncAnthropicClient(; ...) - Create an asynchronous client
create_message(client, request::MessageRequest) - Create a message
create_message(client; model, messages, max_tokens, ...) - Create a message with keyword arguments
stream_message(client, request::MessageRequest) - Stream a message
stream_message(client; model, messages, max_tokens, ...) - Stream a message with keyword arguments
count_tokens(client; model, messages, system, tools) - Count tokens for a request
list_models(client; before_id, after_id, limit) - List available models
get_model(client, model_id) - Get information about a specific model
create_message_batch(client, requests) - Create a batch of messages
get_message_batch(client, batch_id) - Get batch status
cancel_message_batch(client, batch_id) - Cancel a batch
list_message_batches(client; before_id, after_id, limit) - List batches
MessageRequest - Request structure for creating messages
Usage - Token usage information
Model - Model information
TextBlock - Text content block
ToolUseBlock - Tool invocation block
ToolResultBlock - Tool result block
ImageBlock - Image content block
ThinkingBlock - Thinking/reasoning block
MessageResponse - Complete message response
StreamEvent - Base type for streaming events
MessageStartEvent
ContentBlockStartEvent
ContentBlockDeltaEvent
ContentBlockStopEvent
MessageDeltaEvent
MessageStopEvent
ErrorEvent
AnthropicError - Base error type
APIError - General API error
APIStatusError - HTTP status error
AuthenticationError - Authentication failure
RateLimitError - Rate limit exceeded
BadRequestError - Invalid request
NotFoundError - Resource not found
InternalServerError - Server error
APIConnectionError - Network error
APITimeoutError - Request timeout
ANTHROPIC_API_KEY - Your Anthropic API key
ANTHROPIC_AUTH_TOKEN - Alternative authentication token
api_key - API key for authentication
base_url - Base URL for the API (default: "https://api.anthropic.com")
api_version - API version (default: "2023-06-01")
timeout - Request timeout in seconds (default: 600.0)
max_retries - Maximum number of retries (default: 2)
Contributions are welcome! Please feel free to submit a Pull Request.
This project is licensed under the MIT License.