10 changes: 8 additions & 2 deletions conversation/anthropic/anthropic.go
@@ -51,10 +51,16 @@ func (a *Anthropic) Init(ctx context.Context, meta conversation.Metadata) error {
 	// Resolve model via central helper (uses metadata, then env var, then default)
 	model := conversation.GetAnthropicModel(m.Model)
 
-	llm, err := anthropic.New(
+	options := []anthropic.Option{
 		anthropic.WithModel(model),
 		anthropic.WithToken(m.Key),
-	)
+	}
+
+	if httpClient := conversation.BuildHTTPClient(m.HttpClientTimeout, m.IdleConnectionTimeout); httpClient != nil {
+		options = append(options, anthropic.WithHTTPClient(httpClient))
+	}
+
+	llm, err := anthropic.New(options...)
 	if err != nil {
 		return err
 	}
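The `conversation.BuildHTTPClient` helper this hunk calls is defined elsewhere in the package and is not shown in this PR view. As a reading aid, here is a minimal sketch of what it plausibly does; the signature and the nil-return behavior are assumptions inferred from the call site above, not the actual implementation:

```go
// Hypothetical sketch only; the real conversation.BuildHTTPClient is not
// part of this diff. Inferred from the call site: it takes the two timeout
// settings and returns nil when no custom client is needed, so callers can
// keep the SDK's default HTTP client.
package conversation

import (
	"net/http"
	"time"
)

func BuildHTTPClient(clientTimeout, idleConnTimeout time.Duration) *http.Client {
	if clientTimeout == 0 && idleConnTimeout == 0 {
		return nil // caller falls back to the default HTTP client
	}
	transport := http.DefaultTransport.(*http.Transport).Clone()
	if idleConnTimeout > 0 {
		transport.IdleConnTimeout = idleConnTimeout
	}
	return &http.Client{
		Timeout:   clientTimeout, // 0 means no overall per-request deadline
		Transport: transport,
	}
}
```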
14 changes: 14 additions & 0 deletions conversation/anthropic/metadata.yaml
@@ -34,3 +34,17 @@ metadata:
       A time-to-live value for a prompt cache to expire. Uses Golang durations
     type: string
     example: '10m'
+  - name: httpClientTimeout
+    required: false
+    description: |
+      Maximum duration for HTTP client requests to the Anthropic API.
+    type: duration
+    example: '30s'
+    default: '0s'
+  - name: idleConnectionTimeout
+    required: false
+    description: |
+      Maximum duration an idle HTTP connection can remain open before being closed.
+    type: duration
+    example: '30s'
+    default: '90s'
5 changes: 4 additions & 1 deletion conversation/aws/bedrock/metadata.yaml
@@ -21,7 +21,10 @@ metadata:
   - name: model
     required: false
     description: |
-      The LLM to use. Defaults to Bedrock's default provider model from Amazon.
+      The model identifier or inference profile ARN to use. Defaults to Bedrock's default provider model from Amazon.
+      You can specify either:
+      - A model ID (e.g., "amazon.titan-text-express-v1") that supports on-demand throughput
+      - An inference profile ARN for models that require it (found in the AWS Bedrock console under "Cross-Region Inference")
     type: string
     example: 'amazon.titan-text-express-v1'
   - name: cacheTTL
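For context (not part of this diff, and the region, account ID, and profile name below are placeholders), the two accepted forms of the `model` value look like this:

```go
// Illustrative placeholders showing the two accepted "model" forms.
const onDemandModelID = "amazon.titan-text-express-v1"
const inferenceProfileARN = "arn:aws:bedrock:us-east-1:111122223333:inference-profile/us.example-model-v1:0"
```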
28 changes: 13 additions & 15 deletions conversation/converse.go
@@ -19,7 +19,6 @@ import (
 	"io"
 
 	"github.com/tmc/langchaingo/llms"
-	"google.golang.org/protobuf/types/known/anypb"
 
 	"github.com/dapr/components-contrib/metadata"
 )
@@ -36,23 +35,22 @@ type Conversation interface {
 
 type Request struct {
 	// Message can be user input prompt/instructions and/or tool call responses.
-	Message             *[]llms.MessageContent
-	Tools               *[]llms.Tool
-	ToolChoice          *string
-	Parameters          map[string]*anypb.Any `json:"parameters"`
-	ConversationContext string                `json:"conversationContext"`
-	Temperature         float64               `json:"temperature"`
-
-	// from metadata
-	Key       string   `json:"key"`
-	Model     string   `json:"model"`
-	Endpoints []string `json:"endpoints"`
-	Policy    string   `json:"loadBalancingPolicy"`
+	Message     *[]llms.MessageContent
+	Tools       *[]llms.Tool
+	ToolChoice  *string
+	Temperature float64 `json:"temperature"`
+
+	// Metadata fields that are separate from the actual component metadata fields
+	// that get passed to the LLM through the conversation.
+	// https://github.com/openai/openai-go/blob/main/chatcompletion.go#L3010
+	Metadata                   map[string]string `json:"metadata"`
+	ResponseFormatAsJSONSchema map[string]any    `json:"responseFormatAsJsonSchema"`
+	Model                      *string           `json:"model"`
 }
 
 type Response struct {
-	ConversationContext string   `json:"conversationContext"`
-	Outputs             []Result `json:"outputs"`
+	Outputs []Result `json:"outputs"`
+	Model   string   `json:"model"`
 }
 
 type Result struct {

Review comment from the PR author on the removed lines -42 to -50:

> The "from metadata" fields were bubbling the Metadata field up from the Conversations API (see https://github.com/dapr/dapr/blob/master/dapr/proto/runtime/v1/ai.proto#L43), which allowed all of these fields to be passed through the request. This is an incorrect use of metadata. Components already expose their own metadata field, so there's no need to bubble this information up from the API; contrib already handles it.
>
> As a result, we can remove the following fields: Key, Endpoints, and Policy.
>
> I also removed Parameters and ConversationContext, since these were surfaced but never actually used. All of this traces back to the original implementation, which I'm now cleaning up.
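To make the new request shape concrete, here is a short, hedged sketch of how a caller might populate the struct after this change; the prompt, model name, and metadata values are illustrative placeholders, not values from this PR:

```go
// Illustrative use of the reshaped Request; values are placeholders.
model := "example-model"
msgs := []llms.MessageContent{
	llms.TextParts(llms.ChatMessageTypeHuman, "Summarize this changelog."),
}

req := &conversation.Request{
	Message:     &msgs,
	Temperature: 0.2,

	// New optional fields introduced by this PR:
	Model:    &model,                               // per-request override of the component's configured model
	Metadata: map[string]string{"trace_id": "abc"}, // passed through to the LLM provider
}
_ = req
```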
7 changes: 1 addition & 6 deletions conversation/deepseek/deepseek.go
@@ -63,12 +63,7 @@ func (d *Deepseek) Init(ctx context.Context, meta conversation.Metadata) error {
 		md.Endpoint = defaultEndpoint
 	}
 
-	options := []openai.Option{
-		openai.WithModel(model),
-		openai.WithToken(md.Key),
-		openai.WithBaseURL(md.Endpoint),
-	}
-
+	options := conversation.BuildOpenAIClientOptions(model, md.Key, md.Endpoint, md.HttpClientTimeout, md.IdleConnectionTimeout)
 	llm, err := openai.New(options...)
 	if err != nil {
 		return err
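Like `BuildHTTPClient`, the shared `conversation.BuildOpenAIClientOptions` helper is referenced here but not shown in the diff. A plausible sketch, with the signature taken from this call site and the option set from the inline code it replaces, might be:

```go
// Hypothetical sketch; the real conversation.BuildOpenAIClientOptions is
// outside this diff. It bundles the option wiring that each OpenAI-compatible
// component previously repeated inline.
package conversation

import (
	"time"

	"github.com/tmc/langchaingo/llms/openai"
)

func BuildOpenAIClientOptions(model, key, endpoint string, clientTimeout, idleConnTimeout time.Duration) []openai.Option {
	options := []openai.Option{
		openai.WithModel(model),
		openai.WithToken(key),
	}
	if endpoint != "" {
		options = append(options, openai.WithBaseURL(endpoint))
	}
	// Reuse the shared HTTP client builder so timeout handling stays
	// consistent across providers; nil means "keep the default client".
	if httpClient := BuildHTTPClient(clientTimeout, idleConnTimeout); httpClient != nil {
		options = append(options, openai.WithHTTPClient(httpClient))
	}
	return options
}
```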
9 changes: 5 additions & 4 deletions conversation/deepseek/metadata.go
@@ -17,9 +17,10 @@ limitations under the License.
 
 package deepseek
 
+import "github.com/dapr/components-contrib/conversation"
+
 type DeepseekMetadata struct {
-	Key       string `json:"key"`
-	MaxTokens int    `json:"maxTokens"`
-	Model     string `json:"model"`
-	Endpoint  string `json:"endpoint"`
+	conversation.LangchainMetadata `json:",inline" mapstructure:",squash"`
+	Key                            string `json:"key"`
+	MaxTokens                      int    `json:"maxTokens"`
 }
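The embedded `conversation.LangchainMetadata` type is not included in this diff. Judging by the fields the Deepseek struct drops (Model, Endpoint) and the new timeout metadata, it presumably looks roughly like the following; this is an assumption, not code from the PR:

```go
// Assumed shape of conversation.LangchainMetadata, inferred from the fields
// removed above plus the new httpClientTimeout/idleConnectionTimeout
// metadata fields; not taken from this PR.
type LangchainMetadata struct {
	Model                 string        `json:"model"`
	Endpoint              string        `json:"endpoint"`
	HttpClientTimeout     time.Duration `json:"httpClientTimeout"`
	IdleConnectionTimeout time.Duration `json:"idleConnectionTimeout"`
}
```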
14 changes: 14 additions & 0 deletions conversation/deepseek/metadata.yaml
@@ -41,3 +41,17 @@ metadata:
       Max tokens for each request
     type: number
     example: "2048"
+  - name: httpClientTimeout
+    required: false
+    description: |
+      Maximum duration for HTTP client requests to the Deepseek API.
+    type: duration
+    example: '30s'
+    default: '0s'
+  - name: idleConnectionTimeout
+    required: false
+    description: |
+      Maximum duration an idle HTTP connection can remain open before being closed.
+    type: duration
+    example: '30s'
+    default: '90s'
23 changes: 17 additions & 6 deletions conversation/echo/echo.go
@@ -51,7 +51,9 @@ func (e *Echo) Init(ctx context.Context, meta conversation.Metadata) error {
 		return err
 	}
 
-	e.model = r.Model
+	if r.Model != nil {
+		e.model = *r.Model
+	}
 
 	return nil
 }
@@ -66,8 +68,7 @@ func (e *Echo) GetComponentMetadata() (metadataInfo metadata.MetadataMap) {
 func (e *Echo) Converse(ctx context.Context, r *conversation.Request) (res *conversation.Response, err error) {
 	if r == nil || r.Message == nil {
 		return &conversation.Response{
-			ConversationContext: r.ConversationContext,
-			Outputs:             []conversation.Result{},
+			Outputs: []conversation.Result{},
 		}, nil
 	}
 
@@ -139,6 +140,8 @@ func (e *Echo) Converse(ctx context.Context, r *conversation.Request) (res *conversation.Response, err error) {
 		}
 	}
 
+	responseContent := strings.Join(contentFromMessaged, "\n")
+
 	stopReason := "stop"
 	if len(toolCalls) > 0 {
 		stopReason = "tool_calls"
@@ -148,7 +151,7 @@ func (e *Echo) Converse(ctx context.Context, r *conversation.Request) (res *conversation.Response, err error) {
 		FinishReason: stopReason,
 		Index:        0,
 		Message: conversation.Message{
-			Content: strings.Join(contentFromMessaged, "\n"),
+			Content: responseContent,
 		},
 	}
 
@@ -161,9 +164,17 @@ func (e *Echo) Converse(ctx context.Context, r *conversation.Request) (res *conversation.Response, err error) {
 		Choices: []conversation.Choice{choice},
 	}
 
+	// allow per request model overrides
+	var modelName string
+	if r.Model != nil && *r.Model != "" {
+		modelName = *r.Model
+	} else {
+		modelName = e.model
+	}
+
 	res = &conversation.Response{
-		ConversationContext: r.ConversationContext,
-		Outputs:             []conversation.Result{output},
+		Outputs: []conversation.Result{output},
+		Model:   modelName,
 	}
 
 	return res, nil
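A brief illustration of the override-with-fallback behavior added above; this is hypothetical caller-side code, assuming `e` is an initialized echo component and `ctx` is a context:

```go
// Hypothetical: exercising the per-request model override in echo.
msgs := []llms.MessageContent{
	llms.TextParts(llms.ChatMessageTypeHuman, "ping"),
}
override := "request-scoped-model"

resp, _ := e.Converse(ctx, &conversation.Request{Message: &msgs, Model: &override})
// resp.Model == "request-scoped-model": the request-level value wins.

resp2, _ := e.Converse(ctx, &conversation.Request{Message: &msgs})
// resp2.Model == e.model: falls back to the model configured at Init.
```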
15 changes: 6 additions & 9 deletions conversation/googleai/googleai.go
@@ -50,16 +50,13 @@ func (g *GoogleAI) Init(ctx context.Context, meta conversation.Metadata) error {
 
 	// Resolve model via central helper (uses metadata, then env var, then default)
 	model := conversation.GetGoogleAIModel(md.Model)
+	key, _ := meta.GetProperty("key")
 
-	opts := []openai.Option{
-		openai.WithModel(model),
-		openai.WithToken(md.Key),
-		// endpoint from https://ai.google.dev/gemini-api/docs/openai
-		openai.WithBaseURL("https://generativelanguage.googleapis.com/v1beta/openai/"),
-	}
-	llm, err := openai.New(
-		opts...,
-	)
+	// endpoint from https://ai.google.dev/gemini-api/docs/openai
+	const endpoint = "https://generativelanguage.googleapis.com/v1beta/openai/"
+	opts := conversation.BuildOpenAIClientOptions(model, key, endpoint, md.HttpClientTimeout, md.IdleConnectionTimeout)
+
+	llm, err := openai.New(opts...)
 	if err != nil {
 		return err
 	}
14 changes: 14 additions & 0 deletions conversation/googleai/metadata.yaml
@@ -34,3 +34,17 @@ metadata:
       A time-to-live value for a prompt cache to expire. Uses Golang durations
     type: string
     example: '10m'
+  - name: httpClientTimeout
+    required: false
+    description: |
+      Maximum duration for HTTP client requests to the Google AI API.
+    type: duration
+    example: '30s'
+    default: '0s'
+  - name: idleConnectionTimeout
+    required: false
+    description: |
+      Maximum duration an idle HTTP connection can remain open before being closed.
+    type: duration
+    example: '30s'
+    default: '90s'
7 changes: 1 addition & 6 deletions conversation/huggingface/huggingface.go
@@ -54,19 +54,14 @@ func (h *Huggingface) Init(ctx context.Context, meta conversation.Metadata) error {
 
 	// Resolve model via central helper (uses metadata, then env var, then default)
 	model := conversation.GetHuggingFaceModel(m.Model)
-
 	endpoint := strings.Replace(defaultEndpoint, "{{model}}", model, 1)
 	if m.Endpoint != "" {
 		endpoint = m.Endpoint
 	}
 
 	// Create options for OpenAI client using HuggingFace's OpenAI-compatible API
 	// This is a workaround for issues with the native HuggingFace langchaingo implementation
-	options := []openai.Option{
-		openai.WithModel(model),
-		openai.WithToken(m.Key),
-		openai.WithBaseURL(endpoint),
-	}
+	options := conversation.BuildOpenAIClientOptions(model, m.Key, endpoint, m.HttpClientTimeout, m.IdleConnectionTimeout)
 
 	llm, err := openai.New(options...)
 	if err != nil {
14 changes: 14 additions & 0 deletions conversation/huggingface/metadata.yaml
@@ -40,3 +40,17 @@ metadata:
       A time-to-live value for a prompt cache to expire. Uses Golang durations
     type: string
     example: '10m'
+  - name: httpClientTimeout
+    required: false
+    description: |
+      Maximum duration for HTTP client requests to the HuggingFace API.
+    type: duration
+    example: '30s'
+    default: '0s'
+  - name: idleConnectionTimeout
+    required: false
+    description: |
+      Maximum duration an idle HTTP connection can remain open before being closed.
+    type: duration
+    example: '30s'
+    default: '90s'