
Commit c3b072e

feat: concat expert opinions as a single message
use the interface
1 parent 96b9134 commit c3b072e

3 files changed: +20 -32 lines changed

expert.go

Lines changed: 1 addition & 2 deletions
@@ -8,8 +8,7 @@ import (
 )
 
 type ExpertInterface interface {
-	Evaluate(ctx context.Context, input string, opts llms.CallOption) (*ExpertResult, error)
-	SetPrompt(prompt string) *Expert
+	Evaluate(ctx context.Context, input string) (*ExpertResult, error)
 }
 
 type Expert struct {
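With Evaluate as the only method on ExpertInterface, anything that can answer a question can be plugged into the think tank. Below is a minimal sketch of a stub expert that satisfies the interface, e.g. for exercising ThinkTank without calling a real model. The stubExpert type and its fields are hypothetical; Expert.Name, ExpertResult.Expert and ExpertResult.Text are the fields thinktank.go actually reads, and the stub is assumed to live in the same package.

// stubExpert is a hypothetical canned expert that satisfies ExpertInterface.
type stubExpert struct {
	name   string
	answer string
}

// Evaluate returns the canned answer without calling an LLM.
func (s *stubExpert) Evaluate(ctx context.Context, input string) (*ExpertResult, error) {
	return &ExpertResult{
		Expert: &Expert{Name: s.name},
		Text:   s.answer,
	}, nil
}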

main.go

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,12 +11,9 @@ import (
1111
var llm, _ = openai.New()
1212

1313
var callOptions = &llms.CallOptions{
14-
Model: "gpt-4o-mini",
15-
CandidateCount: 0,
16-
MaxTokens: 1024,
17-
Temperature: 0.9,
18-
Tools: nil,
19-
ToolChoice: nil,
14+
Model: "gpt-4o-latest",
15+
MaxTokens: 2048,
16+
Temperature: 0.7,
2017
}
2118

2219
var economicsExpert = &Expert{
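For comparison only: the same three settings can also be expressed as langchaingo functional call options at a GenerateContent call site. This is an illustrative sketch, not how the repository currently passes callOptions; ctx and messages are assumed to be in scope.

// Illustrative alternative to the shared CallOptions struct.
opts := []llms.CallOption{
	llms.WithModel("gpt-4o-latest"),
	llms.WithMaxTokens(2048),
	llms.WithTemperature(0.7),
}

// llm is the package-level model created by openai.New() above.
content, err := llm.GenerateContent(ctx, messages, opts...)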

thinktank.go

Lines changed: 16 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -13,13 +13,13 @@ const DefaultConcurrency = 5
1313
type ThinkTank struct {
1414
Prompt string
1515
Model llms.Model
16-
Experts []*Expert
16+
Experts []ExpertInterface
1717
Concurrency int
1818
}
1919

20-
func NewThinkTank(model llms.Model, experts ...*Expert) *ThinkTank {
20+
func NewThinkTank(model llms.Model, experts ...ExpertInterface) *ThinkTank {
2121
return &ThinkTank{
22-
Prompt: "Consider the points from all experts, then provide a comprehensive and definitive answer to the question: %s",
22+
Prompt: "Weigh and judge the experts answers against each other. Eliminate contradictions. Then provide a comprehensive and definitive answer to the question: %s",
2323
Model: model,
2424
Experts: experts,
2525
Concurrency: DefaultConcurrency,
@@ -39,7 +39,7 @@ func (tt *ThinkTank) Evaluate(ctx context.Context, input string) *ThinkTankResul
3939
for _, expert := range tt.Experts {
4040
wg.Add(1)
4141

42-
go func(expert *Expert) {
42+
go func(expert ExpertInterface) {
4343
defer wg.Done()
4444
result, err := expert.Evaluate(ctx, input)
4545
result.Err = err
@@ -60,40 +60,32 @@ func (tt *ThinkTank) Evaluate(ctx context.Context, input string) *ThinkTankResul
6060
func (tt *ThinkTank) Answer(ctx context.Context, input string) (string, error) {
6161
result := tt.Evaluate(ctx, input)
6262

63-
var messages []llms.MessageContent
64-
65-
// First we innocently pose the question verbatim
66-
originalQuestion := llms.MessageContent{
67-
Role: llms.ChatMessageTypeHuman,
68-
Parts: []llms.ContentPart{llms.TextPart(input)},
69-
}
70-
71-
messages = append(messages, originalQuestion)
63+
var expertsReports = ""
7264

7365
// Next we let each expert chime in
7466
for _, expertResult := range result.ExpertResults {
75-
7667
if expertResult.Err == nil {
77-
opinion := fmt.Sprintf("%s: %s\n", expertResult.Expert.Name, expertResult.Text)
78-
79-
expertOpinion := llms.MessageContent{
80-
Role: llms.ChatMessageTypeSystem,
81-
Parts: []llms.ContentPart{llms.TextPart(opinion)},
82-
}
8368

84-
messages = append(messages, expertOpinion)
69+
expertsReports += fmt.Sprintf("%s: %s\n\n", expertResult.Expert.Name, expertResult.Text)
8570
}
8671
}
8772

88-
// Lastly we instruct the LLM to re-answer the question in light of
73+
// Concatenate all expert reports into one message as context
74+
expertOpinions := llms.MessageContent{
75+
Role: llms.ChatMessageTypeSystem,
76+
Parts: []llms.ContentPart{llms.TextPart(expertsReports)},
77+
}
78+
79+
// Next we instruct the LLM to re-answer the question in light of
8980
// the new expert opinions
9081
thinkTankPrompt := fmt.Sprintf(tt.Prompt, input)
82+
9183
systemMessage := llms.MessageContent{
92-
Role: llms.ChatMessageTypeSystem,
84+
Role: llms.ChatMessageTypeHuman,
9385
Parts: []llms.ContentPart{llms.TextPart(thinkTankPrompt)},
9486
}
9587

96-
messages = append(messages, systemMessage)
88+
var messages = []llms.MessageContent{systemMessage, expertOpinions}
9789

9890
content, err := tt.Model.GenerateContent(ctx, messages)
9991
