@@ -13,13 +13,13 @@ const DefaultConcurrency = 5
 type ThinkTank struct {
 	Prompt      string
 	Model       llms.Model
-	Experts     []*Expert
+	Experts     []ExpertInterface
 	Concurrency int
 }
 
-func NewThinkTank(model llms.Model, experts ...*Expert) *ThinkTank {
+func NewThinkTank(model llms.Model, experts ...ExpertInterface) *ThinkTank {
 	return &ThinkTank{
-		Prompt:      "Consider the points from all experts, then provide a comprehensive and definitive answer to the question: %s",
+		Prompt:      "Weigh and judge the experts' answers against each other. Eliminate contradictions. Then provide a comprehensive and definitive answer to the question: %s",
 		Model:       model,
 		Experts:     experts,
 		Concurrency: DefaultConcurrency,
@@ -39,7 +39,7 @@ func (tt *ThinkTank) Evaluate(ctx context.Context, input string) *ThinkTankResul
 	for _, expert := range tt.Experts {
 		wg.Add(1)
 
-		go func(expert *Expert) {
+		go func(expert ExpertInterface) {
 			defer wg.Done()
 			result, err := expert.Evaluate(ctx, input)
 			result.Err = err
@@ -60,40 +60,32 @@ func (tt *ThinkTank) Evaluate(ctx context.Context, input string) *ThinkTankResul
 func (tt *ThinkTank) Answer(ctx context.Context, input string) (string, error) {
 	result := tt.Evaluate(ctx, input)
 
-	var messages []llms.MessageContent
-
-	// First we innocently pose the question verbatim
-	originalQuestion := llms.MessageContent{
-		Role:  llms.ChatMessageTypeHuman,
-		Parts: []llms.ContentPart{llms.TextPart(input)},
-	}
-
-	messages = append(messages, originalQuestion)
+	var expertsReports = ""
 
 	// Next we let each expert chime in
 	for _, expertResult := range result.ExpertResults {
-
 		if expertResult.Err == nil {
-			opinion := fmt.Sprintf("%s: %s\n", expertResult.Expert.Name, expertResult.Text)
-
-			expertOpinion := llms.MessageContent{
-				Role:  llms.ChatMessageTypeSystem,
-				Parts: []llms.ContentPart{llms.TextPart(opinion)},
-			}
 
-			messages = append(messages, expertOpinion)
+			expertsReports += fmt.Sprintf("%s: %s\n\n", expertResult.Expert.Name, expertResult.Text)
 		}
 	}
 
-	// Lastly we instruct the LLM to re-answer the question in light of
+	// Concatenate all expert reports into one message as context
+	expertOpinions := llms.MessageContent{
+		Role:  llms.ChatMessageTypeSystem,
+		Parts: []llms.ContentPart{llms.TextPart(expertsReports)},
+	}
+
+	// Next we instruct the LLM to re-answer the question in light of
 	// the new expert opinions
 	thinkTankPrompt := fmt.Sprintf(tt.Prompt, input)
+
 	systemMessage := llms.MessageContent{
-		Role:  llms.ChatMessageTypeSystem,
+		Role:  llms.ChatMessageTypeHuman,
 		Parts: []llms.ContentPart{llms.TextPart(thinkTankPrompt)},
 	}
 
-	messages = append(messages, systemMessage)
+	var messages = []llms.MessageContent{systemMessage, expertOpinions}
 
 	content, err := tt.Model.GenerateContent(ctx, messages)
 
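A note for readers of this hunk: the ExpertInterface declaration it switches to is defined elsewhere in the change, so the sketch below only infers the minimal contract from the call sites visible here and shows a hypothetical call-side usage. The interface body in the comment, the exampleUsage helper, and the question string are illustrative assumptions, and the snippet presumes it lives in the same package as ThinkTank with the file's existing imports.

// Inferred sketch only; not the PR's actual declaration. From the call sites
// in this file (expert.Evaluate in the worker goroutine, plus the Expert.Name,
// Text and Err fields read from its result), an implementation needs roughly:
//
//	type ExpertInterface interface {
//		Evaluate(ctx context.Context, input string) (*ExpertResult, error)
//	}
//
// Call-side usage is unchanged apart from the element type: any value
// satisfying the interface can now be passed, not just *Expert.
func exampleUsage(ctx context.Context, model llms.Model, experts ...ExpertInterface) (string, error) {
	tank := NewThinkTank(model, experts...)
	return tank.Answer(ctx, "Why did Bretton Woods collapse?")
}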