1- from flare_ai_consensus .consensus .config import AggregatorConfig
2- from flare_ai_consensus .router .client import AsyncOpenRouterClient , OpenRouterClient
1+ from flare_ai_consensus .router import (
2+ AsyncOpenRouterProvider ,
3+ ChatRequest ,
4+ OpenRouterProvider ,
5+ )
6+ from flare_ai_consensus .settings import AggregatorConfig , Message
37
48
5- def concatenate_aggregator (responses : dict [str , str ]) -> str :
9+ def _concatenate_aggregator (responses : dict [str , str ]) -> str :
610 """
711 Aggregate responses by concatenating each model's answer with a label.
812
@@ -13,52 +17,52 @@ def concatenate_aggregator(responses: dict[str, str]) -> str:
1317
1418
def centralized_llm_aggregator(
    provider: OpenRouterProvider,
    aggregator_config: AggregatorConfig,
    aggregated_responses: dict[str, str],
) -> str:
    """Use a centralized LLM to combine responses from several models.

    :param provider: An OpenRouterProvider instance used to send the
        chat-completion request.
    :param aggregator_config: An instance of AggregatorConfig supplying the
        conversation context, the aggregator prompt, and the model settings.
    :param aggregated_responses: A mapping of model ID to that model's
        response text (note: a dict, not a string).
    :return: The aggregator's combined response, or "" when the provider
        returns no choices.
    """
    # Build the message list: shared conversation context comes first.
    messages: list[Message] = []
    messages.extend(aggregator_config.context)

    # Add a system message carrying every individual model's answer.
    aggregated_str = _concatenate_aggregator(aggregated_responses)
    messages.append(
        {"role": "system", "content": f"Aggregated responses:\n{aggregated_str}"}
    )

    # Add the aggregator prompt last so it operates on everything above.
    messages.extend(aggregator_config.prompt)

    payload: ChatRequest = {
        "model": aggregator_config.model.model_id,
        "messages": messages,
        "max_tokens": aggregator_config.model.max_tokens,
        "temperature": aggregator_config.model.temperature,
    }

    # Get the aggregated response from the centralized LLM.
    response = provider.send_chat_completion(payload)

    # Guard against a missing or empty "choices" list: indexing the `[]`
    # default with [0] would raise IndexError, defeating the graceful-""
    # fallback the trailing .get(..., "") defaults clearly intend.
    choices = response.get("choices") or []
    if not choices:
        return ""
    return choices[0].get("message", {}).get("content", "")
5256
5357async def async_centralized_llm_aggregator (
54- client : AsyncOpenRouterClient ,
58+ provider : AsyncOpenRouterProvider ,
5559 aggregator_config : AggregatorConfig ,
5660 aggregated_responses : dict [str , str ],
5761) -> str :
5862 """
59- Use a centralized LLM (via an async client ) to combine responses.
63+ Use a centralized LLM (via an async provider ) to combine responses.
6064
61- :param client : An asynchronous OpenRouter client .
65+ :param provider : An asynchronous OpenRouterProvider .
6266 :param aggregator_config: An instance of AggregatorConfig.
6367 :param aggregated_responses: A string containing aggregated
6468 responses from individual models.
@@ -71,12 +75,12 @@ async def async_centralized_llm_aggregator(
7175 )
7276 messages .extend (aggregator_config .prompt )
7377
74- payload = {
78+ payload : ChatRequest = {
7579 "model" : aggregator_config .model .model_id ,
7680 "messages" : messages ,
7781 "max_tokens" : aggregator_config .model .max_tokens ,
7882 "temperature" : aggregator_config .model .temperature ,
7983 }
8084
81- response = await client .send_chat_completion (payload )
85+ response = await provider .send_chat_completion (payload )
8286 return response .get ("choices" , [])[0 ].get ("message" , {}).get ("content" , "" )
0 commit comments