@@ -83,44 +83,39 @@ async def invoke_structured_model(
         :param response_structure: Dictionary defining the output structure
         :return: StructuredResponse containing the structured data
         """
+        structured_response = StructuredResponse(
+            data={},
+            raw_response='',
+            metrics=LDAIMetrics(success=False, usage=None),
+        )
         try:
             langchain_messages = LangChainProvider.convert_messages_to_langchain(messages)
-            structured_llm = self._llm.with_structured_output(response_structure)
+            structured_llm = self._llm.with_structured_output(response_structure, include_raw=True)
             response = await structured_llm.ainvoke(langchain_messages)
 
             if not isinstance(response, dict):
                 log.warning(
                     f'Structured output did not return a dict. '
                     f'Got: {type(response)}'
                 )
-                return StructuredResponse(
-                    data={},
-                    raw_response='',
-                    metrics=LDAIMetrics(
-                        success=False,
-                        usage=TokenUsage(total=0, input=0, output=0),
-                    ),
-                )
+                return structured_response
 
-            return StructuredResponse(
-                data=response,
-                raw_response=str(response),
-                metrics=LDAIMetrics(
-                    success=True,
-                    usage=TokenUsage(total=0, input=0, output=0),
-                ),
-            )
+            raw_response = response.get('raw')
+            if raw_response is not None:
+                if hasattr(raw_response, 'content'):
+                    structured_response.raw_response = raw_response.content
+                structured_response.metrics.usage = LangChainProvider.get_ai_usage_from_response(raw_response)
+
+            if response.get('parsing_error'):
+                log.warning('LangChain structured model invocation had a parsing error')
+                return structured_response
+
+            structured_response.metrics.success = True
+            structured_response.data = response.get('parsed') or {}
+            return structured_response
         except Exception as error:
             log.warning(f'LangChain structured model invocation failed: {error}')
-
-            return StructuredResponse(
-                data={},
-                raw_response='',
-                metrics=LDAIMetrics(
-                    success=False,
-                    usage=TokenUsage(total=0, input=0, output=0),
-                ),
-            )
+            return structured_response
 
     def get_chat_model(self) -> BaseChatModel:
         """
@@ -135,20 +130,47 @@ def map_provider(ld_provider_name: str) -> str:
135130 """
136131 Map LaunchDarkly provider names to LangChain provider names.
137132
138- This method enables seamless integration between LaunchDarkly's standardized
139- provider naming and LangChain's naming conventions.
140-
141133 :param ld_provider_name: LaunchDarkly provider name
142134 :return: LangChain-compatible provider name
143135 """
144136 lowercased_name = ld_provider_name .lower ()
137+ # Bedrock is the only provider that uses "provider:model_family" (e.g. Bedrock:Anthropic).
138+ if lowercased_name .startswith ('bedrock:' ):
139+ return 'bedrock_converse'
145140
146141 mapping : Dict [str , str ] = {
147142 'gemini' : 'google-genai' ,
143+ 'bedrock' : 'bedrock_converse' ,
148144 }
149-
150145 return mapping .get (lowercased_name , lowercased_name )
151146
147+ @staticmethod
148+ def get_ai_usage_from_response (response : BaseMessage ) -> TokenUsage :
149+ """
150+ Get token usage from a LangChain provider response.
151+
152+ :param response: The response from the LangChain model
153+ :return: TokenUsage with success status and token usage
154+ """
155+ # Extract token usage if available
156+ usage : Optional [TokenUsage ] = None
157+ if hasattr (response , 'usage_metadata' ) and response .usage_metadata :
158+ usage = TokenUsage (
159+ total = response .usage_metadata .get ('total_tokens' , 0 ),
160+ input = response .usage_metadata .get ('input_tokens' , 0 ),
161+ output = response .usage_metadata .get ('output_tokens' , 0 ),
162+ )
163+ if not usage and hasattr (response , 'response_metadata' ) and response .response_metadata :
164+ token_usage = response .response_metadata .get ('tokenUsage' ) or response .response_metadata .get ('token_usage' )
165+ if token_usage :
166+ usage = TokenUsage (
167+ total = token_usage .get ('totalTokens' , 0 ) or token_usage .get ('total_tokens' , 0 ),
168+ input = token_usage .get ('promptTokens' , 0 ) or token_usage .get ('prompt_tokens' , 0 ),
169+ output = token_usage .get ('completionTokens' , 0 ) or token_usage .get ('completion_tokens' , 0 ),
170+ )
171+
172+ return usage
173+
152174 @staticmethod
153175 def get_ai_metrics_from_response (response : BaseMessage ) -> LDAIMetrics :
154176 """
@@ -168,15 +190,7 @@ def get_ai_metrics_from_response(response: BaseMessage) -> LDAIMetrics:
         )
         """
         # Extract token usage if available
-        usage: Optional[TokenUsage] = None
-        if hasattr(response, 'response_metadata') and response.response_metadata:
-            token_usage = response.response_metadata.get('tokenUsage') or response.response_metadata.get('token_usage')
-            if token_usage:
-                usage = TokenUsage(
-                    total=token_usage.get('totalTokens', 0) or token_usage.get('total_tokens', 0),
-                    input=token_usage.get('promptTokens', 0) or token_usage.get('prompt_tokens', 0),
-                    output=token_usage.get('completionTokens', 0) or token_usage.get('completion_tokens', 0),
-                )
+        usage = LangChainProvider.get_ai_usage_from_response(response)
 
         return LDAIMetrics(success=True, usage=usage)
 
@@ -227,10 +241,15 @@ def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel:
 
         model_name = model_dict.get('name', '')
         provider = provider_dict.get('name', '')
-        parameters = model_dict.get('parameters') or {}
+        parameters = dict(model_dict.get('parameters') or {})
+        mapped_provider = LangChainProvider.map_provider(provider)
 
+        # Bedrock requires the foundation provider (e.g. the Anthropic in Bedrock:Anthropic) to be
+        # passed via parameters, separately from model_provider, which LangChain uses for routing.
+        if mapped_provider == 'bedrock_converse' and 'provider' not in parameters:
+            parameters['provider'] = provider.removeprefix('bedrock:')
         return init_chat_model(
             model_name,
-            model_provider=LangChainProvider.map_provider(provider),
+            model_provider=mapped_provider,
             **parameters,
         )
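Taken together with map_provider, a Bedrock-style provider name now routes through LangChain's bedrock_converse integration while the foundation-model family travels in parameters. An illustrative walk-through with example strings (the model ID is not a repo fixture), assuming init_chat_model forwards extra keyword arguments to the underlying ChatBedrockConverse model:

    from langchain.chat_models import init_chat_model

    ld_provider = 'bedrock:anthropic'                        # lowercased LaunchDarkly provider name
    mapped = LangChainProvider.map_provider(ld_provider)     # -> 'bedrock_converse'
    foundation = ld_provider.removeprefix('bedrock:')        # -> 'anthropic'
    model = init_chat_model(
        'anthropic.claude-3-5-sonnet-20240620-v1:0',         # example Bedrock model ID
        model_provider=mapped,
        provider=foundation,                                 # forwarded to ChatBedrockConverse
    )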