Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -378,7 +378,12 @@ def _should_summarize_based_on_reported_tokens(
and last_ai_message.usage_metadata is not None
and (reported_tokens := last_ai_message.usage_metadata.get("total_tokens", -1))
and reported_tokens >= threshold
and (message_provider := last_ai_message.response_metadata.get("model_provider"))
and (
message_provider := last_ai_message.response_metadata.get(
"ls_provider",
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just curious, for my own learning: what is `ls_provider`? I understand model/LLM provider.

last_ai_message.response_metadata.get("model_provider"),
Comment on lines +381 to +384
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So this basically gets `"ls_provider"` and assigns it to `message_provider`, and if `"ls_provider"` is not present, falls back to getting `"model_provider"` and assigning that to `message_provider` instead?

)
)
and message_provider == self.model._get_ls_params().get("ls_provider") # noqa: SLF001
):
return True
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1219,6 +1219,56 @@ def test_usage_metadata_trigger() -> None:
assert not middleware._should_summarize(messages, 0)


def test_usage_metadata_trigger_ls_provider() -> None:
    """`ls_provider` in response_metadata takes precedence over `model_provider`.

    Middleware should trigger summarization only when the message's reported
    provider matches the configured model's ``ls_provider``; the explicit
    ``ls_provider`` key wins over ``model_provider`` when both are present.
    """

    class BedrockMockModel(MockChatModel):
        # Mimic a Bedrock chat model whose LangSmith params report
        # "amazon_bedrock" even though message model_provider differs.
        def _get_ls_params(self, **kwargs: Any) -> dict[str, Any]:
            return {"ls_provider": "amazon_bedrock"}

    model = BedrockMockModel()
    middleware = SummarizationMiddleware(
        model=model,
        trigger=("tokens", 10_000),
        keep=("messages", 4),
    )

    # Case 1: ls_provider matches even though model_provider doesn't,
    # and total_tokens (10_001) exceeds the 10_000 threshold -> triggers.
    messages: list[AnyMessage] = [
        HumanMessage(content="msg1"),
        AIMessage(
            content="msg2",
            response_metadata={
                "model_provider": "bedrock_converse",
                "ls_provider": "amazon_bedrock",
            },
            usage_metadata={
                "input_tokens": 7500,
                "output_tokens": 2501,
                "total_tokens": 10_001,
            },
        ),
    ]
    assert middleware._should_summarize(messages, 0)

    # Case 2: ls_provider doesn't match the model's ls_provider -> should
    # not trigger, even though the token threshold is exceeded.
    messages_mismatch: list[AnyMessage] = [
        HumanMessage(content="msg1"),
        AIMessage(
            content="msg2",
            response_metadata={
                "model_provider": "amazon_bedrock",
                "ls_provider": "not-amazon_bedrock",
            },
            usage_metadata={
                "input_tokens": 7500,
                "output_tokens": 2501,
                "total_tokens": 10_001,
            },
        ),
    ]
    assert not middleware._should_summarize(messages_mismatch, 0)


class ConfigCapturingModel(BaseChatModel):
"""Mock model that captures the config passed to invoke/ainvoke."""

Expand Down
Loading