Skip to content

Commit cbaea35

Browse files
authored
style(core,langchain-classic,openai): fix griffe warnings (#34074)
1 parent f070217 commit cbaea35

File tree

6 files changed

+29
-74
lines changed

6 files changed

+29
-74
lines changed

libs/core/langchain_core/prompts/chat.py

Lines changed: 22 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -903,23 +903,28 @@ def __init__(
903903
5. A string which is shorthand for `("human", template)`; e.g.,
904904
`"{user_input}"`
905905
template_format: Format of the template.
906-
input_variables: A list of the names of the variables whose values are
907-
required as inputs to the prompt.
908-
optional_variables: A list of the names of the variables for placeholder
909-
or MessagePlaceholder that are optional.
910-
911-
These variables are auto inferred from the prompt and user need not
912-
provide them.
913-
partial_variables: A dictionary of the partial variables the prompt
914-
template carries.
915-
916-
Partial variables populate the template so that you don't need to pass
917-
them in every time you call the prompt.
918-
validate_template: Whether to validate the template.
919-
input_types: A dictionary of the types of the variables the prompt template
920-
expects.
921-
922-
If not provided, all variables are assumed to be strings.
906+
**kwargs: Additional keyword arguments passed to `BasePromptTemplate`,
907+
including (but not limited to):
908+
909+
- `input_variables`: A list of the names of the variables whose values
910+
are required as inputs to the prompt.
911+
- `optional_variables`: A list of the names of the variables for
912+
placeholder or `MessagePlaceholder` that are optional.
913+
914+
These variables are auto-inferred from the prompt, and the user need not
915+
provide them.
916+
917+
- `partial_variables`: A dictionary of the partial variables the prompt
918+
template carries.
919+
920+
Partial variables populate the template so that you don't need to
921+
pass them in every time you call the prompt.
922+
923+
- `validate_template`: Whether to validate the template.
924+
- `input_types`: A dictionary of the types of the variables the prompt
925+
template expects.
926+
927+
If not provided, all variables are assumed to be strings.
923928
924929
Examples:
925930
Instantiation from a list of message templates:

libs/core/uv.lock

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

libs/langchain/langchain_classic/smith/evaluation/progress.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,6 @@ def __init__(
2626
total: The total number of items to be processed.
2727
ncols: The character width of the progress bar.
2828
end_with: Last string to print after progress bar reaches end.
29-
**kwargs: Additional keyword arguments.
3029
"""
3130
self.total = total
3231
self.ncols = ncols

libs/langchain/langchain_classic/smith/evaluation/runner_utils.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -295,11 +295,7 @@ def _get_prompt(inputs: dict[str, Any]) -> str:
295295

296296

297297
class ChatModelInput(TypedDict):
298-
"""Input for a chat model.
299-
300-
Args:
301-
messages: List of chat messages.
302-
"""
298+
"""Input for a chat model."""
303299

304300
messages: list[BaseMessage]
305301

libs/langchain/langchain_classic/smith/evaluation/string_run_evaluator.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -108,8 +108,8 @@ def serialize_outputs(self, outputs: dict) -> str:
108108
The serialized output text from the first generation.
109109
110110
Raises:
111-
ValueError: If no generations are found in the outputs,
112-
or if the generations are empty.
111+
ValueError: If no generations are found in the outputs or if the generations
112+
are empty.
113113
"""
114114
if not outputs.get("generations"):
115115
msg = "Cannot evaluate LLM Run without generations."
@@ -436,8 +436,8 @@ def from_run_and_data_type(
436436
The instantiated evaluation chain.
437437
438438
Raises:
439-
If the run type is not supported, or if the evaluator requires a
440-
reference from the dataset but the reference key is not provided.
439+
ValueError: If the run type is not supported, or if the evaluator requires a
440+
reference from the dataset but the reference key is not provided.
441441
442442
"""
443443
# Configure how run inputs/predictions are passed to the evaluator

libs/partners/openai/langchain_openai/chat_models/azure.py

Lines changed: 0 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -905,51 +905,6 @@ def with_structured_output(
905905
!!! note
906906
`strict` can only be non-null if `method` is `'json_schema'`
907907
or `'function_calling'`.
908-
tools:
909-
A list of tool-like objects to bind to the chat model. Requires that:
910-
911-
- `method` is `'json_schema'` (default).
912-
- `strict=True`
913-
- `include_raw=True`
914-
915-
If a model elects to call a
916-
tool, the resulting `AIMessage` in `'raw'` will include tool calls.
917-
918-
??? example
919-
920-
```python
921-
from langchain.chat_models import init_chat_model
922-
from pydantic import BaseModel
923-
924-
925-
class ResponseSchema(BaseModel):
926-
response: str
927-
928-
929-
def get_weather(location: str) -> str:
930-
\"\"\"Get weather at a location.\"\"\"
931-
pass
932-
933-
model = init_chat_model("openai:gpt-4o-mini")
934-
935-
structured_model = model.with_structured_output(
936-
ResponseSchema,
937-
tools=[get_weather],
938-
strict=True,
939-
include_raw=True,
940-
)
941-
942-
structured_model.invoke("What's the weather in Boston?")
943-
```
944-
945-
```python
946-
{
947-
"raw": AIMessage(content="", tool_calls=[...], ...),
948-
"parsing_error": None,
949-
"parsed": None,
950-
}
951-
```
952-
953908
kwargs: Additional keyword args are passed through to the model.
954909
955910
Returns:

0 commit comments

Comments
 (0)