Model features: native async #110
Changes from 2 commits
@@ -11,6 +11,7 @@
     HumanMessage,
     SystemMessage,
 )
+from langchain_core.outputs import ChatGeneration, LLMResult

 from langchain_nvidia_ai_endpoints.chat_models import ChatNVIDIA

@@ -441,3 +442,33 @@ def test_stop(
         assert isinstance(token.content, str)
         result += f"{token.content}|"
     assert all(target not in result for target in targets)
+
+
+def test_generate(chat_model: str, mode: dict) -> None:
+    """Test generate method of ChatNVIDIA."""
+    chat = ChatNVIDIA(model=chat_model, **mode)  # type: ignore[call-arg]
+    chat_messages: List[List[BaseMessage]] = [
+        [HumanMessage(content="How many toes do dogs have?")]
+    ]
+    messages_copy = [messages.copy() for messages in chat_messages]
+    result: LLMResult = chat.generate(chat_messages)
+    assert isinstance(result, LLMResult)
+    for response in result.generations[0]:
+        assert isinstance(response, ChatGeneration)
+        assert isinstance(response.text, str)
+        assert response.text == response.message.content
+    assert chat_messages == messages_copy
+
+
+# @pytest.mark.scheduled
Review comment on the line above: should this be commented or not?
+async def test_async_generate(chat_model: str, mode: dict) -> None:
Review comment on the line above: this will pass even if agenerate() is implemented without truly async communication with the server. Add a unit test that checks that async generation requests are interleaved; for inspiration... (a sketch of such a check follows the diff below.)
"""Test async generation.""" | ||
llm = ChatNVIDIA(model=chat_model, **mode) | ||
message = HumanMessage(content="Hello") | ||
response = await llm.agenerate([[message]]) | ||
assert isinstance(response, LLMResult) | ||
for generations in response.generations: | ||
for generation in generations: | ||
assert isinstance(generation, ChatGeneration) | ||
assert isinstance(generation.text, str) | ||
assert generation.text == generation.message.content |
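
For reference, a minimal sketch of the interleaving check the reviewer asks for, assuming the chat_model/mode fixtures and the pytest async setup already used in this file. The test name, the event-log approach, and the prompt are illustrative, not part of the PR, and a proper unit test would mock the server endpoint rather than call it; the point here is the assertion pattern.

import asyncio
from typing import List

from langchain_core.messages import HumanMessage

from langchain_nvidia_ai_endpoints.chat_models import ChatNVIDIA


async def test_async_generate_interleaves(chat_model: str, mode: dict) -> None:
    """Hypothetical: check that two concurrent agenerate() calls overlap."""
    llm = ChatNVIDIA(model=chat_model, **mode)
    events: List[str] = []  # start/end markers, in the order they occur

    async def tracked(tag: str) -> None:
        events.append(f"start-{tag}")
        await llm.agenerate([[HumanMessage(content="Hello")]])
        events.append(f"end-{tag}")

    await asyncio.gather(tracked("a"), tracked("b"))

    # If agenerate() yields at its first await, both calls start before
    # either finishes; an implementation that blocks the event loop runs
    # them back-to-back and records ["start-a", "end-a", "start-b", "end-b"].
    assert events[:2] == ["start-a", "start-b"]

Asserting only on the first two events keeps the check independent of which request happens to finish first: a truly async agenerate() yields at its first await, so both calls record a start before either records an end.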