feat(together-ai): update model YAMLs [bot] #1016
Conversation
/test-models
Gateway test results
Failures (6)
Error: Code snippet
from openai import OpenAI

client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
    model="test-v2-together-ai/nvidia-nemotron-3-nano-omni-30b-a3b-reasoning-fp8",
    messages=[
        {"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
    ],
    reasoning_effort="medium",
    stream=False,
)
# Reasoning evidence may surface in usage accounting or on the message itself.
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
    _message = getattr(_choices[0], "message", None)
else:
    _message = None
if _message and getattr(_message, "content", None) is not None:
    print(_message.content)
if _usage is not None:
    _output_token_details = getattr(_usage, "completion_tokens_details", None)
    if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
        _reasoning_detected = True
    elif getattr(_usage, "reasoning", None) is not None:
        _reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
    _reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
    _reasoning_detected = True
if not _reasoning_detected:
    print("Response: ", response)
    raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI

client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
    model="test-v2-together-ai/nvidia-nemotron-3-nano-omni-30b-a3b-reasoning-fp8",
    messages=[
        {"role": "user", "content": "What is the capital of France?"},
    ],
    stream=True,
)
# Print streamed content deltas as they arrive.
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
Error: Code snippet
from openai import OpenAI

client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
    model="test-v2-together-ai/nvidia-nemotron-3-nano-omni-30b-a3b-reasoning-fp8",
    messages=[
        {"role": "user", "content": "What is the capital of France?"},
    ],
    stream=False,
)
print(response.choices[0].message.content)
Error: Code snippet
from openai import OpenAI

client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a location.",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city name, e.g. London",
                    },
                },
                "required": ["location"],
                "additionalProperties": False,
            },
            "strict": True,
        },
    },
]
response = client.chat.completions.create(
    model="test-v2-together-ai/nvidia-nemotron-3-nano-omni-30b-a3b-reasoning-fp8",
    messages=[
        {"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
    ],
    tools=tools,
    tool_choice="auto",
    stream=False,
)
# The test passes only if the model actually emitted at least one tool call.
_message = response.choices[0].message
if _message.tool_calls:
    for _tc in _message.tool_calls:
        print(f"Function: {_tc.function.name}")
        print(f"Arguments: {_tc.function.arguments}")
else:
    print(_message.content)
if not _message.tool_calls or len(_message.tool_calls) == 0:
    raise Exception("VALIDATION FAILED: tool-call - no tool calls in response")
print("VALIDATION: tool-call SUCCESS")
Error: Code snippet
from openai import OpenAI

client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
    model="test-v2-together-ai/nvidia-nemotron-3-nano-omni-30b-a3b-reasoning-fp8",
    messages=[
        {"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
    ],
    reasoning_effort="medium",
    stream=True,
)
# Watch each chunk for reasoning deltas or reasoning-token usage.
_reasoning_detected = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if getattr(delta, "reasoning_content", None) is not None:
            _reasoning_detected = True
        if getattr(delta, "reasoning", None) is not None:
            _reasoning_detected = True
    _usage = getattr(chunk, "usage", None)
    if _usage is not None:
        _details = getattr(_usage, "completion_tokens_details", None)
        if _details and getattr(_details, "reasoning_tokens", 0) > 0:
            _reasoning_detected = True
if not _reasoning_detected:
    raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Error: Code snippet
from openai import OpenAI

client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a location.",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city name, e.g. London",
                    },
                },
                "required": ["location"],
                "additionalProperties": False,
            },
            "strict": True,
        },
    },
]
response = client.chat.completions.create(
    model="test-v2-together-ai/nvidia-nemotron-3-nano-omni-30b-a3b-reasoning-fp8",
    messages=[
        {"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
    ],
    tools=tools,
    tool_choice="auto",
    stream=True,
)
# Track whether any streamed delta carried a tool call.
_tool_calls_made = False
for chunk in response:
    if chunk.choices and len(chunk.choices) > 0:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            print(delta.content, end="", flush=True)
        if delta.tool_calls:
            _tool_calls_made = True
            for _tc in delta.tool_calls:
                if _tc.function:
                    print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
    raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Skipped (1)
Skip reason:
/test-models
/test-models
Gateway test results
Skipped (2)
Skip reason:
Skip reason:
1 similar comment
Cursor Bugbot has reviewed your changes and found 1 potential issue.
Reviewed by Cursor Bugbot for commit f391da8.
region: "*"
features:
  - function_calling
  - system_messages
BF16 model missing features present in equivalent FP8 variant
Medium Severity
The newly added features list for the BF16 variant only includes function_calling and system_messages, while the equivalent FP8 variant (NVIDIA-Nemotron-3-Super-120B-A12B-FP8.yaml) declares function_calling, tool_choice, structured_output, and system_messages. Since both are the same base model at different quantization levels (and both are provisioned), the BF16 variant is likely missing tool_choice and structured_output. This could cause the gateway to incorrectly withhold these capabilities for the BF16 model.
Reviewed by Cursor Bugbot for commit f391da8.
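For concreteness, a minimal sketch of what the aligned BF16 features block could look like. Only the FP8 variant's list quoted in the review is sourced; the placement of these keys within the BF16 YAML is illustrative:

# Hypothetical alignment of NVIDIA-Nemotron-3-Super-120B-A12B-BF16.yaml
# with its FP8 sibling; tool_choice and structured_output are the
# capabilities the review flags as likely missing.
region: "*"
features:
  - function_calling
  - tool_choice        # assumed: mirrored from the FP8 variant's declared list
  - structured_output  # assumed: mirrored from the FP8 variant's declared list
  - system_messages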


Auto-generated by poc-agent for provider together-ai.
Note
Low Risk
Low-risk, metadata-only updates to Together.ai model YAMLs; the main risk is an incorrect capability declaration (modalities, context window, features) causing misrouting or failed requests.
Overview
Updates Together.ai’s NVIDIA Nemotron model YAMLs to better describe capabilities and availability. NVIDIA-Nemotron-3-Super-120B-A12B-BF16 now declares function_calling/system_messages, adds source URLs, and flags thinking. nemotron-3-nano-omni-30b-a3b-reasoning-fp8 is changed from unknown to chat, increases context_window to 256000, declares multimodal inputs, and adds provisioning/source/status metadata plus thinking.
Reviewed by Cursor Bugbot for commit f391da8. Bugbot is set up for automated code reviews on this repo.
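To make the nano-omni change concrete, here is a sketch of the shape that YAML might take after this PR. Only the values named in the overview (the chat type, the 256000 context window, and thinking) are sourced; the surrounding key names are assumptions:

# Hypothetical excerpt of nemotron-3-nano-omni-30b-a3b-reasoning-fp8.yaml
type: chat              # was: unknown
context_window: 256000
features:
  - thinking
# plus multimodal input declarations and provisioning/source/status
# metadata, whose exact keys are not quoted in this summary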
NVIDIA-Nemotron-3-Super-120B-A12B-BF16now declaresfunction_calling/system_messages, adds source URLs, and flagsthinking.nemotron-3-nano-omni-30b-a3b-reasoning-fp8is changed from unknown tochat, increasescontext_windowto256000, declares multimodal inputs, and adds provisioning/source/status metadata plusthinking.Reviewed by Cursor Bugbot for commit f391da8. Bugbot is set up for automated code reviews on this repo. Configure here.