feat(together-ai): update model YAMLs [bot] #1008
Conversation
|
/test-models
Gateway test results
Failures (6)
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "List 3 colors with their hex codes in JSON."},
],
response_format={"type": "json_object"},
stream=False,
)
import json as _json
_content = response.choices[0].message.content
print(_content)
if not _content:
raise Exception("VALIDATION FAILED: json-output - response content is empty")
_json.loads(_content)
print("VALIDATION: json-output SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "What is the capital of France?"},
],
stream=False,
)
print(response.choices[0].message.content)
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=False,
)
_message = response.choices[0].message
if _message.tool_calls:
for _tc in _message.tool_calls:
print(f"Function: {_tc.function.name}")
print(f"Arguments: {_tc.function.arguments}")
else:
print(_message.content)
if not _message.tool_calls or len(_message.tool_calls) == 0:
raise Exception("VALIDATION FAILED: tool-call - no tool calls in response")
print("VALIDATION: tool-call SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
|
/test-models
Gateway test results
Failures (7)
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "What is the capital of France?"},
],
stream=False,
)
print(response.choices[0].message.content)
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "What is the capital of France?"},
],
stream=True,
)
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=False,
)
_usage = getattr(response, "usage", None)
_reasoning_detected = False
_choices = getattr(response, "choices", None)
if _choices and len(_choices) > 0:
_message = getattr(_choices[0], "message", None)
else:
_message = None
if _message and getattr(_message, "content", None) is not None:
print(_message.content)
if _usage is not None:
_output_token_details = getattr(_usage, "completion_tokens_details", None)
if _output_token_details and getattr(_output_token_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
elif getattr(_usage, "reasoning", None) is not None:
_reasoning_detected = True
if getattr(_message, "reasoning_content", None) is not None:
_reasoning_detected = True
elif getattr(_message, "reasoning", None) is not None:
_reasoning_detected = True
if not _reasoning_detected:
print("Response: ", response)
raise Exception("VALIDATION FAILED: reasoning - no reasoning information in response")
print("VALIDATION: reasoning SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "List 3 colors with their hex codes in JSON."},
],
response_format={"type": "json_object"},
stream=False,
)
import json as _json
_content = response.choices[0].message.content
print(_content)
if not _content:
raise Exception("VALIDATION FAILED: json-output - response content is empty")
_json.loads(_content)
print("VALIDATION: json-output SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=False,
)
_message = response.choices[0].message
if _message.tool_calls:
for _tc in _message.tool_calls:
print(f"Function: {_tc.function.name}")
print(f"Arguments: {_tc.function.arguments}")
else:
print(_message.content)
if not _message.tool_calls or len(_message.tool_calls) == 0:
raise Exception("VALIDATION FAILED: tool-call - no tool calls in response")
print("VALIDATION: tool-call SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city name, e.g. London",
},
},
"required": ["location"],
"additionalProperties": False,
},
"strict": True,
},
},
]
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "Use the get_weather tool to check the weather in London. You must call the tool, do not respond with plain text."},
],
tools=tools,
tool_choice="auto",
stream=True,
)
_tool_calls_made = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if delta.tool_calls:
_tool_calls_made = True
for _tc in delta.tool_calls:
if _tc.function:
print(_tc.function.arguments or "", end="", flush=True)
if not _tool_calls_made:
raise Exception("VALIDATION FAILED: tool-call stream - no tool calls received")
print("\nVALIDATION: tool-call stream SUCCESS")
Error: Code snippet
from openai import OpenAI
client = OpenAI(api_key="***", base_url="https://internal.devtest.truefoundry.tech/api/llm")
response = client.chat.completions.create(
model="test-v2-together-ai/Qwen-Qwen3.6-Plus",
messages=[
{"role": "user", "content": "How to calculate 3^3^3^3? Think step by step and show all reasoning."},
],
reasoning_effort="medium",
stream=True,
)
_reasoning_detected = False
for chunk in response:
if chunk.choices and len(chunk.choices) > 0:
delta = chunk.choices[0].delta
if delta.content is not None:
print(delta.content, end="", flush=True)
if getattr(delta, "reasoning_content", None) is not None:
_reasoning_detected = True
if getattr(delta, "reasoning", None) is not None:
_reasoning_detected = True
_usage = getattr(chunk, "usage", None)
if _usage is not None:
_details = getattr(_usage, "completion_tokens_details", None)
if _details and getattr(_details, "reasoning_tokens", 0) > 0:
_reasoning_detected = True
if not _reasoning_detected:
raise Exception("VALIDATION FAILED: reasoning stream - no reasoning information in stream")
print("\nVALIDATION: reasoning stream SUCCESS")
Auto-generated by poc-agent for provider together-ai.

Note
Low Risk
Low risk config-only change updating a single model YAML; potential impact is limited to model capability/metadata being consumed incorrectly by downstream tooling if assumptions differ.
Overview
Updates the `Qwen/Qwen3.6-Plus` Together AI model definition to add declared capabilities and metadata: `features` (function calling + JSON output), multimodal `modalities` (text/image/video input), and `thinking` support. Also fixes `mode` from `unknown` to `chat` and adds operational metadata (`provisioning: serverless`, `sources`, `status: active`).

Reviewed by Cursor Bugbot for commit 29a7b89. Bugbot is set up for automated code reviews on this repo. Configure here.