forked from EricLBuehler/mistral.rs
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathgpt_oss.py
More file actions
31 lines (26 loc) · 807 Bytes
/
gpt_oss.py
File metadata and controls
31 lines (26 loc) · 807 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
#!/usr/bin/env python
"""
Example of using GPT-OSS model with mistral.rs

GPT-OSS is a Mixture of Experts model with MXFP4 quantized experts
and custom attention with per-head sinks.
"""
from mistralrs import Runner, Which, ChatCompletionRequest, Architecture


def main() -> None:
    """Build a GPT-OSS runner, send one chat completion, and print the result."""
    # Create a GPT-OSS model runner. Which.Plain loads an unquantized
    # (plain safetensors) model with the architecture selected explicitly.
    runner = Runner(
        which=Which.Plain(
            model_id="openai/gpt-oss-20b",  # Replace with actual model ID
            arch=Architecture.GptOss,
        ),
    )

    # Send a single chat completion request (blocking call).
    res = runner.send_chat_completion_request(
        ChatCompletionRequest(
            model="gpt_oss",
            messages=[{"role": "user", "content": "What is the capital of France?"}],
            max_tokens=256,
            temperature=0.7,
        )
    )

    # Print the assistant's reply followed by token-usage statistics.
    print(res.choices[0].message.content)
    print(f"\nUsage: {res.usage}")


# Guard the entry point so importing this module as a library does not
# trigger a model load and inference run as a side effect.
if __name__ == "__main__":
    main()