Skip to content

Commit 8b56a00

Browse files
haoye2, fengju0213, Saedbhati, JINO-ROHIT, Wendong-Fan
authored
feat: Add Minimax M2 model support (#3404)
Co-authored-by: Tao Sun <[email protected]> Co-authored-by: Sun Tao <[email protected]> Co-authored-by: Saed Bhati <[email protected]> Co-authored-by: JINO ROHIT <[email protected]> Co-authored-by: Wendong-Fan <[email protected]>
1 parent 1ebe2fa commit 8b56a00

File tree

12 files changed

+352
-0
lines changed

12 files changed

+352
-0
lines changed

.github/workflows/build_package.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -144,6 +144,7 @@ jobs:
144144
QIANFAN_API_KEY: "${{ secrets.QIANFAN_API_KEY }}"
145145
ACI_API_KEY: "${{ secrets.ACI_API_KEY }}"
146146
BOHRIUM_API_KEY: "${{ secrets.BOHRIUM_API_KEY }}"
147+
          MINIMAX_API_KEY: "${{ secrets.MINIMAX_API_KEY }}"
147148
CRYNUX_API_KEY: "${{ secrets.CRYNUX_API_KEY }}"
148149
NEBIUS_API_KEY: "${{ secrets.NEBIUS_API_KEY }}"
149150
COMETAPI_KEY: "${{ secrets.COMETAPI_KEY }}"

.github/workflows/pytest_package.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@ jobs:
9090
CRYNUX_API_KEY: "${{ secrets.CRYNUX_API_KEY }}"
9191
NEBIUS_API_KEY: "${{ secrets.NEBIUS_API_KEY }}"
9292
COMETAPI_KEY: "${{ secrets.COMETAPI_KEY }}"
93+
MINIMAX_API_KEY: "${{ secrets.MINIMAX_API_KEY }}"
9394
run: |
9495
source .venv/bin/activate
9596
uv pip install -e ".[all, dev, docs]"
@@ -177,6 +178,7 @@ jobs:
177178
NEBIUS_API_KEY: "${{ secrets.NEBIUS_API_KEY }}"
178179
AMD_API_KEY: "${{ secrets.AMD_API_KEY }}"
179180
COMETAPI_KEY: "${{ secrets.COMETAPI_KEY }}"
181+
MINIMAX_API_KEY: "${{ secrets.MINIMAX_API_KEY }}"
180182
run: |
181183
source .venv/bin/activate
182184
uv pip install -e ".[all, dev, docs]"
@@ -262,6 +264,7 @@ jobs:
262264
NEBIUS_API_KEY: "${{ secrets.NEBIUS_API_KEY }}"
263265
AMD_API_KEY: "${{ secrets.AMD_API_KEY }}"
264266
COMETAPI_KEY: "${{ secrets.COMETAPI_KEY }}"
267+
MINIMAX_API_KEY: "${{ secrets.MINIMAX_API_KEY }}"
265268
run: |
266269
source .venv/bin/activate
267270
uv pip install -e ".[all, dev, docs]"

camel/configs/__init__.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
from .internlm_config import INTERNLM_API_PARAMS, InternLMConfig
2727
from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
2828
from .lmstudio_config import LMSTUDIO_API_PARAMS, LMStudioConfig
29+
from .minimax_config import MINIMAX_API_PARAMS, MinimaxConfig
2930
from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
3031
from .modelscope_config import MODELSCOPE_API_PARAMS, ModelScopeConfig
3132
from .moonshot_config import MOONSHOT_API_PARAMS, MoonshotConfig
@@ -124,6 +125,8 @@
124125
'OPENROUTER_API_PARAMS',
125126
'LMSTUDIO_API_PARAMS',
126127
'LMStudioConfig',
128+
'MINIMAX_API_PARAMS',
129+
'MinimaxConfig',
127130
'WatsonXConfig',
128131
'WATSONX_API_PARAMS',
129132
'QianfanConfig',

camel/configs/minimax_config.py

Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
1+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2+
# Licensed under the Apache License, Version 2.0 (the "License");
3+
# you may not use this file except in compliance with the License.
4+
# You may obtain a copy of the License at
5+
#
6+
# http://www.apache.org/licenses/LICENSE-2.0
7+
#
8+
# Unless required by applicable law or agreed to in writing, software
9+
# distributed under the License is distributed on an "AS IS" BASIS,
10+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11+
# See the License for the specific language governing permissions and
12+
# limitations under the License.
13+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14+
from __future__ import annotations
15+
16+
from typing import Optional, Sequence, Union
17+
18+
from camel.configs.base_config import BaseConfig
19+
20+
21+
class MinimaxConfig(BaseConfig):
22+
r"""Defines the parameters for generating chat completions using OpenAI
23+
compatibility with Minimax.
24+
25+
Reference: https://api.minimax.chat/document/guides/chat-model
26+
27+
Args:
28+
temperature (float, optional): Sampling temperature to use, between
29+
:obj:`0.0` and :obj:`1.0`. Higher values make the output more
30+
random, while lower values make it more focused and deterministic.
31+
Recommended to use :obj:`1.0`. Values outside this range will
32+
return an error. (default: :obj:`None`)
33+
top_p (float, optional): An alternative to sampling with temperature,
34+
called nucleus sampling, where the model considers the results of
35+
the tokens with top_p probability mass. So :obj:`0.1` means only
36+
the tokens comprising the top 10% probability mass are considered.
37+
(default: :obj:`None`)
38+
n (int, optional): How many chat completion choices to generate for
39+
each input message. Only supports value :obj:`1`.
40+
(default: :obj:`None`)
41+
response_format (object, optional): An object specifying the format
42+
that the model must output. Setting to
43+
{"type": "json_object"} enables JSON mode, which guarantees the
44+
message the model generates is valid JSON. Important: when using
45+
JSON mode, you must also instruct the model to produce JSON
46+
yourself via a system or user message. Without this, the model
47+
may generate an unending stream of whitespace until the generation
48+
reaches the token limit, resulting in a long-running and seemingly
49+
"stuck" request. Also note that the message content may be
50+
partially cut off if finish_reason="length", which indicates the
51+
generation exceeded max_tokens or the conversation exceeded the
52+
max context length. (default: :obj:`None`)
53+
stream (bool, optional): If set, partial message deltas will be sent,
54+
like in ChatGPT. Tokens will be sent as data-only server-sent
55+
events as they become available, with the stream terminated by
56+
a data: [DONE] message. (default: :obj:`None`)
57+
stop (str or list, optional): Up to :obj:`4` sequences where the API
58+
will stop generating further tokens. (default: :obj:`None`)
59+
max_tokens (int, optional): The maximum number of tokens to generate
60+
in the chat completion. The total length of input tokens and
61+
generated tokens is limited by the model's context length.
62+
(default: :obj:`None`)
63+
user (str, optional): A unique identifier representing your end-user,
64+
which can help to monitor and detect abuse.
65+
(default: :obj:`None`)
66+
tool_choice (Union[dict[str, str], str], optional): Controls which (if
67+
any) tool is called by the model. :obj:`"none"` means the model
68+
will not call any tool and instead generates a message.
69+
:obj:`"auto"` means the model can pick between generating a
70+
message or calling one or more tools. :obj:`"required"` means the
71+
model must call one or more tools. Specifying a particular tool
72+
via {"type": "function", "function": {"name": "my_function"}}
73+
forces the model to call that tool. :obj:`"none"` is the default
74+
when no tools are present. :obj:`"auto"` is the default if tools
75+
are present.
76+
77+
Note:
78+
Some OpenAI parameters such as presence_penalty, frequency_penalty,
79+
and logit_bias will be ignored by Minimax.
80+
"""
81+
82+
temperature: Optional[float] = None
83+
top_p: Optional[float] = None
84+
n: Optional[int] = None
85+
stream: Optional[bool] = None
86+
stop: Optional[Union[str, Sequence[str]]] = None
87+
max_tokens: Optional[int] = None
88+
response_format: Optional[dict] = None
89+
user: Optional[str] = None
90+
tool_choice: Optional[Union[dict[str, str], str]] = None
91+
92+
93+
# Set of parameter names accepted by the Minimax API, derived from the
# config model's declared fields. `set(...)` over the mapping iterates its
# keys directly — equivalent to, but more idiomatic than, a comprehension
# over `.keys()` (flake8-comprehensions C401).
MINIMAX_API_PARAMS = set(MinimaxConfig.model_fields)

camel/models/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
from .internlm_model import InternLMModel
3030
from .litellm_model import LiteLLMModel
3131
from .lmstudio_model import LMStudioModel
32+
from .minimax_model import MinimaxModel
3233
from .mistral_model import MistralModel
3334
from .model_factory import ModelFactory
3435
from .model_manager import ModelManager, ModelProcessingError
@@ -104,6 +105,7 @@
104105
'SiliconFlowModel',
105106
'VolcanoModel',
106107
'LMStudioModel',
108+
'MinimaxModel',
107109
'WatsonXModel',
108110
'QianfanModel',
109111
'CrynuxModel',

camel/models/minimax_model.py

Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,83 @@
1+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2+
# Licensed under the Apache License, Version 2.0 (the "License");
3+
# you may not use this file except in compliance with the License.
4+
# You may obtain a copy of the License at
5+
#
6+
# http://www.apache.org/licenses/LICENSE-2.0
7+
#
8+
# Unless required by applicable law or agreed to in writing, software
9+
# distributed under the License is distributed on an "AS IS" BASIS,
10+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11+
# See the License for the specific language governing permissions and
12+
# limitations under the License.
13+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14+
import os
15+
from typing import Any, Dict, Optional, Union
16+
17+
from camel.configs import MinimaxConfig
18+
from camel.models.openai_compatible_model import OpenAICompatibleModel
19+
from camel.types import ModelType
20+
from camel.utils import (
21+
BaseTokenCounter,
22+
api_keys_required,
23+
)
24+
25+
26+
class MinimaxModel(OpenAICompatibleModel):
    r"""LLM API served by Minimax, wrapped in the unified
    OpenAICompatibleModel interface.

    Args:
        model_type (Union[ModelType, str]): Model for which a backend is
            created.
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            that will be fed into :obj:`openai.ChatCompletion.create()`.
            If :obj:`None`, :obj:`MinimaxConfig().as_dict()` will be used.
            (default: :obj:`None`)
        api_key (Optional[str], optional): The API key for authenticating
            with the Minimax service; falls back to the ``MINIMAX_API_KEY``
            environment variable. (default: :obj:`None`)
        url (Optional[str], optional): The url to the Minimax M2 service;
            falls back to the ``MINIMAX_API_BASE_URL`` environment variable
            or the public endpoint. (default: :obj:`None`)
        token_counter (Optional[BaseTokenCounter], optional): Token counter to
            use for the model. If not provided, :obj:`OpenAITokenCounter(
            ModelType.GPT_4O_MINI)` will be used.
            (default: :obj:`None`)
        timeout (Optional[float], optional): The timeout value in seconds for
            API calls. If not provided, will fall back to the MODEL_TIMEOUT
            environment variable or default to 180 seconds.
            (default: :obj:`None`)
        max_retries (int, optional): Maximum number of retries for API calls.
            (default: :obj:`3`)
        **kwargs (Any): Additional arguments to pass to the client
            initialization.
    """

    @api_keys_required([("api_key", "MINIMAX_API_KEY")])
    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
        timeout: Optional[float] = None,
        max_retries: int = 3,
        **kwargs: Any,
    ) -> None:
        # Resolve each optional argument to its environment/default fallback
        # before handing everything to the OpenAI-compatible base class.
        resolved_config = (
            MinimaxConfig().as_dict()
            if model_config_dict is None
            else model_config_dict
        )
        resolved_key = api_key or os.environ.get("MINIMAX_API_KEY")
        resolved_url = url or os.environ.get(
            "MINIMAX_API_BASE_URL", "https://api.minimaxi.com/v1"
        )
        resolved_timeout = timeout or float(
            os.environ.get("MODEL_TIMEOUT", 180)
        )
        super().__init__(
            model_type=model_type,
            model_config_dict=resolved_config,
            api_key=resolved_key,
            url=resolved_url,
            token_counter=token_counter,
            timeout=resolved_timeout,
            max_retries=max_retries,
            **kwargs,
        )

camel/models/model_factory.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131
from camel.models.internlm_model import InternLMModel
3232
from camel.models.litellm_model import LiteLLMModel
3333
from camel.models.lmstudio_model import LMStudioModel
34+
from camel.models.minimax_model import MinimaxModel
3435
from camel.models.mistral_model import MistralModel
3536
from camel.models.modelscope_model import ModelScopeModel
3637
from camel.models.moonshot_model import MoonshotModel
@@ -91,6 +92,7 @@ class ModelFactory:
9192
ModelPlatformType.COMETAPI: CometAPIModel,
9293
ModelPlatformType.NEBIUS: NebiusModel,
9394
ModelPlatformType.LMSTUDIO: LMStudioModel,
95+
ModelPlatformType.MINIMAX: MinimaxModel,
9496
ModelPlatformType.OPENROUTER: OpenRouterModel,
9597
ModelPlatformType.ZHIPU: ZhipuAIModel,
9698
ModelPlatformType.GEMINI: GeminiModel,

camel/types/enums.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -502,6 +502,10 @@ class ModelType(UnifiedModelType, Enum):
502502
CRYNUX_NOUS_HERMES_3_LLAMA_3_1_8B = "NousResearch/Hermes-3-Llama-3.1-8B"
503503
CRYNUX_NOUS_HERMES_3_LLAMA_3_2_3B = "NousResearch/Hermes-3-Llama-3.2-3B"
504504

505+
# Minimax M2 models
506+
MINIMAX_M2 = "MiniMax-M2"
507+
MINIMAX_M2_STABLE = "MiniMax-M2-Stable"
508+
505509
def __str__(self):
506510
return self.value
507511

@@ -1554,6 +1558,11 @@ def token_limit(self) -> int:
15541558
ModelType.TOGETHER_LLAMA_4_SCOUT,
15551559
}:
15561560
return 10_000_000
1561+
elif self in {
1562+
ModelType.MINIMAX_M2,
1563+
ModelType.MINIMAX_M2_STABLE,
1564+
}:
1565+
return 32_000
15571566

15581567
else:
15591568
logger.warning(
@@ -1787,6 +1796,7 @@ class ModelPlatformType(Enum):
17871796
QIANFAN = "qianfan"
17881797
CRYNUX = "crynux"
17891798
AIHUBMIX = "aihubmix"
1799+
MINIMAX = "minimax"
17901800

17911801
@classmethod
17921802
def from_name(cls, name):
@@ -1972,6 +1982,11 @@ def is_aihubmix(self) -> bool:
19721982
r"""Returns whether this platform is AihubMix."""
19731983
return self is ModelPlatformType.AIHUBMIX
19741984

1985+
@property
1986+
def is_minimax(self) -> bool:
1987+
r"""Returns whether this platform is Minimax M2."""
1988+
return self is ModelPlatformType.MINIMAX
1989+
19751990

19761991
class AudioModelType(Enum):
19771992
TTS_1 = "tts-1"

camel/types/unified_model_type.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -195,6 +195,11 @@ def is_crynux(self) -> bool:
195195
r"""Returns whether the model is a Crynux served model."""
196196
return True
197197

198+
@property
199+
def is_minimax(self) -> bool:
200+
r"""Returns whether the model is a Minimax served model."""
201+
return True
202+
198203
@property
199204
def support_native_structured_output(self) -> bool:
200205
r"""Returns whether the model supports native structured output."""

docs/mintlify/key_modules/models.mdx

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ CAMEL supports a wide range of models, including [OpenAI’s GPT series](https:/
5252
| **Reka** | reka-core, reka-flash, reka-edge |
5353
| **COHERE** | command-r-plus, command-r, command-light, command, command-nightly |
5454
| **ERNIE** | ernie-x1-turbo-32k, ernie-x1-32k, ernie-x1-32k-preview<br/>ernie-4.5-turbo-128k, ernie-4.5-turbo-32k<br/>deepseek-v3, deepseek-r1, qwen3-235b-a22b |
55+
| **MiniMax** | MiniMax-M2, MiniMax-M2-Stable |
5556

5657

5758
### API & Connector Platforms

0 commit comments

Comments
 (0)