Skip to content

Commit 9f18a9c

Browse files
authored
enhance: AWS bedrock and LM studio integration PR 1194, 2193 (#2229)
1 parent 66ebc9e commit 9f18a9c

7 files changed

+55
-29
lines changed

camel/configs/bedrock_config.py

+42-5
Original file line numberDiff line numberDiff line change
@@ -11,26 +11,63 @@
1111
# See the License for the specific language governing permissions and
1212
# limitations under the License.
1313
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14-
from typing import Optional
14+
from typing import Dict, Optional, Union
1515

1616
from camel.configs.base_config import BaseConfig
1717

1818

1919
class BedrockConfig(BaseConfig):
20-
r"""Defines the parameters for generating chat completions using Bedrock
20+
r"""Defines the parameters for generating chat completions using OpenAI
2121
compatibility.
2222
2323
Args:
24-
maxTokens (int, optional): The maximum number of tokens.
25-
temperatue (float, optional): Controls the randomness of the output.
26-
top_p (float, optional): Use nucleus sampling.
24+
max_tokens (int, optional): The maximum number of tokens to generate
25+
in the chat completion. The total length of input tokens and
26+
generated tokens is limited by the model's context length.
27+
(default: :obj:`None`)
28+
temperature (float, optional): Sampling temperature to use, between
29+
:obj:`0` and :obj:`2`. Higher values make the output more random,
30+
while lower values make it more focused and deterministic.
31+
(default: :obj:`None`)
32+
top_p (float, optional): An alternative to sampling with temperature,
33+
called nucleus sampling, where the model considers the results of
34+
the tokens with top_p probability mass. So :obj:`0.1` means only
35+
the tokens comprising the top 10% probability mass are considered.
36+
(default: :obj:`None`)
2737
top_k (int, optional): The number of top tokens to consider.
38+
stream (bool, optional): If True, partial message deltas will be sent
39+
as data-only server-sent events as they become available.
40+
(default: :obj:`None`)
41+
tools (list[FunctionTool], optional): A list of tools the model may
42+
call. Currently, only functions are supported as a tool. Use this
43+
to provide a list of functions the model may generate JSON inputs
44+
for. A max of 128 functions are supported.
45+
tool_choice (Union[dict[str, str], str], optional): Controls which (if
46+
any) tool is called by the model. :obj:`"none"` means the model
47+
will not call any tool and instead generates a message.
48+
:obj:`"auto"` means the model can pick between generating a
49+
message or calling one or more tools. :obj:`"required"` means the
50+
model must call one or more tools. Specifying a particular tool
51+
via {"type": "function", "function": {"name": "my_function"}}
52+
forces the model to call that tool. :obj:`"none"` is the default
53+
when no tools are present. :obj:`"auto"` is the default if tools
54+
are present.
55+
reasoning_effort (str, optional): A parameter specifying the level of
56+
reasoning used by certain model types. Valid values are
57+
:obj:`"low"`, :obj:`"medium"`, or :obj:`"high"`. If set, it is only
58+
applied to the model types that support it (e.g., :obj:`o1`,
59+
:obj:`o1mini`, :obj:`o1preview`, :obj:`o3mini`). If not provided
60+
or if the model type does not support it, this parameter is
61+
ignored. (default: :obj:`None`)
2862
"""
2963

3064
max_tokens: Optional[int] = None
3165
temperature: Optional[float] = None
3266
top_p: Optional[float] = None
3367
top_k: Optional[int] = None
68+
stream: Optional[bool] = None
69+
tool_choice: Optional[Union[Dict[str, str], str]] = None
70+
reasoning_effort: Optional[str] = None
3471

3572

3673
BEDROCK_API_PARAMS = {param for param in BedrockConfig.model_fields.keys()}

camel/configs/lmstudio_config.py

-7
Original file line numberDiff line numberDiff line change
@@ -32,8 +32,6 @@ class LMStudioConfig(BaseConfig):
3232
the tokens with top_p probability mass. So :obj:`0.1` means only
3333
the tokens comprising the top 10% probability mass are considered.
3434
(default: :obj:`None`)
35-
n (int, optional): How many chat completion choices to generate for
36-
each input message. (default: :obj:`None`)
3735
response_format (object, optional): An object specifying the format
3836
that the model must output. Compatible with GPT-4 Turbo and all
3937
GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
@@ -66,9 +64,6 @@ class LMStudioConfig(BaseConfig):
6664
existing frequency in the text so far, decreasing the model's
6765
likelihood to repeat the same line verbatim. See more information
6866
about frequency and presence penalties. (default: :obj:`None`)
69-
user (str, optional): A unique identifier representing your end-user,
70-
which can help OpenAI to monitor and detect abuse.
71-
(default: :obj:`None`)
7267
tools (list[FunctionTool], optional): A list of tools the model may
7368
call. Currently, only functions are supported as a tool. Use this
7469
to provide a list of functions the model may generate JSON inputs
@@ -87,14 +82,12 @@ class LMStudioConfig(BaseConfig):
8782

8883
temperature: Optional[float] = None
8984
top_p: Optional[float] = None
90-
n: Optional[int] = None
9185
stream: Optional[bool] = None
9286
stop: Optional[Union[str, Sequence[str]]] = None
9387
max_tokens: Optional[int] = None
9488
presence_penalty: Optional[float] = None
9589
response_format: Optional[dict] = None
9690
frequency_penalty: Optional[float] = None
97-
user: Optional[str] = None
9891
tool_choice: Optional[Union[dict[str, str], str]] = None
9992

10093

camel/models/aws_bedrock_model.py

+1
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,7 @@ class AWSBedrockModel(OpenAICompatibleModel):
5050
API calls. If not provided, will fall back to the MODEL_TIMEOUT
5151
environment variable or default to 180 seconds.
5252
(default: :obj:`None`)
53+
5354
References:
5455
https://docs.aws.amazon.com/bedrock/latest/APIReference/welcome.html
5556
"""

camel/models/lmstudio_model.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,9 @@ class LMStudioModel(OpenAICompatibleModel):
3030
that will be fed into :obj:`openai.ChatCompletion.create()`.
3131
If :obj:`None`, :obj:`LMStudioConfig().as_dict()` will be used.
3232
(default: :obj:`None`)
33-
api_key (Optional[str], optional): The API key for authenticating
34-
with the LMStudio service. (default: :obj:`None`).
33+
api_key (Optional[str], optional): The API key for authenticating with
34+
the model service. LMStudio doesn't need API key, it would be
35+
ignored if set. (default: :obj:`None`)
3536
url (Optional[str], optional): The url to the LMStudio service.
3637
(default: :obj:`None`)
3738
token_counter (Optional[BaseTokenCounter], optional): Token counter to

camel/models/model_factory.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ def create(
6767
token_counter: Optional[BaseTokenCounter] = None,
6868
api_key: Optional[str] = None,
6969
url: Optional[str] = None,
70-
timeout: Optional[int] = None,
70+
timeout: Optional[float] = None,
7171
) -> BaseModelBackend:
7272
r"""Creates an instance of `BaseModelBackend` of the specified type.
7373

camel/models/sglang_model.py

+5-3
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,9 @@ def _start_server(self) -> None:
119119
)
120120

121121
server_process = _execute_shell_command(cmd)
122-
_wait_for_server("http://localhost:30000")
122+
_wait_for_server(
123+
base_url="http://localhost:30000", timeout=self._timeout
124+
)
123125
self._url = "http://127.0.0.1:30000/v1"
124126
self.server_process = server_process # type: ignore[assignment]
125127
# Start the inactivity monitor in a background thread
@@ -356,12 +358,12 @@ def _execute_shell_command(command: str) -> subprocess.Popen:
356358
return subprocess.Popen(parts, text=True, stderr=subprocess.STDOUT)
357359

358360

359-
def _wait_for_server(base_url: str, timeout: Optional[int] = 30) -> None:
361+
def _wait_for_server(base_url: str, timeout: Optional[float] = 30) -> None:
360362
r"""Wait for the server to be ready by polling the /v1/models endpoint.
361363
362364
Args:
363365
base_url (str): The base URL of the server
364-
timeout (Optional[int]): Maximum time to wait in seconds.
366+
timeout (Optional[float]): Maximum time to wait in seconds.
365367
(default: :obj:`30`)
366368
"""
367369
import requests

test/toolkits/test_video_analysis_toolkit.py

+3-11
Original file line numberDiff line numberDiff line change
@@ -274,7 +274,7 @@ def test_extract_keyframes_with_scenes(mock_video_toolkit):
274274
):
275275
mock_scene_manager.get_scene_list.return_value = mock_scene_list
276276

277-
result = mock_video_toolkit._extract_keyframes("/path/to/video.mp4", 2)
277+
result = mock_video_toolkit._extract_keyframes("/path/to/video.mp4")
278278

279279
assert len(result) == 2
280280
assert result[0] == mock_frame
@@ -311,7 +311,7 @@ def test_extract_keyframes_no_scenes(mock_video_toolkit):
311311
),
312312
):
313313
# Call the method directly to verify our patch works
314-
frames = mock_video_toolkit._extract_keyframes("/path/to/video.mp4", 2)
314+
frames = mock_video_toolkit._extract_keyframes("/path/to/video.mp4")
315315

316316
# Assert that our patch returns the expected number of frames
317317
assert len(frames) == 2
@@ -355,9 +355,7 @@ def test_extract_keyframes_invalid_num_frames(mock_video_toolkit):
355355
):
356356
mock_scene_manager.get_scene_list.return_value = mock_scene_list
357357

358-
result = mock_video_toolkit._extract_keyframes(
359-
"/path/to/video.mp4", -1
360-
)
358+
result = mock_video_toolkit._extract_keyframes("/path/to/video.mp4")
361359

362360
assert len(result) == 1
363361
assert result[0] == mock_frame
@@ -445,12 +443,6 @@ def test_ask_question_about_video_no_audio(mock_video_toolkit):
445443
assert "No audio transcription available" in call_args.content
446444

447445

448-
def test_ask_question_about_video_empty_question(mock_video_toolkit):
449-
r"""Test asking an empty question"""
450-
with pytest.raises(ValueError):
451-
mock_video_toolkit.ask_question_about_video("/path/to/video.mp4", "")
452-
453-
454446
def test_ask_question_about_video_file_not_found(mock_video_toolkit):
455447
r"""Test handling file not found error"""
456448
with patch("os.path.exists", return_value=False):

0 commit comments

Comments
 (0)