Skip to content

Commit 6b5c90e

Browse files
committed
fix: updated docs to showcase how we can do parallel tool calling with gemini
1 parent 71f5b76 commit 6b5c90e

File tree

3 files changed

+84
-40
lines changed

3 files changed

+84
-40
lines changed

docs/concepts/parallel.md

Lines changed: 77 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -5,58 +5,101 @@ description: Learn about OpenAI's experimental parallel function calling to redu
55

66
# Parallel Tools
77

8-
One of the latest capabilities that OpenAI has recently introduced is parallel function calling.
9-
To learn more you can read up on [this](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
8+
Parallel Tool Calling is a feature that allows you to call multiple functions in a single request. This makes it faster to get a response from the language model, especially if your tool calls are independent of each other.
109

1110
!!! warning "Experimental Feature"
1211

13-
This feature is currently in preview and is subject to change. only supported by the `gpt-4-turbo-preview` model.
12+
Parallel tool calling is only supported by Gemini and OpenAI at the moment.
1413

1514
## Understanding Parallel Function Calling
1615

1716
By using parallel function calling, which allows you to call multiple functions in a single request, you can significantly reduce the latency of your application without having to resort to tricks in how one builds a schema.
1817

19-
```python hl_lines="19 31"
20-
from __future__ import annotations
18+
=== "OpenAI"
2119

22-
import openai
23-
import instructor
20+
```python hl_lines="19 31"
21+
from __future__ import annotations
2422

25-
from typing import Iterable, Literal
26-
from pydantic import BaseModel
23+
import openai
24+
import instructor
2725

26+
from typing import Iterable, Literal
27+
from pydantic import BaseModel
2828

29-
class Weather(BaseModel):
30-
location: str
31-
units: Literal["imperial", "metric"]
3229

30+
class Weather(BaseModel):
31+
location: str
32+
units: Literal["imperial", "metric"]
3333

34-
class GoogleSearch(BaseModel):
35-
query: str
3634

35+
class GoogleSearch(BaseModel):
36+
query: str
3737

38-
client = instructor.from_openai(
39-
openai.OpenAI(), mode=instructor.Mode.PARALLEL_TOOLS
40-
) # (1)!
4138

42-
function_calls = client.chat.completions.create(
43-
model="gpt-4-turbo-preview",
44-
messages=[
45-
{"role": "system", "content": "You must always use tools"},
46-
{
47-
"role": "user",
48-
"content": "What is the weather in toronto and dallas and who won the super bowl?",
49-
},
50-
],
51-
response_model=Iterable[Weather | GoogleSearch], # (2)!
52-
)
39+
client = instructor.from_openai(
40+
openai.OpenAI(), mode=instructor.Mode.PARALLEL_TOOLS
41+
) # (1)!
5342

54-
for fc in function_calls:
55-
print(fc)
56-
#> location='Toronto' units='metric'
57-
#> location='Dallas' units='imperial'
58-
#> query='who won the super bowl'
59-
```
43+
function_calls = client.chat.completions.create(
44+
model="gpt-4o-mini",
45+
messages=[
46+
{"role": "system", "content": "You must always use tools"},
47+
{
48+
"role": "user",
49+
"content": "What is the weather in toronto and dallas and who won the super bowl?",
50+
},
51+
],
52+
response_model=Iterable[Weather | GoogleSearch], # (2)!
53+
)
54+
55+
for fc in function_calls:
56+
print(fc)
57+
#> location='Toronto' units='metric'
58+
#> location='Dallas' units='imperial'
59+
#> query='who won the super bowl'
60+
```
61+
62+
=== "Vertex AI"
63+
64+
```python
65+
import instructor
66+
import vertexai
67+
from vertexai.generative_models import GenerativeModel
68+
from typing import Iterable, Literal
69+
from pydantic import BaseModel
70+
71+
vertexai.init()
72+
73+
class Weather(BaseModel):
74+
location: str
75+
units: Literal["imperial", "metric"]
76+
77+
78+
class GoogleSearch(BaseModel):
79+
query: str
80+
81+
82+
client = instructor.from_vertexai(
83+
GenerativeModel("gemini-1.5-pro-preview-0409"),
84+
mode=instructor.Mode.VERTEXAI_PARALLEL_TOOLS
85+
) # (1)!
86+
87+
function_calls = client.create(
88+
messages=[
89+
{
90+
"role": "user",
91+
"content": "What is the weather in toronto and dallas and who won the super bowl?",
92+
},
93+
],
94+
response_model=Iterable[Weather | GoogleSearch], # (2)!
95+
)
96+
97+
for fc in function_calls:
98+
print(fc)
99+
#> location='Toronto' units='metric'
100+
#> location='Dallas' units='imperial'
101+
#> query='who won the super bowl'
102+
```
60103

61104
1. Set the mode to `PARALLEL_TOOLS` to enable parallel function calling.
62105
2. Set the response model to `Iterable[Weather | GoogleSearch]` to indicate that the response will be a list of `Weather` and `GoogleSearch` objects. This is necessary because the response will be a list of objects, and we need to specify the types of the objects in the list.

instructor/dsl/parallel.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,9 @@ def from_response(
5454
validation_context: Optional[Any] = None,
5555
strict: Optional[bool] = None,
5656
) -> Generator[BaseModel, None, None]:
57-
assert mode == Mode.VERTEXAI_PARALLEL_TOOLS, "Mode must be VERTEXAI_PARALLEL_TOOLS"
57+
assert (
58+
mode == Mode.VERTEXAI_PARALLEL_TOOLS
59+
), "Mode must be VERTEXAI_PARALLEL_TOOLS"
5860

5961
if not response or not response.candidates:
6062
return
@@ -64,9 +66,7 @@ def from_response(
6466
continue
6567

6668
for part in candidate.content.parts:
67-
if (hasattr(part, 'function_call') and
68-
part.function_call is not None):
69-
69+
if hasattr(part, "function_call") and part.function_call is not None:
7070
name = part.function_call.name
7171
arguments = part.function_call.args
7272

@@ -116,6 +116,7 @@ def ParallelModel(typehint: type[Iterable[T]]) -> ParallelBase:
116116
the_types = get_types_array(typehint)
117117
return ParallelBase(*[model for model in the_types])
118118

119+
119120
def VertexAIParallelModel(typehint: type[Iterable[T]]) -> VertexAIParallelBase:
120121
the_types = get_types_array(typehint)
121122
return VertexAIParallelBase(*[model for model in the_types])

tests/llm/test_vertexai/test_modes.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ class Item(BaseModel):
1313

1414

1515
class Order(BaseModel):
16-
items: list[Item] = Field(..., default_factory=list)
16+
items: list[Item]
1717
customer: str
1818

1919

@@ -54,7 +54,7 @@ class Book(BaseModel):
5454

5555

5656
class LibraryRecord(BaseModel):
57-
books: list[Book] = Field(..., default_factory=list)
57+
books: list[Book]
5858
visitor: str
5959
library_id: str
6060

0 commit comments

Comments
 (0)