|
17 | 17 | get_vertex_location_from_url, |
18 | 18 | get_vertex_project_id_from_url, |
19 | 19 | set_schema_property_ordering, |
| 20 | + _get_vertex_url |
20 | 21 | ) |
21 | 22 |
|
22 | 23 |
|
@@ -292,3 +293,38 @@ def test_process_items_basic(): |
292 | 293 | } |
293 | 294 | process_items(schema) |
294 | 295 | assert schema["properties"]["nested"]["items"] == {"type": "object"} |
| 296 | + |
def test_get_vertex_url_global_region(stream):
    """
    Verify that _get_vertex_url builds the aiplatform.googleapis.com URL and
    picks the right endpoint when vertex_location is 'global' in chat mode.
    """
    # Stub model-name resolution so the test exercises only URL construction.
    with patch(
        "litellm.VertexGeminiConfig.get_model_for_vertex_ai_url",
        side_effect=lambda model: model,
    ):
        url, endpoint = _get_vertex_url(
            mode="chat",
            model="gemini-1.5-pro-preview-0409",
            stream=stream,
            vertex_project="test-g-project",
            vertex_location="global",
            vertex_api_version="v1",
        )

    base = (
        "https://aiplatform.googleapis.com/v1/projects/test-g-project"
        "/locations/global/publishers/google/models/gemini-1.5-pro-preview-0409"
    )
    # Streaming uses the SSE variant of the endpoint; non-streaming does not.
    want_endpoint = "streamGenerateContent" if stream else "generateContent"
    want_url = f"{base}:{want_endpoint}" + ("?alt=sse" if stream else "")

    assert endpoint == want_endpoint
    assert url == want_url
0 commit comments