Skip to content

Commit c0b09fa

Browse files
authored
Merge branch 'confident-ai:main' into main
2 parents ee4320d + 7bf6aa5 commit c0b09fa

6 files changed

Lines changed: 14 additions & 14 deletions

File tree

CITATION.cff

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ authors:
66
- family-names: Vongthongsri
77
given-names: Kritin
88
title: deepeval
9-
version: 3.0.1
9+
version: 3.0.2
1010
date-released: "2025-05-28"
1111
url: https://confident-ai.com
1212
repository-code: https://github.com/confident-ai/deepeval

deepeval/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__: str = "3.0.1"
1+
__version__: str = "3.0.2"

deepeval/cli/main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
from enum import Enum
1010
from deepeval.key_handler import KEY_FILE_HANDLER, KeyValues
1111
from deepeval.cli.recommend import app as recommend_app
12-
from deepeval.telemetry import capture_login_event, get_logged_in_with
12+
from deepeval.telemetry import capture_login_event
1313
from deepeval.cli.test import app as test_app
1414
from deepeval.cli.server import start_server
1515

deepeval/models/llms/azure_model.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -27,8 +27,8 @@
2727
class AzureOpenAIModel(DeepEvalBaseLLM):
2828
def __init__(
2929
self,
30-
model_name: Optional[str] = None,
3130
deployment_name: Optional[str] = None,
31+
model_name: Optional[str] = None,
3232
azure_openai_api_key: Optional[str] = None,
3333
openai_api_version: Optional[str] = None,
3434
azure_endpoint: Optional[str] = None,
@@ -79,7 +79,7 @@ def generate(
7979
if schema:
8080
if self.model_name in structured_outputs_models:
8181
completion = client.beta.chat.completions.parse(
82-
model=self.model_name,
82+
model=self.deployment_name,
8383
messages=[
8484
{"role": "user", "content": prompt},
8585
],
@@ -96,7 +96,7 @@ def generate(
9696
return structured_output, cost
9797
if self.model_name in json_mode_models:
9898
completion = client.beta.chat.completions.parse(
99-
model=self.model_name,
99+
model=self.deployment_name,
100100
messages=[
101101
{"role": "user", "content": prompt},
102102
],
@@ -113,7 +113,7 @@ def generate(
113113
return schema.model_validate(json_output), cost
114114

115115
completion = client.chat.completions.create(
116-
model=self.model_name,
116+
model=self.deployment_name,
117117
messages=[
118118
{"role": "user", "content": prompt},
119119
],
@@ -142,7 +142,7 @@ async def a_generate(
142142
if schema:
143143
if self.model_name in structured_outputs_models:
144144
completion = await client.beta.chat.completions.parse(
145-
model=self.model_name,
145+
model=self.deployment_name,
146146
messages=[
147147
{"role": "user", "content": prompt},
148148
],
@@ -159,7 +159,7 @@ async def a_generate(
159159
return structured_output, cost
160160
if self.model_name in json_mode_models:
161161
completion = await client.beta.chat.completions.parse(
162-
model=self.model_name,
162+
model=self.deployment_name,
163163
messages=[
164164
{"role": "user", "content": prompt},
165165
],
@@ -176,7 +176,7 @@ async def a_generate(
176176
return schema.model_validate(json_output), cost
177177

178178
completion = await client.chat.completions.create(
179-
model=self.model_name,
179+
model=self.deployment_name,
180180
messages=[
181181
{"role": "user", "content": prompt},
182182
],
@@ -210,7 +210,7 @@ def generate_raw_response(
210210
# Generate completion
211211
client = self.load_model(async_mode=False)
212212
completion = client.chat.completions.create(
213-
model=self.model_name,
213+
model=self.deployment_name,
214214
messages=[{"role": "user", "content": prompt}],
215215
temperature=self.temperature,
216216
logprobs=True,
@@ -236,7 +236,7 @@ async def a_generate_raw_response(
236236
# Generate completion
237237
client = self.load_model(async_mode=True)
238238
completion = await client.chat.completions.create(
239-
model=self.model_name,
239+
model=self.deployment_name,
240240
messages=[{"role": "user", "content": prompt}],
241241
temperature=self.temperature,
242242
logprobs=True,

docs/docs/evaluation-end-to-end-llm-evals.mdx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -175,7 +175,7 @@ There are **TWO** mandatory and **ONE** optional parameter when calling the `ass
175175

176176
:::tip
177177

178-
If you're logged into Confident AI, you'll also receive a fully sharable [LLM testing report](https://documentation.confident-ai.com/llm-evaluation/testing-reports) on the cloud. Run this in the CLI:
178+
If you're logged into Confident AI, you'll also receive a fully sharable [LLM testing report](https://documentation.confident-ai.com/llm-evaluation/evaluation-features/testing-reports) on the cloud. Run this in the CLI:
179179

180180
```bash
181181
deepeval login

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "deepeval"
3-
version = "3.0.1"
3+
version = "3.0.2"
44
description = "The LLM Evaluation Framework"
55
authors = ["Jeffrey Ip <jeffreyip@confident-ai.com>"]
66
license = "Apache-2.0"

0 commit comments

Comments (0)