Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 18 additions & 10 deletions deepeval/config/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -250,9 +250,7 @@ def __setattr__(self, name: str, value):
return super().__setattr__(name, value)

model_config = SettingsConfigDict(
extra="ignore",
case_sensitive=True,
validate_assignment=True,
extra="ignore", case_sensitive=True, validate_assignment=True,
)

#
Expand All @@ -272,8 +270,7 @@ def __setattr__(self, name: str, value):
description="Extra PYTHONPATH used by the CLI runner (default: current project '.').",
)
CONFIDENT_REGION: Optional[str] = Field(
None,
description="Optional Confident AI region hint (uppercased).",
None, description="Optional Confident AI region hint (uppercased).",
)
CONFIDENT_OPEN_BROWSER: Optional[bool] = Field(
True,
Expand Down Expand Up @@ -361,8 +358,7 @@ def __setattr__(self, name: str, value):
description="CUDA debug toggle (forces synchronous CUDA ops). Useful for debugging GPU errors.",
)
CUDA_VISIBLE_DEVICES: Optional[str] = Field(
None,
description="CUDA device visibility mask (e.g. '0' or '0,1').",
None, description="CUDA device visibility mask (e.g. '0' or '0,1').",
)
TOKENIZERS_PARALLELISM: Optional[bool] = Field(
None,
Expand Down Expand Up @@ -529,6 +525,19 @@ def __setattr__(self, name: str, value):
GROK_COST_PER_OUTPUT_TOKEN: Optional[float] = Field(
None, description="Grok output token cost (used for cost reporting)."
)
# Groq provider configuration.
# NOTE(review): mirrors the Grok provider field set directly above
# (USE_*_MODEL toggle, API key, model name, per-token costs).
USE_GROQ_MODEL: Optional[bool] = Field(
    None, description="Select Groq as the active LLM provider."
)
# SecretStr so the key is masked in reprs and logs.
GROQ_API_KEY: Optional[SecretStr] = Field(None, description="Groq API key.")
GROQ_MODEL_NAME: Optional[str] = Field(None, description="Groq model name.")
# Per-token costs, used only for cost reporting — presumably USD per
# single token (matching the other providers' cost fields); confirm.
GROQ_COST_PER_INPUT_TOKEN: Optional[float] = Field(
    None, description="Groq input token cost (used for cost reporting)."
)
GROQ_COST_PER_OUTPUT_TOKEN: Optional[float] = Field(
    None, description="Groq output token cost (used for cost reporting)."
)

# LiteLLM
USE_LITELLM: Optional[bool] = Field(
None, description="Select LiteLLM as the active LLM provider."
Expand Down Expand Up @@ -996,6 +1005,7 @@ def DEEPEVAL_TASK_GATHER_BUFFER_SECONDS(self) -> float:
"USE_GEMINI_MODEL",
"USE_MOONSHOT_MODEL",
"USE_GROK_MODEL",
"USE_GROQ_MODEL",
"USE_DEEPSEEK_MODEL",
"USE_LITELLM",
"USE_AZURE_OPENAI_EMBEDDING",
Expand Down Expand Up @@ -1396,9 +1406,7 @@ def __exit__(self, exc_type, exc, tb):
updates = {k: after[k] for k in changed_keys}

if "LOG_LEVEL" in updates:
from deepeval.config.logging import (
apply_deepeval_log_level,
)
from deepeval.config.logging import apply_deepeval_log_level

apply_deepeval_log_level()

Expand Down
1 change: 1 addition & 0 deletions deepeval/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ class ProviderSlug(str, Enum):
LOCAL = "local"
OLLAMA = "ollama"
OPENROUTER = "openrouter"
GROQ = "groq"


def slugify(value: Union[str, ProviderSlug]) -> str:
Expand Down
2 changes: 2 additions & 0 deletions deepeval/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
LiteLLMModel,
KimiModel,
GrokModel,
GroqModel,
DeepSeekModel,
PortkeyModel,
OpenRouterModel,
Expand All @@ -39,6 +40,7 @@
"LiteLLMModel",
"KimiModel",
"GrokModel",
"GroqModel",
"DeepSeekModel",
"OpenAIEmbeddingModel",
"AzureOpenAIEmbeddingModel",
Expand Down
2 changes: 2 additions & 0 deletions deepeval/models/llms/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
from .deepseek_model import DeepSeekModel
from .portkey_model import PortkeyModel
from .openrouter_model import OpenRouterModel
from .groq_model import GroqModel

__all__ = [
"AzureOpenAIModel",
Expand All @@ -26,4 +27,5 @@
"DeepSeekModel",
"PortkeyModel",
"OpenRouterModel",
"GroqModel",
]
30 changes: 30 additions & 0 deletions deepeval/models/llms/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

from deepeval.models.base_model import DeepEvalModelData


DEFAULT_GPT_MODEL = "gpt-5.4"
# OpenRouter uses provider/model format (e.g., "openai/gpt-4", "anthropic/claude-3-opus")
# DeepEval does not validate OpenRouter model strings.
Expand Down Expand Up @@ -2172,3 +2173,32 @@ def make_model_data(**kwargs: Any) -> ModelDataFactory:
),
}
)

# Registry of Groq-hosted chat models and their capability/pricing metadata.
# Prices are written as (list price per 1M tokens) / 1e6, i.e. cost per
# single token — presumably USD; TODO(review) confirm against Groq's
# current pricing page, and note these model IDs may be deprecated upstream.
GROQ_MODELS_DATA = ModelDataRegistry(
    {
        # Llama 3 8B, 8192-token context window (per the model ID).
        "llama3-8b-8192": make_model_data(
            supports_log_probs=False,
            supports_multimodal=False,
            supports_structured_outputs=True,
            supports_json=True,
            input_price=0.05 / 1e6,
            output_price=0.08 / 1e6,
        ),
        # Llama 3 70B, 8192-token context window.
        "llama3-70b-8192": make_model_data(
            supports_log_probs=False,
            supports_multimodal=False,
            supports_structured_outputs=True,
            supports_json=True,
            input_price=0.59 / 1e6,
            output_price=0.79 / 1e6,
        ),
        # Mixtral 8x7B, 32768-token context window; same price both directions.
        "mixtral-8x7b-32768": make_model_data(
            supports_log_probs=False,
            supports_multimodal=False,
            supports_structured_outputs=True,
            supports_json=True,
            input_price=0.24 / 1e6,
            output_price=0.24 / 1e6,
        ),
    }
)
Loading
Loading