Skip to content

Fix: Enable Gemini support via GOOGLE_API_KEY fallback and explicit api_key injection #2805

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 45 additions & 9 deletions src/crewai/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,9 +50,16 @@
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException,
)
import logging


load_dotenv()

# Compatibility shim: LiteLLM's Gemini provider reads GEMINI_API_KEY, but
# Google AI Studio users commonly export GOOGLE_API_KEY instead.  Mirror the
# key across so either variable works; an explicitly set GEMINI_API_KEY is
# never overwritten.
if "GEMINI_API_KEY" not in os.environ and "GOOGLE_API_KEY" in os.environ:
    os.environ["GEMINI_API_KEY"] = os.environ["GOOGLE_API_KEY"]
    # Log via a module-level logger, not the root logger: library code should
    # not emit through (or implicitly configure) the root logger, and a named
    # logger keeps the message attributable and filterable by the host app.
    logging.getLogger(__name__).info(
        "[CrewAI Gemini Patch] Set GEMINI_API_KEY from GOOGLE_API_KEY"
    )


class FilteredStream:
def __init__(self, original_stream):
Expand Down Expand Up @@ -246,6 +253,17 @@ class AccumulatedToolArgs(BaseModel):


class LLM(BaseLLM):
"""
LLM class for handling language model interactions via LiteLLM.

Features:
- Supports multiple model providers (e.g., OpenAI, Gemini, Anthropic)
- Automatically uses GOOGLE_API_KEY if GEMINI_API_KEY is not explicitly set
- Injects the resolved API key directly into the LLM completion parameters
- Ensures compatibility with both legacy and AI Studio-style key environments
- Designed for use in CrewAI agent workflows and tool-based LLM interactions
"""

def __init__(
self,
model: str,
Expand Down Expand Up @@ -307,6 +325,15 @@ def __init__(
else:
self.stop = stop

# Fallback logic
if "GEMINI_API_KEY" in os.environ:
api_key = os.environ["GEMINI_API_KEY"]
elif "GOOGLE_API_KEY" in os.environ:
api_key = os.environ["GOOGLE_API_KEY"]
os.environ["GEMINI_API_KEY"] = api_key

self.api_key = api_key

self.set_callbacks(callbacks)
self.set_env_callbacks()

Expand All @@ -326,19 +353,26 @@ def _prepare_completion_params(
self,
messages: Union[str, List[Dict[str, str]]],
tools: Optional[List[dict]] = None,
**kwargs: Any,
) -> Dict[str, Any]:
"""Prepare parameters for the completion call.
"""
Prepare parameters for the LLM completion API call.

This method:
- Formats input messages for the model provider
- Accepts optional tool definitions
- Injects API key into the request (fallback to GOOGLE_API_KEY if GEMINI_API_KEY is not set)
- Merges additional keyword arguments passed to support flexibility

Args:
messages: Input messages for the LLM
tools: Optional list of tool schemas
callbacks: Optional list of callback functions
available_functions: Optional dict of available functions
messages (Union[str, List[Dict[str, str]]]): Prompt or structured messages to send to the LLM.
tools (Optional[List[dict]]): Optional tool definitions (for function calling).
**kwargs (Any): Additional optional parameters for the completion call.

Returns:
Dict[str, Any]: Parameters for the completion call
Dict[str, Any]: Final parameters dictionary to be passed to `litellm.completion(...)`.
"""
# --- 1) Format messages according to provider requirements
# Format messages
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
formatted_messages = self._format_messages_for_provider(messages)
Expand Down Expand Up @@ -370,8 +404,10 @@ def _prepare_completion_params(
**self.additional_params,
}

# Remove None values from params
return {k: v for k, v in params.items() if v is not None}
# Remove None values
params = {k: v for k, v in params.items() if v is not None}

return params

def _handle_streaming_response(
self,
Expand Down