Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
45 changes: 45 additions & 0 deletions mem0/configs/embeddings/novita.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
from typing import Optional

from mem0.configs.embeddings.base import BaseEmbedderConfig


class NovitaEmbeddingConfig(BaseEmbedderConfig):
    """
    Embedder configuration for the Novita provider.

    Extends BaseEmbedderConfig with a dedicated ``novita_base_url`` option
    while forwarding every shared setting to the base class.
    """

    def __init__(
        self,
        # Base parameters
        model: Optional[str] = None,
        api_key: Optional[str] = None,
        embedding_dims: Optional[int] = None,
        # OpenAI-compatible base URL option
        openai_base_url: Optional[str] = None,
        # Novita-specific parameters
        novita_base_url: Optional[str] = None,
        http_client_proxies: Optional[str] = None,
    ):
        """
        Initialize Novita embedding configuration.

        Args:
            model: Novita embedding model to use, defaults to None
            api_key: Novita API key, defaults to None
            embedding_dims: Number of embedding dimensions, defaults to None
            openai_base_url: OpenAI-compatible base URL, defaults to None
            novita_base_url: Novita-specific base URL, defaults to None
            http_client_proxies: HTTP client proxy settings, defaults to None
        """
        # Everything except the Novita-specific URL is handled by the base
        # configuration class.
        shared_options = dict(
            model=model,
            api_key=api_key,
            embedding_dims=embedding_dims,
            openai_base_url=openai_base_url,
            http_client_proxies=http_client_proxies,
        )
        super().__init__(**shared_options)

        # Option specific to the Novita backend.
        self.novita_base_url = novita_base_url
56 changes: 56 additions & 0 deletions mem0/configs/llms/novita.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
from typing import Optional

from mem0.configs.llms.base import BaseLlmConfig


class NovitaConfig(BaseLlmConfig):
    """
    LLM configuration for the Novita provider.

    Extends BaseLlmConfig with the ``novita_base_url`` option while
    delegating all shared generation settings to the base class.
    """

    def __init__(
        self,
        # Base parameters
        model: Optional[str] = None,
        temperature: float = 0.1,
        api_key: Optional[str] = None,
        max_tokens: int = 2000,
        top_p: float = 0.1,
        top_k: int = 1,
        enable_vision: bool = False,
        vision_details: Optional[str] = "auto",
        http_client_proxies: Optional[dict] = None,
        # Novita-specific parameters
        novita_base_url: Optional[str] = None,
    ):
        """
        Initialize Novita configuration.

        Args:
            model: Novita model to use, defaults to None
            temperature: Controls randomness, defaults to 0.1
            api_key: Novita API key, defaults to None
            max_tokens: Maximum tokens to generate, defaults to 2000
            top_p: Nucleus sampling parameter, defaults to 0.1
            top_k: Top-k sampling parameter, defaults to 1
            enable_vision: Enable vision capabilities, defaults to False
            vision_details: Vision detail level, defaults to "auto"
            http_client_proxies: HTTP client proxy settings, defaults to None
            novita_base_url: Novita API base URL, defaults to None
        """
        # All shared generation settings are owned by the base class.
        shared_options = dict(
            model=model,
            temperature=temperature,
            api_key=api_key,
            max_tokens=max_tokens,
            top_p=top_p,
            top_k=top_k,
            enable_vision=enable_vision,
            vision_details=vision_details,
            http_client_proxies=http_client_proxies,
        )
        super().__init__(**shared_options)

        # Option specific to the Novita backend.
        self.novita_base_url = novita_base_url
49 changes: 49 additions & 0 deletions mem0/embeddings/novita.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
import os
import warnings
from typing import Literal, Optional

from openai import OpenAI

from mem0.configs.embeddings.base import BaseEmbedderConfig
from mem0.embeddings.base import EmbeddingBase


class NovitaEmbedding(EmbeddingBase):
    """Embedding backend for Novita's OpenAI-compatible embeddings API."""

    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
        """
        Initialize the Novita embedding client.

        Args:
            config: Embedder configuration; fields left unset fall back to the
                provider defaults below.
        """
        super().__init__(config)

        # Provider defaults applied when the user did not configure them.
        self.config.model = self.config.model or "nomic-embed-text"
        self.config.embedding_dims = self.config.embedding_dims or 768

        api_key = self.config.api_key or os.getenv("NOVITA_API_KEY")

        legacy_base = os.getenv("NOVITA_API_BASE")
        if legacy_base:
            warnings.warn(
                "The environment variable 'NOVITA_API_BASE' is deprecated and will be removed in a future version. "
                "Please use 'NOVITA_API_URL' instead.",
                DeprecationWarning,
                stacklevel=2,  # attribute the warning to the caller, not this module
            )

        # Resolution order: explicit config value, then the replacement env var
        # NOVITA_API_URL, then the deprecated NOVITA_API_BASE, then the public
        # default. The replacement variable deliberately takes precedence over
        # the deprecated one, so users who migrate get the value they set even
        # while the old variable is still present.
        base_url = (
            self.config.novita_base_url
            or os.getenv("NOVITA_API_URL")
            or legacy_base
            or "https://api.novita.ai/openai"
        )

        self.client = OpenAI(api_key=api_key, base_url=base_url)

    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
        """
        Get the embedding for the given text using Novita.

        Args:
            text (str): The text to embed.
            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update".
                Defaults to None. Currently unused; kept for interface parity with other embedders.

        Returns:
            list: The embedding vector.
        """
        # Collapse newlines: single-line input is the convention for
        # OpenAI-compatible embedding endpoints.
        text = text.replace("\n", " ")
        response = self.client.embeddings.create(
            input=[text],
            model=self.config.model,
            dimensions=self.config.embedding_dims,
        )
        return response.data[0].embedding
107 changes: 107 additions & 0 deletions mem0/llms/novita.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
import json
import os
import warnings
from typing import Dict, List, Optional, Union

from openai import OpenAI

from mem0.configs.llms.base import BaseLlmConfig
from mem0.configs.llms.novita import NovitaConfig
from mem0.llms.base import LLMBase
from mem0.memory.utils import extract_json


class NovitaLLM(LLMBase):
    """LLM backend for Novita's OpenAI-compatible chat completions API."""

    def __init__(self, config: Optional[Union[BaseLlmConfig, NovitaConfig, Dict]] = None):
        """
        Initialize the Novita LLM client.

        Args:
            config: A NovitaConfig, a plain BaseLlmConfig (upgraded in place),
                a dict of config kwargs, or None for all defaults.
        """
        # Normalize whatever the caller passed into a NovitaConfig.
        if config is None:
            config = NovitaConfig()
        elif isinstance(config, dict):
            config = NovitaConfig(**config)
        elif isinstance(config, BaseLlmConfig) and not isinstance(config, NovitaConfig):
            # Upgrade a plain BaseLlmConfig to a NovitaConfig.
            # NOTE(review): this forwards config.http_client (the built client
            # object) as http_client_proxies (the proxy settings) — confirm
            # BaseLlmConfig accepts that, or forward the original proxy value.
            config = NovitaConfig(
                model=config.model,
                temperature=config.temperature,
                api_key=config.api_key,
                max_tokens=config.max_tokens,
                top_p=config.top_p,
                top_k=config.top_k,
                enable_vision=config.enable_vision,
                vision_details=config.vision_details,
                http_client_proxies=config.http_client,
            )

        super().__init__(config)

        if not self.config.model:
            # NOTE(review): this default looks like an OpenAI model id, not a
            # Novita-hosted model — verify the intended default for Novita.
            self.config.model = "gpt-4.1-nano-2025-04-14"

        api_key = self.config.api_key or os.getenv("NOVITA_API_KEY")

        legacy_base = os.getenv("NOVITA_API_BASE")
        if legacy_base:
            # Same deprecation path as mem0/embeddings/novita.py, so both
            # Novita backends resolve the base URL identically.
            warnings.warn(
                "The environment variable 'NOVITA_API_BASE' is deprecated and will be removed in a future version. "
                "Please use 'NOVITA_API_URL' instead.",
                DeprecationWarning,
                stacklevel=2,
            )

        # Resolution order: explicit config value, replacement env var,
        # deprecated env var, public default.
        base_url = (
            self.config.novita_base_url
            or os.getenv("NOVITA_API_URL")
            or legacy_base
            or "https://api.novita.ai/openai"
        )
        self.client = OpenAI(api_key=api_key, base_url=base_url)

    def _parse_response(self, response, tools):
        """
        Process the response based on whether tools are used or not.

        Args:
            response: The raw response from API.
            tools: The list of tools provided in the request.

        Returns:
            str or dict: Plain message content when no tools were supplied,
            otherwise a dict with "content" and a "tool_calls" list of
            {"name", "arguments"} entries (arguments parsed from JSON).
        """
        if not tools:
            return response.choices[0].message.content

        message = response.choices[0].message
        processed_response = {
            "content": message.content,
            "tool_calls": [],
        }

        for tool_call in message.tool_calls or []:
            processed_response["tool_calls"].append(
                {
                    "name": tool_call.function.name,
                    # extract_json tolerates fenced / noisy JSON payloads.
                    "arguments": json.loads(extract_json(tool_call.function.arguments)),
                }
            )

        return processed_response

    def generate_response(
        self,
        messages: List[Dict[str, str]],
        response_format=None,
        tools: Optional[List[Dict]] = None,
        tool_choice: str = "auto",
        **kwargs,
    ):
        """
        Generate a response based on the given messages using Novita.

        Args:
            messages (list): List of message dicts containing 'role' and 'content'.
            response_format (str or object, optional): Format of the response. Defaults to None.
            tools (list, optional): List of tools that the model can call. Defaults to None.
            tool_choice (str, optional): Tool choice method. Defaults to "auto".
            **kwargs: Additional Novita-specific parameters.

        Returns:
            str or dict: The generated response (see _parse_response).
        """
        params = self._get_supported_params(messages=messages, **kwargs)
        params.update(
            {
                "model": self.config.model,
                "messages": messages,
            }
        )

        # Forward response_format to the API; previously this parameter was
        # accepted but silently ignored.
        if response_format:
            params["response_format"] = response_format

        if tools:
            params["tools"] = tools
            params["tool_choice"] = tool_choice

        response = self.client.chat.completions.create(**params)
        return self._parse_response(response, tools)
8 changes: 7 additions & 1 deletion mem0/utils/factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
from mem0.configs.llms.deepseek import DeepSeekConfig
from mem0.configs.llms.lmstudio import LMStudioConfig
from mem0.configs.llms.ollama import OllamaConfig
from mem0.configs.llms.novita import NovitaConfig
from mem0.configs.llms.openai import OpenAIConfig
from mem0.configs.llms.vllm import VllmConfig
from mem0.configs.rerankers.base import BaseRerankerConfig
Expand Down Expand Up @@ -45,6 +46,7 @@ class LlmFactory:
"azure_openai_structured": ("mem0.llms.azure_openai_structured.AzureOpenAIStructuredLLM", AzureOpenAIConfig),
"gemini": ("mem0.llms.gemini.GeminiLLM", BaseLlmConfig),
"deepseek": ("mem0.llms.deepseek.DeepSeekLLM", DeepSeekConfig),
"novita": ("mem0.llms.novita.NovitaLLM", NovitaConfig),
"xai": ("mem0.llms.xai.XAILLM", BaseLlmConfig),
"sarvam": ("mem0.llms.sarvam.SarvamLLM", BaseLlmConfig),
"lmstudio": ("mem0.llms.lmstudio.LMStudioLLM", LMStudioConfig),
Expand Down Expand Up @@ -140,6 +142,7 @@ class EmbedderFactory:
"huggingface": "mem0.embeddings.huggingface.HuggingFaceEmbedding",
"azure_openai": "mem0.embeddings.azure_openai.AzureOpenAIEmbedding",
"gemini": "mem0.embeddings.gemini.GoogleGenAIEmbedding",
"novita": "mem0.embeddings.novita.NovitaEmbedding",
"vertexai": "mem0.embeddings.vertexai.VertexAIEmbedding",
"together": "mem0.embeddings.together.TogetherEmbedding",
"lmstudio": "mem0.embeddings.lmstudio.LMStudioEmbedding",
Expand Down Expand Up @@ -238,7 +241,10 @@ class RerankerFactory:
# Provider mappings with their config classes
provider_to_class = {
"cohere": ("mem0.reranker.cohere_reranker.CohereReranker", CohereRerankerConfig),
"sentence_transformer": ("mem0.reranker.sentence_transformer_reranker.SentenceTransformerReranker", SentenceTransformerRerankerConfig),
"sentence_transformer": (
"mem0.reranker.sentence_transformer_reranker.SentenceTransformerReranker",
SentenceTransformerRerankerConfig,
),
"zero_entropy": ("mem0.reranker.zero_entropy_reranker.ZeroEntropyReranker", ZeroEntropyRerankerConfig),
"llm_reranker": ("mem0.reranker.llm_reranker.LLMReranker", LLMRerankerConfig),
"huggingface": ("mem0.reranker.huggingface_reranker.HuggingFaceReranker", HuggingFaceRerankerConfig),
Expand Down