diff --git a/README.md b/README.md index bc026e71..c9c61172 100644 --- a/README.md +++ b/README.md @@ -103,6 +103,8 @@ voyager.learn() 3. After the world is created, press `Esc` key and press `Open to LAN`. 4. Select `Allow cheats: ON` and press `Start LAN World`. You will see the bot join the world soon. +Or if you are using dedicated OpenAI API deployments on Azure, you can configure your APIs by referring to [this](installation/run_with_azure_api_deployments.md). + # Resume from a checkpoint during learning If you stop the learning process and want to resume from a checkpoint later, you can instantiate Voyager by: diff --git a/installation/run_with_azure_api_deployments.md b/installation/run_with_azure_api_deployments.md new file mode 100644 index 00000000..1be5c89f --- /dev/null +++ b/installation/run_with_azure_api_deployments.md @@ -0,0 +1,52 @@ +# Run With Azure API Deployments + +If you are using dedicated OpenAI API deployments on Azure, you can run Voyager by: + +```python +from voyager import Voyager +from voyager.agents import AzureChatModelConfig, AzureOpenAIEmbeddingsConfig + +# You can also use mc_port instead of azure_login, but azure_login is highly recommended +azure_login = { + "client_id": "YOUR_CLIENT_ID", + "redirect_url": "https://127.0.0.1/auth-response", + "secret_value": "[OPTIONAL] YOUR_SECRET_VALUE", + "version": "fabric-loader-0.14.18-1.19", # the version Voyager is tested on +} +openai_api_key = "YOUR_API_KEY" + +# If you are using OpenAI LLM deployments on Azure, you can configure them here +azure_gpt_4_config = AzureChatModelConfig( + openai_api_base="BASE_URL_FOR_AZURE_GPT4_DEPLOYMENT", + openai_api_version="GPT4_API_VERSION", + deployment_name="GPT4_DEPLOYMENT_NAME", + openai_api_type="azure", + openai_api_key="YOUR_AZURE_API_KEY", # Not API keys with prefix "sk-" +) +azure_gpt_35_config = AzureChatModelConfig( + openai_api_base="BASE_URL_FOR_AZURE_GPT35_DEPLOYMENT", + openai_api_version="GPT35_API_VERSION", + 
deployment_name="GPT35_DEPLOYMENT_NAME", + openai_api_type="azure", + openai_api_key="YOUR_AZURE_API_KEY", # Not API keys with prefix "sk-" +) +azure_openai_embeddings_config = AzureOpenAIEmbeddingsConfig( + openai_api_base="BASE_URL_FOR_AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT", + model="MODEL_NAME", # Check https://platform.openai.com/docs/guides/embeddings/embedding-models + openai_api_type="azure", + deployment="YOUR_DEPLOYMENT_NAME", + openai_api_key="YOUR_AZURE_API_KEY", # Not API keys with prefix "sk-" +) + +voyager = Voyager( + azure_login=azure_login, + openai_api_type="azure", + azure_gpt_4_config=azure_gpt_4_config, + azure_gpt_35_config=azure_gpt_35_config, + azure_openai_embeddings_config=azure_openai_embeddings_config, +) + +# start lifelong learning +voyager.learn() +``` + diff --git a/voyager/agents/__init__.py b/voyager/agents/__init__.py index 3989162c..d74c9c46 100644 --- a/voyager/agents/__init__.py +++ b/voyager/agents/__init__.py @@ -2,3 +2,4 @@ from .critic import CriticAgent from .curriculum import CurriculumAgent from .skill import SkillManager +from .azure_model_config import AzureChatModelConfig, AzureOpenAIEmbeddingsConfig diff --git a/voyager/agents/action.py b/voyager/agents/action.py index b5b27ac0..d009b855 100644 --- a/voyager/agents/action.py +++ b/voyager/agents/action.py @@ -3,12 +3,13 @@ import voyager.utils as U from javascript import require -from langchain.chat_models import ChatOpenAI from langchain.prompts import SystemMessagePromptTemplate from langchain.schema import AIMessage, HumanMessage, SystemMessage from voyager.prompts import load_prompt from voyager.control_primitives_context import load_control_primitives_context +from voyager.agents.azure_model_config import AzureChatModelConfig +from voyager.agents.get_llm import get_llm class ActionAgent: @@ -21,6 +22,9 @@ def __init__( resume=False, chat_log=True, execution_error=True, + openai_api_type="", + azure_gpt_4_config=AzureChatModelConfig(), + 
azure_gpt_35_config=AzureChatModelConfig(), ): self.ckpt_dir = ckpt_dir self.chat_log = chat_log @@ -31,10 +35,13 @@ def __init__( self.chest_memory = U.load_json(f"{ckpt_dir}/action/chest_memory.json") else: self.chest_memory = {} - self.llm = ChatOpenAI( + self.llm = get_llm( model_name=model_name, temperature=temperature, - request_timeout=request_timout, + request_timout=request_timout, + openai_api_type=openai_api_type, + azure_gpt_4_config=azure_gpt_4_config, + azure_gpt_35_config=azure_gpt_35_config, ) def update_chest_memory(self, chests): diff --git a/voyager/agents/azure_model_config.py b/voyager/agents/azure_model_config.py new file mode 100644 index 00000000..49799508 --- /dev/null +++ b/voyager/agents/azure_model_config.py @@ -0,0 +1,19 @@ +from pydantic import BaseModel + + +class AzureChatModelConfig(BaseModel): + """AzureChatOpenAI config profile""" + openai_api_base: str = '' + openai_api_version: str = '' + deployment_name: str = '' + openai_api_type: str = 'azure' + openai_api_key: str = '' + + +class AzureOpenAIEmbeddingsConfig(BaseModel): + """OpenAIEmbeddings config profile""" + openai_api_base: str = '' + model: str = '' + openai_api_type: str = 'azure' + deployment: str = '' + openai_api_key: str = '' diff --git a/voyager/agents/critic.py b/voyager/agents/critic.py index 34639bb8..bf742db0 100644 --- a/voyager/agents/critic.py +++ b/voyager/agents/critic.py @@ -1,6 +1,7 @@ from voyager.prompts import load_prompt from voyager.utils.json_utils import fix_and_parse_json -from langchain.chat_models import ChatOpenAI +from voyager.agents.azure_model_config import AzureChatModelConfig +from voyager.agents.get_llm import get_llm from langchain.schema import HumanMessage, SystemMessage @@ -11,11 +12,17 @@ def __init__( temperature=0, request_timout=120, mode="auto", + openai_api_type="", + azure_gpt_4_config=AzureChatModelConfig(), + azure_gpt_35_config=AzureChatModelConfig(), ): - self.llm = ChatOpenAI( + self.llm = get_llm( model_name=model_name, 
temperature=temperature, - request_timeout=request_timout, + request_timout=request_timout, + openai_api_type=openai_api_type, + azure_gpt_4_config=azure_gpt_4_config, + azure_gpt_35_config=azure_gpt_35_config, ) assert mode in ["auto", "manual"] self.mode = mode diff --git a/voyager/agents/curriculum.py b/voyager/agents/curriculum.py index 769c409f..7b56931f 100644 --- a/voyager/agents/curriculum.py +++ b/voyager/agents/curriculum.py @@ -6,7 +6,8 @@ import voyager.utils as U from voyager.prompts import load_prompt from voyager.utils.json_utils import fix_and_parse_json -from langchain.chat_models import ChatOpenAI +from voyager.agents.azure_model_config import AzureChatModelConfig, AzureOpenAIEmbeddingsConfig +from voyager.agents.get_llm import get_llm from langchain.embeddings.openai import OpenAIEmbeddings from langchain.schema import HumanMessage, SystemMessage from langchain.vectorstores import Chroma @@ -25,16 +26,26 @@ def __init__( mode="auto", warm_up=None, core_inventory_items: str | None = None, + openai_api_type="", + azure_gpt_4_config=AzureChatModelConfig(), + azure_gpt_35_config=AzureChatModelConfig(), + azure_openai_embeddings_config=AzureOpenAIEmbeddingsConfig(), ): - self.llm = ChatOpenAI( + self.llm = get_llm( model_name=model_name, temperature=temperature, - request_timeout=request_timout, + request_timout=request_timout, + openai_api_type=openai_api_type, + azure_gpt_4_config=azure_gpt_4_config, + azure_gpt_35_config=azure_gpt_35_config, ) - self.qa_llm = ChatOpenAI( + self.qa_llm = get_llm( model_name=qa_model_name, temperature=qa_temperature, - request_timeout=request_timout, + request_timout=request_timout, + openai_api_type=openai_api_type, + azure_gpt_4_config=azure_gpt_4_config, + azure_gpt_35_config=azure_gpt_35_config, ) assert mode in [ "auto", @@ -57,7 +68,7 @@ def __init__( # vectordb for qa cache self.qa_cache_questions_vectordb = Chroma( collection_name="qa_cache_questions_vectordb", - embedding_function=OpenAIEmbeddings(), + 
embedding_function=OpenAIEmbeddings(**azure_openai_embeddings_config.dict()) if openai_api_type == "azure" else OpenAIEmbeddings(), persist_directory=f"{ckpt_dir}/curriculum/vectordb", ) assert self.qa_cache_questions_vectordb._collection.count() == len( diff --git a/voyager/agents/get_llm.py b/voyager/agents/get_llm.py new file mode 100644 index 00000000..7c0d31f7 --- /dev/null +++ b/voyager/agents/get_llm.py @@ -0,0 +1,27 @@ +from langchain.chat_models import ChatOpenAI, AzureChatOpenAI + +from voyager.agents.azure_model_config import AzureChatModelConfig + + +def get_llm( + model_name: str = "gpt-3.5-turbo", + temperature: float = 0, + request_timout: float = 120, + azure_gpt_4_config: AzureChatModelConfig = AzureChatModelConfig(), + azure_gpt_35_config: AzureChatModelConfig = AzureChatModelConfig(), + openai_api_type: str = "", +) -> ChatOpenAI | AzureChatOpenAI: + if openai_api_type == "azure": + azure_model_config = azure_gpt_4_config if model_name == "gpt-4" else azure_gpt_35_config + llm = AzureChatOpenAI( + temperature=temperature, + **azure_model_config.dict(), + ) + else: + llm = ChatOpenAI( + model_name=model_name, + temperature=temperature, + request_timeout=request_timout, + ) + + return llm diff --git a/voyager/agents/skill.py b/voyager/agents/skill.py index 08499c78..c1c2307e 100644 --- a/voyager/agents/skill.py +++ b/voyager/agents/skill.py @@ -1,7 +1,8 @@ import os import voyager.utils as U -from langchain.chat_models import ChatOpenAI +from voyager.agents.azure_model_config import AzureChatModelConfig, AzureOpenAIEmbeddingsConfig +from voyager.agents.get_llm import get_llm from langchain.embeddings.openai import OpenAIEmbeddings from langchain.schema import HumanMessage, SystemMessage from langchain.vectorstores import Chroma @@ -19,11 +20,18 @@ def __init__( request_timout=120, ckpt_dir="ckpt", resume=False, + openai_api_type="", + azure_gpt_4_config=AzureChatModelConfig(), + azure_gpt_35_config=AzureChatModelConfig(), + 
azure_openai_embeddings_config=AzureOpenAIEmbeddingsConfig(), ): - self.llm = ChatOpenAI( + self.llm = get_llm( model_name=model_name, temperature=temperature, - request_timeout=request_timout, + request_timout=request_timout, + openai_api_type=openai_api_type, + azure_gpt_4_config=azure_gpt_4_config, + azure_gpt_35_config=azure_gpt_35_config, ) U.f_mkdir(f"{ckpt_dir}/skill/code") U.f_mkdir(f"{ckpt_dir}/skill/description") @@ -39,7 +47,7 @@ def __init__( self.ckpt_dir = ckpt_dir self.vectordb = Chroma( collection_name="skill_vectordb", - embedding_function=OpenAIEmbeddings(), + embedding_function=OpenAIEmbeddings(**azure_openai_embeddings_config.dict()) if openai_api_type == "azure" else OpenAIEmbeddings(), persist_directory=f"{ckpt_dir}/skill/vectordb", ) assert self.vectordb._collection.count() == len(self.skills), ( diff --git a/voyager/voyager.py b/voyager/voyager.py index 61a0c207..1a29ecc5 100644 --- a/voyager/voyager.py +++ b/voyager/voyager.py @@ -11,6 +11,7 @@ from .agents import CriticAgent from .agents import CurriculumAgent from .agents import SkillManager +from .agents import AzureChatModelConfig, AzureOpenAIEmbeddingsConfig # TODO: remove event memory @@ -48,6 +49,10 @@ def __init__( ckpt_dir: str = "ckpt", skill_library_dir: str = None, resume: bool = False, + openai_api_type="", + azure_gpt_4_config=AzureChatModelConfig(), + azure_gpt_35_config=AzureChatModelConfig(), + azure_openai_embeddings_config=AzureOpenAIEmbeddingsConfig(), ): """ The main class for Voyager. 
@@ -123,6 +128,9 @@ def __init__( resume=resume, chat_log=action_agent_show_chat_log, execution_error=action_agent_show_execution_error, + openai_api_type=openai_api_type, + azure_gpt_4_config=azure_gpt_4_config, + azure_gpt_35_config=azure_gpt_35_config, ) self.action_agent_task_max_retries = action_agent_task_max_retries self.curriculum_agent = CurriculumAgent( @@ -136,12 +144,19 @@ def __init__( mode=curriculum_agent_mode, warm_up=curriculum_agent_warm_up, core_inventory_items=curriculum_agent_core_inventory_items, + openai_api_type=openai_api_type, + azure_gpt_4_config=azure_gpt_4_config, + azure_gpt_35_config=azure_gpt_35_config, + azure_openai_embeddings_config=azure_openai_embeddings_config, ) self.critic_agent = CriticAgent( model_name=critic_agent_model_name, temperature=critic_agent_temperature, request_timout=openai_api_request_timeout, mode=critic_agent_mode, + openai_api_type=openai_api_type, + azure_gpt_4_config=azure_gpt_4_config, + azure_gpt_35_config=azure_gpt_35_config, ) self.skill_manager = SkillManager( model_name=skill_manager_model_name, @@ -150,6 +165,10 @@ def __init__( request_timout=openai_api_request_timeout, ckpt_dir=skill_library_dir if skill_library_dir else ckpt_dir, resume=True if resume or skill_library_dir else False, + openai_api_type=openai_api_type, + azure_gpt_4_config=azure_gpt_4_config, + azure_gpt_35_config=azure_gpt_35_config, + azure_openai_embeddings_config=azure_openai_embeddings_config, ) self.recorder = U.EventRecorder(ckpt_dir=ckpt_dir, resume=resume) self.resume = resume