From f5f87d12ede70c4deae31c6beb02d0b090b1c4ff Mon Sep 17 00:00:00 2001 From: kong <26889548+k997@users.noreply.github.com> Date: Sun, 6 Aug 2023 08:36:31 +0800 Subject: [PATCH] [Feature] Run different projects with docker-compose profiles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit introduces the usage of Docker Compose profiles to run different projects with varying functionalities. By utilizing the profiles feature, we can now manage and switch between different project configurations easily. Changes made in this commit include: - Added Docker Compose profiles for organizing project configurations - Updated the README file to document the usage of profiles - Modified the docker-compose.yml file to include multiple profile definitions 这次提交引入了使用 Docker Compose 的 profile 功能来运行具有不同功能的项目。通过利用 profile 功能,我们现在可以轻松地管理和切换不同的项目配置。 本次提交的主要更改包括: - 添加了 Docker Compose 的 profile,用于组织项目配置 - 更新了 README 文件,以记录 profile 的使用方法 - 修改了 docker-compose.yml 文件,包含多个 profile 定义 --- README.md | 8 +- docker-compose.yml | 183 ++++++++++++++++++++++----------------------- 2 files changed, 92 insertions(+), 99 deletions(-) diff --git a/README.md b/README.md index 8fac50e7f..b618d0f40 100644 --- a/README.md +++ b/README.md @@ -147,7 +147,7 @@ python main.py ### 安装方法II:使用Docker -1. 仅ChatGPT(推荐大多数人选择,等价于docker-compose方案1) +1. 
仅ChatGPT(推荐大多数人选择,等价于docker-compose `nolocal` 方案) [![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml) [![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml) [![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml) @@ -169,16 +169,14 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以 [![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml) ``` sh -# 修改docker-compose.yml,保留方案2并删除其他方案。修改docker-compose.yml中方案2的配置,参考其中注释即可 -docker-compose up +docker-compose --profile chatglm up ``` 3. 
ChatGPT + LLAMA + 盘古 + RWKV(需要熟悉Docker) [![jittorllms](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-jittorllms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-jittorllms.yml) ``` sh -# 修改docker-compose.yml,保留方案3并删除其他方案。修改docker-compose.yml中方案3的配置,参考其中注释即可 -docker-compose up +docker-compose --profile rwkv up ``` diff --git a/docker-compose.yml b/docker-compose.yml index 874bdc216..fd484cccf 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,23 +1,49 @@ -#【请修改完参数后,删除此行】请在以下方案中选择一种,然后删除其他的方案,最后docker-compose up运行 | Please choose from one of these options below, delete other options as well as This Line +# 请在以下方案中选择一种,根据需要修改 `x-environment` 中的环境变量,运行 `docker compose --profile <profile名称> up` +# Please choose one of the following options and modify the environment variables in `x-environment` as needed, then run `docker compose --profile <profile> up`. +# +# Profile options: [ nolocal, chatglm, rwkv, latex, audio ] +# +# 1. nolocal: 仅 ChatGPT ,newbing 类远程服务 +# 2. chatglm: ChatGLM 本地模型 +# 3. rwkv: ChatGPT + LLAMA + 盘古 + RWKV本地模型 +# 4. latex: ChatGPT + Latex +# 5. 
audio: ChatGPT + 语音助手 (请先阅读 docs/use_audio.md) + +x-environment: &env + API_KEY: 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' + USE_PROXY: 'True' + proxies: '{ "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", }' + WEB_PORT: '22303' + ADD_WAIFU: 'True' + THEME: 'Chuanhu-Small-and-Beautiful' + + ENABLE_AUDIO: 'False' + ALIYUN_APPKEY: 'RoP1ZrM84DnAFkZK' + ALIYUN_TOKEN: 'f37f30e0f9934c34a992f6f64f7eba4f' + # (无需填写) ALIYUN_ACCESSKEY: 'LTAI5q6BrFUzoRXVGUWnekh1' + # (无需填写) ALIYUN_SECRET: 'eHmI20AVWIaQZ0CiTD2bGQVsaP9i68' + # DEFAULT_WORKER_NUM: '10' + # AUTHENTICATION: '[("username", "passwd"), ("username2", "passwd2")]' + + +# 显卡的使用,nvidia0指第0个GPU +x-devices: &gpu + - /dev/nvidia0:/dev/nvidia0 -## =================================================== -## 【方案一】 如果不需要运行本地模型(仅chatgpt,newbing类远程服务) -## =================================================== version: '3' services: + ## =================================================== + ## 【方案一】 如果不需要运行本地模型(仅chatgpt,newbing类远程服务) + ## =================================================== gpt_academic_nolocalllms: image: ghcr.io/binary-husky/gpt_academic_nolocal:master # (Auto Built by Dockerfile: docs/GithubAction+NoLocal) + profiles: + - nolocal environment: - # 请查阅 `config.py` 以查看所有的配置信息 - API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' - USE_PROXY: ' True ' - proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' - LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "newbing"] ' - WEB_PORT: ' 22303 ' - ADD_WAIFU: ' True ' - # DEFAULT_WORKER_NUM: ' 10 ' - # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] ' + LLM_MODEL: 'gpt-3.5-turbo' + AVAIL_LLM_MODELS: '["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "newbing"]' + + <<: *env # 与宿主的网络融合 network_mode: "host" @@ -26,62 +52,47 @@ services: command: > bash -c "python3 -u main.py" + ### 
=================================================== + ### 【方案二】 如果需要运行ChatGLM本地模型 + ### =================================================== -### =================================================== -### 【方案二】 如果需要运行ChatGLM本地模型 -### =================================================== -version: '3' -services: gpt_academic_with_chatglm: - image: ghcr.io/binary-husky/gpt_academic_chatglm_moss:master # (Auto Built by Dockerfile: docs/Dockerfile+ChatGLM) + image: ghcr.io/binary-husky/gpt_academic_chatglm_moss:master # (Auto Built by Dockerfile: docs/Dockerfile+ChatGLM) + profiles: + - chatglm environment: - # 请查阅 `config.py` 以查看所有的配置信息 - API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' - USE_PROXY: ' True ' - proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' - LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["chatglm", "moss", "gpt-3.5-turbo", "gpt-4", "newbing"] ' - LOCAL_MODEL_DEVICE: ' cuda ' - DEFAULT_WORKER_NUM: ' 10 ' - WEB_PORT: ' 12303 ' - ADD_WAIFU: ' True ' - # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] ' - - # 显卡的使用,nvidia0指第0个GPU + LLM_MODEL: 'gpt-3.5-turbo' + AVAIL_LLM_MODELS: '["chatglm", "moss", "gpt-3.5-turbo", "gpt-4", "newbing"]' + LOCAL_MODEL_DEVICE: 'cuda' + + <<: *env + runtime: nvidia - devices: - - /dev/nvidia0:/dev/nvidia0 - + devices: *gpu + # 与宿主的网络融合 network_mode: "host" command: > bash -c "python3 -u main.py" -### =================================================== -### 【方案三】 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型 -### =================================================== -version: '3' -services: + ### =================================================== + ### 【方案三】 如果需要运行ChatGPT + LLAMA + 盘古 + RWKV本地模型 + ### =================================================== + gpt_academic_with_rwkv: image: ghcr.io/binary-husky/gpt_academic_jittorllms:master + profiles: + - rwkv environment: - # 请查阅 `config.py` 以查看所有的配置信息 - API_KEY: ' 
sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' - USE_PROXY: ' True ' - proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' - LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "newbing", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] ' - LOCAL_MODEL_DEVICE: ' cuda ' - DEFAULT_WORKER_NUM: ' 10 ' - WEB_PORT: ' 12305 ' - ADD_WAIFU: ' True ' - # AUTHENTICATION: ' [("username", "passwd"), ("username2", "passwd2")] ' - - # 显卡的使用,nvidia0指第0个GPU + LLM_MODEL: 'gpt-3.5-turbo' + AVAIL_LLM_MODELS: '["gpt-3.5-turbo", "newbing", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]' + LOCAL_MODEL_DEVICE: 'cuda' + + <<: *env + runtime: nvidia - devices: - - /dev/nvidia0:/dev/nvidia0 - + devices: *gpu + # 与宿主的网络融合 network_mode: "host" @@ -89,24 +100,20 @@ services: command: > python3 -u main.py + ## =================================================== + ## 【方案四】 ChatGPT + Latex + ## =================================================== -## =================================================== -## 【方案四】 ChatGPT + Latex -## =================================================== -version: '3' -services: gpt_academic_with_latex: - image: ghcr.io/binary-husky/gpt_academic_with_latex:master # (Auto Built by Dockerfile: docs/GithubAction+NoLocal+Latex) + image: ghcr.io/binary-husky/gpt_academic_with_latex:master # (Auto Built by Dockerfile: docs/GithubAction+NoLocal+Latex) + profiles: + - latex environment: - # 请查阅 `config.py` 以查看所有的配置信息 - API_KEY: ' sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ' - USE_PROXY: ' True ' - proxies: ' { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' - LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "gpt-4"] ' - LOCAL_MODEL_DEVICE: ' cuda ' - DEFAULT_WORKER_NUM: ' 10 ' - WEB_PORT: ' 12303 ' + LLM_MODEL: 'gpt-3.5-turbo' + AVAIL_LLM_MODELS: '["gpt-3.5-turbo", "gpt-4"]' + 
LOCAL_MODEL_DEVICE: 'cuda' + + <<: *env # 与宿主的网络融合 network_mode: "host" @@ -115,31 +122,20 @@ services: command: > bash -c "python3 -u main.py" + ## =================================================== + ## 【方案五】 ChatGPT + 语音助手 (请先阅读 docs/use_audio.md) + ## =================================================== -## =================================================== -## 【方案五】 ChatGPT + 语音助手 (请先阅读 docs/use_audio.md) -## =================================================== -version: '3' -services: gpt_academic_with_audio: image: ghcr.io/binary-husky/gpt_academic_audio_assistant:master + profiles: + - audio environment: - # 请查阅 `config.py` 以查看所有的配置信息 - API_KEY: ' fk195831-IdP0Pb3W6DCMUIbQwVX6MsSiyxwqybyS ' - USE_PROXY: ' False ' - proxies: ' None ' - LLM_MODEL: ' gpt-3.5-turbo ' - AVAIL_LLM_MODELS: ' ["gpt-3.5-turbo", "gpt-4"] ' - ENABLE_AUDIO: ' True ' - LOCAL_MODEL_DEVICE: ' cuda ' - DEFAULT_WORKER_NUM: ' 20 ' - WEB_PORT: ' 12343 ' - ADD_WAIFU: ' True ' - THEME: ' Chuanhu-Small-and-Beautiful ' - ALIYUN_APPKEY: ' RoP1ZrM84DnAFkZK ' - ALIYUN_TOKEN: ' f37f30e0f9934c34a992f6f64f7eba4f ' - # (无需填写) ALIYUN_ACCESSKEY: ' LTAI5q6BrFUzoRXVGUWnekh1 ' - # (无需填写) ALIYUN_SECRET: ' eHmI20AVWIaQZ0CiTD2bGQVsaP9i68 ' + LLM_MODEL: 'gpt-3.5-turbo' + AVAIL_LLM_MODELS: '["gpt-3.5-turbo", "gpt-4"]' + LOCAL_MODEL_DEVICE: 'cuda' + + <<: *env # 与宿主的网络融合 network_mode: "host" @@ -147,4 +143,3 @@ services: # 不使用代理网络拉取最新代码 command: > bash -c "python3 -u main.py" -