
Commit 9b3fa20

Merge branch 'main' into dev-add-monitor-251023
2 parents fc1eba3 + 0817e6a

239 files changed: 8816 additions & 3988 deletions


.dev_scripts/test_pydantic.py

Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
+#! /usr/bin/env python
+import os
+import sys
+
+
+XTUNER_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+sys.path.insert(0, XTUNER_ROOT)
+
+
+# Written by GPT-5, hope it is correct.
+# XTuner.v1 requires every pydantic.BaseModel subclass to forbid extra fields in model_config. This script
+# scans all pydantic.BaseModel subclasses in xtuner.v1 and checks their model_config["extra"] field.
+
+
+from pathlib import Path
+
+from pydantic import BaseModel
+
+path_root = Path(__file__).parent.parent / "xtuner" / "v1"
+
+# BaseDataloaderConfig is an abstract base class. We only need to check its subclasses for model_config.
+skip = ["BaseDataloaderConfig"]
+
+basemodel_obj = []
+
+for module in path_root.rglob("*.py"):
+    if module.name == "__init__.py":
+        continue
+    relative_module = module.relative_to(path_root)
+    module_parts = relative_module.with_suffix('').parts
+    module_name = "xtuner.v1." + ".".join(module_parts)
+    try:
+        mod = __import__(module_name, fromlist=[''])
+    except ImportError as e:
+        print(f"Failed to import {module_name}: {e}")
+        continue
+    for attr_name in dir(mod):
+        attr = getattr(mod, attr_name)
+        if isinstance(attr, type) and issubclass(attr, BaseModel):
+            if "xtuner" in attr.__module__:
+                if attr_name in skip:
+                    print(f"{attr} skipped")
+                    continue
+                if not hasattr(attr, "model_config"):
+                    raise AssertionError(f"{attr} missing model_config")
+                if "extra" not in attr.model_config:
+                    raise AssertionError(f"{attr} model_config missing extra")
+                if attr.model_config["extra"] != "forbid":
+                    raise AssertionError(f"{attr} model_config extra is not forbid")
+                basemodel_obj.append((module_name, attr_name))
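For reference, a model that passes this check declares extra="forbid" through pydantic v2's ConfigDict. A minimal sketch (the class name and fields are hypothetical, not taken from the xtuner codebase):

    from pydantic import BaseModel, ConfigDict

    class OptimizerConfig(BaseModel):
        # Reject unknown fields instead of silently ignoring them.
        model_config = ConfigDict(extra="forbid")

        lr: float = 1e-5
        weight_decay: float = 0.01

    OptimizerConfig(lr=1e-4)                # ok
    OptimizerConfig(lr=1e-4, typo_field=1)  # raises ValidationError

Because ConfigDict is a plain TypedDict, the scanner above can read attr.model_config["extra"] directly.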

.github/workflows/unit_test.yaml

Lines changed: 1 addition & 1 deletion
@@ -30,4 +30,4 @@ jobs:
       - name: unit-test
         run: |
           export PYTHONPYCACHEPREFIX=/tmp
-          python ci/scripts/xtuner_unittest.py "$IMAGE" "$CI_IMPORT" "export PYTHONPATH=$PWD:$LM_DEPLOY:$PYTHONPATH; pip install codetiming -i http://mirrors.h.pjlab.org.cn/pypi/simple/ --trusted-host mirrors.h.pjlab.org.cn --trusted-host pypi.i.h.pjlab.org.cn; export PYTHONPATH=/mnt/shared-storage-user/caoweihan/duanyanhui/lmdeploy:$PYTHONPATH; pytest tests/ --ignore=./tests/module/dispatcher/test_deepep.py"
+          python ci/scripts/xtuner_unittest.py "$IMAGE" "source ${{env.WORKSPACE_PREFIX}}/BASE_ENV.sh;source ci/scripts/CI_ENV.sh" "pytest tests --ignore=./tests/module/dispatcher/test_deepep.py"

.pre-commit-config.yaml

Lines changed: 5 additions & 0 deletions
@@ -66,3 +66,8 @@ repos:
         require_serial: true
         verbose: true
         types: [python]
+      - id: pydantic-extra-check
+        name: pydantic-extra-check
+        language: system
+        entry: .dev_scripts/test_pydantic.py
+        verbose: false
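Because the hook's language is system, pre-commit executes .dev_scripts/test_pydantic.py directly from the working tree; assuming pre-commit is installed, the check can also be run on demand with `pre-commit run pydantic-extra-check --all-files`.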

Dockerfile

Lines changed: 119 additions & 65 deletions
@@ -1,39 +1,51 @@
 # syntax=docker/dockerfile:1.10.0
 # builder
-ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:25.01-py3
+ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:25.03-py3

 ## build args
 FROM ${BASE_IMAGE} AS setup_env

-ARG CODESPACE=/root/codespace
-
-ARG FLASH_ATTN_DIR=/tmp/flash-attn
-ARG FLASH_ATTN3_DIR=/tmp/flash-attn3
-ARG ADAPTIVE_GEMM_DIR=/tmp/adaptive_gemm
-ARG GROUPED_GEMM_DIR=/tmp/grouped_gemm
-
 ARG TORCH_VERSION
-
 ARG PPA_SOURCE

-RUN if [ -d /etc/pip ] && [ -f /etc/pip/constraint.txt ]; then echo > /etc/pip/constraint.txt; fi
-RUN if [ -n "${TORCH_VERSION}" ]; then \
-    pip install torchvision torch==${TORCH_VERSION} --index-url https://download.pytorch.org/whl/cu126 --no-cache-dir; \
-    fi
-
-# set reasonable default for CUDA architectures when building ngc image
-ENV TORCH_CUDA_ARCH_LIST="7.5 8.0 8.6 9.0 10.0"
-
-RUN sed -i "s@http://.*.ubuntu.com@${PPA_SOURCE}@g" /etc/apt/sources.list.d/ubuntu.sources && \
+RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
+    sed -i "s@http://.*.ubuntu.com@${PPA_SOURCE}@g" /etc/apt/sources.list.d/ubuntu.sources && \
     apt update && \
     apt install --no-install-recommends ca-certificates -y && \
     apt install --no-install-recommends bc wget -y && \
     apt install --no-install-recommends build-essential sudo -y && \
     apt install --no-install-recommends git curl pkg-config tree unzip tmux \
-    openssh-server openssh-client nmap dnsutils iproute2 lsof net-tools -y && \
+    openssh-server openssh-client dnsutils iproute2 lsof net-tools zsh rclone \
+    iputils-ping telnet netcat-openbsd -y && \
     apt clean && rm -rf /var/lib/apt/lists/*

-RUN pip uninstall flash_attn -y
+RUN if [ -d /etc/pip ] && [ -f /etc/pip/constraint.txt ]; then echo > /etc/pip/constraint.txt; fi
+RUN pip install pystack py-spy --no-cache-dir
+RUN git config --system --add safe.directory "*"
+
+RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
+    if [ -n "${TORCH_VERSION}" ]; then \
+    pip install torchvision torch==${TORCH_VERSION} \
+    --index-url https://download.pytorch.org/whl/cu128 \
+    --extra-index-url https://download.pytorch.org/whl/cu126 \
+    --no-cache-dir; \
+    fi
+
+# set reasonable default for CUDA architectures when building ngc image
+ENV TORCH_CUDA_ARCH_LIST="7.5 8.0 8.6 9.0 10.0"
+
+RUN pip uninstall flash_attn opencv -y && rm -rf /usr/local/lib/python3.12/dist-packages/cv2
+
+ARG FLASH_ATTN_DIR=/tmp/flash-attn
+ARG CODESPACE=/root/codespace
+ARG FLASH_ATTN3_DIR=/tmp/flash-attn3
+ARG ADAPTIVE_GEMM_DIR=/tmp/adaptive_gemm
+ARG GROUPED_GEMM_DIR=/tmp/grouped_gemm
+ARG DEEP_EP_DIR=/tmp/deep_ep
+ARG NVSHMEM_PREFIX=/usr/local/nvshmem
+
+RUN mkdir -p $CODESPACE
+WORKDIR ${CODESPACE}

 # compile flash-attn
 FROM setup_env AS flash_attn
@@ -43,16 +55,14 @@ ARG FLASH_ATTN_DIR
 ARG FLASH_ATTN3_DIR
 ARG FLASH_ATTN_URL

-RUN mkdir -p $CODESPACE
-WORKDIR ${CODESPACE}
-
-RUN git clone -c https.proxy=$HTTPS_PROXY $(echo ${FLASH_ATTN_URL} | cut -d '@' -f 1) && \
+RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
+    git clone $(echo ${FLASH_ATTN_URL} | cut -d '@' -f 1) && \
     cd ${CODESPACE}/flash-attention && \
-    git checkout $(echo ${FLASH_ATTN_URL} | cut -d '@' -f 2)
+    git checkout $(echo ${FLASH_ATTN_URL} | cut -d '@' -f 2) && \
+    git submodule update --init --recursive --force

 WORKDIR ${CODESPACE}/flash-attention

-RUN git submodule update --init --recursive --force
 RUN cd hopper && FLASH_ATTENTION_FORCE_BUILD=TRUE pip wheel -w ${FLASH_ATTN3_DIR} -v --no-deps .
 RUN FLASH_ATTENTION_FORCE_BUILD=TRUE pip wheel -w ${FLASH_ATTN_DIR} -v --no-deps .

@@ -63,16 +73,14 @@ ARG CODESPACE
 ARG ADAPTIVE_GEMM_DIR
 ARG ADAPTIVE_GEMM_URL

-RUN mkdir -p $CODESPACE
-WORKDIR ${CODESPACE}
-
-RUN git clone -c https.proxy=$HTTPS_PROXY $(echo ${ADAPTIVE_GEMM_URL} | cut -d '@' -f 1) && \
+RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
+    git clone $(echo ${ADAPTIVE_GEMM_URL} | cut -d '@' -f 1) && \
     cd ${CODESPACE}/AdaptiveGEMM && \
-    git checkout $(echo ${ADAPTIVE_GEMM_URL} | cut -d '@' -f 2)
+    git checkout $(echo ${ADAPTIVE_GEMM_URL} | cut -d '@' -f 2) && \
+    git submodule update --init --recursive --force

 WORKDIR ${CODESPACE}/AdaptiveGEMM

-RUN git submodule update --init --recursive --force
 RUN pip wheel -w ${ADAPTIVE_GEMM_DIR} -v --no-deps .

 # compile grouped_gemm(permute and unpermute)
@@ -82,18 +90,52 @@ ARG CODESPACE
 ARG GROUPED_GEMM_DIR
 ARG GROUPED_GEMM_URL

-RUN mkdir -p $CODESPACE
-WORKDIR ${CODESPACE}
-
-RUN git clone -c https.proxy=$HTTPS_PROXY $(echo ${GROUPED_GEMM_URL} | cut -d '@' -f 1) && \
+RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
+    git clone $(echo ${GROUPED_GEMM_URL} | cut -d '@' -f 1) && \
     cd ${CODESPACE}/GroupedGEMM && \
-    git checkout $(echo ${GROUPED_GEMM_URL} | cut -d '@' -f 2)
+    git checkout $(echo ${GROUPED_GEMM_URL} | cut -d '@' -f 2) && \
+    git submodule update --init --recursive --force

 WORKDIR ${CODESPACE}/GroupedGEMM

-RUN git submodule update --init --recursive --force
 RUN pip wheel -w ${GROUPED_GEMM_DIR} -v --no-deps .

+# pypi install nvshmem and compile deepep
+FROM setup_env AS deep_ep
+
+ARG CODESPACE
+ARG DEEP_EP_DIR
+ARG DEEP_EP_URL
+# build sm90 and sm100 for deep_ep for now
+ARG TORCH_CUDA_ARCH_LIST="9.0 10.0"
+
+RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
+    curl -LO https://github.com/NVIDIA/nvshmem/releases/download/v3.4.5-0/nvshmem_src_cuda-all-all-3.4.5.tar.gz && \
+    tar -zxvf nvshmem_src_cuda-all-all-3.4.5.tar.gz && \
+    cd ${CODESPACE}/nvshmem_src && \
+    NVSHMEM_SHMEM_SUPPORT=0 \
+    NVSHMEM_UCX_SUPPORT=0 \
+    NVSHMEM_USE_NCCL=0 \
+    NVSHMEM_MPI_SUPPORT=0 \
+    NVSHMEM_IBGDA_SUPPORT=1 \
+    NVSHMEM_USE_GDRCOPY=0 \
+    NVSHMEM_PMIX_SUPPORT=0 \
+    NVSHMEM_TIMEOUT_DEVICE_POLLING=0 \
+    NVSHMEM_BUILD_TESTS=0 \
+    NVSHMEM_BUILD_EXAMPLES=0 \
+    NVSHMEM_BUILD_HYDRA_LAUNCHER=0 \
+    NVSHMEM_BUILD_TXZ_PACKAGE=0 \
+    NVSHMEM_BUILD_PYTHON_LIB=OFF \
+    cmake -S . -B build/ -DCMAKE_INSTALL_PREFIX=${NVSHMEM_PREFIX} -DMLX5_lib=/lib/x86_64-linux-gnu/libmlx5.so.1 && \
+    cmake --build build --target install --parallel 32 && \
+    cd ${CODESPACE} && git clone $(echo ${DEEP_EP_URL} | cut -d '@' -f 1) && \
+    cd ${CODESPACE}/DeepEP && \
+    git checkout $(echo ${DEEP_EP_URL} | cut -d '@' -f 2) && \
+    git submodule update --init --recursive --force
+
+WORKDIR ${CODESPACE}/DeepEP
+
+RUN NVSHMEM_DIR=${NVSHMEM_PREFIX} pip wheel -w ${DEEP_EP_DIR} -v --no-deps .

 # integration xtuner
 FROM setup_env AS xtuner_dev
@@ -105,53 +147,65 @@ ARG FLASH_ATTN_DIR
 ARG FLASH_ATTN3_DIR
 ARG ADAPTIVE_GEMM_DIR
 ARG GROUPED_GEMM_DIR
+ARG DEEP_EP_DIR

 COPY --from=flash_attn ${FLASH_ATTN3_DIR} ${FLASH_ATTN3_DIR}
 COPY --from=flash_attn ${FLASH_ATTN_DIR} ${FLASH_ATTN_DIR}
 COPY --from=adaptive_gemm ${ADAPTIVE_GEMM_DIR} ${ADAPTIVE_GEMM_DIR}
 COPY --from=grouped_gemm ${GROUPED_GEMM_DIR} ${GROUPED_GEMM_DIR}
+COPY --from=deep_ep ${DEEP_EP_DIR} ${DEEP_EP_DIR}
+COPY --from=deep_ep ${NVSHMEM_PREFIX} ${NVSHMEM_PREFIX}

 RUN unzip ${FLASH_ATTN_DIR}/*.whl -d ${PYTHON_SITE_PACKAGE_PATH}
 RUN unzip ${FLASH_ATTN3_DIR}/*.whl -d ${PYTHON_SITE_PACKAGE_PATH}
 RUN unzip ${ADAPTIVE_GEMM_DIR}/*.whl -d ${PYTHON_SITE_PACKAGE_PATH}
 RUN unzip ${GROUPED_GEMM_DIR}/*.whl -d ${PYTHON_SITE_PACKAGE_PATH}
+RUN unzip ${DEEP_EP_DIR}/*.whl -d ${PYTHON_SITE_PACKAGE_PATH}

-ARG XTUNER_URL
-ARG XTUNER_COMMIT
+# install sglang and its runtime requirements
+ARG SGLANG_VERSION
+
+RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
+    pip install sglang==${SGLANG_VERSION} sgl_kernel pybase64 orjson uvloop setproctitle msgspec \
+    compressed_tensors python-multipart torch_memory_saver \
+    grpcio-tools==1.75.1 hf_transfer interegular llguidance==0.7.11 \
+    xgrammar==0.1.24 blobfile==3.0.0 flashinfer_python==0.4.0 --no-cache-dir --no-deps
+
+# install lmdeploy and its missing runtime requirements
 ARG LMDEPLOY_VERSION
 ARG LMDEPLOY_URL

-## install xtuner
-RUN mkdir -p $CODESPACE
-WORKDIR ${CODESPACE}
+RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
+    pip install fastapi fire openai outlines \
+    partial_json_parser ray[default] shortuuid uvicorn \
+    'pydantic>2' openai_harmony --no-cache-dir && \
+    if [ -n "${LMDEPLOY_VERSION}" ]; then \
+    pip install lmdeploy==${LMDEPLOY_VERSION} --no-deps --no-cache-dir; \
+    else \
+    git clone $(echo ${LMDEPLOY_URL} | cut -d '@' -f 1) && \
+    cd ${CODESPACE}/lmdeploy && \
+    git checkout $(echo ${LMDEPLOY_URL} | cut -d '@' -f 2) && \
+    pip install . -v --no-deps --no-cache-dir; \
+    fi

-#RUN git clone -c https.proxy=$HTTPS_PROXY $(echo ${XTUNER_URL} | cut -d '@' -f 1) && \
-#cd ${CODESPACE}/xtuner && \
-#git checkout $(echo ${XTUNER_URL} | cut -d '@' -f 2)
+## install xtuner
+ARG XTUNER_URL
+ARG XTUNER_COMMIT
+#RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
+#    git clone $(echo ${XTUNER_URL} | cut -d '@' -f 1) && \
+#    cd ${CODESPACE}/xtuner && \
+#    git checkout $(echo ${XTUNER_URL} | cut -d '@' -f 2)
 COPY . ${CODESPACE}/xtuner

 WORKDIR ${CODESPACE}/xtuner
-RUN export HTTPS_PROXY=$HTTPS_PROXY \
-    && export https_proxy=$HTTPS_PROXY \
-    && pip install liger-kernel parametrize --no-cache-dir \
-    && pip install . -v --no-cache-dir
+RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy \
+    pip install .[all] -v --no-cache-dir

-RUN pip install pystack py-spy --no-cache-dir
-RUN git config --system --add safe.directory "*"
-
-# install lmdeploy and its missing runtime requirements
-RUN pip install fastapi fire openai outlines \
-    partial_json_parser ray[default] shortuuid uvicorn \
-    'numpy<2.0.0' \
-    python-sat[aiger,approxmc,cryptosat,pblib] distance Faker --no-cache-dir
 WORKDIR ${CODESPACE}
-RUN if [ -n "${LMDEPLOY_VERSION}" ]; then \
-    pip install lmdeploy==${LMDEPLOY_VERSION} --no-deps --no-cache-dir; \
-    else \
-    git clone -c https.proxy=$HTTPS_PROXY $(echo ${LMDEPLOY_URL} | cut -d '@' -f 1) && \
-    cd ${CODESPACE}/lmdeploy && \
-    git checkout $(echo ${LMDEPLOY_URL} | cut -d '@' -f 2) && \
-    pip install . -v --no-deps --no-cache-dir; \
+
+# nccl update for torch 2.6.0
+RUN if [ "x${TORCH_VERSION}" = "x2.6.0" ]; then \
+    pip install nvidia-nccl-cu12==2.25.1 --no-cache-dir; \
     fi

 # setup sysctl
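The recurring `RUN --mount=type=secret,id=HTTPS_PROXY,env=https_proxy` pattern replaces the old `git clone -c https.proxy=$HTTPS_PROXY` and `export https_proxy` approach: the proxy value reaches each RUN step as an environment variable without being persisted in any image layer. At build time the secret would be supplied along the lines of `docker build --secret id=HTTPS_PROXY,env=HTTPS_PROXY .` (a sketch, assuming HTTPS_PROXY is set in the builder's environment; the `env=` option on secret mounts requires the dockerfile:1.10 syntax pinned at the top of this file).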

README.md

Lines changed: 1 addition & 2 deletions
@@ -57,11 +57,10 @@ XTuner V1 is a next-generation LLM training engine specifically designed for ult


 <div align=center>
-  <img src="https://github.com/user-attachments/assets/98519a93-1ce8-49f0-a7ab-d7968c9d67a6" style="width:90%">
+  <img src="https://github.com/user-attachments/assets/c4fb2bb4-56bd-4f1c-8188-7f5370314cf8" style="width:90%">
 </div>


-
 ## 🔥 Roadmap

 XTuner V1 is committed to continuously improving training efficiency for pre-training, instruction fine-tuning, and reinforcement learning of ultra-large MoE models, with special focus on Ascend NPU optimization.

ci/scripts/CI_ENV.sh

Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
+#!/bin/bash
+export QWEN3_VL_MOE_PATH=${CI_SHARE_MODEL}/Qwen3-VL-30B-A3B-Instruct_MOE
+export QWEN3_VL_DENSE_PATH=${CI_SHARE_MODEL}/Qwen3-VL-4B-Instruct
+export INTERN_VL_1B_PATH=${CI_SHARE_MODEL}/InternVL3_5-1B-HF
+export VIDEO_ROOT=${CI_SHARE_DATA}/images
+export QWEN3_4B_PATH=${CI_SHARE_MODEL}/Qwen3-4B-Instruct-2507
+export ROLLOUT_DATA_PATH=${CI_SHARE_DATA}/gsm8k/train.jsonl
+export DEEPSEEK_V3_PATH=${CI_SHARE_MODEL}/DeepSeek-V3.1
+export GPT_OSS_MINI_PATH=${CI_SHARE_MODEL}/gpt-oss-20b-bf16
+export ROLLOUT_TEST_DATA_PATH=${CI_SHARE_DATA}/gsm8k/test.jsonl
+export VERL_ROLLOUT_DATA_PATH=${CI_SHARE_DATA}/verl-rollout-step0.jsonl
+export QWEN3_PATH=${CI_SHARE_MODEL}/Qwen3-8B
+export QWEN3_VL_PATH=${CI_SHARE_MODEL}/Qwen2.5-VL-3B-Instruct
+export QWEN3_MOE_PATH=${CI_SHARE_MODEL}/Qwen3-30B-A3B
+export INTERNS1_DENSE_PATH=${CI_SHARE_MODEL}/intern-s1-mini
+export ROLLOUT_MODEL_PATH=${CI_SHARE_MODEL}/Qwen3-8B
+export ALPACA_PATH=${CI_SHARE_DATA}/alpaca
+export INTERNS1_DATA_META=${CI_SHARE_DATA}/vlm_ci_data.json
+export ROLLOUT_DAPO_DATA_PATH=${CI_SHARE_DATA}/rl_test_judger_dapo_math_data.jsonl
+export GEO_ROLLOUT_DATA_PATH=${CI_SHARE_DATA}/rl_test_judge_geo_data.jsonl
+export TORCH_ALLOW_TF32_CUBLAS_OVERRIDE=0
+export XTUNER_DETERMINISTIC=true
+export XTUNER_USE_LMDEPLOY=1
+export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
+export PYTHONPYCACHEPREFIX=/tmp
+export TRITON_CACHE_DIR=/tmp/.triton
+export PYTEST_ADDOPTS='-o cache_dir=/tmp/.pytest_cache'
+
+proxy_off
+pip install -e .
+pip install openai-harmony
+pip install numpy==1.26.4
+
+export PYTHONPATH=${LM_DEPLOY}:$PYTHONPATH
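Note that `proxy_off` and `LM_DEPLOY` are not defined in this script; per the unit_test.yaml change above, CI_ENV.sh is sourced only after `${{env.WORKSPACE_PREFIX}}/BASE_ENV.sh`, which presumably provides both.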

ci/scripts/test_ray_sft.py

Lines changed: 2 additions & 2 deletions
@@ -12,8 +12,8 @@
 from xtuner.v1.model.moe.moe import BalancingLossConfig, ZLossConfig
 from xtuner.v1.datasets import FTDPTokenizeFnConfig
 import ray
-from xtuner.v1.ray.train import TrainingWorker
-from xtuner.v1.ray.accelerator import AutoAcceleratorWorkers, AcceleratorResourcesConfig
+from xtuner.v1.rl.base.worker import TrainingWorker
+from xtuner.v1.ray.base import AutoAcceleratorWorkers, AcceleratorResourcesConfig
 from xtuner.v1.train import TrainerConfig
 from xtuner.v1.train.trainer import Trainer
 from xtuner.v1.loss.ce_loss import CELossConfig

docs/en/.readthedocs.yaml

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ version: 2
 build:
   os: ubuntu-22.04
   tools:
-    python: "3.8"
+    python: "3.12"

 formats:
   - epub
