Skip to content

Commit 5e525a8

Browse files
committed
[CI] Use official PyTorch 2.1, add CUDA 11.8 for PyTorch 2.1
1 parent 21c3b0d commit 5e525a8

4 files changed

Lines changed: 12 additions & 13 deletions

File tree

.github/workflows/publish.yml

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ jobs:
4444
# manylinux docker image, but I haven't figured out how to install CUDA on manylinux.
4545
os: [ubuntu-20.04]
4646
python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
47-
torch-version: ['1.12.1', '1.13.1', '2.0.1', '2.1.0.dev20230731']
47+
torch-version: ['1.12.1', '1.13.1', '2.0.1', '2.1.0']
4848
cuda-version: ['11.6.2', '11.7.1', '11.8.0', '12.1.0', '12.2.0']
4949
# We need separate wheels that either uses C++11 ABI (-D_GLIBCXX_USE_CXX11_ABI) or not.
5050
# Pytorch wheels currently don't use it, but nvcr images have Pytorch compiled with C++11 ABI.
@@ -58,7 +58,7 @@ jobs:
5858
# Pytorch >= 2.0 only supports Python >= 3.8
5959
- torch-version: '2.0.1'
6060
python-version: '3.7'
61-
- torch-version: '2.1.0.dev20230731'
61+
- torch-version: '2.1.0'
6262
python-version: '3.7'
6363
# Pytorch <= 2.0 only supports CUDA <= 11.8
6464
- torch-version: '1.12.1'
@@ -73,17 +73,15 @@ jobs:
7373
cuda-version: '12.1.0'
7474
- torch-version: '2.0.1'
7575
cuda-version: '12.2.0'
76-
# Pytorch >= 2.1 only supports CUDA >= 12.1
77-
- torch-version: '2.1.0.dev20230731'
76+
# Pytorch >= 2.1 only supports CUDA >= 11.8
77+
- torch-version: '2.1.0'
7878
cuda-version: '11.6.2'
79-
- torch-version: '2.1.0.dev20230731'
79+
- torch-version: '2.1.0'
8080
cuda-version: '11.7.1'
81-
- torch-version: '2.1.0.dev20230731'
82-
cuda-version: '11.8.0'
8381
# Pytorch >= 2.1 with nvcc 12.1.0 segfaults during compilation, so
8482
# we only use CUDA 12.2. setup.py as a special case that will
8583
# download the wheel for CUDA 12.2 instead.
86-
- torch-version: '2.1.0.dev20230731'
84+
- torch-version: '2.1.0'
8785
cuda-version: '12.1.0'
8886

8987
steps:
@@ -132,7 +130,7 @@ jobs:
132130
# We want to figure out the CUDA version to download pytorch
133131
# e.g. we can have system CUDA version being 11.7 but if torch==1.12 then we need to download the wheel from cu116
134132
# This code is ugly, maybe there's a better way to do this.
135-
export TORCH_CUDA_VERSION=$(python -c "import os; minv = {'1.12': 113, '1.13': 116, '2.0': 117, '2.1': 121}[os.environ['MATRIX_TORCH_VERSION']]; maxv = {'1.12': 116, '1.13': 117, '2.0': 118, '2.1': 121}[os.environ['MATRIX_TORCH_VERSION']]; print(max(min(int(os.environ['MATRIX_CUDA_VERSION']), maxv), minv))")
133+
export TORCH_CUDA_VERSION=$(python -c "import os; minv = {'1.12': 113, '1.13': 116, '2.0': 117, '2.1': 118}[os.environ['MATRIX_TORCH_VERSION']]; maxv = {'1.12': 116, '1.13': 117, '2.0': 118, '2.1': 121}[os.environ['MATRIX_TORCH_VERSION']]; print(max(min(int(os.environ['MATRIX_CUDA_VERSION']), maxv), minv))")
136134
if [[ ${{ matrix.torch-version }} == *"dev"* ]]; then
137135
pip install --no-cache-dir --pre torch==${{ matrix.torch-version }} --index-url https://download.pytorch.org/whl/nightly/cu${TORCH_CUDA_VERSION}
138136
else

flash_attn/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
__version__ = "2.3.1"
1+
__version__ = "2.3.1.post1"
22

33
from flash_attn.flash_attn_interface import (
44
flash_attn_func,

setup.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -233,7 +233,8 @@ def get_wheel_url():
233233
# _, cuda_version_raw = get_cuda_bare_metal_version(CUDA_HOME)
234234
torch_cuda_version = parse(torch.version.cuda)
235235
torch_version_raw = parse(torch.__version__)
236-
if torch_version_raw.major == 2 and torch_version_raw.minor == 1:
236+
# Workaround for nvcc 12.1 segfaults when compiling with Pytorch 2.1
237+
if torch_version_raw.major == 2 and torch_version_raw.minor == 1 and torch_cuda_version.major == 12:
237238
torch_cuda_version = parse("12.2")
238239
python_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
239240
platform_name = get_platform()

training/Dockerfile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -85,11 +85,11 @@ RUN pip install transformers==4.25.1 datasets==2.8.0 pytorch-lightning==1.8.6 tr
8585
RUN pip install git+https://github.com/mlcommons/logging.git@2.1.0
8686

8787
# Install FlashAttention
88-
RUN pip install flash-attn==2.3.1
88+
RUN pip install flash-attn==2.3.1.post1
8989

9090
# Install CUDA extensions for fused dense, layer norm
9191
RUN git clone https://github.com/HazyResearch/flash-attention \
92-
&& cd flash-attention && git checkout v2.3.1 \
92+
&& cd flash-attention && git checkout v2.3.1.post1 \
9393
&& cd csrc/layer_norm && pip install . && cd ../../ \
9494
&& cd csrc/fused_dense_lib && pip install . && cd ../../ \
9595
&& cd .. && rm -rf flash-attention

0 commit comments

Comments (0)