Commit 029fc31 (1 parent: 404ef71)

Commit message: pip list

Signed-off-by: sirutBuasai <sirutbuasai27@outlook.com>

File tree: 1 file changed

.github/workflows/pr-vllm.yml

Lines changed: 38 additions & 35 deletions
@@ -311,7 +311,7 @@ jobs:
 uv pip install --system pytest pytest-asyncio
 uv pip install --system -e tests/vllm_test_utils
 uv pip install --system hf_transfer
-uv pip install --system transformers==4.56.1
+uv pip install --system transformers<4.56
 mkdir src
 mv vllm src/vllm
 '
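Note on the new constraint: these install commands run inside docker exec ${CONTAINER_ID} sh -c '...', and an unquoted < is parsed by sh as input redirection rather than as part of the requirement specifier. A minimal sketch of a quoted form, assuming the intent is to hand the upper bound to uv pip rather than to the shell (a suggested form only, not what the commit applies):

    # Quoting keeps sh from treating < as stdin redirection, so the
    # constraint string "transformers<4.56" reaches uv pip intact.
    # Double quotes are used so the line can sit inside the single-quoted sh -c block.
    uv pip install --system "transformers<4.56"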
@@ -321,6 +321,7 @@ jobs:
 docker exec ${CONTAINER_ID} sh -c '
 set -eux
 nvidia-smi
+uv pip list | grep transformers

 # Examples Test # 30min
 cd /workdir/examples
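The added uv pip list | grep transformers line echoes the installed transformers entry into the job log, and because the script runs under set -eux and grep exits non-zero when nothing matches, it also fails the step if transformers is missing. A hedged alternative sketch that prints only the resolved version (hypothetical, not part of this commit):

    # Fails loudly under set -e if transformers cannot be imported;
    # prints just the version string instead of the full pip list row.
    python3 -c "import transformers; print(transformers.__version__)"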
@@ -476,13 +477,13 @@ jobs:
 - name: Run vLLM tests
 run: |
 docker exec ${CONTAINER_ID} sh -c '
-set -eux
-nvidia-smi
+set -eux
+nvidia-smi

-# Regression Test # 7min
-cd /workdir/tests
-uv pip install --system modelscope
-pytest -v -s test_regression.py
+# Regression Test # 7min
+cd /workdir/tests
+uv pip install --system modelscope
+pytest -v -s test_regression.py
 '

 vllm-rayserve-cuda-test:
@@ -538,12 +539,12 @@ jobs:
 - name: Run vLLM tests
 run: |
 docker exec ${CONTAINER_ID} sh -c '
-set -eux
-nvidia-smi
+set -eux
+nvidia-smi

-# Platform Tests (CUDA) # 4min
-cd /workdir/tests
-pytest -v -s cuda/test_cuda_context.py
+# Platform Tests (CUDA) # 4min
+cd /workdir/tests
+pytest -v -s cuda/test_cuda_context.py
 '

 vllm-rayserve-example-test:
@@ -592,35 +593,36 @@ jobs:
 uv pip install --system pytest pytest-asyncio
 uv pip install --system -e tests/vllm_test_utils
 uv pip install --system hf_transfer
-uv pip install --system transformers==4.56.1
+uv pip install --system transformers<4.56
 mkdir src
 mv vllm src/vllm
 '

 - name: Run vLLM tests
 run: |
 docker exec ${CONTAINER_ID} sh -c '
-set -eux
-nvidia-smi
-
-# Examples Test # 30min
-cd /workdir/examples
-pip install tensorizer # for tensorizer test
-python3 offline_inference/basic/generate.py --model facebook/opt-125m
-# python3 offline_inference/basic/generate.py --model meta-llama/Llama-2-13b-chat-hf --cpu-offload-gb 10
-python3 offline_inference/basic/chat.py
-python3 offline_inference/prefix_caching.py
-python3 offline_inference/llm_engine_example.py
-python3 offline_inference/audio_language.py --seed 0
-python3 offline_inference/vision_language.py --seed 0
-python3 offline_inference/vision_language_pooling.py --seed 0
-python3 offline_inference/vision_language_multi_image.py --seed 0
-VLLM_USE_V1=0 python3 others/tensorize_vllm_model.py --model facebook/opt-125m serialize --serialized-directory /tmp/ --suffix v1 && python3 others/tensorize_vllm_model.py --model facebook/opt-125m deserialize --path-to-tensors /tmp/vllm/facebook/opt-125m/v1/model.tensors
-python3 offline_inference/encoder_decoder_multimodal.py --model-type whisper --seed 0
-python3 offline_inference/basic/classify.py
-python3 offline_inference/basic/embed.py
-python3 offline_inference/basic/score.py
-VLLM_USE_V1=0 python3 offline_inference/profiling.py --model facebook/opt-125m run_num_steps --num-steps 2
+set -eux
+nvidia-smi
+uv pip list | grep transformers
+
+# Examples Test # 30min
+cd /workdir/examples
+pip install tensorizer # for tensorizer test
+python3 offline_inference/basic/generate.py --model facebook/opt-125m
+# python3 offline_inference/basic/generate.py --model meta-llama/Llama-2-13b-chat-hf --cpu-offload-gb 10
+python3 offline_inference/basic/chat.py
+python3 offline_inference/prefix_caching.py
+python3 offline_inference/llm_engine_example.py
+python3 offline_inference/audio_language.py --seed 0
+python3 offline_inference/vision_language.py --seed 0
+python3 offline_inference/vision_language_pooling.py --seed 0
+python3 offline_inference/vision_language_multi_image.py --seed 0
+VLLM_USE_V1=0 python3 others/tensorize_vllm_model.py --model facebook/opt-125m serialize --serialized-directory /tmp/ --suffix v1 && python3 others/tensorize_vllm_model.py --model facebook/opt-125m deserialize --path-to-tensors /tmp/vllm/facebook/opt-125m/v1/model.tensors
+python3 offline_inference/encoder_decoder_multimodal.py --model-type whisper --seed 0
+python3 offline_inference/basic/classify.py
+python3 offline_inference/basic/embed.py
+python3 offline_inference/basic/score.py
+VLLM_USE_V1=0 python3 offline_inference/profiling.py --model facebook/opt-125m run_num_steps --num-steps 2
 '

 # ====================================================
@@ -873,7 +875,7 @@ jobs:
 uv pip install --system pytest pytest-asyncio
 uv pip install --system -e tests/vllm_test_utils
 uv pip install --system hf_transfer
-uv pip install --system transformers==4.56.1
+uv pip install --system transformers<4.56
 mkdir src
 mv vllm src/vllm
 '
@@ -883,6 +885,7 @@ jobs:
 docker exec ${CONTAINER_ID} sh -c '
 set -eux
 nvidia-smi
+uv pip list | grep transformers

 # Examples Test # 30min
 cd /workdir/examples
