diff --git a/examples/llm_compression/onnx/tiny_llama/requirements.txt b/examples/llm_compression/onnx/tiny_llama/requirements.txt
index 7a3e7067e31..6b7090a9267 100644
--- a/examples/llm_compression/onnx/tiny_llama/requirements.txt
+++ b/examples/llm_compression/onnx/tiny_llama/requirements.txt
@@ -1,8 +1,8 @@
 transformers==4.53.0
 openvino==2025.4.1
-optimum-intel[openvino]==1.26.0
-optimum-onnx==0.0.3
-optimum==2.0.0
+optimum-intel[openvino]==1.27.0
+optimum-onnx==0.1.0
+optimum==2.1.0
 onnx==1.19.1
 onnxruntime==1.21.1
 torch==2.9.0
diff --git a/examples/llm_compression/onnx/tiny_llama_scale_estimation/requirements.txt b/examples/llm_compression/onnx/tiny_llama_scale_estimation/requirements.txt
index 47a3b49e805..f32e289b9f5 100644
--- a/examples/llm_compression/onnx/tiny_llama_scale_estimation/requirements.txt
+++ b/examples/llm_compression/onnx/tiny_llama_scale_estimation/requirements.txt
@@ -1,9 +1,9 @@
 torch==2.9.0
 transformers==4.53.0
 openvino==2025.4.1
-optimum-intel[openvino]==1.26.0
-optimum-onnx==0.0.3
-optimum==2.0.0
+optimum-intel[openvino]==1.27.0
+optimum-onnx==0.1.0
+optimum==2.1.0
 onnx==1.17.0
 onnxruntime==1.21.1
 datasets==4.4.1
diff --git a/examples/llm_compression/openvino/smollm2_360m_codebook/requirements.txt b/examples/llm_compression/openvino/smollm2_360m_codebook/requirements.txt
index ff6824dd4de..f6ff871034a 100644
--- a/examples/llm_compression/openvino/smollm2_360m_codebook/requirements.txt
+++ b/examples/llm_compression/openvino/smollm2_360m_codebook/requirements.txt
@@ -1,7 +1,7 @@
 openvino==2025.4.1
-optimum-intel[openvino]==1.26.0
-optimum-onnx==0.0.3
-optimum==2.0.0
+optimum-intel[openvino]==1.27.0
+optimum-onnx==0.1.0
+optimum==2.1.0
 transformers==4.53.0
 onnx==1.17.0
 torch==2.9.0
diff --git a/examples/llm_compression/openvino/smollm2_360m_fp8/requirements.txt b/examples/llm_compression/openvino/smollm2_360m_fp8/requirements.txt
index f4b7cfbc8b6..3e2c364101e 100644
--- a/examples/llm_compression/openvino/smollm2_360m_fp8/requirements.txt
+++ b/examples/llm_compression/openvino/smollm2_360m_fp8/requirements.txt
@@ -1,8 +1,8 @@
 datasets==4.4.1
 openvino==2025.4.1
-optimum-intel[openvino]==1.26.0
-optimum-onnx==0.0.3
-optimum==2.0.0
+optimum-intel[openvino]==1.27.0
+optimum-onnx==0.1.0
+optimum==2.1.0
 transformers==4.53.0
 onnx==1.17.0
 torch==2.9.0
diff --git a/examples/llm_compression/openvino/tiny_llama/requirements.txt b/examples/llm_compression/openvino/tiny_llama/requirements.txt
index 8d413e2a2e9..fc7c68661ba 100644
--- a/examples/llm_compression/openvino/tiny_llama/requirements.txt
+++ b/examples/llm_compression/openvino/tiny_llama/requirements.txt
@@ -1,9 +1,9 @@
 datasets==4.4.1
 onnx==1.17.0
 openvino==2025.4.1
-optimum-intel[openvino]==1.26.0
-optimum-onnx==0.0.3
-optimum==2.0.0
+optimum-intel[openvino]==1.27.0
+optimum-onnx==0.1.0
+optimum==2.1.0
 torch==2.9.0
 transformers==4.53.0
 pillow==12.0.0
diff --git a/examples/llm_compression/openvino/tiny_llama_find_hyperparams/requirements.txt b/examples/llm_compression/openvino/tiny_llama_find_hyperparams/requirements.txt
index 55417f8f798..2bd63973082 100644
--- a/examples/llm_compression/openvino/tiny_llama_find_hyperparams/requirements.txt
+++ b/examples/llm_compression/openvino/tiny_llama_find_hyperparams/requirements.txt
@@ -1,9 +1,9 @@
 whowhatbench @ git+https://github.com/openvinotoolkit/openvino.genai@2025.4.1.0#subdirectory=tools/who_what_benchmark
 numpy==1.26.4
 openvino==2025.4.1
-optimum-intel==1.26.0
-optimum-onnx==0.0.3
-optimum==2.0.0
+optimum-intel==1.27.0
+optimum-onnx==0.1.0
+optimum==2.1.0
 transformers==4.53.0
 onnx==1.17.0
 torch==2.9.0
diff --git a/examples/llm_compression/openvino/tiny_llama_synthetic_data/requirements.txt b/examples/llm_compression/openvino/tiny_llama_synthetic_data/requirements.txt
index 93dc8b97f0c..03b515ae1bf 100644
--- a/examples/llm_compression/openvino/tiny_llama_synthetic_data/requirements.txt
+++ b/examples/llm_compression/openvino/tiny_llama_synthetic_data/requirements.txt
@@ -2,9 +2,9 @@ torch==2.9.0
 datasets==4.4.1
 numpy>=1.23.5,<2
 openvino==2025.4.1
-optimum-intel==1.26.0
-optimum-onnx==0.0.3
-optimum==2.0.0
+optimum-intel==1.27.0
+optimum-onnx==0.1.0
+optimum==2.1.0
 transformers==4.53.0
 onnx==1.17.0
 pillow==12.0.0
diff --git a/examples/llm_compression/torch/distillation_qat_with_lora/requirements.txt b/examples/llm_compression/torch/distillation_qat_with_lora/requirements.txt
index f24baf7ef09..ff91ffdc75c 100644
--- a/examples/llm_compression/torch/distillation_qat_with_lora/requirements.txt
+++ b/examples/llm_compression/torch/distillation_qat_with_lora/requirements.txt
@@ -2,9 +2,9 @@ tensorboard==2.13.0
 torch==2.9.0
 numpy>=1.23.5,<2
 openvino==2025.4.1
-optimum-intel==1.26.0
-optimum-onnx==0.0.3
-optimum==2.0.0
+optimum-intel==1.27.0
+optimum-onnx==0.1.0
+optimum==2.1.0
 transformers==4.53.0
 lm_eval==0.4.8
 pillow==12.0.0
diff --git a/examples/llm_compression/torch/downstream_qat_with_nls/requirements.txt b/examples/llm_compression/torch/downstream_qat_with_nls/requirements.txt
index f24baf7ef09..ff91ffdc75c 100644
--- a/examples/llm_compression/torch/downstream_qat_with_nls/requirements.txt
+++ b/examples/llm_compression/torch/downstream_qat_with_nls/requirements.txt
@@ -2,9 +2,9 @@ tensorboard==2.13.0
 torch==2.9.0
 numpy>=1.23.5,<2
 openvino==2025.4.1
-optimum-intel==1.26.0
-optimum-onnx==0.0.3
-optimum==2.0.0
+optimum-intel==1.27.0
+optimum-onnx==0.1.0
+optimum==2.1.0
 transformers==4.53.0
 lm_eval==0.4.8
 pillow==12.0.0
diff --git a/examples/llm_compression/torch_fx/tiny_llama/requirements.txt b/examples/llm_compression/torch_fx/tiny_llama/requirements.txt
index 1bd9b9e58a3..355d1f3738c 100644
--- a/examples/llm_compression/torch_fx/tiny_llama/requirements.txt
+++ b/examples/llm_compression/torch_fx/tiny_llama/requirements.txt
@@ -1,6 +1,6 @@
 transformers==4.53.0
 datasets==4.4.1
 openvino==2025.4.1
-optimum==2.0.0
+optimum==2.1.0
 torch==2.9.0
 torchvision==0.24.0
diff --git a/tests/openvino/requirements.txt b/tests/openvino/requirements.txt
index 2e9296f9229..829d5e3cc06 100644
--- a/tests/openvino/requirements.txt
+++ b/tests/openvino/requirements.txt
@@ -16,6 +16,6 @@ timm==0.9.2
 efficientnet_pytorch==0.7.1
 datasets
 transformers==4.53.0
-optimum-intel==1.26.0
-optimum-onnx==0.0.3
-optimum==2.0.0
+optimum-intel==1.27.0
+optimum-onnx==0.1.0
+optimum==2.1.0
diff --git a/tests/post_training/requirements.txt b/tests/post_training/requirements.txt
index 44242158c47..07822dec02c 100644
--- a/tests/post_training/requirements.txt
+++ b/tests/post_training/requirements.txt
@@ -14,9 +14,9 @@
 pytest-split
 librosa==0.10.0
 memory-profiler==0.61.0
-optimum-intel==1.26.0
-optimum-onnx==0.0.3
-optimum==2.0.0
+optimum-intel==1.27.0
+optimum-onnx==0.1.0
+optimum==2.1.0
 scikit-learn>=1.2.2,<=1.5.0
 soundfile==0.12.1
 tensorboard==2.13.0
diff --git a/tests/torch/requirements.txt b/tests/torch/requirements.txt
index bb13771f60a..3217a090021 100644
--- a/tests/torch/requirements.txt
+++ b/tests/torch/requirements.txt
@@ -16,8 +16,8 @@
 efficientnet_pytorch==0.7.1
 transformers==4.53.0
 sentence-transformers==4.1.0
-optimum-intel==1.26.0
-optimum-onnx==0.0.3
-optimum==2.0.0
+optimum-intel==1.27.0
+optimum-onnx==0.1.0
+optimum==2.1.0
 accelerate==1.9.0
 fastdownload==0.0.7