Skip to content

Commit 942d8f4

Browse files
Copilot and vfdev-5 committed
Fix test failures: remove cuda parameter from test GradScaler instantiations and revert to correct PyTorch version requirements
Co-authored-by: vfdev-5 <[email protected]>
1 parent c430571 commit 942d8f4

File tree

4 files changed

+7
-7
lines changed

4 files changed

+7
-7
lines changed

examples/references/classification/imagenet/main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
try:
99
from torch.amp import autocast, GradScaler
1010
except ImportError:
11-
raise RuntimeError("Please, use recent PyTorch version, e.g. >=2.3.1")
11+
raise RuntimeError("Please, use recent PyTorch version, e.g. >=1.12.0")
1212

1313
import dataflow as data
1414
import utils

examples/references/segmentation/pascal_voc2012/main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
try:
99
from torch.amp import autocast, GradScaler
1010
except ImportError:
11-
raise RuntimeError("Please, use recent PyTorch version, e.g. >=2.3.1")
11+
raise RuntimeError("Please, use recent PyTorch version, e.g. >=1.12.0")
1212

1313
import dataflow as data
1414
import utils

ignite/engine/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -187,7 +187,7 @@ def supervised_training_step_amp(
187187
try:
188188
from torch.amp import autocast, GradScaler
189189
except ImportError:
190-
raise ImportError("Please install torch>=2.3.1 to use amp_mode='amp'.")
190+
raise ImportError("Please install torch>=1.12.0 to use amp_mode='amp'.")
191191

192192
if gradient_accumulation_steps <= 0:
193193
raise ValueError(
@@ -412,7 +412,7 @@ def _check_arg(
412412
try:
413413
from torch.amp import GradScaler
414414
except ImportError:
415-
raise ImportError("Please install torch>=2.3.1 to use scaler argument.")
415+
raise ImportError("Please install torch>=1.6.0 to use scaler argument.")
416416
scaler = GradScaler(enabled=True)
417417

418418
if on_tpu:

tests/ignite/engine/test_create_supervised.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -170,7 +170,7 @@ def _():
170170
@pytest.mark.skipif(Version(torch.__version__) < Version("1.12.0"), reason="Skip if < 1.12.0")
171171
def test_create_supervised_training_scalar_assignment():
172172
with mock.patch("ignite.engine._check_arg") as check_arg_mock:
173-
check_arg_mock.return_value = None, torch.amp.GradScaler('cuda', enabled=False)
173+
check_arg_mock.return_value = None, torch.amp.GradScaler(enabled=False)
174174
trainer, _ = _default_create_supervised_trainer(model_device="cpu", trainer_device="cpu", scaler=True)
175175
assert hasattr(trainer.state, "scaler")
176176
assert isinstance(trainer.state.scaler, torch.amp.GradScaler)
@@ -462,7 +462,7 @@ def test_create_supervised_trainer_amp_error(mock_torch_cuda_amp_module):
462462

463463
@pytest.mark.skipif(Version(torch.__version__) < Version("1.12.0"), reason="Skip if < 1.12.0")
464464
def test_create_supervised_trainer_scaler_not_amp():
465-
scaler = torch.amp.GradScaler('cuda', enabled=torch.cuda.is_available())
465+
scaler = torch.amp.GradScaler(enabled=torch.cuda.is_available())
466466

467467
with pytest.raises(ValueError, match=f"scaler argument is {scaler}, but amp_mode is None."):
468468
_test_create_supervised_trainer(amp_mode=None, scaler=scaler)
@@ -540,7 +540,7 @@ def test_create_supervised_trainer_on_cuda_amp_scaler():
540540
_test_create_mocked_supervised_trainer(
541541
model_device=model_device, trainer_device=trainer_device, amp_mode="amp", scaler=True
542542
)
543-
scaler = torch.amp.GradScaler('cuda', enabled=torch.cuda.is_available())
543+
scaler = torch.amp.GradScaler(enabled=torch.cuda.is_available())
544544
_test_create_supervised_trainer(
545545
gradient_accumulation_steps=1,
546546
model_device=model_device,

0 commit comments

Comments (0)