 import yaml
 from lightning_utilities import compare_version
 from lightning_utilities.test.warning import no_warning_call
-from packaging.version import Version
 from tensorboard.backend.event_processing import event_accumulator
 from tensorboard.plugins.hparams.plugin_data_pb2 import HParamsPluginData
 from torch.optim import SGD
@@ -65,14 +64,6 @@ def lazy_instance(*args, **kwargs):
         return None
 
 
-_xfail_python_ge_3_11_9 = pytest.mark.xfail(
-    # https://github.com/omni-us/jsonargparse/issues/484
-    Version(f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}") >= Version("3.11.9"),
-    strict=False,
-    reason="jsonargparse + Python 3.11.9 compatibility issue",
-)
-
-
 @contextmanager
 def mock_subclasses(baseclass, *subclasses):
     """Mocks baseclass so that it only has the given child subclasses."""
@@ -356,7 +347,6 @@ def test_save_to_log_dir_false_error():
         )
 
 
-@_xfail_python_ge_3_11_9
 def test_lightning_cli_logger_save_config(cleandir):
     class LoggerSaveConfigCallback(SaveConfigCallback):
         def __init__(self, *args, **kwargs) -> None:
@@ -753,7 +743,6 @@ def add_arguments_to_parser(self, parser):
     assert cli.trainer.lr_scheduler_configs[0].scheduler.step_size == 50
 
 
-@_xfail_python_ge_3_11_9
 @RunIf(min_torch="2.2")
 @pytest.mark.parametrize("use_generic_base_class", [False, True])
 def test_lightning_cli_optimizers_and_lr_scheduler_with_link_to(use_generic_base_class):
@@ -801,7 +790,6 @@ def __init__(self, optim1: dict, optim2: dict, scheduler: dict):
     assert isinstance(cli.model.scheduler, torch.optim.lr_scheduler.ExponentialLR)
 
 
-@_xfail_python_ge_3_11_9
 @RunIf(min_torch="2.2")
 def test_lightning_cli_optimizers_and_lr_scheduler_with_callable_type():
     class TestModel(BoringModel):
@@ -1118,7 +1106,6 @@ def __init__(self, foo, bar=5):
         self.bar = bar
 
 
-@_xfail_python_ge_3_11_9
 def test_lightning_cli_model_short_arguments():
     with (
         mock.patch("sys.argv", ["any.py", "fit", "--model=BoringModel"]),
@@ -1146,7 +1133,6 @@ def __init__(self, foo, bar=5):
         self.bar = bar
 
 
-@_xfail_python_ge_3_11_9
 def test_lightning_cli_datamodule_short_arguments():
     # with set model
     with (
@@ -1200,7 +1186,6 @@ def test_lightning_cli_datamodule_short_arguments():
         assert cli.parser.groups["data"].group_class is BoringDataModule
 
 
-@_xfail_python_ge_3_11_9
 @pytest.mark.parametrize("use_class_path_callbacks", [False, True])
 def test_callbacks_append(use_class_path_callbacks):
     """This test validates registries are used when simplified command line are being used."""
@@ -1244,7 +1229,6 @@ def test_callbacks_append(use_class_path_callbacks):
     assert all(t in callback_types for t in expected)
 
 
-@_xfail_python_ge_3_11_9
 def test_optimizers_and_lr_schedulers_reload(cleandir):
     base = ["any.py", "--trainer.max_epochs=1"]
     input = base + [
@@ -1276,7 +1260,6 @@ def test_optimizers_and_lr_schedulers_reload(cleandir):
         LightningCLI(BoringModel, run=False)
 
 
-@_xfail_python_ge_3_11_9
 def test_optimizers_and_lr_schedulers_add_arguments_to_parser_implemented_reload(cleandir):
     class TestLightningCLI(LightningCLI):
         def __init__(self, *args):
@@ -1540,7 +1523,6 @@ def test_cli_help_message():
     assert "Implements Adam" in shorthand_help.getvalue()
 
 
-@_xfail_python_ge_3_11_9
 def test_cli_reducelronplateau():
     with mock.patch(
         "sys.argv", ["any.py", "--optimizer=Adam", "--lr_scheduler=ReduceLROnPlateau", "--lr_scheduler.monitor=foo"]
@@ -1551,7 +1533,6 @@ def test_cli_reducelronplateau():
     assert config["lr_scheduler"]["scheduler"].monitor == "foo"
 
 
-@_xfail_python_ge_3_11_9
 def test_cli_configureoptimizers_can_be_overridden():
     class MyCLI(LightningCLI):
         def __init__(self):
@@ -1596,7 +1577,6 @@ def __init__(self, activation: torch.nn.Module = lazy_instance(torch.nn.LeakyReL
     assert cli.model.activation is not model.activation
 
 
-@_xfail_python_ge_3_11_9
 def test_ddpstrategy_instantiation_and_find_unused_parameters(mps_count_0):
     strategy_default = lazy_instance(DDPStrategy, find_unused_parameters=True)
     with mock.patch("sys.argv", ["any.py", "--trainer.strategy.process_group_backend=group"]):
@@ -1612,7 +1592,6 @@ def test_ddpstrategy_instantiation_and_find_unused_parameters(mps_count_0):
     assert strategy_default is not cli.config_init.trainer.strategy
 
 
-@_xfail_python_ge_3_11_9
 def test_cli_logger_shorthand():
     with mock.patch("sys.argv", ["any.py"]):
         cli = LightningCLI(TestModel, run=False, trainer_defaults={"logger": False})
@@ -1643,7 +1622,6 @@ def _test_logger_init_args(logger_name, init, unresolved=None):
         assert data["dict_kwargs"] == unresolved
 
 
-@_xfail_python_ge_3_11_9
 def test_comet_logger_init_args():
     _test_logger_init_args(
         "CometLogger",
@@ -1664,7 +1642,6 @@ def test_comet_logger_init_args():
     strict=False,
     reason="TypeError on Windows when parsing",
 )
-@_xfail_python_ge_3_11_9
 def test_neptune_logger_init_args():
     _test_logger_init_args(
         "NeptuneLogger",
@@ -1673,7 +1650,6 @@ def test_neptune_logger_init_args():
     )
 
 
-@_xfail_python_ge_3_11_9
 def test_tensorboard_logger_init_args():
     _test_logger_init_args(
         "TensorBoardLogger",
@@ -1685,7 +1661,6 @@ def test_tensorboard_logger_init_args():
     )
 
 
-@_xfail_python_ge_3_11_9
 def test_wandb_logger_init_args():
     _test_logger_init_args(
         "WandbLogger",
@@ -1770,7 +1745,6 @@ def __init__(self, a_func: Callable = torch.nn.Softmax):
     assert "a_func: torch.nn.Softmax" in out.getvalue()
 
 
-@_xfail_python_ge_3_11_9
 def test_pytorch_profiler_init_args():
     from lightning.pytorch.profilers import Profiler, PyTorchProfiler
 