@@ -55,6 +55,14 @@ def ssh():
     return SSH("testssh", host="reproman-test")


+@pytest.fixture(scope="module")
+def ssh_slurm():
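+    """Return an SSH resource to a test host with Slurm available."""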
+    skipif.no_ssh()
+    skipif.no_slurm()
+    from reproman.resource.ssh import SSH
+    return SSH("slurm-res", host="slurm")
+
+
 def test_orc_root_directory(shell):
     orc = orcs.PlainOrchestrator(shell, submission_type="local")
     assert orc.root_directory == op.expanduser("~/.reproman/run-root")
@@ -166,6 +174,49 @@ def container_dataset(tmpdir_factory):
     return ds


+@pytest.fixture()
+def check_orc_datalad(job_spec, dataset):
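+    """Return a function that runs an orchestrator and checks its results.
+
+    The function runs once from `job_spec` and then again from the spec
+    dumped by the first run, verifying the fetched output both times.
+    """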
+    def fn(resource, orc_class, sub_type):
+        dataset.repo.tag("start-pt")
+
+        def run_and_check(spec):
+            with chpwd(dataset.path):
+                orc = orc_class(resource,
+                                submission_type=sub_type, job_spec=spec)
+                orc.prepare_remote()
+                orc.submit()
+                orc.follow()
+
+                orc.fetch()
+                assert dataset.repo.file_has_content("out")
+                assert open("out").read() == "content\nmore\n"
+                return orc
+
+        orc = run_and_check(job_spec)
+
+        # Perform another run based on the dumped job spec from the first.
+        assert dataset.repo.get_active_branch() == "master"
+        metadir = op.relpath(orc.meta_directory, orc.working_directory)
+        with open(op.join(dataset.path, metadir, "spec.yaml")) as f:
+            dumped_spec = yaml.safe_load(f)
+        assert "_reproman_version" in dumped_spec
+        assert "_spec_version" in dumped_spec
+        if orc.name == "datalad-local-run":
+            # Our reproman-based copying of data isn't (yet) OK with
+            # data files that already exist.
+            dumped_spec["inputs"] = []
+        # FIXME: Use exposed method once available.
+        dataset.repo._git_custom_command(
+            [], ["git", "reset", "--hard", "start-pt"])
+        if dataset.repo.dirty:
+            # The submitter log file is ignored (currently only relevant for
+            # condor; see b9277ebc0 for more details). Add the directory to
+            # get to a clean state.
+            dataset.add(".reproman")
+        orc = run_and_check(dumped_spec)
+    return fn
+
+
 @pytest.mark.integration
 @pytest.mark.parametrize("orc_class",
                          [orcs.DataladLocalRunOrchestrator,
@@ -175,43 +226,13 @@ def container_dataset(tmpdir_factory):
                          ["local",
                           pytest.param("condor", marks=mark.skipif_no_condor)],
                          ids=["sub:local", "sub:condor"])
-def test_orc_datalad_run(job_spec, dataset, shell, orc_class, sub_type):
-    dataset.repo.tag("start-pt")
-
-    def run_and_check(spec):
-        with chpwd(dataset.path):
-            orc = orc_class(shell, submission_type=sub_type, job_spec=spec)
-            orc.prepare_remote()
-            orc.submit()
-            orc.follow()
-
-            orc.fetch()
-            assert dataset.repo.file_has_content("out")
-            assert open("out").read() == "content\nmore\n"
-            return orc
+def test_orc_datalad_run(check_orc_datalad, shell, orc_class, sub_type):
+    check_orc_datalad(shell, orc_class, sub_type)

-    orc = run_and_check(job_spec)

-    # Perform another run based on the dumped job spec from the first.
-    assert dataset.repo.get_active_branch() == "master"
-    metadir = op.relpath(orc.meta_directory, orc.working_directory)
-    with open(op.join(dataset.path, metadir, "spec.yaml")) as f:
-        dumped_spec = yaml.safe_load(f)
-    assert "_reproman_version" in dumped_spec
-    assert "_spec_version" in dumped_spec
-    if orc.name == "datalad-local-run":
-        # Our reproman-based copying of data doesn't isn't (yet) OK with data
-        # files that already exist.
-        dumped_spec["inputs"] = []
-    # FIXME: Use exposed method once available.
-    dataset.repo._git_custom_command(
-        [], ["git", "reset", "--hard", "start-pt"])
-    if dataset.repo.dirty:
-        # The submitter log file is ignored (currently only relevant for
-        # condor; see b9277ebc0 for more details). Add the directory to get to
-        # a clean state.
-        dataset.add(".reproman")
-    orc = run_and_check(dumped_spec)
+@pytest.mark.integration
+def test_orc_datalad_slurm(check_orc_datalad, ssh_slurm):
+    check_orc_datalad(ssh_slurm, orcs.DataladLocalRunOrchestrator, "slurm")


 @pytest.mark.integration
@@ -574,6 +595,41 @@ def test_dataset_as_dict(shell, dataset, job_spec):
     assert "_dataset_id" in d


+@pytest.fixture()
+def check_orc_datalad_concurrent(job_spec, dataset):
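+    """Return a function that runs concurrent jobs and checks the results.
+
+    Batch parameters fan the command out over two jobs, and each job's
+    fetched output is verified.
+    """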
+    def fn(ssh, orc_class, sub_type):
+        names = ["paul", "rosa"]
+
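+        # Each record in _resolved_batch_parameters yields one job; the
+        # {p[name]} placeholders are filled in from that record.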
+        job_spec["inputs"] = ["{p[name]}.in"]
+        job_spec["outputs"] = ["{p[name]}.out"]
+        job_spec["_resolved_command_str"] = "sh -c 'cat {inputs} {inputs} >{outputs}'"
+        job_spec["_resolved_batch_parameters"] = [{"name": n} for n in names]
+
+        in_files = [n + ".in" for n in names]
+        for fname in in_files:
+            with open(op.join(dataset.path, fname), "w") as fh:
+                fh.write(fname[0])
+        dataset.save(path=in_files)
+
+        with chpwd(dataset.path):
+            orc = orc_class(ssh, submission_type=sub_type, job_spec=job_spec)
+            orc.prepare_remote()
+            orc.submit()
+            orc.follow()
+            # Just make sure each fetch() seems to have wired up
+            # on_remote_finish. test_run.py tests the actual --follow actions.
+            remote_fn = MagicMock()
+            orc.fetch(on_remote_finish=remote_fn)
+            remote_fn.assert_called_once_with(orc.resource, [])
+
+            out_files = [n + ".out" for n in names]
+            for ofile in out_files:
+                assert dataset.repo.file_has_content(ofile)
+                with open(ofile) as ofh:
+                    assert ofh.read() == ofile[0] * 2
+    return fn
+
+
 @pytest.mark.integration
 @pytest.mark.parametrize("orc_class",
                          [orcs.DataladLocalRunOrchestrator,
@@ -584,33 +640,13 @@ def test_dataset_as_dict(shell, dataset, job_spec):
                          ["local",
                           pytest.param("condor", marks=mark.skipif_no_condor)],
                          ids=["sub:local", "sub:condor"])
-def test_orc_datalad_concurrent(job_spec, dataset, ssh, orc_class, sub_type):
-    names = ["paul", "rosa"]
+def test_orc_datalad_concurrent(check_orc_datalad_concurrent,
+                                ssh, orc_class, sub_type):
+    check_orc_datalad_concurrent(ssh, orc_class, sub_type)

-    job_spec["inputs"] = ["{p[name]}.in"]
-    job_spec["outputs"] = ["{p[name]}.out"]
-    job_spec["_resolved_command_str"] = "sh -c 'cat {inputs} {inputs} >{outputs}'"
-    job_spec["_resolved_batch_parameters"] = [{"name": n} for n in names]

-    in_files = [n + ".in" for n in names]
-    for fname in in_files:
-        with open(op.join(dataset.path, fname), "w") as fh:
-            fh.write(fname[0])
-    dataset.save(path=in_files)
-
-    with chpwd(dataset.path):
-        orc = orc_class(ssh, submission_type=sub_type, job_spec=job_spec)
-        orc.prepare_remote()
-        orc.submit()
-        orc.follow()
-        # Just make sure each fetch() seems to have wired up on_remote_finish.
-        # test_run.py tests the actual --follow actions.
-        remote_fn = MagicMock()
-        orc.fetch(on_remote_finish=remote_fn)
-        remote_fn.assert_called_once_with(orc.resource, [])
-
-        out_files = [n + ".out" for n in names]
-        for ofile in out_files:
-            assert dataset.repo.file_has_content(ofile)
-            with open(ofile) as ofh:
-                assert ofh.read() == ofile[0] * 2
+@pytest.mark.integration
+def test_orc_datalad_concurrent_slurm(check_orc_datalad_concurrent, ssh_slurm):
+    check_orc_datalad_concurrent(ssh_slurm,
+                                 orcs.DataladLocalRunOrchestrator,
+                                 "slurm")