Skip to content

Commit 66db83e

Browse files
committed
Renamed JobManager to JobSubmitter and ResultRetriever to JobRetriever, and updated examples.
1 parent 277eaea commit 66db83e

File tree

7 files changed

+60
-52
lines changed

7 files changed

+60
-52
lines changed

README.md

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ Note that specifying `permissions_given=True` assumes that you have already set
6565

6666
## Submit a job
6767

68-
To submit a job to the LHC@home project, you can use the `JobManager` class from the `xboinc` package. With `JobManager`, you can create a study, which will contain a set of jobs to be executed. Ideally, you should create a study for a single line to track, with multiple jobs for spreading the number of particles to track. However, it is also possible to create a study with multiple lines.
68+
To submit a job to the LHC@home project, you can use the `JobSubmitter` class from the `xboinc` package. With `JobSubmitter`, you can create a study, which will contain a set of jobs to be executed. Ideally, you should create a study for a single line to track, with multiple jobs for spreading the number of particles to track. However, it is also possible to create a study with multiple lines.
6969

7070
Here is an example of how to submit a job:
7171

@@ -77,7 +77,7 @@ import xboinc as xb
7777
line = xt.Line.from_json("path/to/your/line.json")
7878

7979
# create a job manager
80-
job_manager = xb.JobManager(
80+
job_manager = xb.JobSubmitter(
8181
user="mycernshortname",
8282
study_name="a_relevant_study_name",
8383
line=line,
@@ -109,12 +109,12 @@ Note that the jobs will be executed on a single CPU core from a volunteer comput
109109

110110
## Retrieve the results
111111

112-
When the jobs are completed, the Xboinc server will store the results in your allocated folder in compressed tar files. You can decompress and explore them by using the `ResultRetriever` class from the `xboinc` package. The simplest way to do that is:
112+
When the jobs are completed, the Xboinc server will store the results in your allocated folder in compressed tar files. You can decompress and explore them by using the `JobRetriever` class from the `xboinc` package. The simplest way to do that is:
113113

114114
```python
115115
import xboinc as xb
116116

117-
for job_name, result_particles in xb.ResultRetriever.iterate("mycernshortname", "a_relevant_study_name", dev_server=True):
117+
for job_name, result_particles in xb.JobRetriever.iterate("mycernshortname", "a_relevant_study_name", dev_server=True):
118118
print(f"Job {job_name} completed with particles: {result_particles.to_dict()}")
119119

120120
```

examples/results.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,13 @@
11
# copyright ############################### #
22
# This file is part of the Xboinc Package. #
3-
# Copyright (c) CERN, 2024. #
3+
# Copyright (c) CERN, 2025. #
44
########################################### #
55

66
import xboinc as xb
77

88
list_of_succeeded_jobs = []
99
user='sixtadm'
1010
study_name='example_study'
11-
for new_particles, jobinfo in xb.RetrieveJobs(user=user, study_name=study_name):
12-
print(jobinfo)
11+
for jobname, new_particles in xb.JobRetriever(user=user, study_name=study_name, dev_server=True):
12+
print(jobname)
1313
print(f"Particles: {new_particles.at_turn}")

examples/submission.py

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,9 @@
2525
y_norm=np.random.normal(0, 10, num_part_per_job*num_jobs),
2626
nemitt_x=3.5e-6, nemitt_y=3.5e-6)
2727

28+
# Do the submission
2829
study_name = "example_study"
29-
jobs = xb.SubmitJobs(user=user, study_name=study_name, line=line, dev_server=True)
30+
jobs = xb.JobSubmitter(user=user, study_name=study_name, line=line, dev_server=True)
3031
prev = time.time()
3132
for i in range(num_jobs):
3233
# select subgroup of particles
@@ -40,3 +41,10 @@
4041
now = time.time() ; print(f"{i+1}/{num_jobs} ({now-prev:.4}s)"); prev = now
4142
jobs.submit()
4243

44+
45+
# Alternatively, instead of manually looping over each job, you can use the `slice_and_add` method to create n jobs:
46+
study_name = "example_study_2"
47+
jobs = xb.JobSubmitter(user=user, study_name=study_name, line=line, dev_server=True)
48+
jobs.slice_and_add(base_job_name='job', num_turns=num_turns, particles=all_part,
49+
checkpoint_every=checkpoint_every)
50+
jobs.submit()

tests/test_03_submission_and_retrieval.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -165,7 +165,7 @@ def submit_study_jobs(
165165
line: xt.Line,
166166
x_sigma: float = 0.01,
167167
y_sigma: float = 0.003,
168-
) -> xb.JobManager:
168+
) -> xb.JobSubmitter:
169169
"""
170170
Submit a complete study with multiple jobs.
171171
@@ -182,10 +182,10 @@ def submit_study_jobs(
182182
183183
Returns
184184
-------
185-
xb.JobManager
185+
xb.JobSubmitter
186186
The job manager used for submission.
187187
"""
188-
jobs = xb.JobManager(user=user, study_name=study_name, line=line, dev_server=True)
188+
jobs = xb.JobSubmitter(user=user, study_name=study_name, line=line, dev_server=True)
189189

190190
for i in range(TestConfig.num_jobs()):
191191
particles = create_random_particles(
@@ -299,7 +299,7 @@ def test_submission(monkeypatch, registered_user, clean_directories):
299299
)
300300

301301
# Test that adding jobs after submission fails
302-
jobs = xb.JobManager(
302+
jobs = xb.JobSubmitter(
303303
registered_user, f"{TestConfig.STUDY_NAME}_temp", line=line, dev_server=True
304304
)
305305
jobs.submit()
@@ -324,7 +324,7 @@ def test_submission(monkeypatch, registered_user, clean_directories):
324324

325325
# Test that production server raises NotImplementedError
326326
with pytest.raises(NotImplementedError):
327-
xb.JobManager(registered_user, f"{TestConfig.STUDY_NAME}_3", line=line)
327+
xb.JobSubmitter(registered_user, f"{TestConfig.STUDY_NAME}_3", line=line)
328328

329329
# Validate submitted tar files
330330
tar_files = list(
@@ -358,7 +358,7 @@ def test_retrieval(registered_user):
358358
shutil.copy(tar_file, output_dir)
359359

360360
# Iterate through jobs and validate results
361-
for _, result_particles in xb.ResultRetriever.iterate(
361+
for _, result_particles in xb.JobRetriever.iterate(
362362
"testuser", "example_study_fourth", dev_server=True
363363
):
364364
assert len(result_particles.x) == 100

xboinc/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,14 +7,14 @@
77
from .executable import generate_executable, generate_executable_source
88
from .general import __version__, __xsuite__versions__, _pkg_root
99
from .register import deregister, register
10-
from .retrieve import ResultRetriever
10+
from .retrieve import JobRetriever
1111
from .simulation_io import (
1212
XbInput,
1313
XbState,
1414
app_version,
1515
app_version_int,
1616
assert_versions,
1717
)
18-
from .submit import JobManager
18+
from .submit import JobSubmitter
1919

2020
_skip_xsuite_version_check = False

xboinc/retrieve.py

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
from .user import get_directory, get_domain
1818

1919

20-
class ResultRetriever:
20+
class JobRetriever:
2121
"""
2222
Class to retrieve and manage results from Xboinc simulations.
2323
@@ -40,7 +40,7 @@ class ResultRetriever:
4040
4141
Examples
4242
--------
43-
>>> retriever = ResultRetriever('myuser', dev_server=True)
43+
>>> retriever = JobRetriever('myuser', dev_server=True)
4444
>>> studies = retriever.get_study_list()
4545
>>> for job_name, particles in retriever.iterate_results('my_study'):
4646
... # Process particles data
@@ -50,7 +50,7 @@ class ResultRetriever:
5050
def _untar_results(self, path: FsPath, silent: bool = False):
5151
"""
5252
Untar all compressed result files in the given path.
53-
53+
5454
Parameters
5555
----------
5656
path : FsPath
@@ -116,7 +116,7 @@ def _index_results(self, path: FsPath, silent: bool = False) -> pd.DataFrame:
116116

117117
def __init__(self, user, dev_server=False, silent=False):
118118
"""
119-
Initialize the ResultRetriever for a specific user.
119+
Initialize the JobRetriever for a specific user.
120120
121121
Parameters
122122
----------
@@ -137,7 +137,7 @@ def __init__(self, user, dev_server=False, silent=False):
137137
138138
Examples
139139
--------
140-
>>> retriever = ResultRetriever('myuser', dev_server=True, silent=True)
140+
>>> retriever = JobRetriever('myuser', dev_server=True, silent=True)
141141
>>> overview = retriever.get_overview()
142142
"""
143143

@@ -314,7 +314,7 @@ def iterate_results(self, study_name):
314314
315315
Examples
316316
--------
317-
>>> retriever = ResultRetriever('myuser', dev_server=True)
317+
>>> retriever = JobRetriever('myuser', dev_server=True)
318318
>>> for job_name, particles in retriever.iterate_results('my_study'):
319319
... print(f"Processing job: {job_name}")
320320
... print(f"Number of particles: {len(particles.x)}")
@@ -375,7 +375,7 @@ def iterate(cls, user, study_name, dev_server=False, silent=False):
375375
"""
376376
Class method to directly iterate over results for a user and study.
377377
378-
Convenient method that creates a ResultRetriever instance and immediately
378+
Convenient method that creates a JobRetriever instance and immediately
379379
starts iterating over results without requiring explicit instantiation.
380380
381381
Parameters
@@ -396,7 +396,7 @@ def iterate(cls, user, study_name, dev_server=False, silent=False):
396396
397397
Examples
398398
--------
399-
>>> for job_name, particles in ResultRetriever.iterate('myuser', 'my_study', dev_server=True):
399+
>>> for job_name, particles in JobRetriever.iterate('myuser', 'my_study', dev_server=True):
400400
... # Process particles data
401401
... pass
402402
"""
@@ -424,7 +424,7 @@ def overview(cls, user, dev_server=False, silent=False):
424424
425425
Examples
426426
--------
427-
>>> overview_df = ResultRetriever.overview('myuser', dev_server=True)
427+
>>> overview_df = JobRetriever.overview('myuser', dev_server=True)
428428
>>> print(overview_df.groupby('study_name').size())
429429
"""
430430
instance = cls(user, dev_server=dev_server, silent=silent)
@@ -456,7 +456,7 @@ def status(cls, user, study_name, dev_server=False, silent=False, verbose=False)
456456
457457
Examples
458458
--------
459-
>>> available, missing = ResultRetriever.status('myuser', 'my_study', dev_server=True)
459+
>>> available, missing = JobRetriever.status('myuser', 'my_study', dev_server=True)
460460
>>> print(f"Available jobs: {len(available)}, Missing jobs: {len(missing)}")
461461
"""
462462
instance = cls(user, dev_server=dev_server, silent=silent)
@@ -483,7 +483,7 @@ def study_list(cls, user, dev_server=False, silent=False):
483483
484484
Examples
485485
--------
486-
>>> studies = ResultRetriever.study_list('myuser', dev_server=True)
486+
>>> studies = JobRetriever.study_list('myuser', dev_server=True)
487487
>>> print(studies)
488488
"""
489489
instance = cls(user, dev_server=dev_server, silent=silent)

0 commit comments

Comments
 (0)