|
21 | 21 |
|
22 | 22 |
|
23 | 23 | def test_submission(): |
24 | | - num_turns = 10 |
| 24 | + num_turns = 100 |
25 | 25 | num_particles = 5000 |
| 26 | + checkpoint_every = 25 |
26 | 27 | line = xt.Line(elements=[ |
27 | | - xt.Drift(length=1.0), xt.Multipole(knl=[1e-6]), xt.Drift(length=1.0)]) |
| 28 | + xt.Drift(length=1.0), xt.Multipole(knl=[1e-4]), xt.Drift(length=1.0)]) |
28 | 29 |
|
29 | 30 | particles_per_sub = 1000 |
30 | 31 | num_jobs = int(num_particles/particles_per_sub) |
31 | 32 |
|
| 33 | + # Clean up potential leftovers from a failed test |
| 34 | + input_folder = xb.user.get_folder(user) / 'input' |
| 35 | + output_folder = xb.user.get_folder(user) / 'output' |
| 36 | + for folder in input_folder.glob('*/'): |
| 37 | + shutil.rmtree(folder) |
| 38 | + for folder in output_folder.glob('*/'): |
| 39 | + shutil.rmtree(folder) |
| 40 | + for file in input_folder.glob('*'): |
| 41 | + file.unlink() |
| 42 | + for file in output_folder.glob('*'): |
| 43 | + file.unlink() |
| 44 | + |
32 | 45 | studyname = "test_study_1" |
33 | 46 | with xb.SubmitJobs(user=user, study=studyname) as job: |
34 | 47 | for i in range(num_jobs): |
35 | 48 | particles = xp.Particles(x=np.random.normal(0, 0.01, particles_per_sub), |
36 | 49 | y=np.random.normal(0, 0.003, particles_per_sub)) |
37 | 50 | job.add(job_name=f'{studyname}_{i}', num_turns=num_turns, line=line, particles=particles, |
38 | | - checkpoint_every=2) |
| 51 | + checkpoint_every=checkpoint_every) |
39 | 52 |
|
40 | 53 | time.sleep(5) |
41 | 54 | studyname = "test_study_2" |
42 | 55 | with xb.SubmitJobs(user=user, study=studyname) as job: |
43 | 56 | for i in range(num_jobs): |
44 | | - particles = xp.Particles(x=np.random.normal(0, 0.7, particles_per_sub), |
| 57 | + particles = xp.Particles(x=np.random.normal(0, 4.7, particles_per_sub), |
45 | 58 | y=np.random.normal(0, 0.39, particles_per_sub)) |
46 | 59 | job.add(job_name=f'{studyname}_{i}', num_turns=num_turns, line=line, particles=particles, |
47 | | - checkpoint_every=2) |
| 60 | + checkpoint_every=checkpoint_every) |
48 | 61 |
|
49 | 62 | now = pd.Timestamp.now().timestamp() |
50 | 63 | tarfiles = list(Path(xb.user.get_folder(user) / 'input').glob(f'{studyname}__*')) |
@@ -108,17 +121,33 @@ def test_running(): |
108 | 121 | # Clean folders |
109 | 122 | for folder in input_folder.glob('*/'): |
110 | 123 | folder.rmdir() |
111 | | -# shutil.rmtree(folder) |
112 | 124 |
|
113 | 125 |
|
114 | 126 | def test_retrieval(): |
115 | 127 | for studyname in ['test_study_1', 'test_study_2']: |
116 | 128 | num_jobs = 0 |
| 129 | + x_mean_prev = 0 |
| 130 | + x_std_prev = 0 |
| 131 | + y_mean_prev = 0 |
| 132 | + y_std_prev = 0 |
117 | 133 | for job, particles in xb.RetrieveJobs(user=user, study=studyname): |
118 | 134 | assert job['user'] == user |
119 | 135 | assert job['study'] == studyname |
120 | 136 | num_jobs += 1 |
121 | | - surv = len(particles.state > 0) |
122 | | - print(f"Job {job['job_name']} : {surv} particles survived.") |
| 137 | + x_mean = np.mean(particles.x) |
| 138 | + x_std = np.std(particles.x) |
| 139 | + y_mean = np.mean(particles.y) |
| 140 | + y_std = np.std(particles.y) |
| 141 | + assert not (np.isclose(x_mean, x_mean_prev) |
| 142 | + and np.isclose(x_std, x_std_prev) |
| 143 | + and np.isclose(y_mean, y_mean_prev) |
| 144 | + and np.isclose(y_std, y_std_prev)) |
| 145 | + x_mean_prev = x_mean |
| 146 | + x_std_prev = x_std |
| 147 | + y_mean_prev = y_mean |
| 148 | + y_std_prev = y_std |
| 149 | + print(f"Job {job['job_name']} : x = {x_mean:.4} +- {x_std:.4} "\ |
| 150 | + + f"y = {y_mean:.4} +- {y_std:.4}") |
| 151 | + |
123 | 152 | assert num_jobs == 5 |
124 | 153 |
|
0 commit comments