|
8 | 8 |
|
9 | 9 | from pathlib import Path |
10 | 10 | import os |
11 | | -import sys |
12 | | -import subprocess |
13 | | -import importlib |
14 | | -import shutil |
15 | | -import inspect |
16 | | -import random |
17 | | -import string |
18 | 11 |
|
19 | | -import numpy as np |
20 | | - |
21 | | -import clawpack.clawutil.runclaw as runclaw |
22 | | -import clawpack.clawutil.claw_git_status as claw_git_status |
23 | | -import clawpack.pyclaw.solution as solution |
24 | | -import clawpack.pyclaw.gauges as gauges |
| 12 | +import clawpack.clawutil.test as test |
25 | 13 |
|
26 | 14 | # Clean library files whenever this module is used |
27 | 15 | if "CLAW" in os.environ: |
|
36 | 24 | for lib_path in (CLAW / "classic" / "src" / "3d").glob("*.o"): |
37 | 25 | lib_path.unlink() |
38 | 26 |
|
class ClawpackClassicTestRunner(test.ClawpackTestRunner):
    """Regression-test runner for Clawpack Classic examples.

    All of the generic test machinery — loading ``setrun.py``, writing run
    data, building the Fortran executable, running the code, and comparing
    frame/gauge output against stored regression data — lives in the shared
    base class ``clawpack.clawutil.test.ClawpackTestRunner``.  This subclass
    exists so Classic tests have a concrete runner type to instantiate;
    Classic-specific overrides can be added here as needed.
    """

    def __init__(self, path: Path):
        """Initialize the runner.

        :Input:
         - *path* (Path) - Temporary working directory in which the test's
           data files, executable, and output will be placed.  All setup is
           delegated to the base-class initializer.
        """
        # Python 3 zero-argument super(); equivalent to the legacy
        # super(ClawpackClassicTestRunner, self) form but not fragile
        # under class renames.
        super().__init__(path)