import csv
import json
import os

import matplotlib.pyplot as plt
import numpy as np
import pytest

from rocketpy.simulation import MonteCarlo

plt.rcParams.update({"figure.max_open_warning": 0})


def test_stochastic_environment_create_object_with_wind_x(stochastic_environment):
"""Tests the stochastic environment object by checking if the wind velocity
can be generated properly. The goal is to check if the create_object()
    method is being called without any problems.

    Parameters
----------
stochastic_environment : StochasticEnvironment
The stochastic environment object, this is a pytest fixture.
"""
wind_x_at_1000m = []
for _ in range(10):
random_env = stochastic_environment.create_object()
wind_x_at_1000m.append(random_env.wind_velocity_x(1000))
assert np.isclose(np.mean(wind_x_at_1000m), 0, atol=0.1)
assert np.isclose(np.std(wind_x_at_1000m), 0, atol=0.1)


# TODO: add a new test for the special case of ensemble member


def test_stochastic_solid_motor_create_object_with_impulse(stochastic_solid_motor):
"""Tests the stochastic solid motor object by checking if the total impulse
can be generated properly. The goal is to check if the create_object()
    method is being called without any problems.

    Parameters
----------
stochastic_solid_motor : StochasticSolidMotor
The stochastic solid motor object, this is a pytest fixture.
"""
total_impulse = [
stochastic_solid_motor.create_object().total_impulse for _ in range(200)
]
assert np.isclose(np.mean(total_impulse), 6500, rtol=0.3)
assert np.isclose(np.std(total_impulse), 1000, rtol=0.4)


def test_stochastic_calisto_create_object_with_static_margin(stochastic_calisto):
"""Tests the stochastic calisto object by checking if the static margin
can be generated properly. The goal is to check if the create_object()
    method is being called without any problems.

    Parameters
----------
stochastic_calisto : StochasticCalisto
The stochastic calisto object, this is a pytest fixture.
"""
all_margins = []
for _ in range(10):
random_rocket = stochastic_calisto.create_object()
all_margins.append(random_rocket.static_margin(0))
assert np.isclose(np.mean(all_margins), 2.2625350013000434, rtol=0.15)
assert np.isclose(np.std(all_margins), 0.1, atol=0.2)


class MockMonteCarlo(MonteCarlo):
    """Mock MonteCarlo that carries pre-computed results, so that methods can
    be tested without running a real simulation."""

    def __init__(self):
        # pylint: disable=super-init-not-called
        # Simulate pre-calculated results.
# Example: a normal distribution centered on 100 for the apogee
self.results = {
"apogee": [98, 102, 100, 99, 101, 100, 97, 103],
"max_velocity": [250, 255, 245, 252, 248],
"single_point": [100],
"empty_attribute": [],
}
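

from collections import namedtuple  # local to the illustrative sketch below

# The tests below exercise `estimate_confidence_interval`. As a reference for
# the semantics they assume, here is a minimal sketch of a percentile
# bootstrap. It is NOT RocketPy's implementation; `_BootstrapCI` and
# `_reference_confidence_interval` are hypothetical names used only to
# illustrate the behavior the assertions rely on.
_BootstrapCI = namedtuple("_BootstrapCI", ["low", "high"])


def _reference_confidence_interval(
    data, statistic=np.mean, confidence_level=0.95, n_resamples=1000
):
    """Percentile bootstrap: resample with replacement, evaluate the statistic
    on each resample, and take the two tail quantiles."""
    data = np.asarray(data)
    rng = np.random.default_rng()
    # Shape (n_resamples, n): each row is one resample of the original data.
    resamples = rng.choice(data, size=(n_resamples, data.size), replace=True)
    stats = np.apply_along_axis(statistic, 1, resamples)
    alpha = 1 - confidence_level
    low, high = np.quantile(stats, [alpha / 2, 1 - alpha / 2])
    return _BootstrapCI(low, high)

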
def test_estimate_confidence_interval_contains_known_mean():
"""Checks that the confidence interval contains the known mean."""
mc = MockMonteCarlo()
ci = mc.estimate_confidence_interval("apogee", confidence_level=0.95)
assert ci.low < 100 < ci.high
assert ci.low < ci.high


def test_estimate_confidence_interval_supports_custom_statistic():
"""Checks that the statistic can be changed (e.g., standard deviation instead of mean)."""
mc = MockMonteCarlo()
ci_std = mc.estimate_confidence_interval("apogee", statistic=np.std)
assert ci_std.low > 0
assert ci_std.low < ci_std.high


def test_estimate_confidence_interval_raises_value_error_when_attribute_missing():
"""Checks that the code raises an error if the key does not exist."""
mc = MockMonteCarlo()
# Request a variable that does not exist ("altitude" is not in our mock)
with pytest.raises(ValueError) as excinfo:
mc.estimate_confidence_interval("altitude")
assert "not found in results" in str(excinfo.value)


def test_estimate_confidence_interval_increases_width_with_higher_confidence_level():
"""Checks that a higher confidence level yields a wider interval."""
mc = MockMonteCarlo()
ci_90 = mc.estimate_confidence_interval("apogee", confidence_level=0.90)
width_90 = ci_90.high - ci_90.low
ci_99 = mc.estimate_confidence_interval("apogee", confidence_level=0.99)
width_99 = ci_99.high - ci_99.low
# The more confident we want to be (99%), the wider the interval must be
assert width_99 >= width_90


def test_estimate_confidence_interval_raises_value_error_when_confidence_level_out_of_bounds():
"""Checks that validation fails if confidence_level is not strictly between 0 and 1."""
mc = MockMonteCarlo()
# Case 1: Value <= 0
with pytest.raises(ValueError, match="confidence_level must be between 0 and 1"):
mc.estimate_confidence_interval("apogee", confidence_level=0)
with pytest.raises(ValueError, match="confidence_level must be between 0 and 1"):
mc.estimate_confidence_interval("apogee", confidence_level=-0.5)
# Case 2: Value >= 1
with pytest.raises(ValueError, match="confidence_level must be between 0 and 1"):
mc.estimate_confidence_interval("apogee", confidence_level=1)
with pytest.raises(ValueError, match="confidence_level must be between 0 and 1"):
mc.estimate_confidence_interval("apogee", confidence_level=1.5)


def test_estimate_confidence_interval_raises_value_error_when_n_resamples_invalid():
"""Checks that validation fails if n_resamples is not a positive integer."""
mc = MockMonteCarlo()
# Case 1: Not an integer (e.g. float)
with pytest.raises(ValueError, match="n_resamples must be a positive integer"):
mc.estimate_confidence_interval("apogee", n_resamples=1000.5)
# Case 2: Zero or Negative
with pytest.raises(ValueError, match="n_resamples must be a positive integer"):
mc.estimate_confidence_interval("apogee", n_resamples=0)
with pytest.raises(ValueError, match="n_resamples must be a positive integer"):
mc.estimate_confidence_interval("apogee", n_resamples=-100)


def test_estimate_confidence_interval_raises_value_error_on_empty_data_list():
"""Checks behavior when the attribute exists but contains no data (empty list)."""
mc = MockMonteCarlo()
with pytest.raises(ValueError):
mc.estimate_confidence_interval("empty_attribute")


def test_estimate_confidence_interval_handles_single_data_point():
    """Checks behavior with only one data point: the bootstrap needs at least
    two values, so a ValueError is expected."""
    mc = MockMonteCarlo()
    with pytest.raises(ValueError):  # requires two or more values
        mc.estimate_confidence_interval("single_point", n_resamples=50)


def test_estimate_confidence_interval_raises_type_error_for_invalid_statistic():
"""Checks that passing a non-callable object (like a string/int) as statistic raises TypeError."""
mc = MockMonteCarlo()
with pytest.raises(TypeError):
mc.estimate_confidence_interval("apogee", statistic=1)
with pytest.raises(TypeError):
mc.estimate_confidence_interval("apogee", statistic="not_a_function")
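

# Cross-check sketch (hypothetical, not used by the tests above): SciPy's
# `scipy.stats.bootstrap` computes the same kind of interval, so a result from
# `estimate_confidence_interval` could be sanity-checked against it, assuming
# scipy is installed.
def _scipy_bootstrap_cross_check(data, confidence_level=0.95, n_resamples=1000):
    from scipy.stats import bootstrap  # local import: scipy is optional here

    res = bootstrap(
        (np.asarray(data),),  # scipy expects a sequence of samples
        np.mean,
        confidence_level=confidence_level,
        n_resamples=n_resamples,
        method="percentile",
    )
    return res.confidence_interval  # namedtuple with .low and .high fields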


def test_export_results_creates_csv_and_json_files(monte_carlo_calisto, tmp_path):
    """Checks that export_results creates .csv and .json files.

    Parameters
    ----------
    monte_carlo_calisto : MonteCarlo
        Fixture that provides the .txt files needed by export_results.
    tmp_path : pathlib.Path
        Built-in pytest fixture providing a temporary directory.
    """
try:
mc = monte_carlo_calisto
mc.filename = tmp_path / "mock_output"
mock_data = {"apogee": 100, "max_velocity": 255}
with open(tmp_path / "mock_output.outputs.txt", "w") as f:
f.write(json.dumps(mock_data) + "\n")
mc.export_results(tmp_path / "mock_outputs_in_csv", "csv")
        expected_file_in_csv = tmp_path / "mock_outputs_in_csv.csv"
assert expected_file_in_csv.exists()
with open(expected_file_in_csv, "r") as f:
reader = csv.DictReader(f)
rows = list(reader)
assert len(rows) == 1
assert rows[0]["apogee"] == "100"
assert rows[0]["max_velocity"] == "255"
mc.export_results(tmp_path / "mock_output_in_json", "json")
expected_file_in_json = tmp_path / "mock_output_in_json.json"
assert expected_file_in_json.exists()
with open(expected_file_in_json, "r") as f:
data = json.load(f)
assert len(data) == 1
assert data[0]["apogee"] == 100
assert data[0]["max_velocity"] == 255
finally:
for filepath in [
"monte_carlo_test.errors.txt",
"monte_carlo_test.inputs.txt",
"monte_carlo_test.outputs.txt",
]:
if os.path.exists(filepath):
os.remove(filepath)
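

# A possible alternative to the try/finally cleanup above (a sketch, not part
# of this suite): a fixture that removes the side-effect files written to the
# working directory. Requesting it from a test (or marking it autouse) would
# run the cleanup automatically after each test.
@pytest.fixture
def _cleanup_monte_carlo_files():
    yield
    for filepath in (
        "monte_carlo_test.errors.txt",
        "monte_carlo_test.inputs.txt",
        "monte_carlo_test.outputs.txt",
    ):
        if os.path.exists(filepath):
            os.remove(filepath)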