Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 13 additions & 6 deletions openquake/hme/bin/hamlet.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,9 @@
from openquake.hme.__version__ import __version__

from openquake.hme.core.core import run_tests, read_yaml_config
from openquake.hme.utils.log import init_logging, check_for_log, add_logfile

logging.basicConfig(
format="%(asctime)s %(levelname)-8s %(message)s",
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S",
)
root_logger = init_logging()


def main(arg=None):
Expand All @@ -34,7 +31,17 @@ def main(arg=None):

if arg is None:
yaml_file = args.yaml_file
cfg = read_yaml_config(yaml_file)

# first without validation for logging
cfg = read_yaml_config(yaml_file, validate=False)

logfile = check_for_log(cfg)
if logfile:
add_logfile(logfile, root_logger)
logger.info(f"logging to {logfile}")

# now for real
cfg = read_yaml_config(yaml_file, validate=True)
run_tests(cfg)


Expand Down
89 changes: 83 additions & 6 deletions openquake/hme/core/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@

from openquake.hme.reporting import generate_basic_report

from openquake.hme.utils.utils import breakpoint

from openquake.hme.utils.io.source_processing import (
rupture_dict_from_logic_tree_dict,
Expand Down Expand Up @@ -66,7 +67,7 @@
}

logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# logger.addHandler(logging.NullHandler())

cfg_defaults = {
"input": {
Expand Down Expand Up @@ -467,18 +468,94 @@ def run_tests(cfg: dict) -> None:
"""


def format_output_for_json(out_results):
    """Convert a nested results dict into a JSON-serializable form.

    Drops plotting artifacts (any key containing ``"plot"``), converts
    the model-MFD DataFrame and the matched-ruptures table to plain
    dicts, removes the unmatched-earthquakes entry, and converts all
    numpy arrays to lists.

    :param out_results:
        Nested dict of test results; mutated in place.
    :returns:
        The same dict, now containing only JSON-friendly values.
    """

    def delete_key_substring(d, target_str, verbose=True):
        """Recursively deletes keys containing target_str from nested dict."""
        keys_to_delete = []

        for k, v in d.items():
            if isinstance(k, str) and target_str in k:
                keys_to_delete.append(k)
            elif isinstance(v, dict):
                # propagate `verbose` so nested deletions log consistently
                delete_key_substring(v, target_str, verbose=verbose)

        for k in keys_to_delete:
            if verbose:
                logging.warning(f"deleting {k}")
            del d[k]

        return d

    def convert_arrays_to_lists(d):
        """Recursively replace numpy arrays with plain Python lists."""
        for k, v in d.items():
            if isinstance(v, np.ndarray):
                d[k] = v.tolist()
            elif isinstance(v, dict):
                convert_arrays_to_lists(v)
        return d

    out_results = delete_key_substring(out_results, "plot")

    # Guard the "gem"-specific post-processing so partial result dicts
    # (e.g. runs without these evaluations) do not raise KeyError.
    gem = out_results.get("gem", {})

    model_mfd = gem.get("model_mfd")
    if model_mfd is not None:
        # json.loads (not eval) round-trips the DataFrame: to_json()
        # emits JSON literals such as null/true/false that eval cannot
        # parse, and eval on arbitrary text is unsafe anyway.
        model_mfd["test_data"]["mfd_df"] = json.loads(
            model_mfd["test_data"]["mfd_df"].to_json()
        )

    rup_match = gem.get("rupture_matching_eval")
    if rup_match is not None:
        rup_match["matched_rups"] = rup_match["matched_rups"].to_dict()
        # dropped from the JSON output (table is not serializable as-is)
        rup_match.pop("unmatched_eqs", None)

    out_results = convert_arrays_to_lists(out_results)

    return out_results


def write_json(cfg: dict, results: dict):
logger.info("Writing results to JSON")
out_results = {}

for test_framework, test_results in results.items():
if test_framework not in out_results.keys():
if test_framework in ["gem", "relm"]:
out_results[test_framework] = {}
for test, res in test_results.items():
if test != "model_mfd":
for test, res in test_results.items():
if test == "geometry":
continue

out_results[test_framework][test] = res["val"]

elif test_framework == "cell_gdf":
out_results[test_framework] = eval(test_results.to_json()) # :(
# process outputs here for now
out_results = format_output_for_json(out_results)


def nan2None(obj):
if isinstance(obj, dict):
return {k:nan2None(v) for k,v in obj.items()}
elif isinstance(obj, list):
return [nan2None(v) for v in obj]
elif isinstance(obj, float) and math.isnan(obj):
return None
return obj

class NanConverter(json.JSONEncoder):
def encode(self, obj, *args, **kwargs):
return super().encode(nan2None(obj), *args, **kwargs)

with open(cfg["json"]["outfile"], "w") as f:
json.dump(out_results, f)
json.dump(out_results, f, cls=NanConverter)


def write_outputs(
Expand Down Expand Up @@ -555,7 +632,7 @@ def write_reports(cfg: dict, results: dict, input_data: dict) -> None:
:param eq_gdf:
:class:`GeoDataFrame` with the observed earthquake catalog.
"""
logger.info("writing reports")
logger.info("Writing reports")

if "basic" in cfg["report"].keys():
generate_basic_report(cfg, results, input_data)
249 changes: 249 additions & 0 deletions openquake/hme/datasets/ne_50m_admin_0_countries.geojson

Large diffs are not rendered by default.

38 changes: 35 additions & 3 deletions openquake/hme/model_test_frameworks/gem/gem_test_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,9 +129,38 @@ def moment_over_under_eval_fn(
return results


def model_mfd_eval_fn(rup_gdf, eq_gdf, mag_bins, t_yrs):
mod_mfd = get_model_mfd(rup_gdf, mag_bins, cumulative=False)
obs_mfd = get_obs_mfd(eq_gdf, mag_bins, t_yrs=t_yrs, cumulative=False)
def model_mfd_eval_fn(
rup_gdf,
eq_gdf,
mag_bins,
t_yrs=None,
completeness_table=None,
annualize=False,
):

if annualize:
t_yrs_model = 1.0
completeness_table_model = None

else:
completeness_table_model = completeness_table
t_yrs_model = t_yrs

mod_mfd = get_model_mfd(
rup_gdf,
mag_bins,
cumulative=False,
t_yrs=t_yrs_model,
completeness_table=completeness_table_model,
)
obs_mfd = get_obs_mfd(
eq_gdf,
mag_bins,
t_yrs=t_yrs,
cumulative=False,
completeness_table=completeness_table,
annualize=annualize,
)

mfd_df = pd.DataFrame.from_dict(
mod_mfd, orient="index", columns=["mod_mfd"]
Expand All @@ -144,6 +173,9 @@ def model_mfd_eval_fn(rup_gdf, eq_gdf, mag_bins, t_yrs):

mfd_df.index.name = "bin"

# print(mfd_df["obs_mfd_cum"])
# print(mfd_df["mod_mfd_cum"])

return {"test_data": {"mfd_df": mfd_df}}


Expand Down
42 changes: 34 additions & 8 deletions openquake/hme/model_test_frameworks/gem/gem_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ def M_test(
t_yrs = test_config["investigation_time"]
else:
eq_gdf = input_data["eq_gdf"]
t_yrs = cfg["input"]["seis_catalog"]["duration"]
t_yrs = cfg["input"]["seis_catalog"].get("duration", 1.0)
stop_date = cfg["input"]["seis_catalog"].get("stop_date")
completeness_table = cfg["input"]["seis_catalog"].get(
"completeness_table"
Expand Down Expand Up @@ -122,7 +122,7 @@ def S_test(
else:
eq_gdf = input_data["eq_gdf"]
eq_groups = input_data["eq_groups"]
t_yrs = cfg["input"]["seis_catalog"]["duration"]
t_yrs = cfg["input"]["seis_catalog"].get("duration")
stop_date = cfg["input"]["seis_catalog"].get("stop_date")
completeness_table = cfg["input"]["seis_catalog"].get(
"completeness_table"
Expand Down Expand Up @@ -199,13 +199,29 @@ def N_test(cfg: dict, input_data: dict) -> dict:
logging.info("Running N-Test")

test_config = cfg["config"]["model_framework"]["gem"]["N_test"]
completeness_table = cfg["input"]["seis_catalog"].get("completeness_table")
test_config["mag_bins"] = get_mag_bins_from_cfg(cfg)

prospective = test_config.get("prospective", False)

if (test_config["prob_model"] == "poisson") and not prospective:
test_config["investigation_time"] = cfg["input"]["seis_catalog"][
"duration"
]
if (
test_config["prob_model"] in ["poisson", "poisson_cum"]
) and not prospective:
if completeness_table is not None:
test_config["completeness_table"] = completeness_table
test_config["mag_bins"] = get_mag_bins_from_cfg(cfg)
else:
inv_time = test_config.get("investigation_time")
seis_duration = cfg["input"]["seis_catalog"]["duration"]
if inv_time is not None and inv_time != seis_duration:
logging.warning(
"N-Test: Investigation time does not match seis catalog "
"duration. Using seis catalog duration."
)

test_config["investigation_time"] = cfg["input"]["seis_catalog"][
"duration"
]

if prospective:
eq_gdf = input_data["pro_gdf"]
Expand All @@ -220,7 +236,7 @@ def N_test(cfg: dict, input_data: dict) -> dict:
"N-Test number obs eqs: {}".format(test_results["n_obs_earthquakes"])
)
logging.info(
"N-Test number pred eqs: {}".format(test_results["inv_time_rate"])
"N-Test number pred eqs: {}".format(test_results["n_pred_earthquakes"])
)
logging.info("N-Test {}".format(test_results["test_pass"]))
return test_results
Expand Down Expand Up @@ -252,9 +268,18 @@ def max_mag_check(cfg: dict, input_data: dict):


def model_mfd_eval(cfg, input_data):
logging.info("Running GEM Model MFD Eval")
mag_bins = get_mag_bins_from_cfg(cfg)
completeness_table = cfg["input"]["seis_catalog"].get("completeness_table")
test_config = cfg["config"]["model_framework"]["gem"]["model_mfd"]

if test_config is None:
test_config = {}

prospective = test_config.get("prospective", False)
test_config["investigation_time"] = test_config.get(
"investigation_time", cfg["input"]["seis_catalog"].get("duration")
)

if prospective:
eq_gdf = input_data["pro_gdf"]
Expand All @@ -265,7 +290,8 @@ def model_mfd_eval(cfg, input_data):
input_data["rupture_gdf"],
eq_gdf,
mag_bins,
test_config["investigation_time"],
t_yrs=test_config["investigation_time"],
completeness_table=completeness_table,
)

return results
Expand Down
Loading
Loading