Skip to content

Commit cb0f2f9

Browse files
committed
separate station term
1 parent 32d8176 commit cb0f2f9

File tree

5 files changed

+168
-74
lines changed

5 files changed

+168
-74
lines changed

scripts/cut_templates_cc.py

Lines changed: 31 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,12 @@ def fillin_missing_picks(picks, events, stations, config):
4848
picks_ps.drop(columns=["P_source", "S_source"], inplace=True)
4949

5050
picks_ps = picks_ps.merge(events[["event_index", "event_timestamp"]], on="event_index")
51-
picks_ps = picks_ps.merge(stations[["station_id", "station_term_time"]], on="station_id")
51+
# picks_ps = picks_ps.merge(stations[["station_id", "station_term_time"]], on="station_id")
52+
picks_ps = picks_ps.merge(stations[["station_id", "station_term_time_p", "station_term_time_s"]], on="station_id")
53+
picks_ps["station_term_time"] = picks_ps.apply(
54+
lambda row: row[f"station_term_time_{row['phase_type'].lower()}"], axis=1
55+
) ## Separate P and S station term
56+
picks_ps.drop(columns=["station_term_time_p", "station_term_time_s"], inplace=True)
5257
picks_ps["phase_timestamp"] = picks_ps["event_timestamp"] + picks_ps["traveltime"] + picks_ps["station_term_time"]
5358
picks_ps["phase_time"] = reference_t0 + pd.to_timedelta(picks_ps["phase_timestamp"], unit="s")
5459
picks_ps["phase_score"] = min_phase_score
@@ -85,9 +90,16 @@ def predict_full_picks(picks, events, stations, config):
8590
s_picks = picks_full.assign(phase_type="S")
8691
picks_full = pd.concat([p_picks, s_picks])
8792
phase_type = picks_full["phase_type"].values
93+
# picks_full = picks_full.merge(
94+
# stations[["station_id", "station_term_time", "x_km", "y_km", "z_km"]], on="station_id"
95+
# )
8896
picks_full = picks_full.merge(
89-
stations[["station_id", "station_term_time", "x_km", "y_km", "z_km"]], on="station_id"
97+
stations[["station_id", "station_term_time_p", "station_term_time_s", "x_km", "y_km", "z_km"]], on="station_id"
9098
)
99+
picks_full["station_term_time"] = picks_full.apply(
100+
lambda row: row[f"station_term_time_{row['phase_type'].lower()}"], axis=1
101+
)
102+
picks_full.drop(columns=["station_term_time_p", "station_term_time_s"], inplace=True)
91103
station_dt = picks_full["station_term_time"].values
92104
station_locs = picks_full[["x_km", "y_km", "z_km"]].values
93105
picks_full.rename(columns={"x_km": "station_x_km", "y_km": "station_y_km", "z_km": "station_z_km"}, inplace=True)
@@ -379,6 +391,14 @@ def cut_templates(root_path, region, config):
379391
vp_vs_ratio = 1.73
380392
vs = [v / vp_vs_ratio for v in vp]
381393
h = 0.3
394+
395+
if os.path.exists(f"{root_path}/{region}/obspy/velocity.csv"):
396+
velocity = pd.read_csv(f"{root_path}/{region}/obspy/velocity.csv")
397+
zz = velocity["z_km"].values
398+
vp = velocity["vp"].values
399+
vs = velocity["vs"].values
400+
h = 0.1
401+
382402
vel = {"Z": zz, "P": vp, "S": vs}
383403
eikonal = {
384404
"vel": vel,
@@ -396,9 +416,16 @@ def cut_templates(root_path, region, config):
396416
min_phase_score = picks["phase_score"].min()
397417

398418
picks = picks.merge(events[["event_index", "event_timestamp"]], on="event_index")
399-
picks = picks.merge(stations[["station_id", "station_term_time"]], on="station_id")
419+
# picks = picks.merge(stations[["station_id", "station_term_time"]], on="station_id")
420+
picks = picks.merge(stations[["station_id", "station_term_time_p", "station_term_time_s"]], on="station_id")
421+
picks["station_term_time"] = picks.apply(
422+
lambda x: x[f"station_term_time_{x['phase_type'].lower()}"], axis=1
423+
) ## Separate P and S station term
424+
picks.drop(columns=["station_term_time_p", "station_term_time_s"], inplace=True)
400425
picks["phase_timestamp"] = picks["phase_time"].apply(lambda x: (x - reference_t0).total_seconds())
401-
picks["traveltime"] = picks["phase_timestamp"] - picks["event_timestamp"] - picks["station_term_time"]
426+
picks["traveltime"] = (
427+
picks["phase_timestamp"] - picks["event_timestamp"] - picks["station_term_time"]
428+
) ## Separate P and S station term
402429

403430
picks = fillin_missing_picks(
404431
picks,

scripts/plot_catalog.py

Lines changed: 30 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -9,16 +9,9 @@
99
import numpy as np
1010
import pandas as pd
1111
import plotly.graph_objects as go
12-
12+
from args import parse_args
1313

1414
# %%
15-
def parse_args():
16-
parser = argparse.ArgumentParser()
17-
parser.add_argument("root_path", nargs="?", type=str, default="local", help="root path")
18-
parser.add_argument("region", nargs="?", type=str, default="demo", help="region")
19-
return parser.parse_args()
20-
21-
2215
args = parse_args()
2316

2417
# %%
@@ -49,6 +42,7 @@ def parse_args():
4942
routine_catalog = f"{root_path}/{region}/obspy/catalog.csv"
5043
routine_exist = False
5144
if os.path.exists(routine_catalog):
45+
print(f"Reading {routine_catalog}")
5246
routine_exist = True
5347
routine_catalog = pd.read_csv(routine_catalog, parse_dates=["time"])
5448

@@ -57,6 +51,7 @@ def parse_args():
5751
gamma_file = f"{root_path}/{region}/gamma/gamma_events_000_001.csv"
5852
gamma_exist = False
5953
if os.path.exists(gamma_file):
54+
print(f"Reading {gamma_file}")
6055
gamma_exist = True
6156
gamma_catalog = pd.read_csv(gamma_file, parse_dates=["time"])
6257
# gamma_catalog["depth_km"] = gamma_catalog["depth(m)"] / 1e3
@@ -66,6 +61,7 @@ def parse_args():
6661
adloc_file = f"{root_path}/{region}/adloc/ransac_events.csv"
6762
adloc_exist = False
6863
if os.path.exists(adloc_file):
64+
print(f"Reading {adloc_file}")
6965
adloc_exist = True
7066
adloc_catalog = pd.read_csv(adloc_file, parse_dates=["time"])
7167
# adloc_catalog["magnitude"] = 0.0
@@ -75,20 +71,23 @@ def parse_args():
7571
adloc_dt_file = f"{root_path}/{region}/adloc_dd/adloc_dt_events.csv"
7672
adloc_dt_exist = False
7773
if os.path.exists(adloc_dt_file):
74+
print(f"Reading {adloc_dt_file}")
7875
adloc_dt_exist = True
7976
adloc_dt_catalog = pd.read_csv(adloc_dt_file, parse_dates=["time"])
8077

8178
# %%
8279
adloc_dtcc_file = f"{root_path}/{region}/adloc_dd/adloc_dtcc_events.csv"
8380
adloc_dtcc_exist = False
8481
if os.path.exists(adloc_dtcc_file):
82+
print(f"Reading {adloc_dtcc_file}")
8583
adloc_dtcc_exist = True
8684
adloc_dtcc_catalog = pd.read_csv(adloc_dtcc_file, parse_dates=["time"])
8785

8886
# %%
8987
hypodd_file = f"{root_path}/{region}/hypodd/hypodd_ct.reloc"
9088
hypodd_ct_exist = False
9189
if os.path.exists(hypodd_file):
90+
print(f"Reading {hypodd_file}")
9291
hypodd_ct_exist = True
9392
columns = [
9493
"ID",
@@ -140,6 +139,7 @@ def parse_args():
140139
hypodd_file = f"{root_path}/{region}/hypodd/hypodd_cc.reloc"
141140
hypodd_cc_exist = False
142141
if os.path.exists(hypodd_file):
142+
print(f"Reading {hypodd_file}")
143143
hypodd_cc_exist = True
144144
columns = [
145145
"ID",
@@ -191,6 +191,7 @@ def parse_args():
191191
growclust_file = f"{root_path}/{region}/growclust/growclust_ct_catalog.txt"
192192
growclust_ct_exist = False
193193
if os.path.exists(growclust_file):
194+
print(f"Reading {growclust_file}")
194195
growclust_ct_exist = True
195196
columns = [
196197
"yr",
@@ -235,6 +236,7 @@ def parse_args():
235236
growclust_file = f"{root_path}/{region}/growclust/growclust_cc_catalog.txt"
236237
growclust_cc_exist = False
237238
if os.path.exists(growclust_file):
239+
print(f"Reading {growclust_file}")
238240
growclust_cc_exist = True
239241
columns = [
240242
"yr",
@@ -746,6 +748,7 @@ def parse_args():
746748
ax[2, 0].set_title(f"HypoDD (CT): {len(catalog_ct_hypodd)}")
747749
ax[2, 0].set_xlim(xlim)
748750
ax[2, 0].set_ylim(ylim)
751+
749752
if hypodd_cc_exist and (len(catalog_cc_hypodd) > 0):
750753
ax[2, 1].scatter(
751754
catalog_cc_hypodd["LAT"],
@@ -950,6 +953,25 @@ def plot3d(x, y, z, config, fig_name):
950953
config_plot3d,
951954
f"{root_path}/{figure_path}/earthquake_location_adloc.html",
952955
)
956+
957+
if adloc_dt_exist and len(adloc_dt_catalog) > 0:
958+
plot3d(
959+
adloc_dt_catalog["longitude"],
960+
adloc_dt_catalog["latitude"],
961+
adloc_dt_catalog["depth_km"],
962+
config_plot3d,
963+
f"{root_path}/{figure_path}/earthquake_location_adloc_dt.html",
964+
)
965+
966+
if adloc_dtcc_exist and len(adloc_dtcc_catalog) > 0:
967+
plot3d(
968+
adloc_dtcc_catalog["longitude"],
969+
adloc_dtcc_catalog["latitude"],
970+
adloc_dtcc_catalog["depth_km"],
971+
config_plot3d,
972+
f"{root_path}/{figure_path}/earthquake_location_adloc_dtcc.html",
973+
)
974+
953975
if hypodd_ct_exist and len(catalog_ct_hypodd) > 0:
954976
plot3d(
955977
catalog_ct_hypodd["LON"],

scripts/run_adloc.py

Lines changed: 33 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -86,8 +86,10 @@ def run_adloc(
8686

8787
# %%
8888
stations["depth_km"] = -stations["elevation_m"] / 1000
89-
if "station_term_time" not in stations.columns:
90-
stations["station_term_time"] = 0.0
89+
if "station_term_time_p" not in stations.columns:
90+
stations["station_term_time_p"] = 0.0
91+
if "station_term_time_s" not in stations.columns:
92+
stations["station_term_time_s"] = 0.0
9193
if "station_term_amplitude" not in stations.columns:
9294
stations["station_term_amplitude"] = 0.0
9395
stations[["x_km", "y_km"]] = stations.apply(
@@ -117,6 +119,13 @@ def run_adloc(
117119
# %%
118120
config["eikonal"] = None
119121

122+
if os.path.exists(f"{root_path}/{region}/obspy/velocity.csv"):
123+
velocity = pd.read_csv(f"{root_path}/{region}/obspy/velocity.csv")
124+
zz = velocity["z_km"].values
125+
vp = velocity["vp"].values
126+
vs = velocity["vs"].values
127+
h = 0.1
128+
120129
vel = {"Z": zz, "P": vp, "S": vs}
121130
config["eikonal"] = {
122131
"vel": vel,
@@ -194,38 +203,37 @@ def run_adloc(
194203
for iter in range(MAX_SST_ITER):
195204
# picks, events = invert_location_iter(picks, stations, config, estimator, events_init=events_init, iter=iter)
196205
picks, events = invert_location(picks, stations, config, estimator, events_init=events_init, iter=iter)
197-
# station_term = picks[picks["mask"] == 1.0].groupby("idx_sta").agg({"residual_time": "mean"}).reset_index()
198-
# station_term_time = picks[picks["mask"] == 1.0].groupby("idx_sta").agg({"residual_time": "mean"}).reset_index()
199-
station_term_time = (
200-
picks[picks["mask"] == 1.0].groupby("idx_sta").agg({"residual_time": "median"}).reset_index()
201-
)
202-
# station_term_amp = (
203-
# picks[picks["mask"] == 1.0].groupby("idx_sta").agg({"residual_amplitude": "mean"}).reset_index()
204-
# )
205206
station_term_amp = (
206207
picks[picks["mask"] == 1.0].groupby("idx_sta").agg({"residual_amplitude": "median"}).reset_index()
207208
)
208-
stations["station_term_time"] += (
209-
stations["idx_sta"].map(station_term_time.set_index("idx_sta")["residual_time"]).fillna(0)
210-
)
211209
stations["station_term_amplitude"] += (
212210
stations["idx_sta"].map(station_term_amp.set_index("idx_sta")["residual_amplitude"]).fillna(0)
213211
)
214-
## Separate P and S station term
215-
# station_term = (
216-
# picks[picks["mask"] == 1.0].groupby(["idx_sta", "phase_type"]).agg({"residual": "mean"}).reset_index()
217-
# )
218-
# stations["station_term_p"] = (
219-
# stations["idx_sta"]
220-
# .map(station_term[station_term["phase_type"] == 0].set_index("idx_sta")["residual"])
221-
# .fillna(0)
212+
213+
## Same P and S station term
214+
# station_term_time = picks[picks["mask"] == 1.0].groupby("idx_sta").agg({"residual_time": "mean"}).reset_index()
215+
# stations["station_term_time_p"] += (
216+
# stations["idx_sta"].map(station_term_time.set_index("idx_sta")["residual_time"]).fillna(0)
222217
# )
223-
# stations["station_term_s"] = (
224-
# stations["idx_sta"]
225-
# .map(station_term[station_term["phase_type"] == 1].set_index("idx_sta")["residual"])
226-
# .fillna(0)
218+
# stations["station_term_time_s"] += (
219+
# stations["idx_sta"].map(station_term_time.set_index("idx_sta")["residual_time"]).fillna(0)
227220
# )
228221

222+
## Separate P and S station term
223+
station_term_time = (
224+
picks[picks["mask"] == 1.0].groupby(["idx_sta", "phase_type"]).agg({"residual_time": "mean"}).reset_index()
225+
)
226+
stations["station_term_time_p"] += (
227+
stations["idx_sta"]
228+
.map(station_term_time[station_term_time["phase_type"] == 0].set_index("idx_sta")["residual_time"])
229+
.fillna(0)
230+
)
231+
stations["station_term_time_s"] += (
232+
stations["idx_sta"]
233+
.map(station_term_time[station_term_time["phase_type"] == 1].set_index("idx_sta")["residual_time"])
234+
.fillna(0)
235+
)
236+
229237
plotting_ransac(stations, figure_path, config, picks, events_init, events, suffix=f"_ransac_sst_{iter}")
230238

231239
if "event_index" not in events.columns:

0 commit comments

Comments
 (0)