Skip to content

Commit 5c1adde

Browse files
committed
Add script to prepare SED_?.th which specifies sediment BCs
1 parent a1cf05d commit 5c1adde

File tree

1 file changed

+144
-0
lines changed

1 file changed

+144
-0
lines changed
Lines changed: 144 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
1+
import pandas as pd
2+
import os
3+
import sys
4+
import datetime as dt
5+
6+
import schimpy.station as station
7+
from schimpy import batch_metrics
8+
from vtools.data.vtime import days, hours
9+
10+
11+
# Linear relationship to convert turbidity to ssc
def turb_to_ssc_linear(df_turb, m: float, b: float):
    """Convert turbidity to SSC with the linear fit SSC = m * turbidity + b.

    Works on scalars or pandas objects (arithmetic broadcasts elementwise).
    """
    return m * df_turb + b
17+
18+
19+
# Power relationship to convert turbidity to ssc
def turb_to_ssc_power(df_turb, c: float, p: float):
    """Convert turbidity to SSC with the power-law fit SSC = c * turbidity**p.

    Works on scalars or pandas objects (arithmetic broadcasts elementwise).
    """
    return c * df_turb ** p
25+
26+
27+
#########################################################################
# Housekeeping
#########################################################################

# "Slope" and "intercept" for converting Turbidity to SSC using power relationship
constant_default = 0.00
power_default = 0.1

# Boundary-condition locations, in processing order.
_bc_locations = [
    "sacramento",
    "sjr",
    "mokelumne",
    "american",
    "yolo",
    "yolo_toedrain",
]

# Default coefficients everywhere; Mokelumne has a fitted power-law curve.
constants = {loc: constant_default for loc in _bc_locations}
constants["mokelumne"] = 4.43

powers = {loc: power_default for loc in _bc_locations}
powers["mokelumne"] = 0.592

# Prescribe the composition ratios of the sediment classes, whose sum should be 1.
composition = {"SED_1": 0.8, "SED_2": 0.15, "SED_3": 0.05}

# Specify bc locations to link them to obs stations.
# A value of None indicates obs data is not available for that location.
bc_loc_to_stn = {
    "sacramento": "fpt",
    "sjr": "sjr",
    "mokelumne": "dlc",
    "american": None,
    "yolo": "lib",
    "yolo_toedrain": "lis",
}

# Define parameters for the batch metrics object
params = {
    "stations_csv": "./inputs/station_dbase.csv",
    "obs_links_csv": "./inputs/obs_links_20230315.csv",
    "obs_search_path": ["/scratch/nasbdo/modeling_data/repo/continuous/screened"],
}
77+
78+
# Station database and obs-link table used to look up observed time series.
db_stations = station.read_station_dbase(params["stations_csv"])
db_obs = station.read_obs_links(params["obs_links_csv"])

# Time window for which boundary time series are generated.
dt_start = dt.datetime(2021, 1, 1, 0, 0)
dt_end = dt.datetime(2024, 1, 1, 0, 0)

# List of locations with manually entered data
bc_manual = ["american"]

# List of locations with ssc timeseries available.
bc_ssc_th = []

# %%
# Create an empty dataframe to store the observed turbidity
df_all = pd.DataFrame()

# Batch metrics object; provides retrieve_ts_obs() for fetching observed data.
# (Renamed from the original module-level name `self`, which misleadingly
# mimicked a method receiver.)
bm = batch_metrics.BatchMetrics(params)
for loc in bc_loc_to_stn:
    print(f"Processing {loc}")
    if loc in bc_ssc_th:  # Read ssc data directly from file
        # TODO: reading SSC directly from a .th file is not implemented yet;
        # these locations currently get no column in df_all.
        print(" SSC time series read directly from file")

    elif loc in bc_manual:  # Read ssc data from file or manually prescribe
        print(" SSC time series manually prescribed")
        # NOTE(review): the scalar broadcasts over df_all's current index,
        # so this relies on at least one obs-derived column having been
        # concatenated first to establish the index — confirm ordering.
        df_all[loc] = 0.0099

    else:  # Convert FNU to SSC from observed data
        print(" SSC time series calculated from observed turbidity")
        turb_obs = bm.retrieve_ts_obs(
            bc_loc_to_stn[loc],
            "default",
            "turbidity",
            [dt_start, dt_end],
            db_stations,
            db_obs,
        )

        # Save observed turbidity to file for reference
        turb_obs.to_csv(loc + ".csv")

        # Check for NaNs
        if turb_obs.isnull().values.any():
            # To do: Decide what to do with NaNs (interpolate?)
            # For now, fill NaNs with previous value
            turb_obs.ffill(inplace=True)

        # Convert turbidity to SSC using this location's power-law coefficients
        ssc_calc = turb_to_ssc_power(turb_obs, constants[loc], powers[loc])
        ssc_calc.rename(loc, inplace=True)

        # Append to the total dataframe
        df_all = pd.concat([df_all, ssc_calc], axis=1)

# Convert index to seconds elapsed since dt_start (the .th time column)
df_all.index = (pd.to_datetime(df_all.index) - dt_start).total_seconds()

# Specify index precision
df_all.index = df_all.index.map(lambda x: "%.1f" % x)

# Save to file for each sediment class, scaling total SSC by its ratio
for sedclass, ratio in composition.items():
    (df_all * ratio).to_csv(
        sedclass + ".th", header=False, sep="\t", float_format="%.3e"
    )

0 commit comments

Comments
 (0)