Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions Evaluate/evaluate.py
Original file line number Diff line number Diff line change
Expand Up @@ -1600,5 +1600,25 @@ def process_args(argv):

log.info("Completed {0}".format(sys.argv[0]))


def bom2tcrm(df, trackId):
    """
    Transform a dataframe in BoM (CycloneXML) format into a TCRM track.

    The BoM-named columns are copied onto TCRM-style column names on the
    same dataframe, so the caller's dataframe gains the new columns as a
    side effect.

    :param df: :class:`pandas.DataFrame` of BoM-format track data
        (columns ``validtime``, ``translation_speed``, ``pcentre``,
        ``longitude``, ``latitude``, ``poci``, ``rmax``, ``disturbance``).
    :param trackId: identifier assigned to the returned track.

    :returns: :class:`Track` built from the augmented dataframe.
    """
    # Map TCRM column name -> BoM source column values.
    column_map = {
        'Speed': df.translation_speed,
        'CentralPressure': df.pcentre,
        'Longitude': df.longitude,
        'Latitude': df.latitude,
        'EnvPressure': df.poci,
        'rMax': df.rmax,
    }
    df['Datetime'] = pd.to_datetime(df.validtime)
    for name, values in column_map.items():
        df[name] = values
    df['trackId'] = df.disturbance.values

    track = Track(df)
    # NOTE(review): a sibling copy of this function sets
    # ``trackId = [trackId, trackId]`` — confirm which form Track expects.
    track.trackId = [trackId]
    return track


if __name__ == '__main__':
process_args(sys.argv[1:])
32 changes: 29 additions & 3 deletions Evaluate/interpolateTracks.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,10 @@

from Utilities.maputils import latLon2Azi
from Utilities.loadData import loadTrackFile, maxWindSpeed
from Utilities.parallel import disableOnWorkers
from Utilities.track import Track, ncSaveTracks
from pycxml.pycxml import loadfile
import pandas as pd

LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
Expand Down Expand Up @@ -70,7 +73,7 @@ def interpolate(track, delta, interpolation_type=None):
track.Minute)]
else:
day_ = track.Datetime

timestep = timedelta(delta/24.)
try:
time_ = np.array([d.toordinal() + (d.hour + d.minute/60.)/24.0
Expand Down Expand Up @@ -101,7 +104,7 @@ def interpolate(track, delta, interpolation_type=None):
idx[0] = 1
# TODO: Possibly could change `np.mean(dt)` to `dt`?
track.WindSpeed = maxWindSpeed(idx, np.mean(dt), track.Longitude,
track.Latitude, track.CentralPressure,
track.Latitude, track.CentralPressure,
track.EnvPressure)
# Find the indices of valid pressure observations:
validIdx = np.where(track.CentralPressure < sys.maxsize)[0]
Expand Down Expand Up @@ -140,7 +143,7 @@ def interpolate(track, delta, interpolation_type=None):
# Use the Akima interpolation method:
try:
import akima
except ImportError:
except ModuleNotFoundError:
LOG.exception(("Akima interpolation module unavailable "
" - default to scipy.interpolate"))
nLon = splev(newtime, splrep(timestep, track.Longitude, s=0),
Expand Down Expand Up @@ -262,6 +265,7 @@ def saveTracks(tracks, outputFile):
if len(data) > 0:
np.savetxt(fp, data, fmt=OUTPUT_FMTS)

@disableOnWorkers
def parseTracks(configFile, trackFile, source, delta, outputFile=None,
interpolation_type=None):
"""
Expand Down Expand Up @@ -299,6 +303,9 @@ def parseTracks(configFile, trackFile, source, delta, outputFile=None,
if trackFile.endswith("nc"):
from Utilities.track import ncReadTrackData
tracks = ncReadTrackData(trackFile)
elif trackFile.endswith("xml"):
dfs = loadfile(trackFile)
tracks = [bom2tcrm(df, i) for i, df in enumerate(dfs)]
else:
tracks = loadTrackFile(configFile, trackFile, source)

Expand All @@ -317,3 +324,22 @@ def parseTracks(configFile, trackFile, source, delta, outputFile=None,


return results


def bom2tcrm(df, trackId):
    """
    Transform a dataframe in BoM (CycloneXML) format into a TCRM track.

    The BoM-named columns are copied onto TCRM-style column names on the
    same dataframe, so the caller's dataframe gains the new columns as a
    side effect.

    :param df: :class:`pandas.DataFrame` of BoM-format track data
        (columns ``validtime``, ``translation_speed``, ``pcentre``,
        ``longitude``, ``latitude``, ``poci``, ``rmax``, ``disturbance``).
    :param trackId: identifier assigned to the returned track.

    :returns: :class:`Track` built from the augmented dataframe.
    """
    # (TCRM column, BoM source column) pairs, applied in order.
    renames = [('Speed', 'translation_speed'),
               ('CentralPressure', 'pcentre'),
               ('Longitude', 'longitude'),
               ('Latitude', 'latitude'),
               ('EnvPressure', 'poci'),
               ('rMax', 'rmax')]
    df['Datetime'] = pd.to_datetime(df.validtime)
    for target, source in renames:
        df[target] = getattr(df, source)
    df['trackId'] = df.disturbance.values

    track = Track(df)
    # NOTE(review): a sibling copy of this function sets
    # ``trackId = [trackId]`` — confirm which form Track expects.
    track.trackId = [trackId, trackId]
    return track
42 changes: 29 additions & 13 deletions Utilities/timeseries.py
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,9 @@ def __init__(self, configFile):
stnlat = stndata[:, 2].astype(float)
for sid, lon, lat in zip(stnid, stnlon, stnlat):
self.stations.append(Station(sid, lon, lat))

self.station_lat = np.array([s.lat for s in self.stations])
self.station_lon = np.array([s.lon for s in self.stations])
log.info(f"There are {len(self.stations)} stations that will collect timeseries data")

def sample(self, lon, lat, spd, uu, vv, prs, gridx, gridy):
Expand Down Expand Up @@ -216,20 +219,33 @@ def extract(self, dt, spd, uu, vv, prs, gridx, gridy):
:param gridy: :class:`numpy.ndarray` of grid latitudes.

"""
stns = 0
for stn in self.stations:
if stn.insideGrid(gridx, gridy):
stns += 1
result = self.sample(stn.lon, stn.lat, spd, uu, vv, prs,
gridx, gridy)
ss, ux, vy, bb, pp = result
stn.data.append((str(stn.id), dt, stn.lon, stn.lat, ss,
ux, vy, bb, pp))
# import xarray as xr

else:
stn.data.append((str(stn.id), dt, stn.lon, stn.lat, 0.0, 0.0,
0.0, 0.0, prs[0, 0]))
log.debug("Extracted data for {0} stations".format(stns))
# grid_lon = gridx[0, :]
# grid_lat = gridy[:, 0]

# mask = (self.station_lat <= grid_lat[0]) & (self.station_lat >= grid_lat[-1])
# mask &= (self.station_lon >= grid_lon[0]) & (self.station_lon <= grid_lon[-1])

out = np.zeros((len(self.stations), 5))
# for i, xx in enumerate(spd, uu, vv, prs):
# yy = xr.DataArray(
# xx,
# dims=["lat", "lon"],
# coords=dict(
# lon=grid_lon,
# lat=grid_lat,
# ),
# )

# out[:, i][mask] = yy.interp(lat=grid_lat[mask], lon=grid_lon.mask)

# out[:, -1][mask] = np.mod((180. / np.pi) * np.arctan2(-out[:, 1][mask], -out[:, 2][mask]), 360.)

# for stn in self.stations:
# stn.data.append((str(stn.id), dt, stn.lon, stn.lat, out[i, 0], out[i, 1], out[i, 2], out[i, -1], out[i, 3]))
print("huh")
log.debug("Extracted data for {0} stations".format(mask.sum()))

def shutdown(self):
"""
Expand Down
11 changes: 9 additions & 2 deletions Utilities/track.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,15 +81,22 @@ def __init__(self, data):
self.data = data
self.trackId = None
self.trackfile = None
if (len(data) > 0) and ('CentralPressure' in data.dtype.names):
if (len(data) > 0) and self.has_key('CentralPressure'):
self.trackMinPressure = np.min(data['CentralPressure'])
else:
self.trackMinPressure = None
if (len(data) > 0) and ('WindSpeed' in data.dtype.names):
if (len(data) > 0) and self.has_key('WindSpeed'):
self.trackMaxWind = np.max(data['WindSpeed'])
else:
self.trackMaxWind = None

def has_key(self, key):
    """
    Return True if *key* names a field/column of the underlying data.

    Supports both numpy structured arrays (looked up via
    ``dtype.names``) and pandas dataframes (looked up via ``columns``).

    :param str key: field or column name to look up.

    :rtype: bool
    """
    try:
        names = self.data.dtype.names
    except AttributeError:
        # No ``dtype`` attribute: assume a pandas DataFrame.
        return key in self.data.columns
    # A plain (non-structured) ndarray has ``dtype.names is None``;
    # ``key in None`` would raise TypeError, so guard explicitly.
    return names is not None and key in names

def __getattr__(self, key):
"""
Get the `key` from the `data` object.
Expand Down
19 changes: 19 additions & 0 deletions docs/examples.rst
Original file line number Diff line number Diff line change
Expand Up @@ -69,3 +69,22 @@ the Bureau of Meteorology. See the :ref:`scenariomodelling` section
for more details.


Ensemble simulation
-------------------

Some forecasting agencies provide ensemble track forecasts. We can run
``tcevent.py`` in parallel to simulate the wind fields from such a forecast.

An example configuration is provided in ``examples/ensemble.ini``.

Requirements:

* ``pycxml`` is a prototype library that reads CycloneXML files, used for
  distributing ensemble track forecasts (e.g. by the Australian Bureau of
  Meteorology).

To run::

    $ mpirun -np 16 python tcevent.py -c examples/ensemble.ini


Loading