 from openquake.hazardlib.contexts import read_cmakers, basename, get_maxsize
 from openquake.hazardlib.calc.hazard_curve import classical as hazclassical
 from openquake.hazardlib.calc import disagg
-from openquake.hazardlib.probability_map import ProbabilityMap, poes_dt
+from openquake.hazardlib.probability_map import ProbabilityMap, rates_dt
 from openquake.commonlib import calc
 from openquake.calculators import base, getters
 
@@ -177,10 +177,10 @@ def postclassical(pgetter, N, hstats, individual_rlzs,
     for sid in sids:
         idx = sidx[sid]
         with combine_mon:
-            pc = pgetter.get_pcurve(sid)  # shape (L, R)
+            pc = pgetter.get_hcurve(sid)  # shape (L, R)
             if amplifier:
                 pc = amplifier.amplify(ampcode[sid], pc)
-                # NB: the pcurve have soil levels != IMT levels
+                # NB: the hcurve have soil levels != IMT levels
         if pc.array.sum() == 0:  # no data
             continue
         with compute_mon:
@@ -265,7 +265,7 @@ def get_rates(self, pmap):
 
     def store_poes(self, pnes, the_sids, gid=0):
         """
-        Store 1-pnes inside the _poes dataset
+        Store 1-pnes inside the _rates dataset
         """
         avg_poe = 0
         # store by IMT to save memory
@@ -286,15 +286,16 @@ def store_poes(self, pnes, the_sids, gid=0):
             if len(idxs) == 0:  # happens in case_60
                 return 0
             sids = the_sids[idxs]
-            hdf5.extend(self.datastore['_poes/sid'], sids)
-            hdf5.extend(self.datastore['_poes/gid'], gids + gid)
-            hdf5.extend(self.datastore['_poes/lid'], lids + slc.start)
-            hdf5.extend(self.datastore['_poes/poe'], poes[idxs, lids, gids])
+            hdf5.extend(self.datastore['_rates/sid'], sids)
+            hdf5.extend(self.datastore['_rates/gid'], gids + gid)
+            hdf5.extend(self.datastore['_rates/lid'], lids + slc.start)
+            hdf5.extend(self.datastore['_rates/rate'],
+                        disagg.to_rates(poes[idxs, lids, gids]))
 
             # slice_by_sid contains 3x6=18 slices in classical/case_22
             # which has 6 IMTs each one with 20 levels
             sbs = build_slice_by_sid(sids, self.offset)
-            hdf5.extend(self.datastore['_poes/slice_by_sid'], sbs)
+            hdf5.extend(self.datastore['_rates/slice_by_sid'], sbs)
             self.offset += len(sids)
             avg_poe += poes.mean(axis=(0, 2)) @ self.level_weights[slc]
         self.acc['avg_poe'] = avg_poe
@@ -395,8 +396,8 @@ def init_poes(self):
         self.cmakers = read_cmakers(self.datastore, self.csm)
         self.cfactor = numpy.zeros(3)
         self.rel_ruptures = AccumDict(accum=0)  # grp_id -> rel_ruptures
-        self.datastore.create_df('_poes', poes_dt.items())
-        self.datastore.create_dset('_poes/slice_by_sid', slice_dt)
+        self.datastore.create_df('_rates', rates_dt.items())
+        self.datastore.create_dset('_rates/slice_by_sid', slice_dt)
         # NB: compressing the dataset causes a big slowdown in writing :-(
 
         oq = self.oqparam
@@ -444,7 +445,7 @@ def execute(self):
             oq.mags_by_trt = {
                 trt: python3compat.decode(dset[:])
                 for trt, dset in parent['source_mags'].items()}
-            if '_poes' in parent:
+            if '_rates' in parent:
                 self.build_curves_maps()  # repeat post-processing
                 return {}
             else:
@@ -475,7 +476,7 @@ def execute(self):
         logging.info('cfactor = {:_d}/{:_d} = {:.1f}'.format(
             int(self.cfactor[1]), int(self.cfactor[0]),
             self.cfactor[1] / self.cfactor[0]))
-        if '_poes' in self.datastore:
+        if '_rates' in self.datastore:
             self.build_curves_maps()
         if not oq.hazard_calculation_id:
             self.classical_time = time.time() - t0
@@ -663,19 +664,19 @@ def build_curves_maps(self):
         if not oq.hazard_curves:  # do nothing
             return
         N, S, M, P, L1, individual = self._create_hcurves_maps()
-        poes_gb = self.datastore.getsize('_poes') / 1024 ** 3
+        poes_gb = self.datastore.getsize('_rates') / 1024 ** 3
         if poes_gb < 1:
             ct = int(poes_gb * 32) or 1
         else:
             ct = int(poes_gb) + 32  # number of tasks > number of GB
         if ct > 1:
             logging.info('Producing %d postclassical tasks', ct)
-        if '_poes' in set(self.datastore):
+        if '_rates' in set(self.datastore):
             dstore = self.datastore
         else:
             dstore = self.datastore.parent
         sites_per_task = int(numpy.ceil(self.N / ct))
-        sbs = dstore['_poes/slice_by_sid'][:]
+        sbs = dstore['_rates/slice_by_sid'][:]
         sbs['sid'] //= sites_per_task
         # NB: there is a genious idea here, to split in tasks by using
         # the formula ``taskno = sites_ids // sites_per_task`` and then
0 commit comments