code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
dbserver.ensure_on() try: calc_id = int(calc_id) except ValueError: # assume calc_id is a pathname calc_id, datadir = datastore.extract_calc_id_datadir(calc_id) status = 'complete' remote = False else: remote = True job = logs.dbcmd('get_job', calc_id) if job is not None: sys.exit('There is already a job #%d in the local db' % calc_id) if remote: datadir = datastore.get_datadir() webex = WebExtractor(calc_id) status = webex.status['status'] hc_id = webex.oqparam.hazard_calculation_id if hc_id: sys.exit('The job has a parent (#%d) and cannot be ' 'downloaded' % hc_id) webex.dump('%s/calc_%d.hdf5' % (datadir, calc_id)) webex.close() with datastore.read(calc_id) as dstore: engine.expose_outputs(dstore, status=status) logging.info('Imported calculation %d successfully', calc_id)
def importcalc(calc_id)
Import a remote calculation into the local database. Server, username and password must be specified in an openquake.cfg file. NB: calc_id can be a local pathname to a datastore not already present in the database: in that case it is imported into the db.
6.257043
5.897098
1.061038
# extract dictionaries of coefficients specific to required # intensity measure type and for PGA C = self.COEFFS[imt] # For inslab GMPEs the correction term is fixed at -0.3 dc1 = -0.3 C_PGA = self.COEFFS[PGA()] # compute median pga on rock (vs30=1000), needed for site response # term calculation pga1000 = np.exp( self._compute_pga_rock(C_PGA, dc1, sites, rup, dists)) mean = (self._compute_magnitude_term(C, dc1, rup.mag) + self._compute_distance_term(C, rup.mag, dists) + self._compute_focal_depth_term(C, rup) + self._compute_forearc_backarc_term(C, sites, dists) + self._compute_site_response_term(C, sites, pga1000)) stddevs = self._get_stddevs(C, stddev_types, len(sites.vs30)) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
4.171959
4.191787
0.99527
return ((C['theta2'] + C['theta14'] + C['theta3'] * (mag - 7.8)) * np.log(dists.rhypo + self.CONSTS['c4'] * np.exp((mag - 6.) * self.CONSTS['theta9'])) + (C['theta6'] * dists.rhypo)) + C["theta10"]
def _compute_distance_term(self, C, mag, dists)
Computes the distance scaling term, as contained within equation (1b)
7.901997
7.592187
1.040806
global sock if sock is None: sock = zeromq.Socket( 'tcp://%s:%s' % (config.dbserver.host, DBSERVER_PORT), zeromq.zmq.REQ, 'connect').__enter__() # the socket will be closed when the calculation ends res = sock.send((action,) + args) if isinstance(res, parallel.Result): return res.get() return res
def dbcmd(action, *args)
A dispatcher to the database server. :param action: database action to perform :param args: arguments
7.508914
8.164474
0.919706
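Since importcalc above already calls logs.dbcmd('get_job', calc_id), the typical usage of this dispatcher is easy to sketch. The snippet below is only illustrative: it assumes dbcmd is in scope and a database server is listening; the job id 123 is made up.

# hedged usage sketch (not engine code): query the db server for a job record
job = dbcmd('get_job', 123)   # hypothetical calculation id; returns a record or None
if job is None:
    print('No job #123 in the local db')
else:
    print('Found job #%d' % job.id)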
if not hasattr(record, 'hostname'): record.hostname = '-' if not hasattr(record, 'job_id'): record.job_id = self.job_id
def _update_log_record(self, record)
Massage a log record before emitting it. Intended to be used by the custom log handlers defined in this module.
3.988848
3.996055
0.998197
handlers = [LogDatabaseHandler(job_id)] # log on db always if log_file is None: # add a StreamHandler if not already there if not any(h for h in logging.root.handlers if isinstance(h, logging.StreamHandler)): handlers.append(LogStreamHandler(job_id)) else: handlers.append(LogFileHandler(job_id, log_file)) for handler in handlers: logging.root.addHandler(handler) init(job_id, LEVELS.get(log_level, logging.WARNING)) try: yield finally: # sanity check to make sure that the logging on file is working if (log_file and log_file != os.devnull and os.path.getsize(log_file) == 0): logging.root.warn('The log file %s is empty!?' % log_file) for handler in handlers: logging.root.removeHandler(handler)
def handle(job_id, log_level='info', log_file=None)
Context manager adding and removing log handlers. :param job_id: ID of the current job :param log_level: one of debug, info, warn, error, critical :param log_file: log file path (if None, logs on stdout only)
3.394696
3.427925
0.990306
if config.dbserver.multi_user: job = dbcmd('get_job', -1, username) # can be None return getattr(job, 'id', 0) else: # single user return datastore.get_last_calc_id()
def get_last_calc_id(username=None)
:param username: if given, restrict to it :returns: the last calculation in the database or the datastore
10.244162
9.415932
1.08796
if not logging.root.handlers: # first time logging.basicConfig(level=level) if calc_id == 'job': # produce a calc_id by creating a job in the db calc_id = dbcmd('create_job', datastore.get_datadir()) elif calc_id == 'nojob': # produce a calc_id without creating a job calc_id = datastore.get_last_calc_id() + 1 else: assert isinstance(calc_id, int), calc_id fmt = '[%(asctime)s #{} %(levelname)s] %(message)s'.format(calc_id) for handler in logging.root.handlers: handler.setFormatter(logging.Formatter(fmt)) return calc_id
def init(calc_id='nojob', level=logging.INFO)
1. initialize the root logger (if not already initialized) 2. set the format of the root handlers (if any) 3. return a new calculation ID candidate if calc_id is 'job' or 'nojob' (with 'nojob' the calculation ID is not stored in the database)
3.411689
3.261748
1.04597
# strike slip length = 10.0 ** (-2.57 + 0.62 * mag) seis_wid = 20.0 # estimate area based on length if length < seis_wid: return length ** 2. else: return length * seis_wid
def get_median_area(self, mag, rake)
Returns the median rupture area as a function of magnitude only (the rake is not used): the rupture length is derived from the magnitude and the area is capped by a 20 km seismogenic width.
7.153076
7.288639
0.981401
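A quick numerical check of the scaling relation above, as a standalone sketch (the magnitude is arbitrary): for magnitude 7.0 the predicted length exceeds the assumed 20 km seismogenic width, so the median area becomes length times width.

mag = 7.0
length = 10.0 ** (-2.57 + 0.62 * mag)   # subsurface rupture length, ~58.9 km
seis_wid = 20.0                          # seismogenic width used by the model (km)
area = length ** 2. if length < seis_wid else length * seis_wid
print(length, area)                      # ~58.9 km, ~1178 km^2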
depths = np.array([ np.zeros_like(lons) + upper_depth, np.zeros_like(lats) + lower_depth ]) mesh = RectangularMesh( np.tile(lons, (2, 1)), np.tile(lats, (2, 1)), depths ) return SimpleFaultSurface(mesh)
def _construct_surface(lons, lats, upper_depth, lower_depth)
Utility method that constructs and return a simple fault surface with top edge specified by `lons` and `lats` and extending vertically from `upper_depth` to `lower_depth`. The underlying mesh is built by repeating the same coordinates (`lons` and `lats`) at the two specified depth levels.
3.001932
2.90238
1.0343
trench = _construct_surface(SUB_TRENCH_LONS, SUB_TRENCH_LATS, 0., 10.) sites = Mesh(lons, lats, None) return np.abs(trench.get_rx_distance(sites))
def _get_min_distance_to_sub_trench(lons, lats)
Compute and return the minimum distance between the subduction trench and the points specified by 'lons' and 'lats'. The method creates an instance of :class:`openquake.hazardlib.geo.SimpleFaultSurface` to model the subduction trench. The surface is assumed vertical and extending from 0 to 10 km depth. The 10 km depth value is arbitrary, given that the distance calculation depends only on the top edge depth. The method then calls :meth:`openquake.hazardlib.geo.base.BaseSurface.get_rx_distance` and returns its absolute value.
7.808249
5.202763
1.500789
vf = _construct_surface(VOLCANIC_FRONT_LONS, VOLCANIC_FRONT_LATS, 0., 10.) sites = Mesh(lons, lats, None) return vf.get_rx_distance(sites)
def _get_min_distance_to_volcanic_front(lons, lats)
Compute and return the minimum distance between the volcanic front and the points specified by 'lons' and 'lats'. The distance is negative if the point is located east of the volcanic front, positive otherwise. The method uses the same approach as :meth:`_get_min_distance_to_sub_trench` but the final distance is returned without taking the absolute value.
8.699282
10.353076
0.840261
if imt.name == 'PGV': V1 = 10 ** ((-4.021e-5 * x_tr + 9.905e-3) * (H - 30)) V2 = np.maximum(1., (10 ** (-0.012)) * ((rrup / 300.) ** 2.064)) corr = V2 if H > 30: corr *= V1 else: V2 = np.maximum(1., (10 ** (+0.13)) * ((rrup / 300.) ** 3.2)) corr = V2 if H > 30: V1 = 10 ** ((-8.1e-5 * x_tr + 2.0e-2) * (H - 30)) corr *= V1 return np.log(np.exp(mean) * corr)
def _apply_subduction_trench_correction(mean, x_tr, H, rrup, imt)
Implements the subduction trench correction described in equation 3.5.2-1, page 3-148 of "Technical Reports on National Seismic Hazard Maps for Japan"
3.381471
3.326967
1.016383
V1 = np.zeros_like(x_vf) if imt.name == 'PGV': idx = x_vf <= 75 V1[idx] = 4.28e-5 * x_vf[idx] * (H - 30) idx = x_vf > 75 V1[idx] = 3.21e-3 * (H - 30) V1 = 10 ** V1 else: idx = x_vf <= 75 V1[idx] = 7.06e-5 * x_vf[idx] * (H - 30) idx = x_vf > 75 V1[idx] = 5.30e-3 * (H - 30) V1 = 10 ** V1 return np.log(np.exp(mean) * V1)
def _apply_volcanic_front_correction(mean, x_vf, H, imt)
Implements the volcanic front correction described in equation 3.5.2-2, page 3-149 of "Technical Reports on National Seismic Hazard Maps for Japan"
2.193542
2.158489
1.01624
mean = self._get_mean(imt, rup.mag, rup.hypo_depth, dists.rrup, d=0) stddevs = self._get_stddevs(stddev_types, dists.rrup) mean = self._apply_amplification_factor(mean, sites.vs30) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
Implements equation 3.5.1-1 page 148 for mean value and equation 3.5.5-2 page 151 for total standard deviation. See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
3.351478
3.585812
0.93465
# clip magnitude at 8.3 as per note at page 3-36 in Table 3.3.2-6 # in "Technical Reports on National Seismic Hazard Maps for Japan" mag = min(mag, 8.3) if imt.name == 'PGV': mean = ( 0.58 * mag + 0.0038 * hypo_depth + d - 1.29 - np.log10(rrup + 0.0028 * 10 ** (0.5 * mag)) - 0.002 * rrup ) else: mean = ( 0.50 * mag + 0.0043 * hypo_depth + d + 0.61 - np.log10(rrup + 0.0055 * 10 ** (0.5 * mag)) - 0.003 * rrup ) mean = np.log10(10**(mean)/(g*100)) return mean
def _get_mean(self, imt, mag, hypo_depth, rrup, d)
Return mean value as defined in equation 3.5.1-1 page 148
4.458847
4.408688
1.011377
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types) std = np.zeros_like(rrup) std[rrup <= 20] = 0.23 idx = (rrup > 20) & (rrup <= 30) std[idx] = 0.23 - 0.03 * np.log10(rrup[idx] / 20) / np.log10(30. / 20.) std[rrup > 30] = 0.20 # convert from log10 to ln std = np.log(10 ** std) return [std for stddev_type in stddev_types]
def _get_stddevs(self, stddev_types, rrup)
Return standard deviations as defined in equation 3.5.5-2 page 151
2.339285
2.322194
1.00736
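The piecewise standard deviation above (0.23 log10 units up to 20 km, a log-distance interpolation between 20 and 30 km, 0.20 beyond, then converted to natural log) can be checked in isolation; the distances below are illustrative only.

import numpy as np

rrup = np.array([10., 25., 100.])        # illustrative rupture distances (km)
std = np.zeros_like(rrup)
std[rrup <= 20] = 0.23
idx = (rrup > 20) & (rrup <= 30)
std[idx] = 0.23 - 0.03 * np.log10(rrup[idx] / 20.) / np.log10(30. / 20.)
std[rrup > 30] = 0.20
std_ln = np.log(10 ** std)               # convert from log10 to natural log units
print(std_ln)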
assert np.all(vs30 == vs30[0]) if abs(vs30[0]-600.) < 1e-10: return mean * np.log(10) elif abs(vs30[0]-400.) < 1e-10: return mean * np.log(10) + np.log(self.AMP_F) elif abs(vs30[0]-800.) < 1e-10: return mean * np.log(10) - np.log(1.25) else: raise ValueError('Si and Midorikawa 1999 do not support this Vs30 value')
def _apply_amplification_factor(self, mean, vs30)
Apply the amplification factor to scale the PGV value from 600 to 400 m/s vs30 and convert the mean from base 10 to base e. The scaling factor from 600 m/s to 400 m/s was defined by NIED. The scaling factor from 600 m/s to 800 m/s is valid only for the elastic case, as no adjustment for kappa was considered.
3.426885
3.225914
1.062299
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types) std = np.zeros_like(pgv) std[pgv <= 25] = 0.20 idx = (pgv > 25) & (pgv <= 50) std[idx] = 0.20 - 0.05 * (pgv[idx] - 25) / 25 std[pgv > 50] = 0.15 # convert from log10 to ln std = np.log(10 ** std) return [std for stddev_type in stddev_types]
def _get_stddevs(self, stddev_types, pgv)
Return standard deviations as defined in equation 3.5.5-1 page 151
2.496996
2.501745
0.998102
mean, stddevs = super().get_mean_and_stddevs( sites, rup, dists, imt, stddev_types) x_tr = _get_min_distance_to_sub_trench(sites.lon, sites.lat) mean = _apply_subduction_trench_correction( mean, x_tr, rup.hypo_depth, dists.rrup, imt) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
Implements equation 3.5.1-1 page 148 for mean value and equation 3.5.5-1 page 151 for total standard deviation. See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
3.829053
4.314232
0.88754
mean, stddevs = super().get_mean_and_stddevs( sites, rup, dists, imt, stddev_types) x_vf = _get_min_distance_to_volcanic_front(sites.lon, sites.lat) mean = _apply_volcanic_front_correction(mean, x_vf, rup.hypo_depth, imt) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
Implements equation 3.5.1-1 page 148 for mean value and equation 3.5.5-1 page 151 for total standard deviation. See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
3.719337
4.15396
0.895371
# NB: matplotlib is imported inside since it is a costly import import matplotlib.pyplot as plt fig, ax = plt.subplots() ax.grid(True) ax.set_xlabel('tasks') ax.set_ylabel('GB') start = 0 for task_name, mem in plots: ax.plot(range(start, start + len(mem)), mem, label=task_name) start += len(mem) ax.legend() return plt
def make_figure(plots)
:param plots: list of pairs (task_name, memory array)
3.893698
2.976459
1.308164
dstore = util.read(calc_id) plots = [] for task_name in dstore['task_info']: mem = dstore['task_info/' + task_name]['mem_gb'] plots.append((task_name, mem)) plt = make_figure(plots) plt.show()
def plot_memory(calc_id=-1)
Plot the memory occupation
5.445975
5.645708
0.964622
lst = [] # build the export dtype, of the form PGA-0.1, PGA-0.2 ... for imt, imls in imtls.items(): for iml in imls: lst.append(('%s-%s' % (imt, iml), F32)) curves = numpy.zeros(nsites, numpy.dtype(lst)) for sid, pcurve in pmap.items(): curve = curves[sid] idx = 0 for imt, imls in imtls.items(): for iml in imls: curve['%s-%s' % (imt, iml)] = pcurve.array[idx, inner_idx] idx += 1 return curves
def convert_to_array(pmap, nsites, imtls, inner_idx=0)
Convert the probability map into a composite array with header of the form PGA-0.1, PGA-0.2 ... :param pmap: probability map :param nsites: total number of sites :param imtls: a DictArray with IMT and levels :returns: a composite array of length nsites
3.623506
3.070353
1.180159
poes = numpy.array(poes) if len(poes.shape) == 0: # `poes` was passed in as a scalar; # convert it to 1D array of 1 element poes = poes.reshape(1) if len(curves.shape) == 1: # `curves` was passed as 1 dimensional array, there is a single site curves = curves.reshape((1,) + curves.shape) # 1 x L L = curves.shape[1] # number of levels if L != len(imls): raise ValueError('The curves have %d levels, %d were passed' % (L, len(imls))) result = [] with warnings.catch_warnings(): warnings.simplefilter("ignore") # avoid RuntimeWarning: divide by zero encountered in log # happening in the classical_tiling tests imls = numpy.log(numpy.array(imls[::-1])) for curve in curves: # the hazard curve, having replaced the too small poes with EPSILON curve_cutoff = [max(poe, EPSILON) for poe in curve[::-1]] hmap_val = [] for poe in poes: # special case when the interpolation poe is bigger than the # maximum, i.e. the iml must be smaller than the minimum if poe > curve_cutoff[-1]: # the greatest poes in the curve # extrapolate the iml to zero as per # https://bugs.launchpad.net/oq-engine/+bug/1292093 # a consequence is that if all poes are zero any poe > 0 # is big and the hmap goes automatically to zero hmap_val.append(0) else: # exp-log interpolation, to reduce numerical errors # see https://bugs.launchpad.net/oq-engine/+bug/1252770 val = numpy.exp( numpy.interp( numpy.log(poe), numpy.log(curve_cutoff), imls)) hmap_val.append(val) result.append(hmap_val) return numpy.array(result)
def compute_hazard_maps(curves, imls, poes)
Given a set of hazard curve poes, interpolate a hazard map at the specified ``poe``. :param curves: 2D array of floats. Each row represents a curve, where the values in the row are the PoEs (Probabilities of Exceedance) corresponding to ``imls``. Each curve corresponds to a geographical location. :param imls: Intensity Measure Levels associated with these hazard ``curves``. Type should be an array-like of floats. :param poes: Value(s) on which to interpolate a hazard map from the input ``curves``. Can be an array-like or scalar value (for a single PoE). :returns: An array of shape N x P, where N is the number of curves and P the number of poes.
4.83796
4.863406
0.994768
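The exp-log interpolation at the core of compute_hazard_maps can be illustrated on a single toy hazard curve (all numbers below are invented): we look up the IML whose probability of exceedance equals the target PoE.

import numpy as np

imls = np.array([0.01, 0.05, 0.1, 0.2, 0.4])        # increasing intensity levels
curve = np.array([0.9, 0.6, 0.3, 0.1, 0.02])        # decreasing PoEs (toy curve)
target_poe = 0.2

# numpy.interp needs increasing x values, hence the reversal of both arrays;
# interpolating log(iml) versus log(poe) reduces numerical errors
iml = np.exp(np.interp(np.log(target_poe),
                       np.log(curve[::-1]), np.log(imls[::-1])))
print(iml)   # a value between 0.1 and 0.2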
# convert to numpy array and redimension so that it can be broadcast with # the gmvs for computing PoE values; there is a gmv for each rupture # here is an example: imls = [0.03, 0.04, 0.05], gmvs=[0.04750576] # => num_exceeding = [1, 1, 0] coming from 0.04750576 > [0.03, 0.04, 0.05] imls = numpy.array(imls).reshape((len(imls), 1)) num_exceeding = numpy.sum(numpy.array(gmvs) >= imls, axis=1) poes = 1 - numpy.exp(- (invest_time / duration) * num_exceeding) return poes
def _gmvs_to_haz_curve(gmvs, imls, invest_time, duration)
Given a set of ground motion values (``gmvs``) and intensity measure levels (``imls``), compute hazard curve probabilities of exceedance. :param gmvs: A list of ground motion values, as floats. :param imls: A list of intensity measure levels, as floats. :param float invest_time: Investigation time, in years. It is with this time span that we compute probabilities of exceedance. Another way to put it is the following. When computing a hazard curve, we want to answer the question: what is the probability of ground motion meeting or exceeding the specified levels (``imls``) in a given time span (``invest_time``)? :param float duration: Time window during which GMFs occur. Another way to say it is: the period of time over which we simulate ground motion occurrences. NOTE: Duration is computed as the calculation investigation time multiplied by the number of stochastic event sets. :returns: Numpy array of PoEs (probabilities of exceedance).
4.880785
4.606614
1.059517
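Using the example in the code comment above (a single GMV of 0.04750576 against three levels), the PoE formula can be verified directly; the investigation time and duration are arbitrary illustrative values.

import numpy as np

gmvs = [0.04750576]
imls = np.array([0.03, 0.04, 0.05]).reshape((3, 1))
num_exceeding = np.sum(np.array(gmvs) >= imls, axis=1)   # -> [1, 1, 0]
invest_time, duration = 50., 500.                         # illustrative values
poes = 1 - np.exp(-(invest_time / duration) * num_exceeding)
print(num_exceeding, poes)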
M, P = len(imtls), len(poes) hmap = probability_map.ProbabilityMap.build(M, P, pmap, dtype=F32) if len(pmap) == 0: return hmap # empty hazard map for i, imt in enumerate(imtls): curves = numpy.array([pmap[sid].array[imtls(imt), 0] for sid in pmap.sids]) data = compute_hazard_maps(curves, imtls[imt], poes) # array (N, P) for sid, value in zip(pmap.sids, data): array = hmap[sid].array for j, val in enumerate(value): array[i, j] = val return hmap
def make_hmap(pmap, imtls, poes)
Compute the hazard maps associated to the passed probability map. :param pmap: hazard curves in the form of a ProbabilityMap :param imtls: DictArray with M intensity measure types :param poes: P PoEs where to compute the maps :returns: a ProbabilityMap with size (N, M, P)
5.180434
4.3554
1.189428
if isinstance(pmap, probability_map.ProbabilityMap): # this is here for compatibility with the # past, it could be removed in the future hmap = make_hmap(pmap, imtls, poes) pdic = general.DictArray({imt: poes for imt in imtls}) return convert_to_array(hmap, nsites, pdic) try: hcurves = pmap.value except AttributeError: hcurves = pmap dtlist = [('%s-%s' % (imt, poe), F32) for imt in imtls for poe in poes] array = numpy.zeros(len(pmap), dtlist) for imt, imls in imtls.items(): curves = hcurves[:, imtls(imt)] for poe in poes: array['%s-%s' % (imt, poe)] = compute_hazard_maps( curves, imls, poe).flat return array
def make_hmap_array(pmap, imtls, poes, nsites)
:returns: a compound array of hazard maps of shape nsites
4.652668
4.468728
1.041161
uhs = numpy.zeros(len(hmap), info['uhs_dt']) for p, poe in enumerate(info['poes']): for m, imt in enumerate(info['imtls']): if imt.startswith(('PGA', 'SA')): uhs[str(poe)][imt] = hmap[:, m, p] return uhs
def make_uhs(hmap, info)
Make Uniform Hazard Spectra curves for each location. :param hmap: array of shape (N, M, P) :param info: a dictionary with keys poes, imtls, uhs_dt :returns: a composite array containing uniform hazard spectra
5.47691
3.630936
1.508402
data = [] for ebr in ebruptures: rup = ebr.rupture self.cmaker.add_rup_params(rup) ruptparams = tuple(getattr(rup, param) for param in self.params) point = rup.surface.get_middle_point() multi_lons, multi_lats = rup.surface.get_surface_boundaries() bounds = ','.join('((%s))' % ','.join( '%.5f %.5f' % (lon, lat) for lon, lat in zip(lons, lats)) for lons, lats in zip(multi_lons, multi_lats)) try: rate = ebr.rupture.occurrence_rate except AttributeError: # for nonparametric sources rate = numpy.nan data.append( (ebr.serial, ebr.srcidx, ebr.n_occ, rate, rup.mag, point.x, point.y, point.z, rup.surface.get_strike(), rup.surface.get_dip(), rup.rake, 'MULTIPOLYGON(%s)' % decode(bounds)) + ruptparams) return numpy.array(data, self.dt)
def to_array(self, ebruptures)
Convert a list of ebruptures into an array of dtype RuptureData.dt
4.053836
3.989175
1.016209
self.nruptures += len(rup_array) offset = len(self.datastore['rupgeoms']) rup_array.array['gidx1'] += offset rup_array.array['gidx2'] += offset previous = self.datastore.get_attr('ruptures', 'nbytes', 0) self.datastore.extend( 'ruptures', rup_array, nbytes=previous + rup_array.nbytes) self.datastore.extend('rupgeoms', rup_array.geom) # TODO: PMFs for nonparametric ruptures are not stored self.datastore.flush()
def save(self, rup_array)
Store the ruptures in array format.
5.858317
5.751989
1.018485
if 'ruptures' not in self.datastore: # for UCERF return codes = numpy.unique(self.datastore['ruptures']['code']) attr = {'code_%d' % code: ' '.join( cls.__name__ for cls in BaseRupture.types[code]) for code in codes} self.datastore.set_attrs('ruptures', **attr)
def close(self)
Save information about the rupture codes as attributes of the 'ruptures' dataset.
8.40486
5.659595
1.485064
oq = dstore['oqparam'] weights = dstore['weights'][:, 0] eff_time = oq.investigation_time * oq.ses_per_logic_tree_path num_events = countby(dstore['events'].value, 'rlz') periods = return_periods or oq.return_periods or scientific.return_periods( eff_time, max(num_events.values())) return scientific.LossCurvesMapsBuilder( oq.conditional_loss_poes, numpy.array(periods), loss_dt or oq.loss_dt(), weights, num_events, eff_time, oq.risk_investigation_time)
def get_loss_builder(dstore, return_periods=None, loss_dt=None)
:param dstore: datastore for an event based risk calculation :returns: a LossCurvesMapsBuilder instance
11.728495
9.946182
1.179196
if '/' not in what: key, spec = what, '' else: key, spec = what.split('/') if spec and not spec.startswith(('ref-', 'sid-')): raise ValueError('Wrong specification in %s' % what) elif spec == '': # export losses for all assets aids = [] arefs = [] for aid, rec in enumerate(self.assetcol.array): aids.append(aid) arefs.append(self.asset_refs[aid]) elif spec.startswith('sid-'): # passed the site ID sid = int(spec[4:]) aids = [] arefs = [] for aid, rec in enumerate(self.assetcol.array): if rec['site_id'] == sid: aids.append(aid) arefs.append(self.asset_refs[aid]) elif spec.startswith('ref-'): # passed the asset name arefs = [spec[4:]] aids = [self.str2asset[arefs[0]]['ordinal']] else: raise ValueError('Wrong specification in %s' % what) return aids, arefs, spec, key
def parse(self, what)
:param what: can be 'rlz-1/ref-asset1', 'rlz-2/sid-1', ...
3.6281
3.325171
1.091102
writer = writers.CsvWriter(fmt=writers.FIVEDIGITS) ebr = hasattr(self, 'builder') for key in sorted(curves_dict): recs = curves_dict[key] data = [['asset', 'loss_type', 'loss', 'period' if ebr else 'poe']] for li, loss_type in enumerate(self.loss_types): if ebr: # event_based_risk array = recs[:, :, li] # shape (A, P, LI) periods = self.builder.return_periods for aref, losses in zip(asset_refs, array): for period, loss in zip(periods, losses): data.append((aref, loss_type, loss, period)) else: # classical_risk array = recs[loss_type] # shape (A,) loss_curve_dt for aref, losses, poes in zip( asset_refs, array['losses'], array['poes']): for loss, poe in zip(losses, poes): data.append((aref, loss_type, loss, poe)) dest = self.dstore.build_fname( 'loss_curves', '%s-%s' % (spec, key) if spec else key, 'csv') writer.save(data, dest) return writer.getsaved()
def export_csv(self, spec, asset_refs, curves_dict)
:param asset_refs: names of the assets :param curves_dict: a dictionary tag -> loss curves
5.03787
4.92031
1.023893
aids, arefs, spec, key = self.parse(what) if key.startswith('rlz'): curves = self.export_curves_rlzs(aids, key) else: # statistical exports curves = self.export_curves_stats(aids, key) return getattr(self, 'export_' + export_type)(spec, arefs, curves)
def export(self, export_type, what)
:param export_type: 'csv', 'json', ... :param what: string describing what to export :returns: list of exported file names
7.10112
7.54105
0.941662
if 'loss_curves-stats' in self.dstore: # classical_risk if self.R == 1: data = self.dstore['loss_curves-stats'][aids] # shape (A, 1) else: data = self.dstore['loss_curves-rlzs'][aids] # shape (A, R) if key.startswith('rlz-'): rlzi = int(key[4:]) return {key: data[:, rlzi]} # else key == 'rlzs', returns all data return {'rlz-%03d' % rlzi: data[:, rlzi] for rlzi in range(self.R)} # otherwise event_based curves = self.dstore['curves-rlzs'][aids] # shape (A, R, P) if key.startswith('rlz-'): rlzi = int(key[4:]) return {'rlz-%03d' % rlzi: curves[:, rlzi]} else: # key is 'rlzs', return a dictionary with all realizations # this may be disabled in the future unless an asset is specified dic = {} for rlzi in range(self.R): dic['rlz-%03d' % rlzi] = curves[:, rlzi] return dic
def export_curves_rlzs(self, aids, key)
:returns: a dictionary key -> record of dtype loss_curve_dt
3.769628
3.615208
1.042714
oq = self.dstore['oqparam'] stats = oq.hazard_stats().items() # pair (name, func) stat2idx = {stat[0]: s for s, stat in enumerate(stats)} if 'loss_curves-stats' in self.dstore: # classical_risk dset = self.dstore['loss_curves-stats'] data = dset[aids] # shape (A, S) if key == 'stats': return {stat[0]: data[:, s] for s, stat in enumerate(stats)} else: # a specific statistics return {key: data[:, stat2idx[key]]} elif 'curves-stats' in self.dstore: # event_based_risk dset = self.dstore['curves-stats'] data = dset[aids] if key == 'stats': return {stat[0]: data[:, s] for s, stat in enumerate(stats)} else: # a specific statistics return {key: data[:, stat2idx[key]]} else: raise KeyError('no loss curves in %s' % self.dstore)
def export_curves_stats(self, aids, key)
:returns: a dictionary stat name -> record of dtype loss_curve_dt
3.722455
3.530003
1.054519
C = self.COEFFS[imt] C_PGA = self.COEFFS[PGA()] C_AMP = self.AMP_COEFFS[imt] # Gets the PGA on rock - need to convert from g to cm/s/s pga_rock = self._compute_pga_rock(C_PGA, rup.mag, dists.rjb) * 980.665 # Get the mean ground motion value mean = (self._compute_nonlinear_magnitude_term(C, rup.mag) + self._compute_magnitude_distance_term(C, dists.rjb, rup.mag) + self._get_site_amplification(C_AMP, sites.vs30, pga_rock)) # Get standard deviations stddevs = self._get_stddevs(C, stddev_types, dists.rjb.shape) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. Implements equation 14 of Hong & Goda (2007)
3.421331
3.404528
1.004936
return np.exp(self._compute_linear_magnitude_term(C_PGA, mag) + self._compute_simple_distance_term(C_PGA, rjb))
def _compute_pga_rock(self, C_PGA, mag, rjb)
Returns the PGA (g) on rock, as defined in equation 15
4.992195
5.224727
0.955494
return self._compute_linear_magnitude_term(C, mag) +\ C["b3"] * ((mag - 7.0) ** 2.)
def _compute_nonlinear_magnitude_term(self, C, mag)
Computes the non-linear magnitude term
7.269055
7.310943
0.99427
return C["b4"] * np.log(np.sqrt(rjb ** 2. + C["h"] ** 2.))
def _compute_simple_distance_term(self, C, rjb)
The distance term for the PGA case ignores magnitude (equation 15)
6.901823
5.649414
1.221688
rval = np.sqrt(rjb ** 2. + C["h"] ** 2.) return (C["b4"] + C["b5"] * (mag - 4.5)) * np.log(rval)
def _compute_magnitude_distance_term(self, C, rjb, mag)
Returns the magnitude-dependent distance term
5.089188
5.056911
1.006383
# Get nonlinear term bnl = self._get_bnl(C_AMP, vs30) # f_nl_coeff = np.log(60.0 / 100.0) * np.ones_like(vs30) idx = pga_rock > 60.0 f_nl_coeff[idx] = np.log(pga_rock[idx] / 100.0) return np.log(np.exp( C_AMP["blin"] * np.log(vs30 / self.CONSTS["Vref"]) + bnl * f_nl_coeff))
def _get_site_amplification(self, C_AMP, vs30, pga_rock)
Gets the site amplification term based on equations 7 and 8 of Atkinson & Boore (2006)
4.551487
4.614746
0.986292
# Default case 8d bnl = np.zeros_like(vs30) if np.all(vs30 >= self.CONSTS["Vref"]): return bnl # Case 8a bnl[vs30 < self.CONSTS["v1"]] = C_AMP["b1sa"] # Case 8b idx = np.logical_and(vs30 > self.CONSTS["v1"], vs30 <= self.CONSTS["v2"]) if np.any(idx): bnl[idx] = (C_AMP["b1sa"] - C_AMP["b2sa"]) *\ (np.log(vs30[idx] / self.CONSTS["v2"]) / np.log(self.CONSTS["v1"] / self.CONSTS["v2"])) + C_AMP["b2sa"] # Case 8c idx = np.logical_and(vs30 > self.CONSTS["v2"], vs30 < self.CONSTS["Vref"]) if np.any(idx): bnl[idx] = C_AMP["b2sa"] *\ np.log(vs30[idx] / self.CONSTS["Vref"]) /\ np.log(self.CONSTS["v2"] / self.CONSTS["Vref"]) return bnl
def _get_bnl(self, C_AMP, vs30)
Gets the nonlinear term, given by equation 8 of Atkinson & Boore 2006
2.151289
2.148534
1.001283
stddevs = [] for stddev_type in stddev_types: assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES if stddev_type == const.StdDev.TOTAL: stddevs.append(C["sigtot"] + np.zeros(stddev_shape)) elif stddev_type == const.StdDev.INTRA_EVENT: stddevs.append(C['sig2'] + np.zeros(stddev_shape)) elif stddev_type == const.StdDev.INTER_EVENT: stddevs.append(C['sig1'] + np.zeros(stddev_shape)) return stddevs
def _get_stddevs(self, C, stddev_types, stddev_shape)
Returns the standard deviations given in Table 2
1.968019
1.964371
1.001857
for fname in fnames: try: node = nrml.read(fname) except ValueError as err: print(err) return with open(fname + '.bak', 'wb') as f: f.write(open(fname, 'rb').read()) with open(fname, 'wb') as f: # make sure the xmlns i.e. the NRML version is unchanged nrml.write(node.nodes, f, writers.FIVEDIGITS, xmlns=node['xmlns']) print('Reformatted %s, original left in %s.bak' % (fname, fname))
def tidy(fnames)
Reformat an NRML file into a canonical form. That also means reducing the precision of the floats to a standard value. If the file is invalid, a clear error message is shown.
6.159224
5.351033
1.151035
if is_from_fault_source: # for simple and complex fault sources, # rupture surface geometry is represented by a mesh surf_mesh = surface.mesh lons = surf_mesh.lons lats = surf_mesh.lats depths = surf_mesh.depths else: if is_multi_surface: # `list` of # openquake.hazardlib.geo.surface.planar.PlanarSurface # objects: surfaces = surface.surfaces # lons, lats, and depths are arrays with len == 4*N, # where N is the number of surfaces in the # multisurface for each `corner_*`, the ordering is: # - top left # - top right # - bottom left # - bottom right lons = numpy.concatenate([x.corner_lons for x in surfaces]) lats = numpy.concatenate([x.corner_lats for x in surfaces]) depths = numpy.concatenate([x.corner_depths for x in surfaces]) elif is_gridded_surface: # the surface mesh has shape (1, N) lons = surface.mesh.lons[0] lats = surface.mesh.lats[0] depths = surface.mesh.depths[0] else: # For area or point source, # rupture geometry is represented by a planar surface, # defined by 3D corner points lons = numpy.zeros((4)) lats = numpy.zeros((4)) depths = numpy.zeros((4)) # NOTE: It is important to maintain the order of these # corner points. TODO: check the ordering for i, corner in enumerate((surface.top_left, surface.top_right, surface.bottom_left, surface.bottom_right)): lons[i] = corner.longitude lats[i] = corner.latitude depths[i] = corner.depth return lons, lats, depths
def get_geom(surface, is_from_fault_source, is_multi_surface, is_gridded_surface)
The following fields can be interpreted in different ways, depending on the value of `is_from_fault_source`. If `is_from_fault_source` is True, each of these fields should contain a 2D numpy array (all of the same shape). Each triple of (lon, lat, depth) for a given index represents the node of a rectangular mesh. If `is_from_fault_source` is False, each of these fields should contain a sequence (tuple, list, or numpy array, for example) of 4 values. In order, the triples of (lon, lat, depth) represent top left, top right, bottom left, and bottom right corners of the rupture's planar surface. Update: There is now a third case. If the rupture originated from a characteristic fault source with a multi-planar-surface geometry, `lons`, `lats`, and `depths` will contain one or more sets of 4 points, similar to how planar surface geometry is stored (see above). :param surface: a Surface instance :param is_from_fault_source: a boolean :param is_multi_surface: a boolean :param is_gridded_surface: a boolean
2.751593
2.766425
0.994639
all_eids = [] for rup in rup_array: grp_id = rup['grp_id'] samples = samples_by_grp[grp_id] num_rlzs = num_rlzs_by_grp[grp_id] num_events = rup['n_occ'] if samples > 1 else rup['n_occ'] * num_rlzs eids = TWO32 * U64(rup['serial']) + numpy.arange(num_events, dtype=U64) all_eids.append(eids) return numpy.concatenate(all_eids)
def get_eids(rup_array, samples_by_grp, num_rlzs_by_grp)
:param rup_array: a composite array with fields serial, n_occ and grp_id :param samples_by_grp: a dictionary grp_id -> samples :param num_rlzs_by_grp: a dictionary grp_id -> num_rlzs
3.026639
2.480789
1.220031
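The event ID convention used above (eid = rupture serial * 2**32 + event index) can be sketched on its own; the serial and the number of events are made up for illustration.

import numpy as np

U64 = np.uint64
TWO32 = U64(2 ** 32)

serial, num_events = 42, 3                        # hypothetical serial and event count
eids = TWO32 * U64(serial) + np.arange(num_events, dtype=U64)
print(eids)           # [180388626432 180388626433 180388626434]
print(eids // TWO32)  # the rupture serial can be recovered: [42 42 42]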
# compute cdf from pmf cdf = numpy.cumsum(self.probs_occur) n_occ = numpy.digitize(numpy.random.random(n), cdf) return n_occ
def sample_number_of_occurrences(self, n=1)
See :meth:`superclass method <.rupture.BaseRupture.sample_number_of_occurrences>` for spec of input and result values. Uses 'Inverse Transform Sampling' method.
5.764899
6.06933
0.949841
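A self-contained sketch of the inverse transform sampling idiom used above (cumulative sum of the PMF, then numpy.digitize of uniform draws); the probability mass function is invented.

import numpy as np

probs_occur = np.array([0.7, 0.2, 0.1])   # hypothetical PMF for 0, 1, 2 occurrences
cdf = np.cumsum(probs_occur)               # [0.7, 0.9, 1.0]

np.random.seed(42)                          # seed only for reproducibility of the sketch
n_occ = np.digitize(np.random.random(10), cdf)
print(n_occ)                                # mostly zeros, occasionally 1 or 2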
j = 0 dic = {} if self.samples == 1: # full enumeration or akin to it for rlzs in rlzs_by_gsim.values(): for rlz in rlzs: dic[rlz] = numpy.arange(j, j + self.n_occ, dtype=U64) + ( TWO32 * U64(self.serial)) j += self.n_occ else: # associated eids to the realizations rlzs = numpy.concatenate(list(rlzs_by_gsim.values())) assert len(rlzs) == self.samples, (len(rlzs), self.samples) histo = general.random_histogram( self.n_occ, self.samples, self.serial) for rlz, n in zip(rlzs, histo): dic[rlz] = numpy.arange(j, j + n, dtype=U64) + ( TWO32 * U64(self.serial)) j += n return dic
def get_eids_by_rlz(self, rlzs_by_gsim)
:param rlzs_by_gsim: a dictionary gsim -> rlzs array :returns: a dictionary rlz index -> eids array
3.771306
3.602437
1.046876
all_eids, rlzs = [], [] for rlz, eids in self.get_eids_by_rlz(rlzs_by_gsim).items(): all_eids.extend(eids) rlzs.extend([rlz] * len(eids)) return numpy.fromiter(zip(all_eids, rlzs), events_dt)
def get_events(self, rlzs_by_gsim)
:returns: an array of events with fields eid, rlz
3.482543
2.787791
1.249212
num_events = self.n_occ if self.samples > 1 else self.n_occ * num_rlzs return TWO32 * U64(self.serial) + numpy.arange(num_events, dtype=U64)
def get_eids(self, num_rlzs)
:param num_rlzs: the number of realizations for the given group :returns: an array of event IDs
10.398494
11.180071
0.930092
numpy.random.seed(self.serial) sess = numpy.random.choice(num_ses, len(events)) + 1 events_by_ses = collections.defaultdict(list) for ses, event in zip(sess, events): events_by_ses[ses].append(event) for ses in events_by_ses: events_by_ses[ses] = numpy.array(events_by_ses[ses]) return events_by_ses
def get_events_by_ses(self, events, num_ses)
:returns: a dictionary ses index -> events array
2.333844
2.179869
1.070635
rupture = self.rupture events = self.get_events(rlzs_by_gsim) events_by_ses = self.get_events_by_ses(events, num_ses) new = ExportedRupture(self.serial, events_by_ses) new.mesh = mesh[()] if isinstance(rupture.surface, geo.ComplexFaultSurface): new.typology = 'complexFaultsurface' elif isinstance(rupture.surface, geo.SimpleFaultSurface): new.typology = 'simpleFaultsurface' elif isinstance(rupture.surface, geo.GriddedSurface): new.typology = 'griddedRupture' elif isinstance(rupture.surface, geo.MultiSurface): new.typology = 'multiPlanesRupture' else: new.typology = 'singlePlaneRupture' new.is_from_fault_source = iffs = isinstance( rupture.surface, (geo.ComplexFaultSurface, geo.SimpleFaultSurface)) new.is_gridded_surface = igs = isinstance( rupture.surface, geo.GriddedSurface) new.is_multi_surface = ims = isinstance( rupture.surface, geo.MultiSurface) new.lons, new.lats, new.depths = get_geom( rupture.surface, iffs, ims, igs) new.surface = rupture.surface new.strike = rupture.surface.get_strike() new.dip = rupture.surface.get_dip() new.rake = rupture.rake new.hypocenter = rupture.hypocenter new.tectonic_region_type = rupture.tectonic_region_type new.magnitude = new.mag = rupture.mag new.top_left_corner = None if iffs or ims or igs else ( new.lons[0], new.lats[0], new.depths[0]) new.top_right_corner = None if iffs or ims or igs else ( new.lons[1], new.lats[1], new.depths[1]) new.bottom_left_corner = None if iffs or ims or igs else ( new.lons[2], new.lats[2], new.depths[2]) new.bottom_right_corner = None if iffs or ims or igs else ( new.lons[3], new.lats[3], new.depths[3]) return new
def export(self, mesh, rlzs_by_gsim, num_ses)
Return an :class:`ExportedRupture` instance, with all the attributes set, suitable for export in XML format.
2.129582
2.111441
1.008592
with performance.Monitor('to_hdf5') as mon: for input_file in input: if input_file.endswith('.npz'): output = convert_npz_hdf5(input_file, input_file[:-3] + 'hdf5') elif input_file.endswith('.xml'): # for source model files output = convert_xml_hdf5(input_file, input_file[:-3] + 'hdf5') else: continue print('Generated %s' % output) print(mon)
def to_hdf5(input)
Convert .xml and .npz files to .hdf5 files.
3.791095
3.433414
1.104177
for source, s_sites in source_site_filter(sources): try: for rupture in source.iter_ruptures(): [n_occ] = rupture.sample_number_of_occurrences() for _ in range(n_occ): yield rupture except Exception as err: etype, err, tb = sys.exc_info() msg = 'An error occurred with source id=%s. Error: %s' msg %= (source.source_id, str(err)) raise_(etype, msg, tb)
def stochastic_event_set(sources, source_site_filter=nofilter)
Generates a 'Stochastic Event Set' (that is a collection of earthquake ruptures) representing a possible *realization* of the seismicity as described by a source model. The calculator loops over sources. For each source, it loops over ruptures. For each rupture, the number of occurrences is randomly sampled by calling :meth:`openquake.hazardlib.source.rupture.BaseProbabilisticRupture.sample_number_of_occurrences` .. note:: This calculator uses random numbers. In order to reproduce the same results, the numpy random number generator needs to be seeded, see http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.seed.html :param sources: An iterator of seismic source objects (instances of subclasses of :class:`~openquake.hazardlib.source.base.BaseSeismicSource`). :param source_site_filter: The source filter to use (default noop filter) :returns: Generator of :class:`~openquake.hazardlib.source.rupture.Rupture` objects that are contained in an event set. Some ruptures can be missing from it, others can appear one or more times in a row.
3.682089
3.180903
1.157561
if not BaseRupture._code: BaseRupture.init() # initialize rupture codes rups = [] geoms = [] nbytes = 0 offset = 0 for ebrupture in ebruptures: rup = ebrupture.rupture mesh = surface_to_array(rup.surface) sy, sz = mesh.shape[1:] # sanity checks assert sy < TWO16, 'Too many multisurfaces: %d' % sy assert sz < TWO16, 'The rupture mesh spacing is too small' points = mesh.reshape(3, -1).T # shape (n, 3) minlon = points[:, 0].min() minlat = points[:, 1].min() maxlon = points[:, 0].max() maxlat = points[:, 1].max() if srcfilter.integration_distance and len(srcfilter.close_sids( (minlon, minlat, maxlon, maxlat), rup.tectonic_region_type, rup.mag)) == 0: continue hypo = rup.hypocenter.x, rup.hypocenter.y, rup.hypocenter.z rate = getattr(rup, 'occurrence_rate', numpy.nan) tup = (ebrupture.serial, ebrupture.srcidx, ebrupture.grp_id, rup.code, ebrupture.n_occ, rup.mag, rup.rake, rate, minlon, minlat, maxlon, maxlat, hypo, offset, offset + len(points), sy, sz) offset += len(points) rups.append(tup) geoms.append(numpy.array([tuple(p) for p in points], point3d)) nbytes += rupture_dt.itemsize + mesh.nbytes if not rups: return () dic = dict(geom=numpy.concatenate(geoms), nbytes=nbytes) # TODO: PMFs for nonparametric ruptures are not converted return hdf5.ArrayWrapper(numpy.array(rups, rupture_dt), dic)
def get_rup_array(ebruptures, srcfilter=nofilter)
Convert a list of EBRuptures into a numpy composite array, by filtering out the ruptures far away from every site
4.551273
4.625269
0.984002
eb_ruptures = [] numpy.random.seed(sources[0].serial) [grp_id] = set(src.src_group_id for src in sources) # AccumDict of arrays with 3 elements weight, nsites, calc_time calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32)) # Set the parameters required to compute the number of occurrences # of the group of sources # assert param['oqparam'].number_of_logic_tree_samples > 0 samples = getattr(sources[0], 'samples', 1) tom = getattr(sources, 'temporal_occurrence_model') rate = tom.occurrence_rate time_span = tom.time_span # Note that using a single time interval corresponding to the product # of the investigation time and the number of realisations as we do # here is admitted only in the case of a time-independent model grp_num_occ = numpy.random.poisson(rate * time_span * samples * num_ses) # Now we process the sources included in the group. Possible cases: # * The group is a cluster. In this case we choose one rupture per each # source; uncertainty in the ruptures can be handled in this case # using mutually exclusive ruptures (note that this is admitted # only for nons-parametric sources). # * The group contains mutually exclusive sources. In this case we # choose one source and then one rupture from this source. rup_counter = {} rup_data = {} eff_ruptures = 0 for rlz_num in range(grp_num_occ): if sources.cluster: for src, _sites in srcfilter(sources): # Sum Ruptures if rlz_num == 0: eff_ruptures += src.num_ruptures # Track calculation time t0 = time.time() rup = src.get_one_rupture() # The problem here is that we do not know a-priori the # number of occurrences of a given rupture. if src.id not in rup_counter: rup_counter[src.id] = {} rup_data[src.id] = {} if rup.idx not in rup_counter[src.id]: rup_counter[src.id][rup.idx] = 1 rup_data[src.id][rup.idx] = [rup, src.id, grp_id] else: rup_counter[src.id][rup.idx] += 1 # Store info dt = time.time() - t0 calc_times[src.id] += numpy.array([len(rup_data[src.id]), src.nsites, dt]) elif param['src_interdep'] == 'mutex': print('Not yet implemented') exit(0) # Create event based ruptures for src_key in rup_data: for rup_key in rup_data[src_key]: dat = rup_data[src_key][rup_key] cnt = rup_counter[src_key][rup_key] ebr = EBRupture(dat[0], dat[1], dat[2], cnt, samples) eb_ruptures.append(ebr) return eb_ruptures, calc_times, eff_ruptures, grp_id
def sample_cluster(sources, srcfilter, num_ses, param)
Generates the ruptures sampled from a cluster of sources. :param sources: a sequence of sources of the same group :param srcfilter: a source filter :param num_ses: number of stochastic event sets :param param: a dictionary of additional parameters including ses_per_logic_tree_path :returns: a tuple (eb_ruptures, calc_times, eff_ruptures, grp_id)
5.221614
5.068709
1.030167
# AccumDict of arrays with 3 elements weight, nsites, calc_time calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32)) # Compute and save stochastic event sets num_ses = param['ses_per_logic_tree_path'] eff_ruptures = 0 ir_mon = monitor('iter_ruptures', measuremem=False) # Compute the number of occurrences of the source group. This is used # for cluster groups or groups with mutually exclusive sources. if (getattr(sources, 'atomic', False) and getattr(sources, 'cluster', False)): eb_ruptures, calc_times, eff_ruptures, grp_id = sample_cluster( sources, srcfilter, num_ses, param) # Yield ruptures yield AccumDict(rup_array=get_rup_array(eb_ruptures), calc_times=calc_times, eff_ruptures={grp_id: eff_ruptures}) else: eb_ruptures = [] # AccumDict of arrays with 3 elements weight, nsites, calc_time calc_times = AccumDict(accum=numpy.zeros(3, numpy.float32)) [grp_id] = set(src.src_group_id for src in sources) for src, _sites in srcfilter(sources): t0 = time.time() if len(eb_ruptures) > MAX_RUPTURES: # yield partial result to avoid running out of memory yield AccumDict(rup_array=get_rup_array(eb_ruptures, srcfilter), calc_times={}, eff_ruptures={grp_id: eff_ruptures}) eb_ruptures.clear() samples = getattr(src, 'samples', 1) n_occ = 0 for rup, n_occ in src.sample_ruptures(samples * num_ses, ir_mon): ebr = EBRupture(rup, src.id, grp_id, n_occ, samples) eb_ruptures.append(ebr) n_occ += ebr.n_occ eff_ruptures += src.num_ruptures dt = time.time() - t0 calc_times[src.id] += numpy.array([n_occ, src.nsites, dt]) rup_array = get_rup_array(eb_ruptures, srcfilter) yield AccumDict(rup_array=rup_array, calc_times=calc_times, eff_ruptures={grp_id: eff_ruptures})
def sample_ruptures(sources, srcfilter, param, monitor=Monitor())
:param sources: a sequence of sources of the same group :param srcfilter: SourceFilter instance used also for bounding box post filtering :param param: a dictionary of additional parameters including ses_per_logic_tree_path :param monitor: monitor instance :yields: dictionaries with keys rup_array, calc_times, eff_ruptures
4.276579
3.771639
1.133878
data = _get_shakemap_array(grid_file) if uncertainty_file: data2 = _get_shakemap_array(uncertainty_file) # sanity check: lons and lats must be the same for coord in ('lon', 'lat'): numpy.testing.assert_equal(data[coord], data2[coord]) # copy the stddevs from the uncertainty array for imt in data2['std'].dtype.names: data['std'][imt] = data2['std'][imt] return data
def get_shakemap_array(grid_file, uncertainty_file=None)
:param grid_file: a shakemap grid file :param uncertainty_file: a shakemap uncertainty file :returns: array with fields lon, lat, vs30, val, std
2.978899
3.104834
0.959439
# NB: matplotlib is imported inside since it is a costly import import matplotlib.pyplot as p dstore = util.read(calc_id) sitecol = dstore['sitecol'] lons, lats = sitecol.lons, sitecol.lats if len(lons) > 1 and cross_idl(*lons): lons %= 360 fig, ax = p.subplots() ax.grid(True) if 'site_model' in dstore: sm = dstore['site_model'] sm_lons, sm_lats = sm['lon'], sm['lat'] if len(sm_lons) > 1 and cross_idl(*sm_lons): sm_lons %= 360 p.scatter(sm_lons, sm_lats, marker='.', color='orange') p.scatter(lons, lats, marker='+') p.show()
def plot_sites(calc_id=-1)
Plot the sites
3.690222
3.693407
0.999138
C = self.COEFFS[imt] imean = (self._get_magnitude_term(C, rup.mag) + self._get_distance_term(C, dists.rrup, sites.backarc) + self._get_site_term(C, sites.vs30) + self._get_scaling_term(C, dists.rrup)) # Convert mean from cm/s and cm/s/s and from common logarithm to # natural logarithm if imt.name in "SA PGA": mean = np.log((10.0 ** (imean - 2.0)) / g) else: mean = np.log((10.0 ** (imean))) stddevs = self._get_stddevs(C, len(dists.rrup), stddev_types) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
3.479802
3.473545
1.001801
# Geometric attenuation function distance_scale = -np.log10(np.sqrt(rrup ** 2 + 3600.0)) # Anelastic attenuation in the backarc distance_scale[backarc] += (C["c2"] * rrup[backarc]) # Anelastic Attenuation in the forearc idx = np.logical_not(backarc) distance_scale[idx] += (C["c1"] * rrup[idx]) return distance_scale
def _get_distance_term(self, C, rrup, backarc)
Returns the distance scaling term, which varies depending on whether the site is in the forearc or the backarc
4.563389
3.987283
1.144486
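A standalone numpy sketch of the forearc/backarc split above; the anelastic coefficients are placeholders, not the GMPE's real table values.

import numpy as np

C = {'c1': -0.0031, 'c2': -0.0022}           # placeholder anelastic coefficients
rrup = np.array([50., 100., 150.])
backarc = np.array([False, True, False])

distance_scale = -np.log10(np.sqrt(rrup ** 2 + 3600.0))   # geometric attenuation
distance_scale[backarc] += C['c2'] * rrup[backarc]         # backarc anelastic term
forearc = np.logical_not(backarc)
distance_scale[forearc] += C['c1'] * rrup[forearc]         # forearc anelastic term
print(distance_scale)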
a_f = 0.15 + 0.0007 * rrup a_f[a_f > 0.35] = 0.35 return C["af"] + a_f
def _get_scaling_term(self, C, rrup)
Applies the Cascadia correction factor from Table 2 and the positive correction factor given on Page 567
6.340125
5.975488
1.061022
# see https://bugs.launchpad.net/oq-engine/+bug/1279247 for an explanation # of the algorithm used result = {'trti': trti, 'num_ruptures': 0} # all the time is spent in collect_bin_data ruptures = [] for src in sources: ruptures.extend(src.iter_ruptures()) bin_data = disagg.collect_bin_data( ruptures, sitecol, cmaker, iml4, oqparam.truncation_level, oqparam.num_epsilon_bins, monitor) if bin_data: # dictionary poe, imt, rlzi -> pne for sid in sitecol.sids: for (poe, imt, rlzi), matrix in disagg.build_disagg_matrix( bin_data, bin_edges, sid, monitor).items(): result[sid, rlzi, poe, imt] = matrix result['cache_info'] = monitor.cache_info result['num_ruptures'] = len(bin_data.mags) return result
def compute_disagg(sitecol, sources, cmaker, iml4, trti, bin_edges, oqparam, monitor)
:param sitecol: a :class:`openquake.hazardlib.site.SiteCollection` instance :param sources: list of hazardlib source objects :param cmaker: a :class:`openquake.hazardlib.gsim.base.ContextMaker` instance :param iml4: an array of intensities of shape (N, R, M, P) :param trti: tectonic region type index :param bin_edges: a dictionary site_id -> edges :param oqparam: the parameters in the job.ini file :param monitor: monitor of the currently running job :returns: a dictionary of probability arrays, with composite key (sid, rlzi, poe, imt, iml, trti).
5.97215
5.217347
1.144672
m1 = 6.4 r1 = 50. h = 6. R = np.sqrt(rjb ** 2 + h ** 2) R1 = np.sqrt(r1 ** 2 + h ** 2) less_r1 = rjb < r1 ge_r1 = rjb >= r1 mean = (C['c1'] + C['c4'] * (mag - m1) * np.log(R) + C['c5'] * rjb + C['c8'] * (8.5 - mag) ** 2) mean[less_r1] += C['c3'] * np.log(R[less_r1]) mean[ge_r1] += (C['c3'] * np.log(R1) + C['c6'] * (np.log(R[ge_r1]) - np.log(R1))) if mag < m1: mean += C['c2'] * (mag - m1) else: mean += C['c7'] * (mag - m1) return mean
def _compute_mean(self, C, mag, rjb)
Compute mean value, see table 2.
2.641412
2.60097
1.015549
''' Returns the adjustment factors (fval, fival) proposed by Hermann (1978) :param float bval: Gutenberg & Richter (1944) b-value :param np.ndarray min_mag: Minimum magnitude of completeness table :param non-negative float mag_inc: Magnitude increment of the completeness table ''' fval = 10. ** (bval * min_mag) fival = 10. ** (bval * (mag_inc / 2.)) - 10. ** (-bval * (mag_inc / 2.)) return fval, fival
def hermann_adjustment_factors(bval, min_mag, mag_inc)
Returns the adjustment factors (fval, fival) proposed by Hermann (1978) :param float bval: Gutenberg & Richter (1944) b-value :param np.ndarray min_mag: Minimum magnitude of completeness table :param non-negative float mag_inc: Magnitude increment of the completeness table
4.820876
1.73625
2.776602
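A quick numeric check of the Hermann adjustment factors above; the b-value, minimum magnitude and bin width are arbitrary.

bval, min_mag, mag_inc = 1.0, 4.0, 0.1
fval = 10. ** (bval * min_mag)                                        # 10000.0
fival = 10. ** (bval * (mag_inc / 2.)) - 10. ** (-bval * (mag_inc / 2.))
print(fval, fival)                                                    # 10000.0, ~0.2308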
''' Incremental a-value from cumulative - using the version of the Hermann (1979) formula described in Wesson et al. (2003) :param float bval: Gutenberg & Richter (1944) b-value :param np.ndarray min_mag: Minimum magnitude of completeness table :param float mag_inc: Magnitude increment of the completeness table ''' a_cum = 10. ** (bval * min_mag) a_inc = a_cum + np.log10((10. ** (bval * mag_inc)) - (10. ** (-bval * mag_inc))) return a_inc
def incremental_a_value(bval, min_mag, mag_inc)
Incremental a-value from cumulative - using the version of the Hermann (1979) formula described in Wesson et al. (2003) :param float bval: Gutenberg & Richter (1944) b-value :param np.ndarray min_mag: Minimum magnitude of completeness table :param float mag_inc: Magnitude increment of the completeness table
5.240383
2.003698
2.615356
''' Gets the Weichert adjustment factor for each of the magnitude bins :param float beta: Beta value of Gutenberg & Richter parameter (b * log(10.)) :param np.ndarray cmag: Magnitude values of the completeness table :param np.ndarray cyear: Year values of the completeness table :param float end_year: Last year for consideration in the catalogue :returns: Weichert adjustment factor (float) ''' if len(cmag) > 1: # cval corresponds to the mid-point of the completeness bins # In the original code it requires that the magnitude bins be # equal sized dmag = (cmag[1:] + cmag[:-1]) / 2. cval = np.hstack([dmag, cmag[-1] + (dmag[-1] - dmag[-2])]) else: # Single completeness value so Weichert factor is unity return 1.0 / (end_year - cyear[0] + 1), None t_f = sum(np.exp(-beta * cval)) / sum((end_year - cyear + 1) * np.exp(-beta * cval)) return t_f, cval
def get_weichert_factor(beta, cmag, cyear, end_year)
Gets the Weichert adjustment factor for each of the magnitude bins :param float beta: Beta value of Gutenberg & Richter parameter (b * log(10.)) :param np.ndarray cmag: Magnitude values of the completeness table :param np.ndarray cyear: Year values of the completeness table :param float end_year: Last year for consideration in the catalogue :returns: Weichert adjustment factor (float)
5.625714
3.183433
1.767184
''' Check to ensure completeness table is in the correct format `completeness_table = np.array([[year_, mag_i]]) for i in number of bins` :param np.ndarray completeness_table: Completeness table in format [[year, mag]] :param catalogue: Instance of openquake.hmtk.seismicity.catalogue.Catalogue class :returns: Correct completeness table ''' if isinstance(completeness_table, np.ndarray): assert np.shape(completeness_table)[1] == 2 return completeness_table elif isinstance(completeness_table, list): # Assuming list has only two elements assert len(completeness_table) == 2 return np.array([[completeness_table[0], completeness_table[1]]]) else: # Accepts the minimum magnitude and earliest year of the catalogue return np.array([[np.min(catalogue.data['year']), np.min(catalogue.data['magnitude'])]])
def check_completeness_table(completeness_table, catalogue)
Check to ensure completeness table is in the correct format `completeness_table = np.array([[year_, mag_i]]) for i in number of bins` :param np.ndarray completeness_table: Completeness table in format [[year, mag]] :param catalogue: Instance of openquake.hmtk.seismicity.catalogue.Catalogue class :returns: Correct completeness table
3.704793
2.071567
1.788401
''' To make the magnitudes evenly spaced, render to a constant 0.1 magnitude unit :param np.ndarray completeness_table: Completeness table in format [[year, mag]] :param catalogue: Instance of openquake.hmtk.seismicity.catalogue.Catalogue class :returns: Correct completeness table ''' mmax = np.floor(10. * np.max(catalogue.data['magnitude'])) / 10. check_completeness_table(completeness_table, catalogue) cmag = np.hstack([completeness_table[:, 1], mmax + 0.1]) cyear = np.hstack([completeness_table[:, 0], completeness_table[-1, 0]]) if np.shape(completeness_table)[0] == 1: # Simple single-valued table return completeness_table, 0.1 for iloc in range(0, len(cmag) - 1): mrange = np.arange(np.floor(10. * cmag[iloc]) / 10., (np.ceil(10. * cmag[iloc + 1]) / 10.), 0.1) temp_table = np.column_stack([ cyear[iloc] * np.ones(len(mrange), dtype=float), mrange]) if iloc == 0: completeness_table = np.copy(temp_table) else: completeness_table = np.vstack([completeness_table, temp_table]) # completeness_table = np.vstack([completeness_table, # np.array([[cyear[-1], cmag[-1]]])]) return completeness_table, 0.1
def get_even_magnitude_completeness(completeness_table, catalogue=None)
Render the completeness table to a constant 0.1 magnitude spacing, so that the magnitudes are evenly spaced :param np.ndarray completeness_table: Completeness table in format [[year, mag]] :param catalogue: Instance of openquake.hmtk.seismicity.catalogue.Catalogue class :returns: The evenly-spaced completeness table and the magnitude increment (0.1)
3.053761
2.196669
1.390178
dupl = [] for obj, group in itertools.groupby(sorted(objects), key): if sum(1 for _ in group) > 1: dupl.append(obj) if dupl: raise ValueError('Found duplicates %s' % dupl) return objects
def unique(objects, key=None)
Raise a ValueError if there is a duplicated object, otherwise return the objects as they are.
3.300337
3.127399
1.055298
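The duplicate detection above relies on itertools.groupby over the sorted objects; a minimal standalone sketch of the same idiom on a toy list:

import itertools

objects = ['b', 'a', 'b', 'c']
dupl = [obj for obj, group in itertools.groupby(sorted(objects))
        if sum(1 for _ in group) > 1]
print(dupl)   # ['b'] -- unique() would raise ValueError('Found duplicates %s' % dupl)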
effective = [] for uid, group in groupby(rlzs, operator.attrgetter('uid')).items(): rlz = group[0] if all(path == '@' for path in rlz.lt_uid): # empty realization continue effective.append( Realization(rlz.value, sum(r.weight for r in group), rlz.lt_path, rlz.ordinal, rlz.lt_uid)) return effective
def get_effective_rlzs(rlzs)
Group together realizations with the same unique identifier (uid) and return the first representative of each group, with weight equal to the sum of the weights in the group; empty realizations (uid made only of '@') are skipped.
6.337165
5.69647
1.112472
weights = [] for obj in weighted_objects: w = obj.weight if isinstance(obj.weight, float): weights.append(w) else: weights.append(w['weight']) numpy.random.seed(seed) idxs = numpy.random.choice(len(weights), num_samples, p=weights) # NB: returning an array would break things return [weighted_objects[idx] for idx in idxs]
def sample(weighted_objects, num_samples, seed)
Take random samples of a sequence of weighted objects :param weighted_objects: A finite sequence of objects with a `.weight` attribute. The weights must sum up to 1. :param num_samples: The number of samples to return :param seed: A random seed :return: A subsequence of the original sequence with `num_samples` elements
2.90482
3.321805
0.87447
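The same weighted-draw idea in a minimal numpy-only form, with the weights passed explicitly instead of being read from a .weight attribute (sample_sketch is a hypothetical name):

import numpy

def sample_sketch(objects, weights, num_samples, seed):
    numpy.random.seed(seed)               # reproducible draws
    idxs = numpy.random.choice(len(weights), num_samples, p=weights)
    return [objects[i] for i in idxs]     # a list, not an array

print(sample_sketch(['A', 'B', 'C'], [0.2, 0.3, 0.5], 5, seed=42))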
n = nrml.read(smlt) try: blevels = n.logicTree except Exception: raise InvalidFile('%s is not a valid source_model_logic_tree_file' % smlt) paths = collections.defaultdict(set) # branchID -> paths applytosources = collections.defaultdict(list) # branchID -> source IDs for blevel in blevels: for bset in blevel: if 'applyToSources' in bset.attrib: applytosources[bset['branchSetID']].extend( bset['applyToSources'].split()) for br in bset: with node.context(smlt, br): fnames = unique(br.uncertaintyModel.text.split()) paths[br['branchID']].update(get_paths(smlt, fnames)) return Info({k: sorted(v) for k, v in paths.items()}, applytosources)
def collect_info(smlt)
Given a path to a source model logic tree, collect all of the path names to the source models it contains and build 1. a dictionary source model branch ID -> paths 2. a dictionary source model branch ID -> source IDs in applyToSources :param smlt: source model logic tree file :returns: an Info namedtuple containing the two dictionaries
6.217753
4.523678
1.374491
smodel = nrml.read(fname).sourceModel src_groups = [] if smodel[0].tag.endswith('sourceGroup'): # NRML 0.5 format for sg_node in smodel: sg = SourceGroup(sg_node['tectonicRegion']) sg.sources = sg_node.nodes src_groups.append(sg) else: # NRML 0.4 format: smodel is a list of source nodes src_groups.extend(SourceGroup.collect(smodel)) return src_groups
def read_source_groups(fname)
:param fname: a path to a source model XML file :return: a list of SourceGroup objects containing source nodes
5.29392
4.92266
1.075419
text = uncertainty.text.strip() if not text.startswith('['): # a bare GSIM name was passed text = '[%s]' % text for k, v in uncertainty.attrib.items(): try: v = ast.literal_eval(v) except ValueError: v = repr(v) text += '\n%s = %s' % (k, v) return text
def toml(uncertainty)
Converts an uncertainty node into a TOML string
4.474813
4.006005
1.117026
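The same attribute-to-TOML idea, sketched on a plain string and dict instead of an uncertainty node: literal values keep their Python form, anything else falls back to repr():

import ast

def toml_sketch(text, attrib):
    text = text.strip()
    if not text.startswith('['):          # a bare GSIM name becomes a TOML table header
        text = '[%s]' % text
    for k, v in attrib.items():
        try:
            v = ast.literal_eval(v)       # '1.5' -> 1.5
        except ValueError:
            v = repr(v)                   # 'CEUS' -> "'CEUS'"
        text += '\n%s = %s' % (k, v)
    return text

print(toml_sketch('BooreAtkinson2008', {'adjustment': '1.5', 'region': 'CEUS'}))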
names = self.names.split() if len(names) == 1: return names[0] elif len(names) == 2: return ' '.join(names) else: return ' '.join([names[0], '...', names[-1]])
def name(self)
Compact representation for the names
2.737685
2.481452
1.103259
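The compaction rule above as a free function, for illustration only (compact_name is a hypothetical name): one or two names are kept verbatim, longer lists are abbreviated to "first ... last":

def compact_name(names_string):
    names = names_string.split()
    if len(names) <= 2:
        return ' '.join(names)
    return ' '.join([names[0], '...', names[-1]])

print(compact_name('sm_a.xml sm_b.xml sm_c.xml sm_d.xml'))   # sm_a.xml ... sm_d.xml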
src_groups = [] for grp in self.src_groups: sg = copy.copy(grp) sg.sources = [] src_groups.append(sg) return self.__class__(self.names, self.weight, self.path, src_groups, self.num_gsim_paths, self.ordinal, self.samples)
def get_skeleton(self)
Return an empty copy of the source model, i.e. without sources, but with the proper attributes for each SourceGroup contained within.
7.300448
5.022285
1.453611
for path in self._enumerate_paths([]): flat_path = [] weight = 1.0 while path: path, branch = path weight *= branch.weight flat_path.append(branch) yield weight, flat_path[::-1]
def enumerate_paths(self)
Generate all possible paths starting from this branch set. :returns: Generator of two-item tuples. Each tuple contains weight of the path (calculated as a product of the weights of all path's branches) and list of path's :class:`Branch` objects. Total sum of all paths' weights is 1.0
5.059124
3.90782
1.294615
for branch in self.branches: path = [prefix_path, branch] if branch.child_branchset is not None: for subpath in branch.child_branchset._enumerate_paths(path): yield subpath else: yield path
def _enumerate_paths(self, prefix_path)
Recursive (private) part of :func:`enumerate_paths`. Returns a generator of recursive lists of two items, where the second item is the branch object and the first one is itself a list of two items.
3.431244
2.922738
1.173983
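A toy reconstruction of the recursion in the two rows above, with simplified Branch/BranchSet stand-ins: each branch may point to a child branchset, every root-to-leaf path is flattened, and the path weight is the product of the branch weights along it (the weights of all paths sum to 1.0):

class Branch:
    def __init__(self, weight, child_branchset=None):
        self.weight = weight
        self.child_branchset = child_branchset

class BranchSet:
    def __init__(self, branches):
        self.branches = branches

    def enumerate_paths(self):
        for path in self._enumerate_paths([]):
            flat_path, weight = [], 1.0
            while path:                       # unwind the nested [prefix, branch] pairs
                path, branch = path
                weight *= branch.weight
                flat_path.append(branch)
            yield weight, flat_path[::-1]

    def _enumerate_paths(self, prefix_path):
        for branch in self.branches:
            path = [prefix_path, branch]
            if branch.child_branchset is not None:
                yield from branch.child_branchset._enumerate_paths(path)
            else:
                yield path

leaf = BranchSet([Branch(0.7), Branch(0.3)])
root = BranchSet([Branch(0.4, leaf), Branch(0.6, leaf)])
print([w for w, _ in root.enumerate_paths()])  # [0.28, 0.12, 0.42, 0.18], sums to 1.0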
for branch in self.branches: if branch.branch_id == branch_id: return branch raise AssertionError("couldn't find branch '%s'" % branch_id)
def get_branch_by_id(self, branch_id)
Return :class:`Branch` object belonging to this branch set with id equal to ``branch_id``.
3.054509
3.127254
0.976738
# pylint: disable=R0911,R0912 for key, value in self.filters.items(): if key == 'applyToTectonicRegionType': if value != source.tectonic_region_type: return False elif key == 'applyToSourceType': if value == 'area': if not isinstance(source, ohs.AreaSource): return False elif value == 'point': # area source extends point source if (not isinstance(source, ohs.PointSource) or isinstance(source, ohs.AreaSource)): return False elif value == 'simpleFault': if not isinstance(source, ohs.SimpleFaultSource): return False elif value == 'complexFault': if not isinstance(source, ohs.ComplexFaultSource): return False elif value == 'characteristicFault': if not isinstance(source, ohs.CharacteristicFaultSource): return False else: raise AssertionError("unknown source type '%s'" % value) elif key == 'applyToSources': if source and source.source_id not in value: return False else: raise AssertionError("unknown filter '%s'" % key) # All filters pass, return True. return True
def filter_source(self, source)
Apply filters to ``source`` and return ``True`` if uncertainty should be applied to it.
2.688224
2.626789
1.023388
if not self.filter_source(source): # source didn't pass the filter return 0 if self.uncertainty_type in MFD_UNCERTAINTY_TYPES: self._apply_uncertainty_to_mfd(source.mfd, value) elif self.uncertainty_type in GEOMETRY_UNCERTAINTY_TYPES: self._apply_uncertainty_to_geometry(source, value) else: raise AssertionError("unknown uncertainty type '%s'" % self.uncertainty_type) return 1
def apply_uncertainty(self, value, source)
Apply this branchset's uncertainty with value ``value`` to source ``source``, if it passes :meth:`filters <filter_source>`. This method is not called for uncertainties of types "gmpeModel" and "sourceModel". :param value: The actual uncertainty value of :meth:`sampled <sample>` branch. Type depends on uncertainty type. :param source: The opensha source data object. :return: 0 if the source was not changed, 1 otherwise
2.865255
2.569253
1.115209
if self.uncertainty_type == 'simpleFaultDipRelative': source.modify('adjust_dip', dict(increment=value)) elif self.uncertainty_type == 'simpleFaultDipAbsolute': source.modify('set_dip', dict(dip=value)) elif self.uncertainty_type == 'simpleFaultGeometryAbsolute': trace, usd, lsd, dip, spacing = value source.modify( 'set_geometry', dict(fault_trace=trace, upper_seismogenic_depth=usd, lower_seismogenic_depth=lsd, dip=dip, spacing=spacing)) elif self.uncertainty_type == 'complexFaultGeometryAbsolute': edges, spacing = value source.modify('set_geometry', dict(edges=edges, spacing=spacing)) elif self.uncertainty_type == 'characteristicFaultGeometryAbsolute': source.modify('set_geometry', dict(surface=value))
def _apply_uncertainty_to_geometry(self, source, value)
Modify ``source`` geometry with the uncertainty value ``value``
3.22976
3.146883
1.026336
if self.uncertainty_type == 'abGRAbsolute': a, b = value mfd.modify('set_ab', dict(a_val=a, b_val=b)) elif self.uncertainty_type == 'bGRRelative': mfd.modify('increment_b', dict(value=value)) elif self.uncertainty_type == 'maxMagGRRelative': mfd.modify('increment_max_mag', dict(value=value)) elif self.uncertainty_type == 'maxMagGRAbsolute': mfd.modify('set_max_mag', dict(value=value)) elif self.uncertainty_type == 'incrementalMFDAbsolute': min_mag, bin_width, occur_rates = value mfd.modify('set_mfd', dict(min_mag=min_mag, bin_width=bin_width, occurrence_rates=occur_rates))
def _apply_uncertainty_to_mfd(self, mfd, value)
Modify ``mfd`` object with uncertainty value ``value``.
2.921812
2.798183
1.044182
num_gsim_paths = 1 if self.num_samples else gsim_lt.get_num_paths() for i, rlz in enumerate(self): yield LtSourceModel( rlz.value, rlz.weight, ('b1',), [], num_gsim_paths, i, 1)
def gen_source_models(self, gsim_lt)
Yield the underlying LtSourceModel, multiple times if there is sampling
8.355865
6.755532
1.236892
return (self.info.applytosources and self.info.applytosources == self.source_ids)
def on_each_source(self)
True if there is an applyToSources for each source.
20.010319
8.589417
2.329648
self.info = collect_info(self.filename) self.source_ids = collections.defaultdict(list) t0 = time.time() for depth, branchinglevel_node in enumerate(tree_node.nodes): self.parse_branchinglevel(branchinglevel_node, depth, validate) dt = time.time() - t0 if validate: bname = os.path.basename(self.filename) logging.info('Validated %s in %.2f seconds', bname, dt)
def parse_tree(self, tree_node, validate)
Parse the whole tree and point ``root_branchset`` attribute to the tree's root.
4.224448
4.193323
1.007423
new_open_ends = set() branchsets = branchinglevel_node.nodes for number, branchset_node in enumerate(branchsets): branchset = self.parse_branchset(branchset_node, depth, number, validate) self.parse_branches(branchset_node, branchset, validate) if self.root_branchset is None: # not set yet self.num_paths = 1 self.root_branchset = branchset else: self.apply_branchset(branchset_node, branchset) for branch in branchset.branches: new_open_ends.add(branch) self.num_paths *= len(branchset.branches) if number > 0: logging.warning('There is a branching level with multiple ' 'branchsets in %s', self.filename) self.open_ends.clear() self.open_ends.update(new_open_ends)
def parse_branchinglevel(self, branchinglevel_node, depth, validate)
Parse one branching level. :param branchinglevel_node: ``etree.Element`` object with tag "logicTreeBranchingLevel". :param depth: The sequential number of this branching level, based on 0. :param validate: Whether or not the branching level, its branchsets and their branches should be validated. Enumerates child branchsets and calls :meth:`parse_branchset`, :meth:`validate_branchset`, :meth:`parse_branches` and finally :meth:`apply_branchset` for each. Keeps track of "open ends" -- the set of branches that don't have any child branchset on this step of execution. After processing every branching level, only those branches that are listed in it can have child branchsets (if there is one on the next level).
3.244989
2.995875
1.083152
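A tiny illustration of the num_paths bookkeeping above: each branching level multiplies the running total by its number of branches (the counts here are made up):

levels = [3, 2, 4]            # branches per branching level
num_paths = 1
for nbranches in levels:
    num_paths *= nbranches
print(num_paths)              # 24 end-to-end logic-tree paths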
uncertainty_type = branchset_node.attrib.get('uncertaintyType') filters = dict((filtername, branchset_node.attrib.get(filtername)) for filtername in self.FILTERS if filtername in branchset_node.attrib) if validate: self.validate_filters(branchset_node, uncertainty_type, filters) filters = self.parse_filters(branchset_node, uncertainty_type, filters) branchset = BranchSet(uncertainty_type, filters) if validate: self.validate_branchset(branchset_node, depth, number, branchset) return branchset
def parse_branchset(self, branchset_node, depth, number, validate)
Create :class:`BranchSet` object using data in ``branchset_node``. :param branchset_node: ``etree.Element`` object with tag "logicTreeBranchSet". :param depth: The sequential number of branchset's branching level, based on 0. :param number: Index number of this branchset inside branching level, based on 0. :param validate: Whether or not filters defined in branchset and the branchset itself should be validated. :returns: An instance of :class:`BranchSet` with filters applied but with no branches (they're attached in :meth:`parse_branches`).
2.478075
2.374397
1.043665
weight_sum = 0 branches = branchset_node.nodes values = [] for branchnode in branches: weight = ~branchnode.uncertaintyWeight weight_sum += weight value_node = node_from_elem(branchnode.uncertaintyModel) if value_node.text is not None: values.append(value_node.text.strip()) if validate: self.validate_uncertainty_value( value_node, branchnode, branchset) value = self.parse_uncertainty_value(value_node, branchset) branch_id = branchnode.attrib.get('branchID') branch = Branch(branch_id, weight, value) if branch_id in self.branches: raise LogicTreeError( branchnode, self.filename, "branchID '%s' is not unique" % branch_id) self.branches[branch_id] = branch branchset.branches.append(branch) if abs(weight_sum - 1.0) > pmf.PRECISION: raise LogicTreeError( branchset_node, self.filename, "branchset weights don't sum up to 1.0") if len(set(values)) < len(values): # TODO: add a test for this case # <logicTreeBranch branchID="b71"> # <uncertaintyModel> 7.7 </uncertaintyModel> # <uncertaintyWeight>0.333</uncertaintyWeight> # </logicTreeBranch> # <logicTreeBranch branchID="b72"> # <uncertaintyModel> 7.695 </uncertaintyModel> # <uncertaintyWeight>0.333</uncertaintyWeight> # </logicTreeBranch> # <logicTreeBranch branchID="b73"> # <uncertaintyModel> 7.7 </uncertaintyModel> # <uncertaintyWeight>0.334</uncertaintyWeight> # </logicTreeBranch> raise LogicTreeError( branchset_node, self.filename, "there are duplicate values in uncertaintyModel: " + ' '.join(values))
def parse_branches(self, branchset_node, branchset, validate)
Create and attach branches at ``branchset_node`` to ``branchset``. :param branchset_node: Same as for :meth:`parse_branchset`. :param branchset: An instance of :class:`BranchSet`. :param validate: Whether or not branches' uncertainty values should be validated. Checks that each branch has a :meth:`valid <validate_uncertainty_value>` value, a unique id, and that all branches have a total weight of 1.0. :return: ``None``, all branches are attached to the provided branchset.
2.566741
2.538838
1.01099
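The weight-sum consistency check above, isolated: the weights of one branchset must sum to 1.0 within a small tolerance (1E-12 is an assumed stand-in for pmf.PRECISION):

PRECISION = 1E-12             # assumed stand-in for openquake's pmf.PRECISION

def check_weights(weights):
    if abs(sum(weights) - 1.0) > PRECISION:
        raise ValueError("branchset weights don't sum up to 1.0")

check_weights([0.333, 0.333, 0.334])   # passes
# check_weights([0.3, 0.3, 0.3])       # raises ValueError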
samples_by_lt_path = self.samples_by_lt_path() for i, rlz in enumerate(get_effective_rlzs(self)): smpath = rlz.lt_path num_samples = samples_by_lt_path[smpath] num_gsim_paths = (num_samples if self.num_samples else gsim_lt.get_num_paths()) yield LtSourceModel( rlz.value, rlz.weight / num_samples, smpath, [], num_gsim_paths, i, num_samples)
def gen_source_models(self, gsim_lt)
Yield empty LtSourceModel instances (one per effective realization)
6.092728
4.991512
1.220618
branchset = self.root_branchset branch_ids = [] while branchset is not None: [branch] = sample(branchset.branches, 1, seed) branch_ids.append(branch.branch_id) branchset = branch.child_branchset modelname = self.root_branchset.get_branch_by_id(branch_ids[0]).value return modelname, branch_ids
def sample_path(self, seed)
Return the model name and a list of branch ids. :param seed: the seed used for the sampling
3.81218
3.468431
1.099108
if branchset.uncertainty_type == 'sourceModel': return node.text.strip() elif branchset.uncertainty_type == 'abGRAbsolute': [a, b] = node.text.strip().split() return float(a), float(b) elif branchset.uncertainty_type == 'incrementalMFDAbsolute': min_mag, bin_width = (node.incrementalMFD["minMag"], node.incrementalMFD["binWidth"]) return min_mag, bin_width, ~node.incrementalMFD.occurRates elif branchset.uncertainty_type == 'simpleFaultGeometryAbsolute': return self._parse_simple_fault_geometry_surface( node.simpleFaultGeometry) elif branchset.uncertainty_type == 'complexFaultGeometryAbsolute': return self._parse_complex_fault_geometry_surface( node.complexFaultGeometry) elif branchset.uncertainty_type ==\ 'characteristicFaultGeometryAbsolute': surfaces = [] for geom_node in node.surface: if "simpleFaultGeometry" in geom_node.tag: trace, usd, lsd, dip, spacing =\ self._parse_simple_fault_geometry_surface(geom_node) surfaces.append(geo.SimpleFaultSurface.from_fault_data( trace, usd, lsd, dip, spacing)) elif "complexFaultGeometry" in geom_node.tag: edges, spacing =\ self._parse_complex_fault_geometry_surface(geom_node) surfaces.append(geo.ComplexFaultSurface.from_fault_data( edges, spacing)) elif "planarSurface" in geom_node.tag: surfaces.append( self._parse_planar_geometry_surface(geom_node)) else: pass if len(surfaces) > 1: return geo.MultiSurface(surfaces) else: return surfaces[0] else: return float(node.text.strip())
def parse_uncertainty_value(self, node, branchset)
See superclass' method for description and signature specification. Doesn't change source model file name, converts other values to either pair of floats or a single float depending on uncertainty type.
2.847826
2.777214
1.025426
spacing = node["spacing"] usd, lsd, dip = (~node.upperSeismoDepth, ~node.lowerSeismoDepth, ~node.dip) # Parse the geometry coords = split_coords_2d(~node.LineString.posList) trace = geo.Line([geo.Point(*p) for p in coords]) return trace, usd, lsd, dip, spacing
def _parse_simple_fault_geometry_surface(self, node)
Parses a simple fault geometry surface
12.71502
11.715296
1.085335
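A hedged sketch of what split_coords_2d is assumed to do with the posList text before the points are wrapped in geo.Point: split the flat string of numbers into (lon, lat) pairs:

def split_coords_2d_sketch(poslist_text):
    values = [float(x) for x in poslist_text.split()]
    return list(zip(values[0::2], values[1::2]))   # pair up lon/lat

print(split_coords_2d_sketch('10.0 45.0 10.5 45.2 11.0 45.4'))
# [(10.0, 45.0), (10.5, 45.2), (11.0, 45.4)]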
spacing = node["spacing"] edges = [] for edge_node in node.nodes: coords = split_coords_3d(~edge_node.LineString.posList) edges.append(geo.Line([geo.Point(*p) for p in coords])) return edges, spacing
def _parse_complex_fault_geometry_surface(self, node)
Parses a complex fault geometry surface
9.767385
9.192933
1.062488
nodes = [] for key in ["topLeft", "topRight", "bottomRight", "bottomLeft"]: nodes.append(geo.Point(getattr(node, key)["lon"], getattr(node, key)["lat"], getattr(node, key)["depth"])) top_left, top_right, bottom_right, bottom_left = tuple(nodes) return geo.PlanarSurface.from_corner_points( top_left, top_right, bottom_right, bottom_left)
def _parse_planar_geometry_surface(self, node)
Parses a planar geometry surface
2.941027
2.946542
0.998128
_float_re = re.compile(r'^(\+|\-)?(\d+|\d*\.\d+)$') if branchset.uncertainty_type == 'sourceModel': try: for fname in node.text.strip().split(): self.collect_source_model_data( branchnode['branchID'], fname) except Exception as exc: raise LogicTreeError(node, self.filename, str(exc)) from exc elif branchset.uncertainty_type == 'abGRAbsolute': ab = (node.text.strip()).split() if len(ab) == 2: a, b = ab if _float_re.match(a) and _float_re.match(b): return raise LogicTreeError( node, self.filename, 'expected a pair of floats separated by space') elif branchset.uncertainty_type == 'incrementalMFDAbsolute': pass elif branchset.uncertainty_type == 'simpleFaultGeometryAbsolute': self._validate_simple_fault_geometry(node.simpleFaultGeometry, _float_re) elif branchset.uncertainty_type == 'complexFaultGeometryAbsolute': self._validate_complex_fault_geometry(node.complexFaultGeometry, _float_re) elif branchset.uncertainty_type ==\ 'characteristicFaultGeometryAbsolute': for geom_node in node.surface: if "simpleFaultGeometry" in geom_node.tag: self._validate_simple_fault_geometry(geom_node, _float_re) elif "complexFaultGeometry" in geom_node.tag: self._validate_complex_fault_geometry(geom_node, _float_re) elif "planarSurface" in geom_node.tag: self._validate_planar_fault_geometry(geom_node, _float_re) else: raise LogicTreeError( geom_node, self.filename, "Surface geometry type not recognised") else: try: float(node.text) except (TypeError, ValueError): raise LogicTreeError( node, self.filename, 'expected single float value')
def validate_uncertainty_value(self, node, branchnode, branchset)
See superclass' method for description and signature specification. Checks that the following conditions are met: * For uncertainty of type "sourceModel": referenced file must exist and be readable. This is checked in :meth:`collect_source_model_data` along with saving the source model information. * For uncertainty of type "abGRAbsolute": value should be two float values. * For both absolute uncertainties: the source (only one) must be referenced in branchset's filter "applyToSources". * For all other cases: value should be a single float value.
2.753863
2.443837
1.12686
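The float regex used above, exercised on its own against an abGRAbsolute-style pair; it accepts optionally signed integers and decimals:

import re

_float_re = re.compile(r'^(\+|\-)?(\d+|\d*\.\d+)$')

def is_ab_pair(text):
    parts = text.strip().split()
    return len(parts) == 2 and all(_float_re.match(p) for p in parts)

print(is_ab_pair('4.5 1.0'))    # True
print(is_ab_pair('4.5'))        # False - a single value is not enough
print(is_ab_pair('4.5 b'))      # False - second token is not a float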