Columns: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64)
dists = set() for gsim in gsims: dists.update(gsim.REQUIRES_DISTANCES) return len(dists)
def get_num_distances(gsims)
:returns: the number of distances required for the given GSIMs
3.354091
2.918461
1.149267
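A minimal usage sketch of get_num_distances, assuming two hypothetical stand-in classes (FakeGsimA and FakeGsimB are not real hazardlib GSIMs) that only declare REQUIRES_DISTANCES; the result is simply the size of the union of the declared distance types.

    class FakeGsimA:  # hypothetical stand-in, not a real hazardlib GSIM
        REQUIRES_DISTANCES = {'rrup'}

    class FakeGsimB:  # hypothetical stand-in
        REQUIRES_DISTANCES = {'rjb', 'rrup'}

    def get_num_distances(gsims):
        dists = set()
        for gsim in gsims:
            dists.update(gsim.REQUIRES_DISTANCES)
        return len(dists)

    print(get_num_distances([FakeGsimA, FakeGsimB]))  # 2, i.e. {'rjb', 'rrup'}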
distances = get_distances(rupture, sites, self.filter_distance) if self.maximum_distance: mask = distances <= self.maximum_distance( rupture.tectonic_region_type, rupture.mag) if mask.any(): sites, distances = sites.filter(mask), distances[mask] else: raise FarAwayRupture( '%d: %d km' % (rupture.serial, distances.min())) return sites, DistancesContext([(self.filter_distance, distances)])
def filter(self, sites, rupture)
Filter the site collection with respect to the rupture. :param sites: Instance of :class:`openquake.hazardlib.site.SiteCollection`. :param rupture: Instance of :class:`openquake.hazardlib.source.rupture.BaseRupture` :returns: (filtered sites, distance context)
5.474553
4.879865
1.121866
for param in self.REQUIRES_RUPTURE_PARAMETERS: if param == 'mag': value = rupture.mag elif param == 'strike': value = rupture.surface.get_strike() elif param == 'dip': value = rupture.surface.get_dip() elif param == 'rake': value = rupture.rake elif param == 'ztor': value = rupture.surface.get_top_edge_depth() elif param == 'hypo_lon': value = rupture.hypocenter.longitude elif param == 'hypo_lat': value = rupture.hypocenter.latitude elif param == 'hypo_depth': value = rupture.hypocenter.depth elif param == 'width': value = rupture.surface.get_width() else: raise ValueError('%s requires unknown rupture parameter %r' % (type(self).__name__, param)) setattr(rupture, param, value)
def add_rup_params(self, rupture)
Add .REQUIRES_RUPTURE_PARAMETERS to the rupture
1.790154
1.684157
1.062938
sites, dctx = self.filter(sites, rupture) for param in self.REQUIRES_DISTANCES - set([self.filter_distance]): distances = get_distances(rupture, sites, param) setattr(dctx, param, distances) reqv_obj = (self.reqv.get(rupture.tectonic_region_type) if self.reqv else None) if reqv_obj and isinstance(rupture.surface, PlanarSurface): reqv = reqv_obj.get(dctx.repi, rupture.mag) if 'rjb' in self.REQUIRES_DISTANCES: dctx.rjb = reqv if 'rrup' in self.REQUIRES_DISTANCES: reqv_rup = numpy.sqrt(reqv**2 + rupture.hypocenter.depth**2) dctx.rrup = reqv_rup self.add_rup_params(rupture) # NB: returning a SitesContext makes sure that the GSIM cannot # access site parameters different from the ones declared sctx = SitesContext(self.REQUIRES_SITES_PARAMETERS, sites) return sctx, dctx
def make_contexts(self, sites, rupture)
Filter the site collection with respect to the rupture and create context objects. :param sites: Instance of :class:`openquake.hazardlib.site.SiteCollection`. :param rupture: Instance of :class:`openquake.hazardlib.source.rupture.BaseRupture` :returns: Tuple of two items: sites and distances context. :raises ValueError: If any of declared required parameters (site, rupture and distance parameters) is unknown.
4.939309
4.968355
0.994154
sitecol = sites.complete N = len(sitecol) fewsites = N <= FEWSITES rupdata = [] # rupture data for rup, sites in self._gen_rup_sites(src, sites): try: with self.ctx_mon: sctx, dctx = self.make_contexts(sites, rup) except FarAwayRupture: continue yield rup, sctx, dctx if fewsites: # store rupdata try: rate = rup.occurrence_rate probs_occur = numpy.zeros(0, numpy.float64) except AttributeError: # for nonparametric ruptures rate = numpy.nan probs_occur = rup.probs_occur row = [src.id or 0, rate] for rup_param in self.REQUIRES_RUPTURE_PARAMETERS: row.append(getattr(rup, rup_param)) for dist_param in self.REQUIRES_DISTANCES: row.append(get_distances(rup, sitecol, dist_param)) closest = rup.surface.get_closest_points(sitecol) row.append(closest.lons) row.append(closest.lats) row.append(rup.weight) row.append(probs_occur) rupdata.append(tuple(row)) if rupdata: dtlist = [('srcidx', numpy.uint32), ('occurrence_rate', float)] for rup_param in self.REQUIRES_RUPTURE_PARAMETERS: dtlist.append((rup_param, float)) for dist_param in self.REQUIRES_DISTANCES: dtlist.append((dist_param, (float, (N,)))) dtlist.append(('lon', (float, (N,)))) # closest lons dtlist.append(('lat', (float, (N,)))) # closest lats dtlist.append(('mutex_weight', float)) dtlist.append(('probs_occur', vfloat64)) self.rupdata = numpy.array(rupdata, dtlist) else: self.rupdata = ()
def gen_rup_contexts(self, src, sites)
:param src: a hazardlib source :param sites: the sites affected by it :yields: (rup, sctx, dctx)
3.518441
3.427898
1.026414
pmap = ProbabilityMap.build( len(imtls.array), len(self.gsims), s_sites.sids, initvalue=rup_indep) eff_ruptures = 0 for rup, sctx, dctx in self.gen_rup_contexts(src, s_sites): eff_ruptures += 1 with self.poe_mon: pnes = self._make_pnes(rup, sctx, dctx, imtls, trunclevel) for sid, pne in zip(sctx.sids, pnes): if rup_indep: pmap[sid].array *= pne else: pmap[sid].array += (1.-pne) * rup.weight if rup_indep: pmap = ~pmap pmap.eff_ruptures = eff_ruptures return pmap
def poe_map(self, src, s_sites, imtls, trunclevel, rup_indep=True)
:param src: a source object :param s_sites: a filtered SiteCollection of sites around the source :param imtls: intensity measure and levels :param trunclevel: truncation level :param rup_indep: True if the ruptures are independent :returns: a ProbabilityMap instance
4.885836
4.949327
0.987172
acc = AccumDict(accum=[]) ctx_mon = monitor('disagg_contexts', measuremem=False) pne_mon = monitor('disaggregate_pne', measuremem=False) clo_mon = monitor('get_closest', measuremem=False) for rupture in ruptures: with ctx_mon: orig_dctx = DistancesContext( (param, get_distances(rupture, sitecol, param)) for param in self.REQUIRES_DISTANCES) self.add_rup_params(rupture) with clo_mon: # this is faster than computing orig_dctx closest_points = rupture.surface.get_closest_points(sitecol) cache = {} for rlz, gsim in self.gsim_by_rlzi.items(): dctx = orig_dctx.roundup(gsim.minimum_distance) for m, imt in enumerate(iml4.imts): for p, poe in enumerate(iml4.poes_disagg): iml = tuple(iml4.array[:, rlz, m, p]) try: pne = cache[gsim, imt, iml] except KeyError: with pne_mon: pne = gsim.disaggregate_pne( rupture, sitecol, dctx, imt, iml, truncnorm, epsilons) cache[gsim, imt, iml] = pne acc[poe, str(imt), rlz].append(pne) acc['mags'].append(rupture.mag) acc['dists'].append(getattr(dctx, self.filter_distance)) acc['lons'].append(closest_points.lons) acc['lats'].append(closest_points.lats) return acc
def disaggregate(self, sitecol, ruptures, iml4, truncnorm, epsilons, monitor=Monitor())
Disaggregate (separate) PoE in different contributions. :param sitecol: a SiteCollection with N sites :param ruptures: an iterator over ruptures with the same TRT :param iml4: a 4d array of IMLs of shape (N, R, M, P) :param truncnorm: an instance of scipy.stats.truncnorm :param epsilons: the epsilon bins :param monitor: a Monitor instance :returns: an AccumDict with keys (poe, imt, rlzi) and mags, dists, lons, lats
4.728471
4.179511
1.131345
if not minimum_distance: return self ctx = DistancesContext() for dist, array in vars(self).items(): small_distances = array < minimum_distance if small_distances.any(): array = numpy.array(array) # make a copy first (slicing a numpy array only returns a view) array[small_distances] = minimum_distance setattr(ctx, dist, array) return ctx
def roundup(self, minimum_distance)
If the minimum_distance is nonzero, returns a copy of the DistancesContext with updated distances, i.e. the ones below minimum_distance are rounded up to the minimum_distance. Otherwise, returns the original DistancesContext unchanged.
5.555187
3.81957
1.454401
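A standalone sketch of the rounding-up step on a bare numpy array (the real method iterates over vars(self) and returns a new DistancesContext; roundup_distances is an illustrative name, not hazardlib API):

    import numpy as np

    def roundup_distances(distances, minimum_distance):
        # distances below the threshold are clipped up to minimum_distance;
        # the input array is copied so the caller's context is not mutated
        small = distances < minimum_distance
        if small.any():
            distances = np.array(distances)
            distances[small] = minimum_distance
        return distances

    print(roundup_distances(np.array([0.5, 3.0, 12.0]), 5.0))  # [ 5.  5. 12.]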
if numpy.isnan(self.occurrence_rate): # nonparametric rupture # Uses the formula # # ∑ p(k|T) * p(X<x|rup)^k # # where `p(k|T)` is the probability that the rupture occurs k times # in the time span `T`, `p(X<x|rup)` is the probability that a # rupture occurrence does not cause a ground motion exceedance, and # the summation `∑` is done over the number of occurrences `k`. # # `p(k|T)` is given by the attribute probs_occur and # `p(X<x|rup)` is computed as ``1 - poes``. # Converting from 1d to 2d if len(poes.shape) == 1: poes = numpy.reshape(poes, (-1, len(poes))) p_kT = self.probs_occur prob_no_exceed = numpy.array( [v * ((1 - poes) ** i) for i, v in enumerate(p_kT)]) prob_no_exceed = numpy.sum(prob_no_exceed, axis=0) prob_no_exceed[prob_no_exceed > 1.] = 1. # sanity check prob_no_exceed[poes == 0.] = 1. # avoid numeric issues return prob_no_exceed # parametric rupture tom = self.temporal_occurrence_model return tom.get_probability_no_exceedance(self.occurrence_rate, poes)
def get_probability_no_exceedance(self, poes)
Compute and return the probability that, in the time span for which the rupture is defined, the rupture itself never generates a ground motion value higher than a given level at a given site. The calculation starts from the conditional probability that an occurrence of the current rupture produces a ground motion value higher than the level of interest at the site of interest. The actual formula used for the calculation depends on the temporal occurrence model the rupture is associated with. The calculation can be performed for multiple intensity measure levels and multiple sites in a vectorized fashion. :param poes: 2D numpy array containing conditional probabilities that a rupture occurrence causes a ground shaking value exceeding a ground motion level at a site. The first dimension represents sites, the second intensity measure levels. ``poes`` can be obtained by calling the :meth:`method <openquake.hazardlib.gsim.base.GroundShakingIntensityModel.get_poes>`
3.895436
3.902757
0.998124
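The nonparametric branch can be checked in isolation. This is a hedged sketch of the summation ∑_k p(k|T) · (1 - poes)^k with made-up numbers; prob_no_exceed_nonparametric is an illustrative helper, not hazardlib API:

    import numpy as np

    def prob_no_exceed_nonparametric(probs_occur, poes):
        # probs_occur[k] = probability of k occurrences in the time span;
        # (1 - poes) = probability that one occurrence does not exceed the level
        poes = np.atleast_2d(poes)
        pne = sum(p_k * (1. - poes) ** k for k, p_k in enumerate(probs_occur))
        return np.clip(pne, 0., 1.)

    probs_occur = [0.7, 0.2, 0.1]          # P(0), P(1), P(2) occurrences
    poes = np.array([[0.0, 0.1, 0.5]])     # one site, three intensity levels
    print(prob_no_exceed_nonparametric(probs_occur, poes))
    # [[1.    0.961 0.825]]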
return (src.__class__.__iter__ is not BaseSeismicSource.__iter__ and getattr(src, 'mutex_weight', 1) == 1 and src.splittable)
def splittable(src)
:returns: True if the source is splittable, False otherwise
13.669891
11.640126
1.174377
'''Checks if value is valid float, appends to array if valid, appends nan if not''' value = value.strip(' ') try: if value: attribute_array = np.hstack([attribute_array, float(value)]) else: attribute_array = np.hstack([attribute_array, np.nan]) except: print(irow, key) msg = 'Input file format error at line: %d' % (irow + 2) msg += ' key: %s' % (key) raise ValueError(msg) return attribute_array
def _float_check(self, attribute_array, value, irow, key)
Checks if value is valid float, appends to array if valid, appends nan if not
3.622687
2.739333
1.322471
'''Checks if value is valid integer, appends to array if valid, appends nan if not''' value = value.strip(' ') try: if value: attribute_array = np.hstack([attribute_array, int(value)]) else: attribute_array = np.hstack([attribute_array, np.nan]) except: msg = 'Input file format error at line: %d' % (irow + 2) msg += ' key: %s' % (key) raise ValueError(msg) return attribute_array
def _int_check(self, attribute_array, value, irow, key)
Checks if value is valid integer, appends to array if valid, appends nan if not
3.684794
2.714265
1.357566
''' Writes the catalogue to file, purging events if necessary. :param catalogue: Earthquake catalogue as instance of :class: openquake.hmtk.seismicity.catalogue.Catalogue :param numpy.array flag_vector: Boolean vector specifying whether each event is valid (therefore written) or otherwise :param numpy.ndarray magnitude_table: Magnitude-time table specifying the year and magnitudes of completeness ''' # First apply purging conditions output_catalogue = self.apply_purging(catalogue, flag_vector, magnitude_table) outfile = open(self.output_file, 'wt') writer = csv.DictWriter(outfile, fieldnames=self.OUTPUT_LIST) writer.writeheader() # Quick check to remove nan arrays for key in self.OUTPUT_LIST: cond = (isinstance(output_catalogue.data[key], np.ndarray) and np.all(np.isnan(output_catalogue.data[key]))) if cond: output_catalogue.data[key] = [] # Write the catalogue for iloc in range(0, output_catalogue.get_number_events()): row_dict = {} for key in self.OUTPUT_LIST: if len(output_catalogue.data[key]) > 0: row_dict[key] = output_catalogue.data[key][iloc] else: row_dict[key] = '' writer.writerow(row_dict) outfile.close()
def write_file(self, catalogue, flag_vector=None, magnitude_table=None)
Writes the catalogue to file, purging events if necessary. :param catalogue: Earthquake catalogue as instance of :class: openquake.hmtk.seismicity.catalogue.Catalogue :param numpy.array flag_vector: Boolean vector specifying whether each event is valid (therefore written) or otherwise :param numpy.ndarray magnitude_table: Magnitude-time table specifying the year and magnitudes of completeness
3.290839
2.03571
1.616556
''' Apply all the various purging conditions, if specified. :param catalogue: Earthquake catalogue as instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` :param numpy.array flag_vector: Boolean vector specifying whether each event is valid (therefore written) or otherwise :param numpy.ndarray magnitude_table: Magnitude-time table specifying the year and magnitudes of completeness ''' output_catalogue = deepcopy(catalogue) if magnitude_table is not None: if flag_vector is not None: output_catalogue.catalogue_mt_filter( magnitude_table, flag_vector) return output_catalogue else: output_catalogue.catalogue_mt_filter( magnitude_table) return output_catalogue if flag_vector is not None: output_catalogue.purge_catalogue(flag_vector) return output_catalogue
def apply_purging(self, catalogue, flag_vector, magnitude_table)
Apply all the various purging conditions, if specified. :param catalogue: Earthquake catalogue as instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` :param numpy.array flag_vector: Boolean vector specifying whether each event is valid (therefore written) or otherwise :param numpy.ndarray magnitude_table: Magnitude-time table specifying the year and magnitudes of completeness
3.768693
1.694016
2.224709
azims = geod.azimuth(reference.longitude, reference.latitude, projected.longitude, projected.latitude) depths = np.subtract(reference.depth, projected.depth) dists = geod.geodetic_distance(reference.longitude, reference.latitude, projected.longitude, projected.latitude) return (dists * math.sin(math.radians(azims)), dists * math.cos(math.radians(azims)), depths)
def get_xyz_from_ll(projected, reference)
This method computes the x, y and z coordinates of a set of points provided a reference point :param projected: :class:`~openquake.hazardlib.geo.point.Point` object representing the coordinates of target point to be projected :param reference: :class:`~openquake.hazardlib.geo.point.Point` object representing the coordinates of the reference point. :returns: x y z
2.869705
2.81632
1.018956
''' Define the equation of target fault plane passing through 3 given points which includes two points on the fault trace and one point on the fault plane but away from the fault trace. Note: in order to remain the consistency of the fault normal vector direction definition, the order of the three given points is strickly defined. :param p0: The fault trace and is the closer points from the starting point of fault trace. :class:`~openquake.hazardlib.geo.point.Point` object representing the location of the one vertex of the fault patch. :param p1: The fault trace and is the further points from the starting point of fault trace. :class:`~openquake.hazardlib.geo.point.Point` object representing the location of the one vertex of the fault patch. :param p2: The point on the fault plane but away from the fault trace. :class:`~openquake.hazardlib.geo.point.Point` object representing the location of the one vertex of the fault patch. :param reference: :class:`~openquake.hazardlib.geo.point.Point` object representing the origin of the cartesian system used the represent objects in a projected reference :returns: normal: normal vector of the plane (a,b,c) dist_to_plane: d in the plane equation, ax + by + cz = d ''' p0_xyz = get_xyz_from_ll(p0, reference) p1_xyz = get_xyz_from_ll(p1, reference) p2_xyz = get_xyz_from_ll(p2, reference) p0 = np.array(p0_xyz) p1 = np.array(p1_xyz) p2 = np.array(p2_xyz) u = p1 - p0 v = p2 - p0 # vector normal to plane, ax+by+cy = d, normal=(a,b,c) normal = np.cross(u, v) # Define the d for the plane equation dist_to_plane = np.dot(p0, normal) return normal, dist_to_plane
def get_plane_equation(p0, p1, p2, reference)
Define the equation of the target fault plane passing through three given points: two points on the fault trace and one point on the fault plane away from the fault trace. Note: to keep the definition of the fault normal vector direction consistent, the order of the three given points is strictly defined. :param p0: Point on the fault trace closer to the starting point of the fault trace. :class:`~openquake.hazardlib.geo.point.Point` object representing the location of one vertex of the fault patch. :param p1: Point on the fault trace further from the starting point of the fault trace. :class:`~openquake.hazardlib.geo.point.Point` object representing the location of one vertex of the fault patch. :param p2: Point on the fault plane away from the fault trace. :class:`~openquake.hazardlib.geo.point.Point` object representing the location of one vertex of the fault patch. :param reference: :class:`~openquake.hazardlib.geo.point.Point` object representing the origin of the cartesian system used to represent objects in a projected reference frame. :returns: normal: normal vector of the plane (a, b, c); dist_to_plane: d in the plane equation, ax + by + cz = d
3.653659
1.359489
2.687524
''' This method finds the projection of the site onto the plane containing the slipped area, defined as the Pp(i.e. 'perpendicular projection of site location onto the fault plane' Spudich et al. (2013) - page 88) given a site. :param site: Location of the site, [lon, lat, dep] :param normal: Normal to the plane including the fault patch, describe by a normal vector[a, b, c] :param dist_to_plane: D in the plane equation, ax + by + cz = d :param reference: :class:`~openquake.hazardlib.geo.point.Point` object representing the location of project reference point :returns: pp, the projection point, [ppx, ppy, ppz], in xyz domain , a numpy array. ''' # Transform to xyz coordinate [site_x, site_y, site_z] = get_xyz_from_ll(site, reference) a = np.array([(1, 0, 0, -normal[0]), (0, 1, 0, -normal[1]), (0, 0, 1, -normal[2]), (normal[0], normal[1], normal[2], 0)]) b = np.array([site_x, site_y, site_z, dist_to_plane]) x = np.linalg.solve(a, b) pp = np.array([x[0], x[1], x[2]]) return pp
def projection_pp(site, normal, dist_to_plane, reference)
This method finds the projection of the site onto the plane containing the slipped area, defined as Pp (the 'perpendicular projection of site location onto the fault plane', Spudich et al. (2013), page 88), given a site. :param site: Location of the site, [lon, lat, dep] :param normal: Normal to the plane including the fault patch, described by a normal vector [a, b, c] :param dist_to_plane: D in the plane equation, ax + by + cz = d :param reference: :class:`~openquake.hazardlib.geo.point.Point` object representing the location of the projection reference point :returns: pp, the projection point [ppx, ppy, ppz] in the xyz domain, a numpy array.
4.779685
1.463771
3.265324
cosang = np.dot(v1, v2) sinang = np.linalg.norm(np.cross(v1, v2)) return np.arctan2(sinang, cosang)
def vectors2angle(v1, v2)
Returns the angle in radians between vectors 'v1' and 'v2'. :param v1: vector, a numpy array :param v2: vector, a numpy array :returns: the angle in radians between the two vectors
1.801986
2.402128
0.750162
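As a quick check of the formula in vectors2angle (arctan2 of |v1 × v2| and v1 · v2, which is better conditioned for nearly parallel vectors than arccos of the normalized dot product):

    import numpy as np

    def vectors2angle(v1, v2):
        cosang = np.dot(v1, v2)
        sinang = np.linalg.norm(np.cross(v1, v2))
        return np.arctan2(sinang, cosang)

    v1 = np.array([1.0, 0.0, 0.0])
    v2 = np.array([1.0, 1.0, 0.0])
    print(np.degrees(vectors2angle(v1, v2)))  # 45.0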
if e == 0.: c_prime = 0.8 elif e > 0.: c_prime = 1. / ((1. / 0.8) - ((r_hyp - rd) / e)) return c_prime
def isochone_ratio(e, rd, r_hyp)
Get the isochone ratio as described in Spudich et al. (2013) PEER report, page 88. :param e: a float defining the E-path length, which is the distance from Pd(direction) point to hypocentre. In km. :param rd: float, distance from the site to the direct point. :param r_hyp: float, the hypocentre distance. :returns: c_prime, a float defining the isochone ratio
4.599393
4.168624
1.103336
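A worked example with made-up inputs: for e = 2 km, rd = 10 km and r_hyp = 11 km, the isochone ratio is c' = 1 / (1/0.8 - (11 - 10)/2) = 1 / 0.75 ≈ 1.33; when e = 0 the ratio defaults to 0.8.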
pa = np.array([seg1_start, seg2_start]) pb = np.array([seg1_end, seg2_end]) si = pb - pa ni = si / np.power( np.dot(np.sum(si ** 2, axis=1).reshape(2, 1), np.ones((1, 3))), 0.5) nx = ni[:, 0].reshape(2, 1) ny = ni[:, 1].reshape(2, 1) nz = ni[:, 2].reshape(2, 1) sxx = np.sum(nx ** 2 - 1) syy = np.sum(ny ** 2 - 1) szz = np.sum(nz ** 2 - 1) sxy = np.sum(nx * ny) sxz = np.sum(nx * nz) syz = np.sum(ny * nz) s = np.array([sxx, sxy, sxz, sxy, syy, syz, sxz, syz, szz]).reshape(3, 3) cx = np.sum(pa[:, 0].reshape(2, 1) * (nx ** 2 - 1) + pa[:, 1].reshape(2, 1) * [nx * ny] + pa[:, 2].reshape(2, 1) * (nx * nz)) cy = np.sum(pa[:, 0].reshape(2, 1) * [nx * ny] + pa[:, 1].reshape(2, 1) * [ny ** 2 - 1] + pa[:, 2].reshape(2, 1) * [ny * nz]) cz = np.sum(pa[:, 0].reshape(2, 1) * [nx * nz] + pa[:, 1].reshape(2, 1) * [ny * nz] + pa[:, 2].reshape(2, 1) * [nz ** 2 - 1]) c = np.array([cx, cy, cz]).reshape(3, 1) p_intersect = np.linalg.solve(s, c) vector1 = (p_intersect.flatten() - seg2_end) / \ sum((p_intersect.flatten() - seg2_end) ** 2) ** 0.5 vector2 = (seg2_start - seg2_end) / \ sum((seg2_start - seg2_end) ** 2) ** 0.5 vector3 = (seg1_end - seg1_start) / \ sum((seg1_end - seg1_start) ** 2) ** 0.5 vector4 = (p_intersect.flatten() - seg1_start) / \ sum((p_intersect.flatten() - seg1_start) ** 2) ** 0.5 return p_intersect, vector1, vector2, vector3, vector4
def _intersection(seg1_start, seg1_end, seg2_start, seg2_end)
Get the intersection point between two segments. The calculation is in the Cartesian coordinate system. :param seg1_start: A numpy array representing one end point of the first segment (segment1). :param seg1_end: A numpy array representing the other end point of the first segment (segment1). :param seg2_start: A numpy array representing one end point of the second segment (segment2). :param seg2_end: A numpy array representing the other end point of the second segment (segment2). :returns: p_intersect, a numpy ndarray representing the location of the intersection point of the two given segments; vector1, a numpy array, vector defined by the intersection point and seg2_end; vector2, a numpy array, vector defined by seg2_start and seg2_end; vector3, a numpy array, vector defined by seg1_start and seg1_end; vector4, a numpy array, vector defined by the intersection point and seg1_start
1.704862
1.625085
1.049091
# extracting dictionary of coefficients specific to required # intensity measure type. C = self.COEFFS[imt] C_SITE = self.SITE_COEFFS[imt] s_c, idx = self._get_site_classification(sites.vs30) sa_rock = (self.get_magnitude_scaling_term(C, rup) + self.get_sof_term(C, rup) + self.get_depth_term(C, rup) + self.get_distance_term(C, dists, rup)) sa_soil = self.add_site_amplification(C, C_SITE, sites, sa_rock, idx, rup) stddevs = self.get_stddevs(C, sites.vs30.shape, idx, stddev_types) return sa_soil, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
4.077331
4.082936
0.998627
if rup.mag <= self.CONSTANTS["m_c"]: return C["ccr"] * rup.mag else: return (C["ccr"] * self.CONSTANTS["m_c"]) +\ (C["dcr"] * (rup.mag - self.CONSTANTS["m_c"]))
def get_magnitude_scaling_term(self, C, rup)
Returns the magnitude scaling term in equations 1 and 2
3.816835
3.517609
1.085065
n_sites = sites.vs30.shape # Convert from reference rock to hard rock hard_rock_sa = sa_rock - C["lnSC1AM"] # Gets the elastic site amplification ratio ln_a_n_max = self._get_ln_a_n_max(C, n_sites, idx, rup) # Retrieves coefficients needed to determine smr sreff, sreffc, f_sr = self._get_smr_coeffs(C, C_SITE, idx, n_sites, hard_rock_sa) snc = np.zeros(n_sites) alpha = self.CONSTANTS["alpha"] beta = self.CONSTANTS["beta"] smr = np.zeros(n_sites) sa_soil = hard_rock_sa + ln_a_n_max # Get lnSF ln_sf = self._get_ln_sf(C, C_SITE, idx, n_sites, rup) lnamax_idx = np.exp(ln_a_n_max) < 1.25 not_lnamax_idx = np.logical_not(lnamax_idx) for i in range(1, 5): idx_i = idx[i] if not np.any(idx_i): # No sites of the given site class continue idx2 = np.logical_and(lnamax_idx, idx_i) if np.any(idx2): # Use the approximate method for SRC and SNC c_a = C_SITE["LnAmax1D{:g}".format(i)] /\ (np.log(beta) - np.log(sreffc[idx2] ** alpha + beta)) c_b = -c_a * np.log(sreffc[idx2] ** alpha + beta) snc[idx2] = np.exp((c_a * (alpha - 1.) * np.log(beta) * np.log(10.0 * beta) - np.log(10.0) * (c_b + ln_sf[idx2])) / (c_a * (alpha * np.log(10.0 * beta) - np.log(beta)))) # For the cases when ln_a_n_max >= 1.25 idx2 = np.logical_and(not_lnamax_idx, idx_i) if np.any(idx2): snc[idx2] = (np.exp((ln_a_n_max[idx2] * np.log(sreffc[idx2] ** alpha + beta) - ln_sf[idx2] * np.log(beta)) / C_SITE["LnAmax1D{:g}".format(i)]) - beta) **\ (1.0 / alpha) smr[idx_i] = sreff[idx_i] * (snc[idx_i] / sreffc[idx_i]) *\ f_sr[idx_i] # For the cases when site class = i and SMR != 0 idx2 = np.logical_and(idx_i, np.fabs(smr) > 0.0) if np.any(idx2): sa_soil[idx2] += (-C_SITE["LnAmax1D{:g}".format(i)] * (np.log(smr[idx2] ** alpha + beta) - np.log(beta)) / (np.log(sreffc[idx2] ** alpha + beta) - np.log(beta))) return sa_soil
def add_site_amplification(self, C, C_SITE, sites, sa_rock, idx, rup)
Applies the site amplification scaling defined in equations 10 to 15
3.642815
3.62858
1.003923
# Get SR sreff = np.zeros(n_sites) sreffc = np.zeros(n_sites) f_sr = np.zeros(n_sites) for i in range(1, 5): sreff[idx[i]] += (np.exp(sa_rock[idx[i]]) * self.IMF[i]) sreffc[idx[i]] += (C_SITE["Src1D{:g}".format(i)] * self.IMF[i]) # Get f_SR f_sr[idx[i]] += C_SITE["fsr{:g}".format(i)] return sreff, sreffc, f_sr
def _get_smr_coeffs(self, C, C_SITE, idx, n_sites, sa_rock)
Returns the SReff and SReffC terms needed for equations 14 and 15
3.788602
3.485745
1.086884
ln_a_n_max = C["lnSC1AM"] * np.ones(n_sites) for i in [2, 3, 4]: if np.any(idx[i]): ln_a_n_max[idx[i]] += C["S{:g}".format(i)] return ln_a_n_max
def _get_ln_a_n_max(self, C, n_sites, idx, rup)
Returns the rock site amplification defined in equations 10a and 10b
4.304117
3.933938
1.094099
ln_sf = np.zeros(n_sites) for i in range(1, 5): ln_sf_i = (C["lnSC1AM"] - C_SITE["LnAmax1D{:g}".format(i)]) if i > 1: ln_sf_i += C["S{:g}".format(i)] ln_sf[idx[i]] += ln_sf_i return ln_sf
def _get_ln_sf(self, C, C_SITE, idx, n_sites, rup)
Returns the log SF term required for equation 12
5.237435
4.999738
1.047542
site_class = np.ones(vs30.shape, dtype=int) idx = {} idx[1] = vs30 > 600. idx[2] = np.logical_and(vs30 > 300., vs30 <= 600.) idx[3] = np.logical_and(vs30 > 200., vs30 <= 300.) idx[4] = vs30 <= 200. for i in [2, 3, 4]: site_class[idx[i]] = i return site_class, idx
def _get_site_classification(self, vs30)
Define the site class categories based on Vs30. Returns a vector of site class values and a dictionary containing logical vectors for each of the site classes
1.924298
1.904981
1.01014
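A minimal standalone sketch of the Vs30 binning used above (class 1: Vs30 > 600 m/s, class 2: 300-600, class 3: 200-300, class 4: ≤ 200); site_classification is an illustrative name, not the GMPE method itself:

    import numpy as np

    def site_classification(vs30):
        site_class = np.ones(vs30.shape, dtype=int)
        site_class[(vs30 > 300.) & (vs30 <= 600.)] = 2
        site_class[(vs30 > 200.) & (vs30 <= 300.)] = 3
        site_class[vs30 <= 200.] = 4
        return site_class

    print(site_classification(np.array([760., 450., 250., 150.])))  # [1 2 3 4]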
if rup.rake <= -45.0 and rup.rake >= -135.0: # Normal faulting return C["FN_UM"] elif rup.rake > 45.0 and rup.rake < 135.0: # Reverse faulting return C["FRV_UM"] else: # No adjustment for strike-slip faulting return 0.0
def get_sof_term(self, C, rup)
In the case of upper mantle events, separate coefficients are considered for normal, reverse and strike-slip faulting
3.849852
3.508081
1.097424
x_ij = dists.rrup gn_exp = np.exp(C["c1"] + 6.5 * C["c2"]) g_n = C["gcrN"] * np.log(self.CONSTANTS["xcro"] + 30. + gn_exp) *\ np.ones_like(x_ij) idx = x_ij <= 30.0 if np.any(idx): g_n[idx] = C["gcrN"] * np.log(self.CONSTANTS["xcro"] + x_ij[idx] + gn_exp) c_m = min(rup.mag, self.CONSTANTS["m_c"]) r_ij = self.CONSTANTS["xcro"] + x_ij + np.exp(C["c1"] + C["c2"] * c_m) return C["gUM"] * np.log(r_ij) +\ C["gcrL"] * np.log(x_ij + 200.0) +\ g_n + C["eum"] * x_ij + C["ecrV"] * dists.rvolc + C["gamma_S"]
def get_distance_term(self, C, dists, rup)
Returns the distance attenuation term
5.518758
5.448793
1.012841
if rup.ztor > 25.0: # Deep interface events c_int = C["cint"] else: c_int = C["cintS"] if rup.mag <= self.CONSTANTS["m_c"]: return c_int * rup.mag else: return (c_int * self.CONSTANTS["m_c"]) +\ (C["dint"] * (rup.mag - self.CONSTANTS["m_c"]))
def get_magnitude_scaling_term(self, C, rup)
Returns magnitude scaling term, which is dependent on top of rupture depth - as described in equations 1 and 2
4.704389
4.617753
1.018761
m_c = self.CONSTANTS["m_c"] if rup.mag <= m_c: return C["cSL"] * rup.mag +\ C["cSL2"] * ((rup.mag - self.CONSTANTS["m_sc"]) ** 2.) else: return C["cSL"] * m_c +\ C["cSL2"] * ((m_c - self.CONSTANTS["m_sc"]) ** 2.) +\ C["dSL"] * (rup.mag - m_c)
def get_magnitude_scaling_term(self, C, rup)
Returns the magnitude scaling defined in equation 1
3.364179
3.234898
1.039964
if rup.ztor > 100.0: return C["bSLH"] * 100.0 else: return C["bSLH"] * rup.ztor
def get_depth_term(self, C, rup)
Returns the depth term (dependent on top-of-rupture depth) as given in equation 1. Note that there is a ztor cap of 100 km that is introduced in the Fortran code but not mentioned in the original paper!
6.060206
4.396674
1.378362
x_ij = dists.rrup # Get anelastic scaling term in equation 5 if rup.ztor >= 50.: qslh = C["eSLH"] * (0.02 * rup.ztor - 1.0) else: qslh = 0.0 # r_volc = np.copy(dists.rvolc) # r_volc[np.logical_and(r_volc > 0.0, r_volc <= 12.0)] = 12.0 # r_volc[r_volc >= 80.0] = 80.0 # Get r_ij - distance for geometric spreading (equations 3 and 4) c_m = min(rup.mag, self.CONSTANTS["m_c"]) r_ij = x_ij + np.exp(C["alpha"] + C["beta"] * c_m) return C["gSL"] * np.log(r_ij) + \ C["gLL"] * np.log(x_ij + 200.) +\ C["eSL"] * x_ij + qslh * x_ij +\ C["eSLV"] * dists.rvolc + C["gamma"]
def get_distance_term(self, C, dists, rup)
Returns the distance scaling term in equation 2a. Note that the paper describes a lower and upper cap on Rvolc that is not found in the Fortran code, and is thus neglected here.
5.430401
5.273985
1.029658
COEFFS = self.COEFFS[imt] R = self._compute_term_r(COEFFS, rup.mag, dists.rrup) mean = 10 ** (self._compute_mean(COEFFS, rup.mag, R)) # Convert units to g, # but only for PGA and SA (not PGV): if imt.name in "SA PGA": mean = np.log(mean / (g*100.)) else: # PGV: mean = np.log(mean) c1_rrup = _compute_C1_term(COEFFS, dists.rrup) log_phi_ss = 1.00 stddevs = self._get_stddevs( COEFFS, stddev_types, sites.vs30.shape[0], rup.mag, c1_rrup, log_phi_ss, COEFFS['mean_phi_ss'] ) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
4.309418
4.318864
0.997813
phi_ss = _compute_phi_ss(C, mag, c1_rrup, log_phi_ss, mean_phi_ss) stddevs = [] for stddev_type in stddev_types: assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES if stddev_type == const.StdDev.TOTAL: stddevs.append(np.sqrt( C['tau'] * C['tau'] + phi_ss * phi_ss) + np.zeros(num_sites)) elif stddev_type == const.StdDev.INTRA_EVENT: stddevs.append(phi_ss + np.zeros(num_sites)) elif stddev_type == const.StdDev.INTER_EVENT: stddevs.append(C['tau'] + np.zeros(num_sites)) return stddevs
def _get_stddevs(self, C, stddev_types, num_sites, mag, c1_rrup, log_phi_ss, mean_phi_ss)
Return standard deviations
1.951847
1.967799
0.991893
if mag > self.M1: rrup_min = 0.55 elif mag > self.M2: rrup_min = -2.80 * mag + 14.55 else: rrup_min = -0.295 * mag + 2.65 R = np.maximum(rrup, rrup_min) return np.log10(R)
def _compute_term_r(self, C, mag, rrup)
Compute distance term d = log10(max(R,rmin));
4.147234
4.015028
1.032928
return ( C['a1'] + C['a2'] * mag + C['a3'] * np.power(mag, 2) + C['a4'] * np.power(mag, 3) + C['a5'] * np.power(mag, 4) + C['a6'] * np.power(mag, 5) + C['a7'] * np.power(mag, 6) )
def _compute_term_1(self, C, mag)
Compute term 1 a1 + a2.*M + a3.*M.^2 + a4.*M.^3 + a5.*M.^4 + a6.*M.^5 + a7.*M.^6
1.765017
1.532203
1.151947
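Since _compute_term_1 is a degree-6 polynomial in magnitude, it can be cross-checked against numpy.polyval; the coefficient values below are made up purely for the equivalence check:

    import numpy as np

    # hypothetical coefficient values, only to verify the equivalence
    C = {'a1': 1.0, 'a2': 0.5, 'a3': -0.1, 'a4': 0.02, 'a5': 0.0, 'a6': 0.0, 'a7': 0.0}
    mag = 6.0

    term1 = sum(C['a%d' % (i + 1)] * mag ** i for i in range(7))
    # numpy.polyval expects the highest-order coefficient first
    term1_polyval = np.polyval([C['a%d' % i] for i in range(7, 0, -1)], mag)
    print(np.isclose(term1, term1_polyval))  # True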
return ( (C['a8'] + C['a9'] * mag + C['a10'] * np.power(mag, 2) + C['a11'] * np.power(mag, 3)) * R )
def _compute_term_2(self, C, mag, R)
(a8 + a9.*M + a10.*M.*M + a11.*M.*M.*M).*d(r)
2.931237
2.299416
1.274774
return ( (C['a12'] + C['a13'] * mag + C['a14'] * np.power(mag, 2) + C['a15'] * np.power(mag, 3)) * np.power(R, 2) )
def _compute_term_3(self, C, mag, R)
(a12 + a13.*M + a14.*M.*M + a15.*M.*M.*M).*(d(r).^2)
2.696907
2.068153
1.304017
return ( (C['a16'] + C['a17'] * mag + C['a18'] * np.power(mag, 2) + C['a19'] * np.power(mag, 3)) * np.power(R, 3) )
def _compute_term_4(self, C, mag, R)
(a16 + a17.*M + a18.*M.*M + a19.*M.*M.*M).*(d(r).^3)
2.771748
2.058332
1.346599
return ( (C['a20'] + C['a21'] * mag + C['a22'] * np.power(mag, 2) + C['a23'] * np.power(mag, 3)) * np.power(R, 4) )
def _compute_term_5(self, C, mag, R)
(a20 + a21.*M + a22.*M.*M + a23.*M.*M.*M).*(d(r).^4)
2.603524
2.063747
1.261552
return (self._compute_term_1(C, mag) + self._compute_term_2(C, mag, term_dist_r) + self._compute_term_3(C, mag, term_dist_r) + self._compute_term_4(C, mag, term_dist_r) + self._compute_term_5(C, mag, term_dist_r))
def _compute_mean(self, C, mag, term_dist_r)
compute mean
1.588893
1.570391
1.011781
if isinstance(calc_id, str) or calc_id < 0 and not username: # get the last calculation in the datastore of the current user return datastore.read(calc_id) job = logs.dbcmd('get_job', calc_id, username) if job: return datastore.read(job.ds_calc_dir + '.hdf5') else: # calc_id can be present in the datastore and not in the database: # this happens if the calculation was run with `oq run` return datastore.read(calc_id)
def read(calc_id, username=None)
:param calc_id: a calculation ID :param username: if given, restrict the search to the user's calculations :returns: the associated DataStore instance
7.569753
7.472431
1.013024
assert len(curve_ref) == len(curve), (len(curve_ref), len(curve)) assert len(curve), 'The curves are empty!' max_diff = 0 for c1, c2 in zip(curve_ref, curve): if c1 >= min_value: max_diff = max(max_diff, abs(c1 - c2) / c1) return max_diff
def max_rel_diff(curve_ref, curve, min_value=0.01)
Compute the maximum relative difference between two curves. Only values greater than or equal to the min_value are considered. >>> curve_ref = [0.01, 0.02, 0.03, 0.05, 1.0] >>> curve = [0.011, 0.021, 0.031, 0.051, 1.0] >>> round(max_rel_diff(curve_ref, curve), 2) 0.1
2.419513
2.786503
0.868297
assert len(curve_ref) == len(curve), (len(curve_ref), len(curve)) assert len(curve), 'The curves are empty!' diffs = [max_rel_diff(c1, c2, min_value) for c1, c2 in zip(curve_ref, curve)] maxdiff = max(diffs) maxindex = diffs.index(maxdiff) return maxdiff, maxindex
def max_rel_diff_index(curve_ref, curve, min_value=0.01)
Compute the maximum relative difference between two sets of curves. Only values greater than or equal to the min_value are considered. Return both the maximum difference and its location (array index). >>> curve_refs = [[0.01, 0.02, 0.03, 0.05], [0.01, 0.02, 0.04, 0.06]] >>> curves = [[0.011, 0.021, 0.031, 0.051], [0.012, 0.022, 0.032, 0.051]] >>> max_rel_diff_index(curve_refs, curves) (0.2, 1)
2.493991
2.976554
0.837879
bigvalues = array_ref > min_value reldiffsquare = (1. - array[bigvalues] / array_ref[bigvalues]) ** 2 return numpy.sqrt(reldiffsquare.mean())
def rmsep(array_ref, array, min_value=0)
Root Mean Square Error Percentage for two arrays. :param array_ref: reference array :param array: another array :param min_value: compare only the elements larger than min_value :returns: the relative distance between the arrays >>> curve_ref = numpy.array([[0.01, 0.02, 0.03, 0.05], ... [0.01, 0.02, 0.04, 0.06]]) >>> curve = numpy.array([[0.011, 0.021, 0.031, 0.051], ... [0.012, 0.022, 0.032, 0.051]]) >>> str(round(rmsep(curve_ref, curve, .01), 5)) '0.11292'
5.5839
6.190811
0.901966
arr = numpy.copy(array) arr[arr < cutoff] = cutoff return numpy.log(arr)
def log(array, cutoff)
Compute the logarithm of an array with a cutoff on the small values
3.686871
3.028515
1.217386
dist = numpy.zeros(len(arrays)) logref = log(ref, cutoff) for rlz, array in enumerate(arrays): diff = log(array, cutoff) - logref dist[rlz] = numpy.sqrt((diff * diff).sum()) rlz = dist.argmin() closest = dict(rlz=rlz, value=arrays[rlz], dist=dist[rlz]) return closest
def closest_to_ref(arrays, ref, cutoff=1E-12)
:param arrays: a sequence of R arrays :param ref: the reference array :returns: a dictionary with keys rlz, value, and dist
3.509706
2.902738
1.209102
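A self-contained sketch of closest_to_ref, repeating the cutoff-log helper from the previous record; the reference curve and realizations are made-up numbers:

    import numpy as np

    def log(array, cutoff):
        arr = np.copy(array)
        arr[arr < cutoff] = cutoff
        return np.log(arr)

    def closest_to_ref(arrays, ref, cutoff=1E-12):
        # Euclidean distance in log space between each realization and the reference
        dist = np.array([np.sqrt(((log(a, cutoff) - log(ref, cutoff)) ** 2).sum())
                         for a in arrays])
        rlz = dist.argmin()
        return dict(rlz=rlz, value=arrays[rlz], dist=dist[rlz])

    ref = np.array([0.1, 0.01, 0.001])
    rlzs = [np.array([0.2, 0.02, 0.002]), np.array([0.11, 0.012, 0.0009])]
    print(closest_to_ref(rlzs, ref)['rlz'])  # 1, the realization closest to ref in log space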
assert len(a1) == len(a2), (len(a1), len(a2)) if a1.dtype.names is None and len(a1.shape) == 1: # the first array is not composite, but it is one-dimensional a1 = numpy.array(a1, numpy.dtype([(firstfield, a1.dtype)])) fields1 = [(f, a1.dtype.fields[f][0]) for f in a1.dtype.names] if a2.dtype.names is None: # the second array is not composite assert len(a2.shape) == 2, a2.shape width = a2.shape[1] fields2 = [('value%d' % i, a2.dtype) for i in range(width)] composite = numpy.zeros(a1.shape, numpy.dtype(fields1 + fields2)) for f1 in dict(fields1): composite[f1] = a1[f1] for i in range(width): composite['value%d' % i] = a2[:, i] return composite fields2 = [(f, a2.dtype.fields[f][0]) for f in a2.dtype.names] composite = numpy.zeros(a1.shape, numpy.dtype(fields1 + fields2)) for f1 in dict(fields1): composite[f1] = a1[f1] for f2 in dict(fields2): composite[f2] = a2[f2] return composite
def compose_arrays(a1, a2, firstfield='etag')
Compose composite arrays by generating an extended datatype containing all the fields. The two arrays must have the same length.
1.943182
1.899675
1.022902
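A small sketch of what compose_arrays produces when the second array is non-composite (a plain two-column float array): the first array supplies the firstfield (default 'etag') and each column becomes a generated field value0, value1, ...; the data below is made up:

    import numpy as np

    ids = np.array([10, 11, 12], np.uint32)
    values = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])

    # the composed record array has an extended dtype with all the fields
    composite = np.zeros(3, np.dtype([('etag', np.uint32),
                                      ('value0', values.dtype),
                                      ('value1', values.dtype)]))
    composite['etag'] = ids
    composite['value0'] = values[:, 0]
    composite['value1'] = values[:, 1]
    print(composite.dtype.names)  # ('etag', 'value0', 'value1')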
assetcol = dstore['assetcol'] tagnames = sorted(assetcol.tagnames) tag = {t: getattr(assetcol.tagcol, t) for t in tagnames} dtlist = [('asset_ref', (bytes, 100))] for tagname in tagnames: dtlist.append((tagname, (bytes, 100))) dtlist.extend([('lon', F32), ('lat', F32)]) asset_data = [] for aref, a in zip(assetcol.asset_refs, assetcol.array): tup = tuple(b'"%s"' % tag[t][a[t]].encode('utf-8') for t in tagnames) asset_data.append((aref,) + tup + (a['lon'], a['lat'])) return numpy.array(asset_data, dtlist)
def get_assets(dstore)
:param dstore: a datastore with keys 'assetcol' :returns: an array of records (asset_ref, tag1, ..., tagN, lon, lat)
3.884249
3.300399
1.176903
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types) # Compute SA with primed coeffs and PGA with both unprimed and # primed coeffs C = self.COEFFS_PRIMED[imt] C_PGA = self.COEFFS_PRIMED[PGA()] C_PGA_unprimed = self.COEFFS_UNPRIMED[PGA()] SC = self.STRESS_COEFFS[imt] # Get S term to determine if consider site term is applied S = self._get_site_class(sites) # Abrahamson and Silva (1997) hanging wall term. This is not used # in the latest version of GMPE but is defined in functional form in # the paper so we keep it here as a placeholder f4HW = self._compute_f4(C, rup.mag, dists.rrup) # Flags for rake angles CN, CR = self._get_fault_mechanism_flags(rup.rake) # Get volcanic path distance which Rvol=0 for current implementation # of McVerry2006Asc, but kept here as placeholder for future use rvol = self._get_volcanic_path_distance(dists.rrup) # Get delta_C and delta_D terms for site class delta_C, delta_D = self._get_deltas(sites) # Get Atkinson and Boore (2006) stress drop factors or additional # standard deviation adjustment. Only apply these factors to sources # located within the boundaries of the CSHM. in_cshm = self._check_in_cshm_polygon(rup) if in_cshm is True: stress_drop_factor = self._compute_stress_drop_adjustment(SC, rup.mag) additional_sigma = self._compute_additional_sigma() else: stress_drop_factor = 0 additional_sigma = 0 # Compute lnPGA_ABCD primed lnPGAp_ABCD = self._compute_mean(C_PGA, S, rup.mag, dists.rrup, rvol, rup.hypo_depth, CN, CR, f4HW, delta_C, delta_D) # Compute lnPGA_ABCD unprimed lnPGA_ABCD = self._compute_mean(C_PGA_unprimed, S, rup.mag, dists.rrup, rvol, rup.hypo_depth, CN, CR, f4HW, delta_C, delta_D) # Compute lnSA_ABCD lnSAp_ABCD = self._compute_mean(C, S, rup.mag, dists.rrup, rvol, rup.hypo_depth, CN, CR, f4HW, delta_C, delta_D) # Stage 3: Equation 6 SA_ABCD(T). This is lnSA_ABCD # need to calculate final lnSA_ABCD from non-log values but return log mean = np.log(np.exp(lnSAp_ABCD) * (np.exp(lnPGA_ABCD) / np.exp(lnPGAp_ABCD))) + stress_drop_factor # Compute standard deviations C_STD = self.COEFFS_STD[imt] stddevs = self._get_stddevs_chch( C_STD, rup.mag, stddev_types, sites, additional_sigma ) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
4.43037
4.449192
0.99577
lats = np.ravel(rup.surface.mesh.array[1]) lons = np.ravel(rup.surface.mesh.array[0]) # These coordinates are provided by M Gerstenberger (personal # communication, 10 August 2018) polygon = shapely.geometry.Polygon([(171.6, -43.3), (173.2, -43.3), (173.2, -43.9), (171.6, -43.9)]) points_in_polygon = [ shapely.geometry.Point(lons[i], lats[i]).within(polygon) for i in np.arange(len(lons))] in_cshm = any(points_in_polygon) return in_cshm
def _check_in_cshm_polygon(self, rup)
Checks if any part of the rupture surface mesh is located within the intended boundaries of the Canterbury Seismic Hazard Model in Gerstenberger et al. (2014), Seismic hazard modelling for the recovery of Christchurch, Earthquake Spectra, 30(1), 17-29.
3.29136
2.990503
1.100604
conn = WrappedConnection(conn, debug=debug) try: upgrade(conn) except Exception: conn.rollback() raise else: if dry_run: conn.rollback() else: conn.commit()
def check_script(upgrade, conn, dry_run=True, debug=True)
A utility to debug upgrade scripts written in Python :param upgrade: upgrade procedure :param conn: a DB API 2 connection :param dry_run: if True, do not change the database :param debug: if True, print the queries which are executed
2.824935
3.82553
0.738443
sql = open(fname).read() try: # we cannot use conn.executescript which is non transactional for query in sql.split('\n\n'): conn.execute(query) except Exception: logging.error('Error executing %s' % fname) raise
def apply_sql_script(conn, fname)
Apply the given SQL script to the database :param conn: a DB API 2 connection :param fname: full path to the creation script
4.248095
5.032029
0.844211
upgrader = UpgradeManager.instance(conn, pkg_name) t0 = time.time() # run the upgrade scripts try: versions_applied = upgrader.upgrade(conn, skip_versions) except: conn.rollback() raise else: conn.commit() dt = time.time() - t0 logging.info('Upgrade completed in %s seconds', dt) return versions_applied
def upgrade_db(conn, pkg_name='openquake.server.db.schema.upgrades', skip_versions=())
Upgrade a database by running several scripts in a single transaction. :param conn: a DB API 2 connection :param str pkg_name: the name of the package with the upgrade scripts :param list skip_versions: the versions to skip :returns: the version numbers of the new scripts applied the database
3.414392
3.486271
0.979382
upgrader = UpgradeManager.instance(conn, pkg_name) return max(upgrader.get_db_versions(conn))
def db_version(conn, pkg_name='openquake.server.db.schema.upgrades')
:param conn: a DB API 2 connection :param str pkg_name: the name of the package with the upgrade scripts :returns: the current version of the database
8.134212
9.657683
0.842253
msg_safe_ = '\nThe following scripts can be applied safely:\n%s' msg_slow_ = '\nPlease note that the following scripts could be slow:\n%s' msg_danger_ = ('\nPlease note that the following scripts are potentially ' 'dangerous and could destroy your data:\n%s') upgrader = UpgradeManager.instance(conn, pkg_name) applied_versions = upgrader.get_db_versions(conn) current_version = max(applied_versions) slow = [] danger = [] safe = [] for script in getattr(upgrader, extract_scripts)(): url = script['url'] if script['version'] in applied_versions: continue elif script['version'] <= current_version: # you cannot apply a script with a version number lower than the # current db version: ensure that upgrades are strictly incremental raise VersionTooSmall( 'Your database is at version %s but you want to apply %s??' % (current_version, script['fname'])) elif script['flag'] == '-slow': slow.append(url) elif script['flag'] == '-danger': danger.append(url) else: # safe script safe.append(url) if not safe and not slow and not danger: return 'Your database is already updated at version %s.' % \ current_version header = 'Your database is at version %s.' % current_version msg_safe = msg_safe_ % '\n'.join(safe) msg_slow = msg_slow_ % '\n'.join(slow) msg_danger = msg_danger_ % '\n'.join(danger) msg = header + (msg_safe if safe else '') + (msg_slow if slow else '') \ + (msg_danger if danger else '') msg += ('\nClick on the links if you want to know what exactly the ' 'scripts are doing.') if slow: msg += ('\nEven slow script can be fast if your database is small or' ' the upgrade affects tables that are empty.') if danger: msg += ('\nEven dangerous scripts are fine if they ' 'affect empty tables or data you are not interested in.') return msg
def what_if_I_upgrade(conn, pkg_name='openquake.server.db.schema.upgrades', extract_scripts='extract_upgrade_scripts')
:param conn: a DB API 2 connection :param str pkg_name: the name of the package with the upgrade scripts :param extract_scripts: name of the method to extract the scripts
4.044836
4.093392
0.988138
curs = self._conn.cursor() query = curs.mogrify(templ, args) if self.debug: print(query) curs.execute(query) return curs
def run(self, templ, *args)
A simple utility to run SQL queries. :param templ: a query or query template :param args: the arguments (or the empty tuple) :returns: the DB API 2 cursor used to run the query
3.361985
3.243192
1.036628
logging.info('Creating the versioning table %s', self.version_table) conn.executescript(CREATE_VERSIONING % self.version_table) self._insert_script(self.read_scripts()[0], conn)
def install_versioning(self, conn)
Create the version table into an already populated database and insert the base script. :param conn: a DB API 2 connection
6.425457
5.817914
1.104426
base = self.read_scripts()[0]['fname'] logging.info('Creating the initial schema from %s', base) apply_sql_script(conn, os.path.join(self.upgrade_dir, base)) self.install_versioning(conn)
def init(self, conn)
Create the version table and run the base script on an empty database. :param conn: a DB API 2 connection
10.22494
9.418513
1.085621
''' Upgrade the database from the current version to the maximum version in the upgrade scripts. :param conn: a DBAPI 2 connection :param skip_versions: the versions to skip ''' db_versions = self.get_db_versions(conn) self.starting_version = max(db_versions) to_skip = sorted(db_versions | set(skip_versions)) scripts = self.read_scripts(None, None, to_skip) if not scripts: # no new scripts to apply return [] self.ending_version = max(s['version'] for s in scripts) return self._upgrade(conn, scripts)
def upgrade(self, conn, skip_versions=())
Upgrade the database from the current version to the maximum version in the upgrade scripts. :param conn: a DBAPI 2 connection :param skip_versions: the versions to skip
4.333464
3.234548
1.339743
scripts = self.read_scripts(skip_versions=self.get_db_versions(conn)) versions = [s['version'] for s in scripts] if versions: return ('Your database is not updated. You can update it by ' 'running oq engine --upgrade-db which will process the ' 'following new versions: %s' % versions)
def check_versions(self, conn)
:param conn: a DB API 2 connection :returns: a message with the versions that will be applied or None
9.939926
8.925241
1.113687
curs = conn.cursor() query = 'select version from {}'.format(self.version_table) try: curs.execute(query) return set(version for version, in curs.fetchall()) except: raise VersioningNotInstalled('Run oq engine --upgrade-db')
def get_db_versions(self, conn)
Get all the versions stored in the database as a set. :param conn: a DB API 2 connection
7.592251
7.577163
1.001991
''' Parse a script name and return a dictionary with fields fname, name, version and ext (or None if the name does not match). :param name: name of the script ''' match = re.match(self.pattern, script_name) if not match: return version, flag, name, ext = match.groups() return dict(fname=script_name, version=version, name=name, flag=flag, ext=ext, url=self.upgrades_url + script_name)
def parse_script_name(self, script_name)
Parse a script name and return a dictionary with fields fname, name, version and ext (or None if the name does not match). :param name: name of the script
4.799796
2.776664
1.728619
scripts = [] versions = {} # a script is unique per version for scriptname in sorted(os.listdir(self.upgrade_dir)): match = self.parse_script_name(scriptname) if match: version = match['version'] if version in skip_versions: continue # do not collect scripts already applied elif minversion and version <= minversion: continue # do not collect versions too old elif maxversion and version > maxversion: continue # do not collect versions too new try: previousname = versions[version] except KeyError: # no previous script with the same version scripts.append(match) versions[version] = scriptname else: raise DuplicatedVersion( 'Duplicated versions {%s,%s}' % (scriptname, previousname)) return scripts
def read_scripts(self, minversion=None, maxversion=None, skip_versions=())
Extract the upgrade scripts from a directory as a list of dictionaries, ordered by version. :param minversion: the minimum version to consider :param maxversion: the maximum version to consider :param skip_versions: the versions to skip
3.71169
3.743056
0.99162
link_pattern = r'>\s*{0}\s*<'.format(self.pattern[1:-1]) page = urllib.request.urlopen(self.upgrades_url).read().decode('utf-8') for mo in re.finditer(link_pattern, page): scriptname = mo.group(0)[1:-1].strip() yield self.parse_script_name(scriptname)
def extract_upgrade_scripts(self)
Extract the OpenQuake upgrade scripts from the links in the GitHub page
4.796794
4.313412
1.112065
try: # upgrader is an UpgradeManager instance defined in the __init__.py upgrader = importlib.import_module(pkg_name).upgrader except ImportError: raise SystemExit( 'Could not import %s (not in the PYTHONPATH?)' % pkg_name) if not upgrader.read_scripts(): raise SystemExit( 'The upgrade_dir does not contain scripts matching ' 'the pattern %s' % upgrader.pattern) curs = conn.cursor() # check if there is already a versioning table curs.execute("SELECT name FROM sqlite_master " "WHERE name=%r" % upgrader.version_table) versioning_table = curs.fetchall() # if not, run the base script and create the versioning table if not versioning_table: upgrader.init(conn) conn.commit() return upgrader
def instance(cls, conn, pkg_name='openquake.server.db.schema.upgrades')
Return an :class:`UpgradeManager` instance. :param conn: a DB API 2 connection :param str pkg_name: the name of the package with the upgrade scripts
4.456738
4.395094
1.014026
if len(self.points) == 2: return self.points[0].azimuth(self.points[1]) lons = numpy.array([point.longitude for point in self.points]) lats = numpy.array([point.latitude for point in self.points]) azimuths = geodetic.azimuth(lons[:-1], lats[:-1], lons[1:], lats[1:]) distances = geodetic.geodetic_distance(lons[:-1], lats[:-1], lons[1:], lats[1:]) azimuths = numpy.radians(azimuths) # convert polar coordinates to Cartesian ones and calculate # the average coordinate of each component avg_x = numpy.mean(distances * numpy.sin(azimuths)) avg_y = numpy.mean(distances * numpy.cos(azimuths)) # find the mean azimuth from that mean vector azimuth = numpy.degrees(numpy.arctan2(avg_x, avg_y)) if azimuth < 0: azimuth += 360 return azimuth
def average_azimuth(self)
Calculate and return weighted average azimuth of all line's segments in decimal degrees. Uses formula from http://en.wikipedia.org/wiki/Mean_of_circular_quantities >>> from openquake.hazardlib.geo.point import Point as P >>> '%.1f' % Line([P(0, 0), P(1e-5, 1e-5)]).average_azimuth() '45.0' >>> '%.1f' % Line([P(0, 0), P(0, 1e-5), P(1e-5, 1e-5)]).average_azimuth() '45.0' >>> line = Line([P(0, 0), P(-2e-5, 0), P(-2e-5, 1.154e-5)]) >>> '%.1f' % line.average_azimuth() '300.0'
2.351805
2.395485
0.981766
if len(self.points) < 2: return Line(self.points) resampled_points = [] # 1. Resample the first section. 2. Loop over the remaining points # in the line and resample the remaining sections. # 3. Extend the list with the resampled points, except the first one # (because it's already contained in the previous set of # resampled points). resampled_points.extend( self.points[0].equally_spaced_points(self.points[1], section_length) ) # Skip the first point, it's already resampled for i in range(2, len(self.points)): points = resampled_points[-1].equally_spaced_points( self.points[i], section_length ) resampled_points.extend(points[1:]) return Line(resampled_points)
def resample(self, section_length)
Resample this line into sections. The first point in the resampled line corresponds to the first point in the original line. Starting from the first point in the original line, a line segment is defined as the line connecting the last point in the resampled line and the next point in the original line. The line segment is then split into sections of length equal to ``section_length``. The resampled line is obtained by concatenating all sections. The number of sections in a line segment is calculated as follows: ``round(segment_length / section_length)``. Note that the resulting line has a length that is an exact multiple of ``section_length``, therefore its length is in general smaller or greater (depending on the rounding) than the length of the original line. For a straight line, the difference between the resulting length and the original length is at maximum half of the ``section_length``. For a curved line, the difference may be larger, because of corners getting cut. :param section_length: The length of the section, in km. :type section_length: float :returns: A new line resampled into sections based on the given length. :rtype: An instance of :class:`Line`
3.33936
3.075461
1.085808
length = 0 for i, point in enumerate(self.points): if i != 0: length += point.distance(self.points[i - 1]) return length
def get_length(self)
Calculate and return the length of the line as a sum of lengths of all its segments. :returns: Total length in km.
2.958276
3.009645
0.982932
assert len(self.points) > 1, "can not resample the line of one point" section_length = self.get_length() / (num_points - 1) resampled_points = [self.points[0]] segment = 0 acc_length = 0 last_segment_length = 0 for i in range(num_points - 1): tot_length = (i + 1) * section_length while tot_length > acc_length and segment < len(self.points) - 1: last_segment_length = self.points[segment].distance( self.points[segment + 1] ) acc_length += last_segment_length segment += 1 p1, p2 = self.points[segment - 1:segment + 1] offset = tot_length - (acc_length - last_segment_length) if offset < 1e-5: # forward geodetic transformations for very small distances # are very inefficient (and also unneeded). if target point # is just 1 cm away from original (non-resampled) line vertex, # don't even bother doing geodetic calculations. resampled = p1 else: resampled = p1.equally_spaced_points(p2, offset)[1] resampled_points.append(resampled) return Line(resampled_points)
def resample_to_num_points(self, num_points)
Resample the line to a specified number of points. :param num_points: Integer number of points the resulting line should have. :returns: A new line with that many points as requested.
3.568122
3.588871
0.994218
assert (percentile >= 0.0) and (percentile <= 1.0) c_val = _scaling(tau, var_tau) k_val = _dof(tau, var_tau) return np.sqrt(c_val * chi2.ppf(percentile, df=k_val))
def _at_percentile(tau, var_tau, percentile)
Returns the value of the inverse chi-2 distribution at the given percentile from the mean and variance of the uncertainty model, as reported in equations 5.1 - 5.3 of Al Atik (2015)
3.773633
3.765177
1.002246
if imt.name == "PGV": C = params["PGV"] else: C = params["SA"] if mag > 6.5: return C["tau4"] elif (mag > 5.5) and (mag <= 6.5): return ITPL(mag, C["tau4"], C["tau3"], 5.5, 1.0) elif (mag > 5.0) and (mag <= 5.5): return ITPL(mag, C["tau3"], C["tau2"], 5.0, 0.5) elif (mag > 4.5) and (mag <= 5.0): return ITPL(mag, C["tau2"], C["tau1"], 4.5, 0.5) else: return C["tau1"]
def global_tau(imt, mag, params)
'Global' model of inter-event variability, as presented in equation 5.6 (p103)
2.185115
2.217818
0.985255
if imt.name == "PGV": C = params["PGV"] else: C = params["SA"] if mag > 6.5: return C["tau3"] elif (mag > 5.5) and (mag <= 6.5): return ITPL(mag, C["tau3"], C["tau2"], 5.5, 1.0) elif (mag > 5.0) and (mag <= 5.5): return ITPL(mag, C["tau2"], C["tau1"], 5.0, 0.5) else: return C["tau1"]
def cena_tau(imt, mag, params)
Returns the inter-event standard deviation, tau, for the CENA case
2.666757
2.659894
1.00258
tau_model = {}
for imt in mean:
    tau_model[imt] = {}
    for key in mean[imt]:
        if quantile is None:
            tau_model[imt][key] = mean[imt][key]
        else:
            tau_model[imt][key] = _at_percentile(mean[imt][key],
                                                 stddev[imt][key],
                                                 quantile)
return tau_model
def get_tau_at_quantile(mean, stddev, quantile)
Returns the value of tau at a given quantile in the form of a dictionary organised by intensity measure
2.442457
2.329742
1.048381
# Setup SA coeffs - the backward compatible Python 2.7 way
coeffs = deepcopy(phi_model.sa_coeffs)
coeffs.update(phi_model.non_sa_coeffs)
for imt in coeffs:
    if quantile is None:
        coeffs[imt] = {"a": phi_model[imt]["mean_a"],
                       "b": phi_model[imt]["mean_b"]}
    else:
        coeffs[imt] = {
            "a": _at_percentile(phi_model[imt]["mean_a"],
                                phi_model[imt]["var_a"],
                                quantile),
            "b": _at_percentile(phi_model[imt]["mean_b"],
                                phi_model[imt]["var_b"],
                                quantile)
        }
return CoeffsTable(sa_damping=5., table=coeffs)
def get_phi_ss_at_quantile(phi_model, quantile)
Returns the phi_ss values at the specified quantile as an instance of :class:`openquake.hazardlib.gsim.base.CoeffsTable` - applies to the magnitude-dependent cases
3.552452
3.376824
1.05201
C = params[imt]
if mag <= 5.0:
    phi = C["a"]
elif mag > 6.5:
    phi = C["b"]
else:
    phi = C["a"] + (mag - 5.0) * ((C["b"] - C["a"]) / 1.5)
return phi
def get_phi_ss(imt, mag, params)
Returns the single station phi (or its variance) for a given magnitude and intensity measure type according to equation 5.14 of Al Atik (2015)
2.964587
3.0313
0.977992
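A worked evaluation of the piecewise rule in equation 5.14, using illustrative coefficients rather than the published ones.

C = {"a": 0.54, "b": 0.45}   # hypothetical phi_ss coefficients
for mag in (4.5, 5.0, 5.75, 6.5, 7.2):
    if mag <= 5.0:
        phi = C["a"]
    elif mag > 6.5:
        phi = C["b"]
    else:
        phi = C["a"] + (mag - 5.0) * ((C["b"] - C["a"]) / 1.5)
    print(mag, round(phi, 3))
# mag 5.75 sits halfway between 5.0 and 6.5, giving the midpoint 0.495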
tau = self._get_tau(imt, mag)
phi = self._get_phi(imt, mag)
sigma = np.sqrt(tau ** 2. + phi ** 2.)
stddevs = []
for stddev_type in stddev_types:
    assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
    if stddev_type == const.StdDev.TOTAL:
        stddevs.append(sigma + np.zeros(num_sites))
    elif stddev_type == const.StdDev.INTRA_EVENT:
        stddevs.append(phi + np.zeros(num_sites))
    elif stddev_type == const.StdDev.INTER_EVENT:
        stddevs.append(tau + np.zeros(num_sites))
return stddevs
def get_stddevs(self, mag, imt, stddev_types, num_sites)
Returns the standard deviations for either the ergodic or non-ergodic models
1.723263
1.717205
1.003528
return TAU_EXECUTION[self.tau_model](imt, mag, self.TAU)
def _get_tau(self, imt, mag)
Returns the inter-event standard deviation (tau)
12.082797
12.433733
0.971775
phi = get_phi_ss(imt, mag, self.PHI_SS)
if self.ergodic:
    C = self.PHI_S2SS[imt]
    phi = np.sqrt(phi ** 2. + C["phi_s2ss"] ** 2.)
return phi
def _get_phi(self, imt, mag)
Returns the within-event standard deviation (phi)
6.36195
6.355749
1.000976
stddevs = []
for stddev_type in stddev_types:
    assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
    if stddev_type == const.StdDev.TOTAL:
        sigma = self._get_total_sigma(imt, mag)
        stddevs.append(sigma + np.zeros(num_sites))
return stddevs
def get_stddevs(self, mag, imt, stddev_types, num_sites)
Returns the total standard deviation
2.301807
2.332937
0.986656
# The mean tau is found in self.TAU. Get the variance in tau
tau_std = TAU_SETUP[self.tau_model]["STD"]
# The mean phi_ss is found in self.PHI_SS. Get the variance in phi
phi_std = deepcopy(self.PHI_SS.sa_coeffs)
phi_std.update(self.PHI_SS.non_sa_coeffs)
for key in phi_std:
    phi_std[key] = {"a": PHI_SETUP[self.phi_model][key]["var_a"],
                    "b": PHI_SETUP[self.phi_model][key]["var_b"]}
if self.ergodic:
    # IMT list should be taken from the PHI_S2SS_MODEL
    imt_list = list(
        PHI_S2SS_MODEL[self.phi_s2ss_model].non_sa_coeffs.keys())
    imt_list += \
        list(PHI_S2SS_MODEL[self.phi_s2ss_model].sa_coeffs.keys())
else:
    imt_list = phi_std.keys()
phi_std = CoeffsTable(sa_damping=5, table=phi_std)
tau_bar, tau_std = self._get_tau_vector(self.TAU, tau_std, imt_list)
phi_bar, phi_std = self._get_phi_vector(self.PHI_SS, phi_std, imt_list)
sigma = {}
# Calculate the total standard deviation
for imt in imt_list:
    sigma[imt] = {}
    for i, key in enumerate(self.tau_keys):
        # Calculate the expected standard deviation
        sigma_bar = np.sqrt(tau_bar[imt][i] ** 2. +
                            phi_bar[imt][i] ** 2.)
        # Calculate the variance in the standard deviation
        sigma_std = np.sqrt(tau_std[imt][i] ** 2. +
                            phi_std[imt][i] ** 2.)
        # The keys swap from tau to sigma
        new_key = key.replace("tau", "sigma")
        if sigma_quantile is not None:
            sigma[imt][new_key] = \
                _at_percentile(sigma_bar, sigma_std, sigma_quantile)
        else:
            sigma[imt][new_key] = sigma_bar
        self.tau_keys[i] = new_key
self.SIGMA = CoeffsTable(sa_damping=5, table=sigma)
def _get_sigma_at_quantile(self, sigma_quantile)
Calculates the total standard deviation at the specified quantile
3.208453
3.220248
0.996338
self.magnitude_limits = MAG_LIMS_KEYS[self.tau_model]["mag"]
self.tau_keys = MAG_LIMS_KEYS[self.tau_model]["keys"]
t_bar = {}
t_std = {}
for imt in imt_list:
    t_bar[imt] = []
    t_std[imt] = []
    for mag, key in zip(self.magnitude_limits, self.tau_keys):
        t_bar[imt].append(
            TAU_EXECUTION[self.tau_model](imt, mag, tau_mean))
        t_std[imt].append(
            TAU_EXECUTION[self.tau_model](imt, mag, tau_std))
return t_bar, t_std
def _get_tau_vector(self, tau_mean, tau_std, imt_list)
Gets the vector of mean and variance of tau values corresponding to the specific model and returns them as dictionaries
2.75873
2.709749
1.018076
p_bar = {}
p_std = {}
for imt in imt_list:
    p_bar[imt] = []
    p_std[imt] = []
    for mag in self.magnitude_limits:
        phi_ss_mean = get_phi_ss(imt, mag, phi_mean)
        phi_ss_std = get_phi_ss(imt, mag, phi_std)
        if self.ergodic:
            # Add on the phi_s2ss term according to Eqs. 5.15 and 5.16
            # of Al Atik (2015)
            phi_ss_mean = np.sqrt(
                phi_ss_mean ** 2. +
                PHI_S2SS_MODEL[self.phi_s2ss_model][imt]["mean"] ** 2.)
            phi_ss_std = np.sqrt(
                phi_ss_std ** 2. +
                PHI_S2SS_MODEL[self.phi_s2ss_model][imt]["var"] ** 2.)
        p_bar[imt].append(phi_ss_mean)
        p_std[imt].append(phi_ss_std)
return p_bar, p_std
def _get_phi_vector(self, phi_mean, phi_std, imt_list)
Gets the vector of mean and variance of phi values corresponding to the specific model and returns them as dictionaries
2.705737
2.662756
1.016142
C = self.SIGMA[imt]
if mag <= self.magnitude_limits[0]:
    # The CENA constant model is always returned here
    return C[self.tau_keys[0]]
elif mag > self.magnitude_limits[-1]:
    return C[self.tau_keys[-1]]
else:
    # Needs interpolation
    for i in range(len(self.tau_keys) - 1):
        l_m = self.magnitude_limits[i]
        u_m = self.magnitude_limits[i + 1]
        if mag > l_m and mag <= u_m:
            return ITPL(mag, C[self.tau_keys[i + 1]],
                        C[self.tau_keys[i]], l_m, u_m - l_m)
def _get_total_sigma(self, imt, mag)
Returns the estimated total standard deviation for a given intensity measure type and magnitude
3.446367
3.456414
0.997093
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
           for stddev_type in stddev_types)
F, HW = self._get_fault_type_hanging_wall(rup.rake)
S = self._get_site_class(sites.vs30)
# compute pga on rock (used then to compute site amplification factor)
C = self.COEFFS[PGA()]
pga_rock = np.exp(
    self._compute_mean_on_rock(C, rup.mag, dists.rrup, F, HW))
# compute mean for the given imt (do not repeat the calculation if
# imt is PGA, just add the site amplification term)
if imt == PGA():
    mean = np.log(pga_rock) + S * self._compute_f5(C, pga_rock)
else:
    C = self.COEFFS[imt]
    mean = (self._compute_mean_on_rock(C, rup.mag, dists.rrup, F, HW) +
            S * self._compute_f5(C, pga_rock))
C_STD = self.COEFFS_STD[imt]
stddevs = self._get_stddevs(C_STD, rup.mag, stddev_types,
                            sites.vs30.size)
return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
3.302916
3.300642
1.000689
f1 = self._compute_f1(C, mag, rrup)
f3 = self._compute_f3(C, mag)
f4 = self._compute_f4(C, mag, rrup)
return f1 + F * f3 + HW * f4
def _compute_mean_on_rock(self, C, mag, rrup, F, HW)
Compute mean value on rock (that is eq.1, page 105 with S = 0)
2.475111
2.33572
1.059678
F, HW = 0, 0
if 45 <= rake <= 135:
    F, HW = 1, 1
return F, HW
def _get_fault_type_hanging_wall(self, rake)
Return fault type (F) and hanging wall (HW) flags depending on rake angle. The method assumes 'reverse' (F = 1) if 45 <= rake <= 135, and 'other' (F = 0) otherwise. The hanging-wall flag is set to 1 if 'reverse' and 0 if 'other'.
5.404798
3.352057
1.612383
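A quick self-contained check of the rake-based flags described above (a pure-Python sketch, not the class method itself).

def fault_flags(rake):
    # mirrors the rule above: 'reverse' mechanism for 45 <= rake <= 135
    F, HW = 0, 0
    if 45 <= rake <= 135:
        F, HW = 1, 1
    return F, HW

print(fault_flags(90))    # (1, 1)  reverse faulting, hanging-wall term on
print(fault_flags(-90))   # (0, 0)  normal faulting treated as 'other'
print(fault_flags(0))     # (0, 0)  strike-slip treated as 'other'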
r = np.sqrt(rrup ** 2 + C['c4'] ** 2)
f1 = (
    C['a1'] +
    C['a12'] * (8.5 - mag) ** C['n'] +
    (C['a3'] + C['a13'] * (mag - C['c1'])) * np.log(r)
)
if mag <= C['c1']:
    f1 += C['a2'] * (mag - C['c1'])
else:
    f1 += C['a4'] * (mag - C['c1'])
return f1
def _compute_f1(self, C, mag, rrup)
Compute f1 term (eq.4, page 105)
2.81521
2.624242
1.072771
if mag <= 5.8:
    return C['a5']
elif 5.8 < mag < C['c1']:
    return (C['a5'] +
            (C['a6'] - C['a5']) * (mag - 5.8) / (C['c1'] - 5.8))
else:
    return C['a6']
def _compute_f3(self, C, mag)
Compute f3 term (eq.6, page 106) NOTE: In the original manuscript, for the case 5.8 < mag < c1, the term in the numerator '(mag - 5.8)' is missing, while it is present in the software used for creating the verification tables
2.698677
2.200328
1.226488
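A small sketch of the f3 interpolation for the intermediate magnitude range, with illustrative coefficients; it also shows the role of the '(mag - 5.8)' factor discussed in the note above.

C = {"a5": 0.610, "a6": 0.260, "c1": 6.4}   # illustrative values
mag = 6.1
f3 = C["a5"] + (C["a6"] - C["a5"]) * (mag - 5.8) / (C["c1"] - 5.8)
print(f3)   # halfway between a5 and a6, i.e. 0.435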
fhw_m = 0
fhw_r = np.zeros_like(rrup)
if mag <= 5.5:
    fhw_m = 0
elif 5.5 < mag < 6.5:
    fhw_m = mag - 5.5
else:
    fhw_m = 1
idx = (rrup > 4) & (rrup <= 8)
fhw_r[idx] = C['a9'] * (rrup[idx] - 4.) / 4.
idx = (rrup > 8) & (rrup <= 18)
fhw_r[idx] = C['a9']
idx = (rrup > 18) & (rrup <= 24)
fhw_r[idx] = C['a9'] * (1 - (rrup[idx] - 18.) / 7.)
return fhw_m * fhw_r
def _compute_f4(self, C, mag, rrup)
Compute f4 term (eq. 7, 8, and 9, page 106)
2.290489
2.249535
1.018205
return C['a10'] + C['a11'] * np.log(pga_rock + C['c5'])
def _compute_f5(self, C, pga_rock)
Compute f5 term (non-linear soil response)
6.470146
5.684942
1.13812
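An illustrative evaluation of the non-linear soil term above, with made-up coefficients chosen so that the amplification decreases as the rock PGA increases.

import numpy as np

C = {"a10": -0.417, "a11": -0.230, "c5": 0.03}   # hypothetical coefficients
for pga_rock in (0.01, 0.1, 0.5):
    f5 = C["a10"] + C["a11"] * np.log(pga_rock + C["c5"])
    print(pga_rock, round(f5, 3))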
rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
getter = getters.PmapGetter(dstore, rlzs_assoc)
pmaps = getter.get_pmaps()
return dict(zip(getter.rlzs, pmaps)), dstore['hcurves/mean']
def get_hcurves_and_means(dstore)
Extract hcurves from the datastore and compute their means. :returns: curves_by_rlz, mean_curves
7.823336
6.925987
1.129563
datadir = datastore.get_datadir()
if what == 'all':  # show all
    if not os.path.exists(datadir):
        return
    rows = []
    for calc_id in datastore.get_calc_ids(datadir):
        try:
            ds = util.read(calc_id)
            oq = ds['oqparam']
            cmode, descr = oq.calculation_mode, oq.description
        except Exception:
            # invalid datastore file, or missing calculation_mode
            # and description attributes, perhaps due to a manual kill
            f = os.path.join(datadir, 'calc_%s.hdf5' % calc_id)
            logging.warning('Unreadable datastore %s', f)
            continue
        else:
            rows.append((calc_id, cmode, descr.encode('utf-8')))
    for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
        print('#%d %s: %s' % row)
    return
ds = util.read(calc_id)
# this part is experimental
if what == 'rlzs' and 'poes' in ds:
    min_value = 0.01  # used in rmsep
    getter = getters.PmapGetter(ds)
    pmaps = getter.get_pmaps()
    weights = [rlz.weight for rlz in getter.rlzs]
    mean = stats.compute_pmap_stats(
        pmaps, [numpy.mean], weights, getter.imtls)
    dists = []
    for rlz, pmap in zip(getter.rlzs, pmaps):
        dist = util.rmsep(mean.array, pmap.array, min_value)
        dists.append((dist, rlz))
    print('Realizations in order of distance from the mean curves')
    for dist, rlz in sorted(dists):
        print('%s: rmsep=%s' % (rlz, dist))
elif view.keyfunc(what) in view:
    print(view(what, ds))
elif what.split('/', 1)[0] in extract:
    print(extract(ds, what, *extra))
elif what in ds:
    obj = ds[what]
    if hasattr(obj, 'value'):  # an array
        print(write_csv(io.BytesIO(), obj.value).decode('utf8'))
    else:
        print(obj)
else:
    print('%s not found' % what)
ds.close()
def show(what='contents', calc_id=-1, extra=())
Show the content of a datastore (by default the last one).
5.070341
5.002836
1.013493
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
           for stddev_type in stddev_types)
mean = np.zeros_like(sites.vs30)
stddevs = [np.zeros_like(sites.vs30) for _ in stddev_types]
idx_rock = sites.vs30 >= self.ROCK_VS30
idx_soil = sites.vs30 < self.ROCK_VS30
if idx_rock.any():
    C = self.COEFFS_ROCK[imt]
    self._compute_mean(C, rup.mag, dists.rhypo, rup.hypo_depth,
                       mean, idx_rock)
    self._compute_std(C, stddevs, idx_rock)
if idx_soil.any():
    C = self.COEFFS_SOIL[imt]
    self._compute_mean(C, rup.mag, dists.rhypo, rup.hypo_depth,
                       mean, idx_soil)
    self._compute_std(C, stddevs, idx_soil)
return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
1.797541
1.80697
0.994782
mean[idx] = (C['C1'] + C['C2'] * mag +
             C['C3'] * np.log(rhypo[idx] +
                              C['C4'] * np.exp(C['C5'] * mag)) +
             C['C6'] * hypo_depth)
def _compute_mean(self, C, mag, rhypo, hypo_depth, mean, idx)
Compute mean value according to equations 10 and 11 page 226.
2.737281
2.565454
1.066977
for stddev in stddevs:
    stddev[idx] += C['sigma']
def _compute_std(self, C, stddevs, idx)
Compute total standard deviation, see tables 3 and 4, pages 227 and 228.
8.750524
9.469751
0.92405
mean, stddevs = super().get_mean_and_stddevs(
    sites, rup, dists, imt, stddev_types)
idx_rock = sites.vs30 >= self.ROCK_VS30
idx_soil = sites.vs30 < self.ROCK_VS30
mean[idx_rock] += 0.275
mean[idx_soil] += 0.31
return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
2.437112
2.671508
0.912261
'''Check config file inputs and overwrite bad values with the defaults'''
essential_keys = ['number_earthquakes']
for key in essential_keys:
    if key not in config:
        raise ValueError('For Kijko Nonparametric Gaussian the key %s '
                         'needs to be set in the configuration' % key)
if config.get('tolerance', 0.0) <= 0.0:
    config['tolerance'] = 0.05
if config.get('maximum_iterations', 0) < 1:
    config['maximum_iterations'] = 100
if config.get('number_samples', 0) < 2:
    config['number_samples'] = 51
return config
def check_config(config)
Check config file inputs and overwrite bad values with the defaults
4.920372
4.076246
1.207084
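A hedged usage sketch of check_config: invalid or missing optional keys are replaced with the defaults, while the essential key must be supplied by the caller.

config = {'number_earthquakes': 100, 'tolerance': -1.0}
config = check_config(config)
print(config['tolerance'])           # 0.05 (non-positive value replaced)
print(config['maximum_iterations'])  # 100 (missing key filled in)
print(config['number_samples'])      # 51 (missing key filled in)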
'''
Function to return a set of exponentially spaced values between mmin and
mmax

:param float mmin:
    Minimum value
:param float mmax:
    Maximum value
:param float number_samples:
    Number of exponentially spaced samples
:return np.ndarray:
    Set of 'number_samples' exponentially spaced values
'''
lhs = np.exp(mmin) + np.arange(0., number_samples - 1., 1.) * \
    ((np.exp(mmax) - np.exp(mmin)) / (number_samples - 1.))
magval = np.hstack([lhs, np.exp(mmax)])
return np.log(magval)
def _get_exponential_spaced_values(mmin, mmax, number_samples)
Function to return a set of exponentially spaced values between mmin and mmax :param float mmin: Minimum value :param float mmax: Maximum value :param float number_samples: Number of exponentially spaced samples :return np.ndarray: Set of 'number_samples' exponentially spaced values
3.216212
2.274605
1.413965
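A quick numerical check of the exponential spacing, re-implemented locally as a stand-in for the private helper above: the endpoints are preserved and the spacing is uniform in exp(m), not in m.

import numpy as np

def exp_spaced(mmin, mmax, number_samples):
    # same arithmetic as the helper above
    lhs = np.exp(mmin) + np.arange(0., number_samples - 1., 1.) * \
        ((np.exp(mmax) - np.exp(mmin)) / (number_samples - 1.))
    return np.log(np.hstack([lhs, np.exp(mmax)]))

vals = exp_spaced(4.0, 7.0, 5)
print(vals[0], vals[-1])        # 4.0 7.0
print(np.diff(np.exp(vals)))    # constant steps in exp-space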