code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
try: # Parse the geometry coords = split_coords_2d(~node.LineString.posList) trace = geo.Line([geo.Point(*p) for p in coords]) except ValueError: # If the geometry cannot be created then use the LogicTreeError # to point the user to the incorrect node. Hence, if trace is # compiled successfully then len(trace) is True, otherwise it is # False trace = [] if len(trace): return raise LogicTreeError( node, self.filename, "'simpleFaultGeometry' node is not valid")
def _validate_simple_fault_geometry(self, node, _float_re)
Validates a node representation of a simple fault geometry
12.440989
12.397532
1.003505
valid_edges = [] for edge_node in node.nodes: try: coords = split_coords_3d(edge_node.LineString.posList.text) edge = geo.Line([geo.Point(*p) for p in coords]) except ValueError: # See use of validation error in simple geometry case # The node is valid if all of the edges compile correctly edge = [] if len(edge): valid_edges.append(True) else: valid_edges.append(False) if node["spacing"] and all(valid_edges): return raise LogicTreeError( node, self.filename, "'complexFaultGeometry' node is not valid")
def _validate_complex_fault_geometry(self, node, _float_re)
Validates a node representation of a complex fault geometry - this check merely verifies that the format is correct. If the geometry does not conform to the Aki & Richards convention this is not verified here, but an error will be raised when the surface is created.
8.963464
8.416619
1.064972
valid_spacing = node["spacing"] for key in ["topLeft", "topRight", "bottomLeft", "bottomRight"]: lon = getattr(node, key)["lon"] lat = getattr(node, key)["lat"] depth = getattr(node, key)["depth"] valid_lon = (lon >= -180.0) and (lon <= 180.0) valid_lat = (lat >= -90.0) and (lat <= 90.0) valid_depth = (depth >= 0.0) is_valid = valid_lon and valid_lat and valid_depth if not is_valid or not valid_spacing: raise LogicTreeError( node, self.filename, "'planarFaultGeometry' node is not valid")
def _validate_planar_fault_geometry(self, node, _float_re)
Validates a node representation of a planar fault geometry
2.616838
2.601803
1.005779
if 'applyToSources' in filters: filters['applyToSources'] = filters['applyToSources'].split() return filters
def parse_filters(self, branchset_node, uncertainty_type, filters)
See superclass' method for description and signature specification. Converts the "applyToSources" filter value by splitting it into a list.
6.349402
2.697808
2.353542
if uncertainty_type == 'sourceModel' and filters: raise LogicTreeError( branchset_node, self.filename, 'filters are not allowed on source model uncertainty') if len(filters) > 1: raise LogicTreeError( branchset_node, self.filename, "only one filter is allowed per branchset") if 'applyToTectonicRegionType' in filters: if not filters['applyToTectonicRegionType'] \ in self.tectonic_region_types: raise LogicTreeError( branchset_node, self.filename, "source models don't define sources of tectonic region " "type '%s'" % filters['applyToTectonicRegionType']) if uncertainty_type in ('abGRAbsolute', 'maxMagGRAbsolute', 'simpleFaultGeometryAbsolute', 'complexFaultGeometryAbsolute'): if not filters or not list(filters) == ['applyToSources'] \ or not len(filters['applyToSources'].split()) == 1: raise LogicTreeError( branchset_node, self.filename, "uncertainty of type '%s' must define 'applyToSources' " "with only one source id" % uncertainty_type) if uncertainty_type in ('simpleFaultDipRelative', 'simpleFaultDipAbsolute'): if not filters or (not ('applyToSources' in filters.keys()) and not ('applyToSourceType' in filters.keys())): raise LogicTreeError( branchset_node, self.filename, "uncertainty of type '%s' must define either" "'applyToSources' or 'applyToSourceType'" % uncertainty_type) if 'applyToSourceType' in filters: if not filters['applyToSourceType'] in self.source_types: raise LogicTreeError( branchset_node, self.filename, "source models don't define sources of type '%s'" % filters['applyToSourceType']) if 'applyToSources' in filters: for source_id in filters['applyToSources'].split(): for source_ids in self.source_ids.values(): if source_id not in source_ids: raise LogicTreeError( branchset_node, self.filename, "source with id '%s' is not defined in source " "models" % source_id)
def validate_filters(self, branchset_node, uncertainty_type, filters)
See superclass' method for description and signature specification. Checks that the following conditions are met: * "sourceModel" uncertainties can not have filters. * Absolute uncertainties must have only one filter -- "applyToSources", with only one source id. * All other uncertainty types can have either no or one filter. * Filter "applyToSources" must mention only source ids that exist in source models. * Filter "applyToTectonicRegionType" must mention only tectonic region types that exist in source models. * Filter "applyToSourceType" must mention only source types that exist in source models.
2.517603
2.213046
1.137619
if depth == 0: if number > 0: raise LogicTreeError( branchset_node, self.filename, 'there must be only one branch set ' 'on first branching level') elif branchset.uncertainty_type != 'sourceModel': raise LogicTreeError( branchset_node, self.filename, 'first branchset must define an uncertainty ' 'of type "sourceModel"') else: if branchset.uncertainty_type == 'sourceModel': raise LogicTreeError( branchset_node, self.filename, 'uncertainty of type "sourceModel" can be defined ' 'on first branchset only') elif branchset.uncertainty_type == 'gmpeModel': raise LogicTreeError( branchset_node, self.filename, 'uncertainty of type "gmpeModel" is not allowed ' 'in source model logic tree')
def validate_branchset(self, branchset_node, depth, number, branchset)
See superclass' method for description and signature specification. Checks that the following conditions are met: * First branching level must contain exactly one branchset, which must be of type "sourceModel". * All other branchsets must not be of type "sourceModel" or "gmpeModel".
3.064038
2.577847
1.188603
apply_to_branches = branchset_node.attrib.get('applyToBranches') if apply_to_branches: apply_to_branches = apply_to_branches.split() for branch_id in apply_to_branches: if branch_id not in self.branches: raise LogicTreeError( branchset_node, self.filename, "branch '%s' is not yet defined" % branch_id) branch = self.branches[branch_id] if branch.child_branchset is not None: raise LogicTreeError( branchset_node, self.filename, "branch '%s' already has child branchset" % branch_id) if branch not in self.open_ends: raise LogicTreeError( branchset_node, self.filename, 'applyToBranches must reference only branches ' 'from previous branching level') branch.child_branchset = branchset else: for branch in self.open_ends: branch.child_branchset = branchset
def apply_branchset(self, branchset_node, branchset)
See superclass' method for description and signature specification. Parses the branchset node's attribute ``@applyToBranches`` to apply the following branchsets selectively to the preceding branches. A branching level can have more than one branchset exactly for this reason: different branchsets can apply to different open ends. Checks that the branchset is applied only to branches on the previous branching level which do not yet have a child branchset.
2.56789
2.164752
1.186228
# using regular expressions is a lot faster than using the with self._get_source_model(source_model) as sm: xml = sm.read() self.tectonic_region_types.update(TRT_REGEX.findall(xml)) self.source_ids[branch_id].extend(ID_REGEX.findall(xml)) self.source_types.update(SOURCE_TYPE_REGEX.findall(xml))
def collect_source_model_data(self, branch_id, source_model)
Parse the source model file and collect information about the source ids, source types and tectonic region types available in it. That information is then used by :meth:`validate_filters` and :meth:`validate_uncertainty_value`.
6.558194
4.484811
1.462312
branchset = self.root_branchset branchsets_and_uncertainties = [] branch_ids = list(branch_ids[::-1]) while branchset is not None: branch = branchset.get_branch_by_id(branch_ids.pop(-1)) if not branchset.uncertainty_type == 'sourceModel': branchsets_and_uncertainties.append((branchset, branch.value)) branchset = branch.child_branchset if not branchsets_and_uncertainties: return source_group # nothing changed sg = copy.deepcopy(source_group) sg.applied_uncertainties = [] sg.changed = numpy.zeros(len(sg.sources), int) for branchset, value in branchsets_and_uncertainties: for s, source in enumerate(sg.sources): changed = branchset.apply_uncertainty(value, source) if changed: sg.changed[s] += changed sg.applied_uncertainties.append( (branchset.uncertainty_type, value)) return sg
def apply_uncertainties(self, branch_ids, source_group)
Parse the path through the source model logic tree and apply the corresponding uncertainties to the given source group. :param branch_ids: List of string identifiers of branches, representing the path through the source model logic tree. :param source_group: A group of sources :return: A copy of the original group with modified sources
2.90342
2.928067
0.991582
return all(abs(v - 1.) < pmf.PRECISION for v in self.dic.values())
def is_one(self)
Check that all the inner weights are 1 up to the precision
17.348572
10.720641
1.61824
ltbranch = N('logicTreeBranch', {'branchID': 'b1'}, nodes=[N('uncertaintyModel', text=str(gsim)), N('uncertaintyWeight', text='1.0')]) lt = N('logicTree', {'logicTreeID': 'lt1'}, nodes=[N('logicTreeBranchingLevel', {'branchingLevelID': 'bl1'}, nodes=[N('logicTreeBranchSet', {'applyToTectonicRegionType': '*', 'branchSetID': 'bs1', 'uncertaintyType': 'gmpeModel'}, nodes=[ltbranch])])]) return cls(repr(gsim), ['*'], ltnode=lt)
def from_(cls, gsim)
Generate a trivial GsimLogicTree from a single GSIM instance.
7.281407
6.702759
1.08633
for trt in self.values: for gsim in self.values[trt]: for attr in dir(gsim): coeffs = getattr(gsim, attr) if not isinstance(coeffs, CoeffsTable): continue for imt in imts: if imt.startswith('SA'): try: coeffs[from_string(imt)] except KeyError: raise ValueError( '%s is out of the period range defined ' 'for %s' % (imt, gsim))
def check_imts(self, imts)
Make sure the IMTs are recognized by all GSIMs in the logic tree
4.748696
4.117735
1.15323
new = object.__new__(self.__class__) vars(new).update(vars(self)) if trts != {'*'}: new.branches = [] for br in self.branches: branch = BranchTuple(br.trt, br.id, br.gsim, br.weight, br.trt in trts) new.branches.append(branch) return new
def reduce(self, trts)
Reduce the GsimLogicTree. :param trts: a subset of tectonic region types :returns: a reduced GsimLogicTree instance
5.367673
4.90932
1.093364
num = {} for trt, branches in itertools.groupby( self.branches, operator.attrgetter('trt')): num[trt] = sum(1 for br in branches if br.effective) return num
def get_num_branches(self)
Return the number of effective branches per tectonic region type, as a dictionary.
6.655065
4.457156
1.493119
# NB: the algorithm assume a symmetric logic tree for the GSIMs; # in the future we may relax such assumption num_branches = self.get_num_branches() if not sum(num_branches.values()): return 0 num = 1 for val in num_branches.values(): if val: # the branch is effective num *= val return num
def get_num_paths(self)
Return the effective number of paths in the tree.
10.999988
9.261883
1.187662
if trt == '*' or trt == b'*': # fake logictree [trt] = self.values return sorted(self.values[trt])
def get_gsims(self, trt)
:param trt: tectonic region type :returns: sorted list of available GSIMs for that trt
19.256081
20.127468
0.956707
if rake > 30.0 and rake <= 150.0: return np.power(Frss, 1 - pR) * np.power(Fnss, -pN) elif rake > -120.0 and rake <= -60.0: return np.power(Frss, - pR) * np.power(Fnss, 1 - pN) else: return np.power(Frss, - pR) * np.power(Fnss, - pN)
def _compute_faulting_style_term(Frss, pR, Fnss, pN, rake)
Compute SHARE faulting style adjustment term.
1.915452
1.941709
0.986478
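The branching above amounts to a piecewise adjustment factor; this is a restatement of the code, with the reverse/normal/strike-slip labels inferred from the rake ranges:

F(\mathrm{rake}) =
\begin{cases}
F_{rss}^{\,1-p_R}\, F_{nss}^{\,-p_N} & 30 < \mathrm{rake} \le 150 \quad \text{(reverse)}\\
F_{rss}^{\,-p_R}\, F_{nss}^{\,1-p_N} & -120 < \mathrm{rake} \le -60 \quad \text{(normal)}\\
F_{rss}^{\,-p_R}\, F_{nss}^{\,-p_N} & \text{otherwise (strike-slip)}
\end{cases}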
mean = (C['c1'] + self._compute_term1(C, mag) + self._compute_term2(C, mag, rrup) + self._compute_term3(C, rrup)) return mean
def _compute_mean(self, C, mag, rrup)
Compute mean value according to equation 30, page 1021.
3.052552
2.857529
1.068249
stddevs = [] for _ in stddev_types: if mag < 7.16: sigma = C['c11'] + C['c12'] * mag elif mag >= 7.16: sigma = C['c13'] stddevs.append(np.zeros(num_sites) + sigma) return stddevs
def _get_stddevs(self, C, stddev_types, mag, num_sites)
Return total standard deviation as for equation 35, page 1021.
3.149384
3.012996
1.045266
c78_factor = (C['c7'] * np.exp(C['c8'] * mag)) ** 2 R = np.sqrt(rrup ** 2 + c78_factor) return C['c4'] * np.log(R) + (C['c5'] + C['c6'] * mag) * rrup
def _compute_term2(self, C, mag, rrup)
This computes the term f2 in equation 32, page 1021
3.692818
3.582748
1.030722
f3 = np.zeros_like(rrup) idx_between_70_130 = (rrup > 70) & (rrup <= 130) idx_greater_130 = rrup > 130 f3[idx_between_70_130] = ( C['c9'] * (np.log(rrup[idx_between_70_130]) - np.log(70)) ) f3[idx_greater_130] = ( C['c9'] * (np.log(rrup[idx_greater_130]) - np.log(70)) + C['c10'] * (np.log(rrup[idx_greater_130]) - np.log(130)) ) return f3
def _compute_term3(self, C, rrup)
This computes the term f3 in equation 34, page 1021 but corrected according to the erratum.
1.816602
1.738637
1.044842
# extract faulting style and rock adjustment coefficients for the # given imt C_ADJ = self.COEFFS_FS_ROCK[imt] mean, stddevs = super().get_mean_and_stddevs( sites, rup, dists, imt, stddev_types) # apply faulting style and rock adjustment factor for mean and std mean = np.log(np.exp(mean) * _compute_faulting_style_term(C_ADJ['Frss'], self.CONSTS_FS['pR'], self.CONSTS_FS['Fnss'], self.CONSTS_FS['pN'], rup.rake) * C_ADJ['AFrock']) stddevs = np.array(stddevs) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
5.541949
5.597203
0.990128
mean = np.zeros_like(rrup) mean += C['c1'] + C['c2'] * mag + C['c3'] * (8.5 - mag) ** 2 idx = rrup > 70. mean[idx] += C['c7'] * (np.log(rrup[idx]) - np.log(70.)) idx = rrup > 130. mean[idx] += C['c8'] * (np.log(rrup[idx]) - np.log(130.)) R = np.sqrt( rrup ** 2 + (C['c5'] * np.exp(C['c6'] * mag)) ** 2 ) mean += C['c4'] * np.log(R) + (C['c9'] + C['c10'] * mag) * rrup return mean
def _compute_mean(self, C, mag, rrup)
Compute mean value (Equation 30 in USGS report)
2.251637
2.189852
1.028215
sw_time = np.array( [(time_cutoff / DAYS) if x > (time_cutoff / DAYS) else x for x in sw_time]) return(sw_time)
def time_window_cutoff(sw_time, time_cutoff)
Allows the declustering time window to be cut off at a specific time, beyond which an event of any magnitude is no longer identified as part of a cluster
5.184081
5.7455
0.902285
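The per-element list comprehension above can equivalently be written as a single clipped minimum; a minimal sketch, where days_per_year stands in for the module-level DAYS constant used in the original:

import numpy as np

def time_window_cutoff_vec(sw_time, time_cutoff, days_per_year):
    # cap every declustering window at the cutoff expressed in years
    return np.minimum(np.asarray(sw_time), time_cutoff / days_per_year)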
''' If geometry is defined as a numpy array then create instance of nhlib.geo.point.Point class, otherwise if already instance of class accept class :param input_geometry: Input geometry (point) as either i) instance of nhlib.geo.point.Point class ii) numpy.ndarray [Longitude, Latitude] :param float upper_depth: Upper seismogenic depth (km) :param float lower_depth: Lower seismogenic depth (km) ''' self._check_seismogenic_depths(upper_depth, lower_depth) # Check/create the geometry class if not isinstance(input_geometry, Point): if not isinstance(input_geometry, np.ndarray): raise ValueError('Unrecognised or unsupported geometry ' 'definition') self.geometry = Point(input_geometry[0], input_geometry[1]) else: self.geometry = input_geometry
def create_geometry(self, input_geometry, upper_depth, lower_depth)
If the geometry is defined as a numpy array then create an instance of the nhlib.geo.point.Point class; otherwise, if it is already an instance of that class, accept it as is :param input_geometry: Input geometry (point) as either i) instance of nhlib.geo.point.Point class ii) numpy.ndarray [Longitude, Latitude] :param float upper_depth: Upper seismogenic depth (km) :param float lower_depth: Lower seismogenic depth (km)
4.281444
1.721024
2.487731
''' Checks the seismic depths for physical consistency :param float upper_depth: Upper seismogenic depth (km) :param float lower_depth: Lower seismogenis depth (km) ''' # Simple check on depths if upper_depth: if upper_depth < 0.: raise ValueError('Upper seismogenic depth must be greater than' ' or equal to 0.0!') else: self.upper_depth = upper_depth else: self.upper_depth = 0.0 if lower_depth: if lower_depth < self.upper_depth: raise ValueError('Lower seismogenic depth must take a greater' ' value than upper seismogenic depth') else: self.lower_depth = lower_depth else: self.lower_depth = np.inf
def _check_seismogenic_depths(self, upper_depth, lower_depth)
Checks the seismogenic depths for physical consistency :param float upper_depth: Upper seismogenic depth (km) :param float lower_depth: Lower seismogenic depth (km)
2.666977
2.073328
1.286327
''' Selects the catalogue associated to the point source. Effectively a wrapper to the two functions select catalogue within a distance of the point and select catalogue within cell centred on point :param selector: Populated instance of :class: `openquake.hmtk.seismicity.selector.CatalogueSelector` :param float distance: Distance from point (km) for selection :param str selector_type: Chooses whether to select within {'circle'} or within a {'square'}. :param str distance_metric: 'epicentral' or 'hypocentral' (only for 'circle' selector type) :param float point_depth: Assumed hypocentral depth of the point (only applied to 'circle' distance type) :param float upper_depth: Upper seismogenic depth (km) (only for 'square') :param float lower_depth: Lower seismogenic depth (km) (only for 'square') ''' if selector.catalogue.get_number_events() < 1: raise ValueError('No events found in catalogue!') if 'square' in selector_type: # Calls select catalogue within cell function self.select_catalogue_within_cell(selector, distance, upper_depth=upper_eq_depth, lower_depth=lower_eq_depth) elif 'circle' in selector_type: # Calls select catalogue within distance function self.select_catalogue_within_distance(selector, distance, distance_metric, point_depth) else: raise ValueError('Unrecognised selection type for point source!')
def select_catalogue(self, selector, distance, selector_type='circle', distance_metric='epicentral', point_depth=None, upper_eq_depth=None, lower_eq_depth=None)
Selects the catalogue associated to the point source. Effectively a wrapper to the two functions select catalogue within a distance of the point and select catalogue within cell centred on point :param selector: Populated instance of :class: `openquake.hmtk.seismicity.selector.CatalogueSelector` :param float distance: Distance from point (km) for selection :param str selector_type: Chooses whether to select within {'circle'} or within a {'square'}. :param str distance_metric: 'epicentral' or 'hypocentral' (only for 'circle' selector type) :param float point_depth: Assumed hypocentral depth of the point (only applied to 'circle' distance type) :param float upper_depth: Upper seismogenic depth (km) (only for 'square') :param float lower_depth: Lower seismogenic depth (km) (only for 'square')
3.5197
1.517819
2.318919
''' Selects catalogue of earthquakes within distance from point :param selector: Populated instance of :class: `openquake.hmtk.seismicity.selector.CatalogueSelector` :param distance: Distance from point (km) for selection :param str distance_metric: Choice of point source distance metric 'epicentral' or 'hypocentral' ''' if ('hypocentral' in distance_metric) and point_depth: # If a hypocentral distance metric is chosen and a # hypocentral depth specified then update geometry self.geometry = Point(self.geometry.longitude, self.geometry.latitude, point_depth) self.catalogue = selector.circular_distance_from_point( self.geometry, distance, distance_type=distance_metric) if self.catalogue.get_number_events() < 5: # Throw a warning regarding the small number of earthquakes in # the source! warnings.warn('Source %s (%s) has fewer than 5 events' % (self.id, self.name))
def select_catalogue_within_distance( self, selector, distance, distance_metric='epicentral', point_depth=None)
Selects catalogue of earthquakes within distance from point :param selector: Populated instance of :class: `openquake.hmtk.seismicity.selector.CatalogueSelector` :param distance: Distance from point (km) for selection :param str distance_metric: Choice of point source distance metric 'epicentral' or 'hypocentral'
4.307084
2.772559
1.553469
''' Selects catalogue of earthquakes within distance from point :param selector: Populated instance of :class: `openquake.hmtk.seismicity.selector.CatalogueSelector` :param distance: Distance from point (km) for selection ''' self.catalogue = selector.cartesian_square_centred_on_point( self.geometry, distance) if self.catalogue.get_number_events() < 5: # Throw a warning regarding the small number of earthquakes in # the source! warnings.warn('Source %s (%s) has fewer than 5 events' % (self.id, self.name))
def select_catalogue_within_cell(self, selector, distance, upper_depth=None, lower_depth=None)
Selects catalogue of earthquakes within distance from point :param selector: Populated instance of :class: `openquake.hmtk.seismicity.selector.CatalogueSelector` :param distance: Distance from point (km) for selection
5.628897
3.397711
1.656673
# pylint: disable=too-many-arguments # obtain coefficients for required intensity measure type (IMT) coeffs = self.COEFFS_BASE[imt].copy() coeffs.update(self.COEFFS_SITE[imt]) # obtain IMT-independent coefficients coeffs.update(self.CONSTS) # compute bedrock motion, equation (5) log_mean = self._compute_mag_dist_terms(rup, dists, coeffs) # make site corrections, equation (9) log_mean += self._compute_site_amplification(sites, coeffs) # retrieve standard deviations log_stddevs = self._get_stddevs(coeffs, sites.vs30.size, stddev_types) # convert from common to natural logarithm ln_mean = log_mean*LOG10 ln_stddevs = np.array(log_stddevs)*LOG10 # convert accelerations from cm/s^2 to g if not imt.name == "PGV": ln_mean -= np.log(100*g) return ln_mean, ln_stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for specification of input and result values. Implements the following equations: Equation (5) on p. 881 predicts ground motion for shallow events (depth <= 30 km): ``log(pre) = a*M + b*X - log(X + d*10^(e*M)) + c + epsilon`` "where pre is the predicted PGA (cm/sec^2), PGV (cm/sec), or 5% damped response spectral acceleration (cm/sec^2)" (p. 883) and a, b, c and d are tabulated regression coefficients. Note that subscripts on the regression coefficients have been dropped - subscript `1` denoted "shallow" while subscript `2` denoted "deep" - so that the "deep" model of equation (6) can be implemented trivially by changing coefficients and setting d = 0. Equation (8) on p. 883 gives the model used for site amplification: ``G = p*log(VS30) + q`` where p and q are tabulated regression coefficients. Equation (9) on p. 884 for the ground motion at a given site: ``log(pre_G) = log(pre) + G`` No adjustment of epsilon is made as a function of VS30. Note finally that "log represents log_10 in the present study" (p. 880).
4.26088
4.352594
0.978929
log_pre = coeffs['c'] + coeffs['a']*rup.mag + coeffs['b']*dists.rrup \ - np.log10(dists.rrup + coeffs['d']*10**(coeffs['e']*rup.mag)) return log_pre
def _compute_mag_dist_terms(cls, rup, dists, coeffs)
Compute equation (5) and implicitly equation (6): ``log(pre) = c + a*M + b*X - log(X + d*10^(e*M)) + epsilon``
4.336972
2.228683
1.94598
return coeffs['p']*np.log10(sites.vs30) + coeffs['q']
def _compute_site_amplification(cls, sites, coeffs)
Compute equation (8): ``G = p*log(VS30) + q``
9.070951
3.308569
2.741654
m_h = 6.75 b_3 = 0.0 if rup.mag <= m_h: return C["e1"] + (C['b1'] * (rup.mag - m_h)) +\ (C['b2'] * (rup.mag - m_h) ** 2) else: return C["e1"] + (b_3 * (rup.mag - m_h))
def _compute_magnitude(self, rup, C)
Compute the third term of equation 1: ``e1 + b1 * (M-Mh) + b2 * (M-Mh)**2`` for M <= Mh; ``e1 + b3 * (M-Mh)`` otherwise
3.441857
2.644114
1.301705
ssa, ssb, ssc, ssd, sse = self._get_site_type_dummy_variables(sites) return (C['sA'] * ssa) + (C['sB'] * ssb) + (C['sC'] * ssc) + \ (C['sD'] * ssd) + (C['sE'] * sse)
def _get_site_amplification(self, sites, C)
Compute the fourth term of equation 1, described in the paragraph: the functional form Fs in Eq. (1) represents the site amplification and is given by FS = sj * Cj, for j = 1,...,5, where sj are the coefficients to be determined through the regression analysis, while Cj are dummy variables used to denote the five different EC8 site classes
3.039826
2.714246
1.119952
ssa = np.zeros(len(sites.vs30)) ssb = np.zeros(len(sites.vs30)) ssc = np.zeros(len(sites.vs30)) ssd = np.zeros(len(sites.vs30)) sse = np.zeros(len(sites.vs30)) # Class E Vs30 = 0 m/s. We fixed this value to define class E idx = (np.fabs(sites.vs30) < 1E-10) sse[idx] = 1.0 # Class D; Vs30 < 180 m/s. idx = (sites.vs30 >= 1E-10) & (sites.vs30 < 180.0) ssd[idx] = 1.0 # SClass C; 180 m/s <= Vs30 <= 360 m/s. idx = (sites.vs30 >= 180.0) & (sites.vs30 < 360.0) ssc[idx] = 1.0 # Class B; 360 m/s <= Vs30 <= 800 m/s. idx = (sites.vs30 >= 360.0) & (sites.vs30 < 800) ssb[idx] = 1.0 # Class A; Vs30 > 800 m/s. idx = (sites.vs30 >= 800.0) ssa[idx] = 1.0 return ssa, ssb, ssc, ssd, sse
def _get_site_type_dummy_variables(self, sites)
Get site type dummy variables for the five different EC8 site classes. The recording sites are classified into 5 classes, based on the shear wave velocity intervals in the uppermost 30 m, Vs30, according to the EC8 (CEN 2003): class A: Vs30 > 800 m/s; class B: Vs30 = 360 − 800 m/s; class C: Vs30 = 180 - 360 m/s; class D: Vs30 < 180 m/s; class E: 5 to 20 m of C- or D-type alluvium underlain by stiffer material with Vs30 > 800 m/s.
2.052899
1.738781
1.180654
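For illustration only, the same Vs30 thresholds as a scalar classifier (the class letters are the EC8 labels from the docstring; this helper is not part of the original code):

def ec8_site_class(vs30):
    # mirrors the index masks above; class E is flagged by Vs30 == 0
    if abs(vs30) < 1e-10:
        return 'E'
    elif vs30 < 180.0:
        return 'D'
    elif vs30 < 360.0:
        return 'C'
    elif vs30 < 800.0:
        return 'B'
    return 'A'

assert [ec8_site_class(v) for v in (0.0, 150.0, 250.0, 500.0, 900.0)] == ['E', 'D', 'C', 'B', 'A']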
U, SS, NS, RS = self._get_fault_type_dummy_variables(rup) return C['f1'] * NS + C['f2'] * RS + C['f3'] * SS
def _get_mechanism(self, rup, C)
Compute the fifth term of equation 1, described in the paragraph: get the fault type dummy variables, see Table 1
9.206651
5.227913
1.761057
U, SS, NS, RS = 0, 0, 0, 0 if np.abs(rup.rake) <= 30.0 or (180.0 - np.abs(rup.rake)) <= 30.0: # strike-slip SS = 1 elif rup.rake > 30.0 and rup.rake < 150.0: # reverse RS = 1 else: # normal NS = 1 return U, SS, NS, RS
def _get_fault_type_dummy_variables(self, rup)
Fault type (Strike-slip, Normal, Thrust/reverse) is derived from the rake angle. Rake angles within 30 degrees of horizontal are strike-slip, angles from 30 to 150 are reverse, and angles from -30 to -150 are normal. Note that the 'Unspecified' case is not considered, because the rake is always given.
2.715527
2.131491
1.274003
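A standalone restatement of the rake thresholds above, with the dummy variables replaced by explicit labels (hypothetical helper, for illustration only):

def classify_rake(rake):
    # same thresholds as _get_fault_type_dummy_variables
    if abs(rake) <= 30.0 or (180.0 - abs(rake)) <= 30.0:
        return 'strike-slip'   # SS = 1
    elif 30.0 < rake < 150.0:
        return 'reverse'       # RS = 1
    return 'normal'            # NS = 1

assert classify_rake(10.0) == 'strike-slip'
assert classify_rake(90.0) == 'reverse'
assert classify_rake(-90.0) == 'normal'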
mean, stddevs = super().get_mean_and_stddevs(sites, rup, dists, imt, stddev_types) delta = self._get_delta(imt, rup.mag) return mean-delta, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
2.525749
2.813159
0.897834
import matplotlib.pyplot as plt fig = plt.figure() got = {} # (calc_id, kind) -> curves for i, ex in enumerate(extractors): hcurves = ex.get(what) for kind in hcurves.kind: got[ex.calc_id, kind] = hcurves[kind] oq = ex.oqparam n_imts = len(hcurves.imt) [site] = hcurves.site_id for j, imt in enumerate(hcurves.imt): imls = oq.imtls[imt] imt_slice = oq.imtls(imt) ax = fig.add_subplot(n_imts, 1, j + 1) ax.set_xlabel('%s, site %s, inv_time=%dy' % (imt, site, oq.investigation_time)) ax.set_ylabel('PoE') for ck, arr in got.items(): if (arr == 0).all(): logging.warning('There is a zero curve %s_%s', *ck) ax.loglog(imls, arr[0, imt_slice], '-', label='%s_%s' % ck) ax.loglog(imls, arr[0, imt_slice], '.') ax.grid(True) ax.legend() return plt
def make_figure_hcurves(extractors, what)
$ oq plot 'hcurves?kind=mean&imt=PGA&site_id=0'
4.173865
3.717149
1.122867
import matplotlib.pyplot as plt fig = plt.figure() ncalcs = len(extractors) for i, ex in enumerate(extractors): oq = ex.oqparam n_poes = len(oq.poes) sitecol = ex.get('sitecol') hmaps = ex.get(what) [imt] = hmaps.imt [kind] = hmaps.kind for j, poe in enumerate(oq.poes): ax = fig.add_subplot(n_poes, ncalcs, j * ncalcs + i + 1) ax.grid(True) ax.set_xlabel('hmap for IMT=%s, kind=%s, poe=%s\ncalculation %d, ' 'inv_time=%dy' % (imt, kind, poe, ex.calc_id, oq.investigation_time)) bmap = basemap('cyl', sitecol) bmap.scatter(sitecol['lon'], sitecol['lat'], c=hmaps[kind][:, 0, j], cmap='jet') return plt
def make_figure_hmaps(extractors, what)
$ oq plot 'hmaps?kind=mean&imt=PGA'
4.626588
4.143348
1.11663
import matplotlib.pyplot as plt fig = plt.figure() got = {} # (calc_id, kind) -> curves for i, ex in enumerate(extractors): uhs = ex.get(what) for kind in uhs.kind: got[ex.calc_id, kind] = uhs[kind] oq = ex.oqparam n_poes = len(oq.poes) periods = [imt.period for imt in oq.imt_periods()] [site] = uhs.site_id for j, poe in enumerate(oq.poes): ax = fig.add_subplot(n_poes, 1, j + 1) ax.set_xlabel('UHS on site %s, poe=%s, inv_time=%dy' % (site, poe, oq.investigation_time)) ax.set_ylabel('SA') for ck, arr in got.items(): ax.plot(periods, arr[0, :, j], '-', label='%s_%s' % ck) ax.plot(periods, arr[0, :, j], '.') ax.grid(True) ax.legend() return plt
def make_figure_uhs(extractors, what)
$ oq plot 'uhs?kind=mean&site_id=0'
4.301605
3.970696
1.083338
import matplotlib.pyplot as plt fig = plt.figure() [ex] = extractors sitecol = ex.get('sitecol') geom_by_src = vars(ex.get(what)) ax = fig.add_subplot(1, 1, 1) ax.grid(True) ax.set_xlabel('Source') bmap = basemap('cyl', sitecol) for src, geom in geom_by_src.items(): if src != 'array': bmap.plot(geom['lon'], geom['lat'], label=src) bmap.plot(sitecol['lon'], sitecol['lat'], 'x') ax.legend() return plt
def make_figure_source_geom(extractors, what)
Extract the geometry of the given sources. Example: http://127.0.0.1:8800/v1/calc/30/extract/source_geom/1,2,3
3.885669
4.137495
0.939136
if '?' not in what: raise SystemExit('Missing ? in %r' % what) prefix, rest = what.split('?', 1) assert prefix in 'source_geom hcurves hmaps uhs', prefix if prefix in 'hcurves hmaps' and 'imt=' not in rest: raise SystemExit('Missing imt= in %r' % what) elif prefix == 'uhs' and 'imt=' in rest: raise SystemExit('Invalid IMT in %r' % what) elif prefix in 'hcurves uhs' and 'site_id=' not in rest: what += '&site_id=0' if webapi: xs = [WebExtractor(calc_id)] if other_id: xs.append(WebExtractor(other_id)) else: xs = [Extractor(calc_id)] if other_id: xs.append(Extractor(other_id)) make_figure = globals()['make_figure_' + prefix] plt = make_figure(xs, what) plt.show()
def plot(what, calc_id=-1, other_id=None, webapi=False)
Generic plotter for local and remote calculations.
3.727479
3.840907
0.970469
rscale1 = rrup + C["c2"] * (10.0 ** (C["c3"] * mag)) return -np.log10(rscale1) - (C["c4"] * rrup)
def _compute_distance_scaling(self, C, rrup, mag)
Returns the distance scaling term
5.73809
5.314086
1.079789
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types) C = self.COEFFS[imt] imean = (self._compute_magnitude_scaling(C, rup.mag) + self._compute_distance_scaling(C, dists.rrup, rup.mag)) # Original GMPE returns log10 acceleration in cm/s/s # Converts to natural logarithm of g mean = np.log((10.0 ** (imean - 2.0)) / g) mean = self._compute_site_scaling(sites.vs30, mean) istddevs = self._compute_stddevs( C, dists.rrup.shape, stddev_types ) # Convert from common logarithm to natural logarithm stddevs = np.log(10 ** np.array(istddevs)) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
3.755744
3.792288
0.990363
site_factor = np.ones(len(vs30), dtype=float) idx = vs30 <= 360. site_factor[idx] = 1.4 idx = vs30 > 760.0 site_factor[idx] = 0.6 return np.log(np.exp(mean) * site_factor)
def _compute_site_scaling(self, vs30, mean)
Scales the ground motions, increasing them by 40 % on NEHRP class D/E sites and decreasing them by 40 % on NEHRP class A/B sites
2.990858
2.799289
1.068435
# pylint: disable=too-many-arguments # obtain coefficients for required intensity measure type (IMT) coeffs = self.COEFFS_BEDROCK[imt].copy() # obtain IMT-independent coefficients coeffs.update(self.CONSTS) # compute bedrock motion, equation (11) ln_mean = self._compute_mean(rup, dists, coeffs) # obtain standard deviation ln_stddev = self._get_stddevs(coeffs, stddev_types) return ln_mean, [ln_stddev]
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for specification of input and result values. Implements equation (11) on p. 484: ``ln(P) = c1 + c2*M + c3*(10 - M)^3 + c4*ln(R + c5*exp(c6*M))``
4.703792
5.035136
0.934194
ln_p = coeffs['c1'] + coeffs['c2']*rup.mag + \ coeffs['c3']*(self.CONSTS['ref_mag'] - rup.mag)**3 +\ coeffs['c4']*np.log(dists.rrup + coeffs['c5']*np.exp(coeffs['c6']*rup.mag)) return ln_p
def _compute_mean(self, rup, dists, coeffs)
Evaluate equation (11) on p. 484.
3.583677
3.393199
1.056135
for stddev_type in stddev_types: assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES log_stddev = coeffs['sigma'] ln_stddev = log_stddev*np.log(10) return ln_stddev
def _get_stddevs(self, coeffs, stddev_types)
Look up values from Table 5 on p. 483 and convert to natural logarithm. Interpretation of "sigma_log(Y)" as the common logarithm is based on the order of magnitude of the values and consistent use of "log" and "ln" to denote common and natural logarithm elsewhere in the paper.
3.33748
3.097562
1.077454
# pylint: disable=too-many-arguments ln_mean, [ln_stddev] = super().get_mean_and_stddevs( sites, rup, dists, imt, stddev_types) # compute site corrections, equation (9) coeffs = self.COEFFS_UPPER[imt] ln_mean += np.log(coeffs['correction']) return ln_mean, [ln_stddev]
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for specification of input and result values. Implements the correction factor for the upper crust, equation (12) on p. 484: ``P' = P x Correction_factor``
3.799841
4.024681
0.944135
return self.kwargs[str(imt)].get_mean_and_stddevs( sctx, rctx, dctx, imt, stddev_types)
def get_mean_and_stddevs(self, sctx, rctx, dctx, imt, stddev_types)
Call the get_mean_and_stddevs method of the GMPE associated with the respective IMT
3.66067
3.221588
1.136293
# read the hazard data dstore = util.read(calc_id) agg_curve = dstore['agg_curve-rlzs'] plt = make_figure(agg_curve) plt.show()
def plot_ac(calc_id)
Aggregate loss curves plotter.
15.512239
15.022434
1.032605
exponent_term = (1.0 + C["c3"] * np.exp(mag - 5.)) ** 2. return C["c2"] * np.log(np.sqrt(rrup ** 2. + exponent_term))
def _compute_distance_term(self, C, rrup, mag)
Returns the distance scaling term
5.24218
5.112133
1.025439
stddevs = [] for stddev_type in stddev_types: assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES if stddev_type == const.StdDev.TOTAL: sigma = C["s1"] + (C["s2"] / (1.0 + ((distance / C["s3"]) ** 2.))) stddevs.append(sigma + np.zeros_like(distance)) return stddevs
def _get_stddevs(self, C, distance, stddev_types)
Returns the total standard deviation, which is a function of distance
3.275662
3.306864
0.990565
C = self.COEFFS[imt] mean = (self._compute_magnitude_term(C, rup.mag) + self._compute_distance_term(C, dists.rhypo, rup.mag)) stddevs = self._get_stddevs(C, dists.rhypo, stddev_types) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
2.869253
2.945611
0.974077
r_m = C["m1"] + C["m2"] * np.exp(mag - 5.) f_r = C["c2"] * np.log(np.sqrt(rhypo ** 2. + r_m ** 2.)) # For distances greater than 50 km an anelastic term is added idx = rhypo > 50.0 f_r[idx] += C["c4"] * np.log(rhypo[idx] / 50.) return f_r
def _compute_distance_term(self, C, rhypo, mag)
Returns the distance scaling term
3.875031
3.673271
1.054926
# see table 3, page 14 R1 = 90. R2 = 150. # see equation 19, page 32 m_ref = mag - 4 r1 = R1 + C['c8'] * m_ref r2 = R2 + C['c11'] * m_ref assert r1 > 0 assert r2 > 0 g0 = np.log10( np.sqrt(np.minimum(rrup, r1) ** 2 + (1 + C['c5'] * m_ref) ** 2) ) g1 = np.maximum(np.log10(rrup / r1), 0) g2 = np.maximum(np.log10(rrup / r2), 0) mean = (C['c0'] + C['c1'] * m_ref + C['c2'] * m_ref ** 2 + (C['c3'] + C['c4'] * m_ref) * g0 + (C['c6'] + C['c7'] * m_ref) * g1 + (C['c9'] + C['c10'] * m_ref) * g2) # convert from log10 to ln and units from cm/s2 to g mean = np.log((10 ** mean) * 1e-2 / g) return mean
def _compute_mean(self, C, mag, rrup)
Compute mean value according to equation 18, page 32.
2.740421
2.613045
1.048746
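The final conversion step in the code above (common log of cm/s^2 to natural log of g) amounts to, assuming g is the standard gravity 9.80665 m/s^2 as in scipy.constants:

\ln y_{[g]} = \ln(10)\,\log_{10} y_{[\mathrm{cm/s^2}]} - \ln(100\,g)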
# standard deviation is converted from log10 to ln std_total = np.log(10 ** C['sigma']) stddevs = [] for _ in stddev_types: stddevs.append(np.zeros(num_sites) + std_total) return stddevs
def _get_stddevs(self, C, stddev_types, num_sites)
Return total standard deviation.
4.983822
4.487153
1.110687
with performance.Monitor('extract', measuremem=True) as mon: if webapi: obj = WebExtractor(calc_id).get(what) else: obj = Extractor(calc_id).get(what) fname = '%s_%d.hdf5' % (what.replace('/', '-').replace('?', '-'), calc_id) obj.save(fname) print('Saved', fname) if mon.duration > 1: print(mon)
def extract(what, calc_id, webapi=True)
Extract an output from the datastore and save it into an .hdf5 file. By default uses the WebAPI, otherwise the extraction is done locally.
4.480957
4.182465
1.071368
''' Constructs the "Kreemer Cell" from the input file. The Kreemer cell is simply a set of five lines describing the four nodes of the square (closed) :param list data: Strain data as list of text lines (input from linecache.getlines) :param int loc: Pointer to location in data :returns: temp_poly - 5 by 2 numpy array of cell longitudes and latitudes ''' temp_poly = np.empty([5, 2], dtype=float) for ival in range(1, 6): value = data[loc + ival].rstrip('\n') value = value.lstrip(' ') value = np.array((value.split(' ', 1))).astype(float) temp_poly[ival - 1, :] = value.flatten() return temp_poly
def _build_kreemer_cell(data, loc)
Constructs the "Kreemer Cell" from the input file. The Kreemer cell is simply a set of five lines describing the four nodes of the square (closed) :param list data: Strain data as list of text lines (input from linecache.getlines) :param int loc: Pointer to location in data :returns: temp_poly - 5 by 2 numpy array of cell longitudes and latitudes
6.13814
1.849755
3.318353
''' Gets the tectonic region type for every element inside the strain model :paramm strain_model: Input strain model as instance of openquake.hmtk.strain.geodetic_strain.GeodeticStrain :returns: Strain model with complete regionalisation ''' self.strain = strain_model self.strain.data['region'] = np.array( ['IPL' for _ in range(self.strain.get_number_observations())], dtype='|S13') self.strain.data['area'] = np.array( [np.nan for _ in range(self.strain.get_number_observations())]) regional_model = self.define_kreemer_regionalisation() for polygon in regional_model: self._point_in_tectonic_region(polygon) return self.strain
def get_regionalisation(self, strain_model)
Gets the tectonic region type for every element inside the strain model :param strain_model: Input strain model as instance of openquake.hmtk.strain.geodetic_strain.GeodeticStrain :returns: Strain model with complete regionalisation
5.40618
2.919844
1.85153
''' Returns the region type and area according to the tectonic region :param polygon: Dictionary containing the following attributes - 'long_lims' - Longitude limits (West, East) 'lat_lims' - Latitude limits (South, North) 'region_type' - Tectonic region type (str) 'area' - Area of cell in m ^ 2 ''' marker = np.zeros(self.strain.get_number_observations(), dtype=bool) idlong = np.logical_and( self.strain.data['longitude'] >= polygon['long_lims'][0], self.strain.data['longitude'] < polygon['long_lims'][1]) id0 = np.where(np.logical_and(idlong, np.logical_and( self.strain.data['latitude'] >= polygon['lat_lims'][0], self.strain.data['latitude'] < polygon['lat_lims'][1])))[0] if len(id0) > 0: marker[id0] = True for iloc in id0: self.strain.data['region'][iloc] = \ polygon['region_type'] self.strain.data['area'][iloc] = polygon['area'] marker = np.logical_not(marker) return marker
def _point_in_tectonic_region(self, polygon)
Returns the region type and area according to the tectonic region :param polygon: Dictionary containing the following attributes - 'long_lims' - Longitude limits (West, East) 'lat_lims' - Latitude limits (South, North) 'region_type' - Tectonic region type (str) 'area' - Area of cell in m ^ 2
3.001009
1.914648
1.567395
''' Applies the regionalisation defined according to the regionalisation typology of Corne Kreemer ''' '''Applies the regionalisation of Kreemer (2003) :param input_file: Filename (str) of input file contraining Kreemer regionalisation :param north: Northern limit (decimal degrees)for consideration (float) :param south: Southern limit (decimal degrees)for consideration (float) :param east: Eastern limit (decimal degrees)for consideration (float) :param west: Western limit (decimal degrees)for consideration (float) :returns: List of polygons corresonding to the Kreemer cells. ''' input_data = getlines(self.filename) kreemer_polygons = [] for line_loc, line in enumerate(input_data): if '>' in line[0]: polygon_dict = {} # Get region type (char) and area (m ^ 2) from header primary_data = line[2:].rstrip('\n') primary_data = primary_data.split(' ', 1) polygon_dict['region_type'] = primary_data[0].strip(' ') polygon_dict['area'] = float(primary_data[1].strip(' ')) polygon_dict['cell'] = _build_kreemer_cell(input_data, line_loc) polygon_dict['long_lims'] = np.array([ np.min(polygon_dict['cell'][:, 0]), np.max(polygon_dict['cell'][:, 0])]) polygon_dict['lat_lims'] = np.array([ np.min(polygon_dict['cell'][:, 1]), np.max(polygon_dict['cell'][:, 1])]) polygon_dict['cell'] = None if polygon_dict['long_lims'][0] >= 180.0: polygon_dict['long_lims'] = \ polygon_dict['long_lims'] - 360.0 valid_check = [ polygon_dict['long_lims'][0] >= west, polygon_dict['long_lims'][1] <= east, polygon_dict['lat_lims'][0] >= south, polygon_dict['lat_lims'][1] <= north] if all(valid_check): kreemer_polygons.append(polygon_dict) return kreemer_polygons
def define_kreemer_regionalisation(self, north=90., south=-90., east=180., west=-180.)
Applies the regionalisation defined according to the regionalisation typology of Corne Kreemer
2.829242
2.548731
1.110059
with urlopen(url) as f: data = io.BytesIO(f.read()) with zipfile.ZipFile(data) as z: try: return z.open(fname) except KeyError: # for instance the ShakeMap ci3031111 has inside a file # data/verified_atlas2.0/reviewed/19920628115739/output/ # uncertainty.xml # instead of just uncertainty.xml zinfo = z.filelist[0] if zinfo.filename.endswith(fname): return z.open(zinfo) else: raise
def urlextract(url, fname)
Download and unzip an archive and extract the underlying fname
6.822973
6.964191
0.979722
url = shakemap_url.format(shakemap_id) logging.info('Downloading %s', url) contents = json.loads(urlopen(url).read())[ 'properties']['products']['shakemap'][-1]['contents'] grid = contents.get('download/grid.xml') if grid is None: raise MissingLink('Could not find grid.xml link in %s' % url) uncertainty = contents.get('download/uncertainty.xml.zip') if uncertainty is None: with urlopen(grid['url']) as f: return get_shakemap_array(f) else: with urlopen(grid['url']) as f1, urlextract( uncertainty['url'], 'uncertainty.xml') as f2: return get_shakemap_array(f1, f2)
def download_array(shakemap_id, shakemap_url=SHAKEMAP_URL)
:param shakemap_id: USGS Shakemap ID :returns: an array with the shakemap
3.462258
3.667701
0.943986
if isinstance(array_or_id, str): # shakemap ID array = download_array(array_or_id) else: # shakemap array array = array_or_id available_imts = set(array['val'].dtype.names) missing = set(imts) - available_imts if missing: msg = ('The IMT %s is required but not in the available set %s, ' 'please change the riskmodel otherwise you will have ' 'incorrect zero losses for the associated taxonomies' % (missing.pop(), ', '.join(available_imts))) if discard_assets: logging.error(msg) else: raise RuntimeError(msg) # build a copy of the ShakeMap with only the relevant IMTs dt = [(imt, F32) for imt in sorted(available_imts)] dtlist = [('lon', F32), ('lat', F32), ('vs30', F32), ('val', dt), ('std', dt)] data = numpy.zeros(len(array), dtlist) for name in ('lon', 'lat', 'vs30'): data[name] = array[name] for name in ('val', 'std'): for im in available_imts: data[name][im] = array[name][im] if sitecol is None: # extract the sites from the shakemap return site.SiteCollection.from_shakemap(data), data # associate the shakemap to the (filtered) site collection bbox = (data['lon'].min(), data['lat'].min(), data['lon'].max(), data['lat'].max()) indices = sitecol.within_bbox(bbox) if len(indices) == 0: raise RuntimeError('There are no sites within the boundind box %s' % str(bbox)) sites = sitecol.filtered(indices) logging.info('Associating %d GMVs to %d sites', len(data), len(sites)) return geo.utils.assoc(data, sites, assoc_dist, 'warn')
def get_sitecol_shakemap(array_or_id, imts, sitecol=None, assoc_dist=None, discard_assets=False)
:param array_or_id: shakemap array or shakemap ID :param imts: required IMTs as a list of strings :param sitecol: SiteCollection used to reduce the shakemap :param assoc_dist: association distance :param discard_assets: set to zero the risk on assets with missing IMTs :returns: a pair (filtered site collection, filtered shakemap)
3.720454
3.514415
1.058627
assert correl in 'yes no full', correl n = len(dmatrix) corr = numpy.zeros((len(imts), n, n)) for imti, im in enumerate(imts): if correl == 'no': corr[imti] = numpy.eye(n) if correl == 'full': corr[imti] = numpy.ones((n, n)) elif correl == 'yes': corr[imti] = correlation.jbcorrelation(dmatrix, im, vs30clustered) return corr
def spatial_correlation_array(dmatrix, imts, correl='yes', vs30clustered=True)
:param dmatrix: distance matrix of shape (N, N) :param imts: M intensity measure types :param correl: 'yes', 'no' or 'full' :param vs30clustered: flag, True by default :returns: array of shape (M, N, N)
2.741781
2.616747
1.047782
# this depends on sPGA, sSa03, sSa10, sSa30 M, N = corrmatrices.shape[:2] matrices = [] for i, std in enumerate(stddev): covmatrix = numpy.zeros((N, N)) for j in range(N): for k in range(N): covmatrix[j, k] = corrmatrices[i, j, k] * std[j] * std[k] matrices.append(covmatrix) return numpy.array(matrices)
def spatial_covariance_array(stddev, corrmatrices)
:param stddev: array of shape (M, N) :param corrmatrices: array of shape (M, N, N) :returns: an array of shape (M, N, N)
3.541559
3.441045
1.02921
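The nested loops above scale each correlation matrix by the outer product of the corresponding stddev row; a minimal vectorized sketch of the same computation (not part of the original code):

import numpy

def spatial_covariance_array_vec(stddev, corrmatrices):
    # cov[m, j, k] = corrmatrices[m, j, k] * stddev[m, j] * stddev[m, k]
    std = numpy.asarray(stddev)
    return numpy.asarray(corrmatrices) * std[:, :, None] * std[:, None, :]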
assert corr in 'yes no full', corr # if there is only PGA this is a 1x1 identity matrix M = len(imts) cross_matrix = numpy.zeros((M, M)) for i, im in enumerate(imts): T1 = im.period or 0.05 for j in range(M): T2 = imts[j].period or 0.05 if i == j: cross_matrix[i, j] = 1 else: Tmax = max([T1, T2]) Tmin = min([T1, T2]) II = 1 if Tmin < 0.189 else 0 if corr == 'full': cross_matrix[i, j] = 0.99999 elif corr == 'yes': cross_matrix[i, j] = 1 - math.cos(math.pi / 2 - ( 0.359 + 0.163 * II * math.log(Tmin / 0.189) ) * math.log(Tmax / Tmin)) return cross_matrix
def cross_correlation_matrix(imts, corr='yes')
:param imts: M intensity measure types :param corr: 'yes', 'no' or 'full' :returns: an array of shape (M, M)
3.564965
3.456315
1.031435
n = len(vs30s) out = [amplify_ground_shaking(im.period, vs30s[i], gmfs[m * n + i]) for m, im in enumerate(imts) for i in range(n)] return numpy.array(out)
def amplify_gmfs(imts, vs30s, gmfs)
Amplify the ground shaking depending on the vs30s
4.8661
4.132029
1.177654
gmvs[gmvs > MAX_GMV] = MAX_GMV # accelerations > 5g are absurd interpolator = interpolate.interp1d( [0, 0.1, 0.2, 0.3, 0.4, 5], [(760 / vs30)**0.35, (760 / vs30)**0.35, (760 / vs30)**0.25, (760 / vs30)**0.10, (760 / vs30)**-0.05, (760 / vs30)**-0.05], ) if T <= 0.3 else interpolate.interp1d( [0, 0.1, 0.2, 0.3, 0.4, 5], [(760 / vs30)**0.65, (760 / vs30)**0.65, (760 / vs30)**0.60, (760 / vs30)**0.53, (760 / vs30)**0.45, (760 / vs30)**0.45], ) return interpolator(gmvs) * gmvs
def amplify_ground_shaking(T, vs30, gmvs)
:param T: period :param vs30: velocity :param gmvs: ground motion values for the current site in units of g
2.157828
2.186974
0.986673
M, N = spatial_cov.shape[:2] L = numpy.array([numpy.linalg.cholesky(spatial_cov[i]) for i in range(M)]) LLT = [] for i in range(M): row = [numpy.dot(L[i], L[j].T) * cross_corr[i, j] for j in range(M)] for j in range(N): singlerow = numpy.zeros(M * N) for i in range(M): singlerow[i * N:(i + 1) * N] = row[i][j] LLT.append(singlerow) return numpy.linalg.cholesky(numpy.array(LLT))
def cholesky(spatial_cov, cross_corr)
Decompose the spatial covariance and cross correlation matrices. :param spatial_cov: array of shape (M, N, N) :param cross_corr: array of shape (M, M) :returns: a triangular matrix of shape (M * N, M * N)
2.453516
2.453541
0.99999
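The loops above assemble a full (M*N, M*N) covariance whose (i, k) block of size (N, N) is the cross-correlation-scaled product of the per-IMT Cholesky factors; in block form, with L_i L_i^T = spatial_cov[i] and c_ik = cross_corr[i, k]:

\Sigma_{\mathrm{full}} =
\begin{pmatrix}
c_{11} L_1 L_1^{T} & \cdots & c_{1M} L_1 L_M^{T}\\
\vdots & \ddots & \vdots\\
c_{M1} L_M L_1^{T} & \cdots & c_{MM} L_M L_M^{T}
\end{pmatrix}

and the function returns the lower Cholesky factor of this matrix.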
N = len(shakemap) # number of sites std = shakemap['std'] if imts is None or len(imts) == 0: imts = std.dtype.names else: imts = [imt for imt in imts if imt in std.dtype.names] val = {imt: numpy.log(shakemap['val'][imt]) - std[imt] ** 2 / 2. for imt in imts} imts_ = [imt.from_string(name) for name in imts] M = len(imts_) cross_corr = cross_correlation_matrix(imts_, crosscorr) mu = numpy.array([numpy.ones(num_gmfs) * val[str(imt)][j] for imt in imts_ for j in range(N)]) dmatrix = geo.geodetic.distance_matrix( shakemap['lon'], shakemap['lat']) spatial_corr = spatial_correlation_array(dmatrix, imts_, spatialcorr) stddev = [std[str(imt)] for imt in imts_] for im, std in zip(imts_, stddev): if std.sum() == 0: raise ValueError('Cannot decompose the spatial covariance ' 'because stddev==0 for IMT=%s' % im) spatial_cov = spatial_covariance_array(stddev, spatial_corr) L = cholesky(spatial_cov, cross_corr) # shape (M * N, M * N) if trunclevel: Z = truncnorm.rvs(-trunclevel, trunclevel, loc=0, scale=1, size=(M * N, num_gmfs), random_state=seed) else: Z = norm.rvs(loc=0, scale=1, size=(M * N, num_gmfs), random_state=seed) # Z has shape (M * N, E) gmfs = numpy.exp(numpy.dot(L, Z) + mu) / PCTG if site_effects: gmfs = amplify_gmfs(imts_, shakemap['vs30'], gmfs) if gmfs.max() > MAX_GMV: logging.warning('There suspiciously large GMVs of %.2fg', gmfs.max()) return imts, gmfs.reshape((M, N, num_gmfs)).transpose(1, 2, 0)
def to_gmfs(shakemap, spatialcorr, crosscorr, site_effects, trunclevel, num_gmfs, seed, imts=None)
:returns: a pair (IMT strings, array of GMFs of shape (N, E, M))
3.572724
3.467355
1.030389
header = _build_header(dtype, ()) h = [] for col in header: name = '~'.join(col[:-2]) numpytype = col[-2] shape = col[-1] coldescr = name if numpytype != 'float32' and not numpytype.startswith('|S'): coldescr += ':' + numpytype if shape: coldescr += ':' + ':'.join(map(str, shape)) h.append(coldescr) return h
def build_header(dtype)
Convert a numpy nested dtype into a list of strings suitable as header of csv file. >>> imt_dt = numpy.dtype([('PGA', numpy.float32, 3), ... ('PGV', numpy.float32, 4)]) >>> build_header(imt_dt) ['PGA:3', 'PGV:4'] >>> gmf_dt = numpy.dtype([('A', imt_dt), ('B', imt_dt), ... ('idx', numpy.uint32)]) >>> build_header(gmf_dt) ['A~PGA:3', 'A~PGV:4', 'B~PGA:3', 'B~PGV:4', 'idx:uint32']
4.425084
4.628613
0.956028
close = True if dest is None: # write on a temporary file fd, dest = tempfile.mkstemp(suffix='.csv') os.close(fd) if hasattr(dest, 'write'): # file-like object in append mode # it must be closed by client code close = False elif not hasattr(dest, 'getvalue'): # not a BytesIO, assume dest is a filename dest = open(dest, 'wb') try: # see if data is a composite numpy array data.dtype.fields except AttributeError: # not a composite array autoheader = [] else: autoheader = build_header(data.dtype) if comment: dest.write(encode('# %s\n' % comment)) someheader = header or autoheader if header != 'no-header' and someheader: dest.write(encode(sep.join(htranslator.write(someheader)) + u'\n')) if autoheader: all_fields = [col.split(':', 1)[0].split('~') for col in autoheader] for record in data: row = [] for fields in all_fields: val = extract_from(record, fields) if fields[0] in ('lon', 'lat', 'depth'): row.append('%.5f' % val) else: row.append(scientificformat(val, fmt)) dest.write(encode(sep.join(row) + u'\n')) else: for row in data: dest.write(encode(sep.join(scientificformat(col, fmt) for col in row) + u'\n')) if hasattr(dest, 'getvalue'): return dest.getvalue()[:-1] # a newline is strangely added elif close: dest.close() return dest.name
def write_csv(dest, data, sep=',', fmt='%.6E', header=None, comment=None)
:param dest: None, file, filename or io.BytesIO instance :param data: array to save :param sep: separator to use (default comma) :param fmt: formatting string (default '%.6E') :param header: optional list with the names of the columns to display :param comment: optional first line starting with a # character
4.07859
4.11236
0.991788
triples = [] fields = [] for col_str in header: col = col_str.strip().split(':') n = len(col) if n == 1: # default dtype and no shape col = [col[0], 'float32', ''] elif n == 2: if castable_to_int(col[1]): # default dtype and shape col = [col[0], 'float32', col[1]] else: # dtype and no shape col = [col[0], col[1], ''] elif n > 3: raise ValueError('Invalid column description: %s' % col_str) field = col[0] numpytype = col[1] shape = () if not col[2].strip() else (int(col[2]),) triples.append((field, numpytype, shape)) fields.append(field) return fields, numpy.dtype(triples)
def parse_header(header)
Convert a list of the form `['fieldname:fieldtype:fieldsize',...]` into a numpy composite dtype. The parser understands headers generated by :func:`openquake.commonlib.writers.build_header`. Here is an example: >>> parse_header(['PGA:float32', 'PGV', 'avg:float32:2']) (['PGA', 'PGV', 'avg'], dtype([('PGA', '<f4'), ('PGV', '<f4'), ('avg', '<f4', (2,))])) :params header: a list of type descriptions :returns: column names and the corresponding composite dtype
2.912807
2.804239
1.038716
names, vals = [], [] pieces = comment.split('=') for i, piece in enumerate(pieces): if i == 0: # first line names.append(piece.strip()) elif i == len(pieces) - 1: # last line vals.append(ast.literal_eval(piece)) else: val, name = piece.rsplit(',', 1) vals.append(ast.literal_eval(val)) names.append(name.strip()) return list(zip(names, vals))
def parse_comment(comment)
Parse a comment of the form # investigation_time=50.0, imt="PGA", ... and returns it as pairs of strings: >>> parse_comment('''path=('b1',), time=50.0, imt="PGA"''') [('path', ('b1',)), ('time', 50.0), ('imt', 'PGA')]
2.433824
2.465055
0.98733
r with open(fname) as f: header = next(f) if header.startswith('#'): # the first line is a comment, skip it attrs = dict(parse_comment(header[1:])) header = next(f) else: attrs = {} transheader = htranslator.read(header.split(sep)) fields, dtype = parse_header(transheader) ts_pairs = [] # [(type, shape), ...] for name in fields: dt = dtype.fields[name][0] ts_pairs.append((dt.subdtype[0].type if dt.subdtype else dt.type, dt.shape)) col_ids = list(range(1, len(ts_pairs) + 1)) num_columns = len(col_ids) records = [] col, col_id = '', 0 for i, line in enumerate(f, 2): row = line.split(sep) if len(row) != num_columns: raise InvalidFile( 'expected %d columns, found %d in file %s, line %d' % (num_columns, len(row), fname, i)) try: record = [] for (ntype, shape), col, col_id in zip(ts_pairs, row, col_ids): record.append(_cast(col, ntype, shape, i, fname)) records.append(tuple(record)) except Exception as e: raise InvalidFile( 'Could not cast %r in file %s, line %d, column %d ' 'using %s: %s' % (col, fname, i, col_id, (ntype.__name__,) + shape, e)) return ArrayWrapper(numpy.array(records, dtype), attrs)
def read_composite_array(fname, sep=',')
r""" Convert a CSV file with header into an ArrayWrapper object. >>> from openquake.baselib.general import gettemp >>> fname = gettemp('PGA:3,PGV:2,avg:1\n' ... '.1 .2 .3,.4 .5,.6\n') >>> print(read_composite_array(fname).array) # array of shape (1,) [([0.1, 0.2, 0.3], [0.4, 0.5], [0.6])]
3.348209
3.309872
1.011583
r with open(fname) as f: records = [] for line in f: row = line.split(sep) record = [list(map(float, col.split())) for col in row] records.append(record) return numpy.array(records)
def read_array(fname, sep=',')
Convert a CSV file without header into a numpy array of floats.

>>> from openquake.baselib.general import gettemp
>>> print(read_array(gettemp('.1 .2, .3 .4, .5 .6\n')))
[[[0.1 0.2]
  [0.3 0.4]
  [0.5 0.6]]]
3.232714
3.385814
0.954782
descrs = []
for name in names:
    mo = re.match(self.short_regex, name)
    if mo:
        idx = mo.lastindex  # matching group index, starting from 1
        suffix = self.suffix[idx - 1].replace(r':\|', ':|')
        descrs.append(mo.group(mo.lastindex) + suffix + name[mo.end():])
    else:
        descrs.append(name)
return descrs
def read(self, names)
Convert names into descriptions
4.929541
4.634836
1.063585
# example: '(poe-[\d\.]+):float32' -> 'poe-[\d\.]+'
names = []
for descr in descrs:
    mo = re.match(self.long_regex, descr)
    if mo:
        names.append(mo.group(mo.lastindex) + descr[mo.end():])
    else:
        names.append(descr)
return names
def write(self, descrs)
Convert descriptions into names
5.181825
4.683548
1.106389
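The translation in write() hinges on a capturing group in long_regex; the standalone snippet below reproduces the idea with an illustrative pattern taken from the comment above (the real long_regex is assembled elsewhere in the translator class).

# Illustrative only: the pattern mimics the example in the comment above;
# the actual long_regex is built from the translator's own tables
import re

long_regex = r'(poe-[\d\.]+):float32'
descr = 'poe-0.105:float32'
mo = re.match(long_regex, descr)
if mo:
    name = mo.group(mo.lastindex) + descr[mo.end():]
else:
    name = descr
print(name)  # poe-0.105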
write_csv(fname, data, self.sep, self.fmt, header)
self.fnames.add(getattr(fname, 'name', fname))
def save(self, data, fname, header=None)
Save data on fname.

:param data: numpy array or list of lists
:param fname: path name
:param header: header to use
6.193901
11.17783
0.554124
write_csv(dest, data, self.sep, self.fmt, 'no-header')
def save_block(self, data, dest)
Save data on dest, which is a file open in 'a' mode
16.125755
14.156363
1.139117
# pylint: disable=too-many-arguments
# obtain coefficients for required intensity measure type
coeffs = self.COEFFS_BEDROCK[imt].copy()
# obtain site-class specific coefficients
a_1, a_2, sigma_site = self._get_site_coeffs(sites, imt)
coeffs.update({'a1': a_1, 'a2': a_2, 'sigma_site': sigma_site})
# compute bedrock motion, equation (8)
ln_mean = (self._compute_magnitude_terms(rup, coeffs) +
           self._compute_distance_terms(dists, coeffs))
# adjust for site class, equation (10)
ln_mean += self._compute_site_amplification(ln_mean, coeffs)
# No need to convert to g since "In [equation (8)], y_br = (SA/g)"
ln_stddevs = self._get_stddevs(coeffs, stddev_types)
return ln_mean, [ln_stddevs]
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for specification of input and result values.

Implements the following equations:

Equation (8) on p. 203 for the bedrock ground motion:

``ln(y_br) = c1 + c2*(M - 6) + c3*(M - 6)**2 - ln(R) - c4*R + ln(ε_br)``

Equation (9) on p. 207 gives the site amplification factor:

``ln(F_s) = a1*y_br + a2 + ln(δ_site)``

Equation (10) on p. 207 for the ground motion at a given site:

``y_site = y_br*F_s``

Equation (11) on p. 207 for total standard error at a given site:

``σ{ln(ε_site)} = sqrt(σ{ln(ε_br)}**2 + σ{ln(δ_site)}**2)``
5.281665
4.937303
1.069747
adj_mag = rup.mag - self.CONSTS['ref_mag']
return coeffs['c1'] + coeffs['c2']*adj_mag + coeffs['c3']*adj_mag**2
def _compute_magnitude_terms(self, rup, coeffs)
First three terms of equation (8) on p. 203: ``c1 + c2*(M - 6) + c3*(M - 6)**2``
3.518551
3.152007
1.116289
return - np.log(dists.rhypo) - coeffs['c4']*dists.rhypo
def _compute_distance_terms(cls, dists, coeffs)
Fourth and fifth terms of equation (8) on p. 203: ``- ln(R) - c4*R``
15.149701
6.044175
2.506496
for stddev_type in stddev_types:
    assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
return np.sqrt(coeffs['sigma_bedrock']**2 + coeffs['sigma_site']**2)
def _get_stddevs(self, coeffs, stddev_types)
Equation (11) on p. 207 for total standard error at a given site: ``σ{ln(ε_site)} = sqrt(σ{ln(ε_br)}**2 + σ{ln(δ_site)}**2)``
4.293042
4.039305
1.062817
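As a quick numerical check of equation (11), combining a bedrock sigma of 0.3 with a site sigma of 0.4 in quadrature gives 0.5 (the sigma values here are made up for illustration):

# Worked example of the quadrature combination in equation (11);
# the sigma values are invented, not taken from the GMPE tables
import numpy as np

sigma_bedrock, sigma_site = 0.3, 0.4
print(np.sqrt(sigma_bedrock**2 + sigma_site**2))  # 0.5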
site_classes = self.get_nehrp_classes(sites)
is_bedrock = self.is_bedrock(sites)

if 'E' in site_classes:
    msg = ('Site class E and F not supported by %s' %
           type(self).__name__)
    warnings.warn(msg, UserWarning)

a_1 = np.nan*np.ones_like(sites.vs30)
a_2 = np.nan*np.ones_like(sites.vs30)
sigma = np.nan*np.ones_like(sites.vs30)

for key in self.COEFFS_NEHRP.keys():
    indices = (site_classes == key) & ~is_bedrock
    a_1[indices] = self.COEFFS_NEHRP[key][imt]['a1']
    a_2[indices] = self.COEFFS_NEHRP[key][imt]['a2']
    sigma[indices] = self.COEFFS_NEHRP[key][imt]['sigma']

a_1[is_bedrock] = 0.
a_2[is_bedrock] = 0.
sigma[is_bedrock] = 0.

return (a_1, a_2, sigma)
def _get_site_coeffs(self, sites, imt)
Extracts the correct coefficients for each site from Table 5 on p. 208.

:raises UserWarning:
    If vs30 is below the limit for site class D, since "E- and F-type
    sites [...] are susceptible for liquefaction and failure." p. 205.
2.297054
2.188738
1.049488
classes = sorted(self.NEHRP_VS30_UPPER_BOUNDS.keys())
bounds = [self.NEHRP_VS30_UPPER_BOUNDS[item] for item in classes]
bounds = np.reshape(np.array(bounds), (-1, 1))
vs30s = np.reshape(sites.vs30, (1, -1))
site_classes = np.choose((vs30s < bounds).sum(axis=0) - 1, classes)
return site_classes.astype('object')
def get_nehrp_classes(self, sites)
Site classification thresholds from Section 4 "Site correction coefficients" p. 205. Note that site classes E and F are not supported.
3.507518
3.594345
0.975843
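The np.choose line classifies all sites at once by counting how many upper bounds each vs30 falls below; a standalone sketch with made-up bounds (not the GMPE's actual NEHRP_VS30_UPPER_BOUNDS):

# Standalone sketch of the vectorised classification; the bound values
# are illustrative only
import numpy as np

upper_bounds = {'A': np.inf, 'B': 1500.0, 'C': 760.0, 'D': 360.0}
classes = sorted(upper_bounds)                        # ['A', 'B', 'C', 'D']
bounds = np.array([upper_bounds[c] for c in classes]).reshape(-1, 1)
vs30s = np.array([200.0, 500.0, 900.0, 2000.0]).reshape(1, -1)
# count, for each site, how many upper bounds its vs30 is below,
# then map that count to a class letter
site_classes = np.choose((vs30s < bounds).sum(axis=0) - 1, classes)
print(site_classes)  # ['D' 'C' 'B' 'A']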
if settings.LOCKDOWN and hasattr(request, 'user'):
    if request.user.is_authenticated:
        user = request.user.username
    else:
        # This may happen with crafted requests
        user = ''
else:
    user = getattr(settings, 'DEFAULT_USER', getpass.getuser())
return user
def get_user(request)
Returns the user from `request` if authentication is enabled, otherwise returns the default user (from settings, or as reported by the OS).
4.492265
4.387955
1.023772
users = [get_user(request)]
if settings.LOCKDOWN and hasattr(request, 'user'):
    if request.user.is_authenticated:
        groups = request.user.groups.all()
        if groups:
            users = list(User.objects.filter(groups__in=groups)
                         .values_list('username', flat=True))
    else:
        # This may happen with crafted requests
        users = []
return users
def get_valid_users(request)
Returns a list of users based on group membership. Returns a list containing a single user when the user is not a member of any group.
3.964322
3.764217
1.05316
acl_on = settings.ACL_ON
if settings.LOCKDOWN and hasattr(request, 'user'):
    # ACL is always disabled for superusers
    if request.user.is_superuser:
        acl_on = False
return acl_on
def get_acl_on(request)
Returns `True` if the ACL should be honored, otherwise returns `False`.
4.216167
4.078534
1.033746
context = {}
context['oq_engine_server_url'] = ('//' +
                                   request.META.get('HTTP_HOST',
                                                    'localhost:8800'))
# this context var is also evaluated by the STANDALONE_APPS to identify
# the running environment. Keep it as it is
context['oq_engine_version'] = oqversion
context['server_name'] = settings.SERVER_NAME
return context
def oq_server_context_processor(request)
A custom context processor which allows injection of additional context variables.
7.575546
7.746998
0.977869
retry = 0
response = ''
success = False
while response != requests.codes.ok and retry < max_retries:
    try:
        response = requests.head(url, allow_redirects=True).status_code
        success = True
    except Exception:
        sleep(1)
        retry += 1
if not success:
    logging.warning('Unable to connect to %s within %s retries' %
                    (url, max_retries))
return success
def check_webserver_running(url="http://localhost:8800", max_retries=30)
Returns True if the given URL responds within the given number of retries.
3.23475
3.047245
1.061533
name = ekey[0] + '.csv'
try:
    array = dstore[ekey[0]].value
except AttributeError:
    # this happens if the key corresponds to an HDF5 group
    return []  # write a custom exporter in this case
if len(array.shape) == 1:  # vector
    array = array.reshape((len(array), 1))
return [write_csv(dstore.export_path(name), array)]
def export_csv(ekey, dstore)
Default csv exporter for arrays stored in the output.hdf5 file

:param ekey: export key
:param dstore: datastore object
:returns: a list with the path of the exported file
6.613894
5.80924
1.138513
dest = dstore.export_path('input.zip')
nbytes = dstore.get_attr('input/zip', 'nbytes')
zbytes = dstore['input/zip'].value
# when reading input_zip some terminating null bytes are truncated (for
# unknown reasons) therefore they must be restored
zbytes += b'\x00' * (nbytes - len(zbytes))
open(dest, 'wb').write(zbytes)
return [dest]
def export_input_zip(ekey, dstore)
Export the data in the `input_zip` dataset as a .zip file
7.460839
7.8751
0.947396
result = dict(loss_curves=[], stat_curves=[])
weights = [w['default'] for w in param['weights']]
statnames, stats = zip(*param['stats'])
for ri in riskinputs:
    A = len(ri.assets)
    L = len(riskmodel.lti)
    R = ri.hazard_getter.num_rlzs
    loss_curves = numpy.zeros((R, L, A), object)
    avg_losses = numpy.zeros((R, L, A))
    for out in riskmodel.gen_outputs(ri, monitor):
        r = out.rlzi
        for l, loss_type in enumerate(riskmodel.loss_types):
            # loss_curves has shape (A, C)
            for i, asset in enumerate(ri.assets):
                loss_curves[out.rlzi, l, i] = lc = out[loss_type][i]
                aid = asset['ordinal']
                avg = scientific.average_loss(lc)
                avg_losses[r, l, i] = avg
                lcurve = (lc['loss'], lc['poe'], avg)
                result['loss_curves'].append((l, r, aid, lcurve))

    # compute statistics
    for l, loss_type in enumerate(riskmodel.loss_types):
        for i, asset in enumerate(ri.assets):
            avg_stats = compute_stats(avg_losses[:, l, i], stats, weights)
            losses = loss_curves[0, l, i]['loss']
            all_poes = numpy.array(
                [loss_curves[r, l, i]['poe'] for r in range(R)])
            poes_stats = compute_stats(all_poes, stats, weights)
            result['stat_curves'].append(
                (l, asset['ordinal'], losses, poes_stats, avg_stats))
if R == 1:  # the realization is the same as the mean
    del result['loss_curves']
return result
def classical_risk(riskinputs, riskmodel, param, monitor)
Compute and return the average losses for each asset.

:param riskinputs:
    :class:`openquake.risklib.riskinput.RiskInput` objects
:param riskmodel:
    a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
:param param:
    dictionary of extra parameters
:param monitor:
    :class:`openquake.baselib.performance.Monitor` instance
4.427897
4.204781
1.053063
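The statistics step reduces per-realization values with the logic-tree weights; a minimal numpy-only sketch of a weighted mean (one of the kinds of statistic produced there), using invented numbers:

# Minimal sketch of the per-asset statistics step: a weighted mean over
# R realizations; values and weights are made up for illustration
import numpy

avg_losses = numpy.array([100., 120., 90.])  # one average loss per realization
weights = numpy.array([0.5, 0.3, 0.2])       # logic-tree weights (sum to 1)
print(numpy.average(avg_losses, weights=weights))  # 104.0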
return [re.sub(r'\{[^}]*\}', "", copy(subnode.tag)) for subnode in node.nodes]
def get_taglist(node)
Return a list of tags (with the NRML namespace removed) representing the order of the subnodes within the node
9.71722
11.255533
0.863328
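The regex simply strips the `{namespace}` prefix that lxml-style tags carry; a standalone illustration (the namespace URI shown is illustrative):

# Standalone illustration of the namespace-stripping substitution
import re

tag = '{http://openquake.org/xmlns/nrml/0.5}pointSource'  # URI is illustrative
print(re.sub(r'\{[^}]*\}', "", tag))  # pointSource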
assert "LineString" in node.tag
crds = [float(x) for x in node.nodes[0].text.split()]
if with_depth:
    return Line([Point(crds[iloc], crds[iloc + 1], crds[iloc + 2])
                 for iloc in range(0, len(crds), 3)])
else:
    return Line([Point(crds[iloc], crds[iloc + 1])
                 for iloc in range(0, len(crds), 2)])
def linestring_node_to_line(node, with_depth=False)
Converts a LineString node into an instance of :class:`openquake.hazardlib.geo.line.Line`
2.14958
2.223492
0.966759
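The stride-based slicing groups the flat coordinate list into pairs or triplets; a tiny standalone sketch of the 3-value (with depth) case with made-up coordinates:

# Sketch of the stride-based grouping used above (with_depth=True case);
# the coordinate values are invented
crds = [0.0, 0.5, 10.0, 1.0, 1.5, 20.0]  # lon, lat, depth, lon, lat, depth
points = [(crds[i], crds[i + 1], crds[i + 2])
          for i in range(0, len(crds), 3)]
print(points)  # [(0.0, 0.5, 10.0), (1.0, 1.5, 20.0)]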
assert "pointGeometry" in node.tag
for subnode in node.nodes:
    if "Point" in subnode.tag:
        # Position
        lon, lat = map(float, subnode.nodes[0].text.split())
        point = Point(lon, lat)
    elif "upperSeismoDepth" in subnode.tag:
        upper_depth = float_(subnode.text)
    elif "lowerSeismoDepth" in subnode.tag:
        lower_depth = float_(subnode.text)
    else:
        # Redundant
        pass
assert lower_depth > upper_depth
return point, upper_depth, lower_depth
def node_to_point_geometry(node)
Reads the node and returns the point geometry, upper depth and lower depth
3.366523
3.198569
1.052509