code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
---|---|---|---|---|---|
assert "areaGeometry" in node.tag
for subnode in node.nodes:
if "Polygon" in subnode.tag:
crds = [float(x)
for x in subnode.nodes[0].nodes[0].nodes[0].text.split()]
polygon = Polygon([Point(crds[iloc], crds[iloc + 1])
for iloc in range(0, len(crds), 2)])
elif "upperSeismoDepth" in subnode.tag:
upper_depth = float_(subnode.text)
elif "lowerSeismoDepth" in subnode.tag:
lower_depth = float_(subnode.text)
else:
# Redundant
pass
assert lower_depth > upper_depth
return polygon, upper_depth, lower_depth | def node_to_area_geometry(node) | Reads an area geometry node and returns the polygon, upper depth and lower
depth | 3.106273 | 2.87585 | 1.080124 |
assert "simpleFaultGeometry" in node.tag
for subnode in node.nodes:
if "LineString" in subnode.tag:
trace = linestring_node_to_line(subnode, with_depth=False)
elif "dip" in subnode.tag:
dip = float(subnode.text)
elif "upperSeismoDepth" in subnode.tag:
upper_depth = float(subnode.text)
elif "lowerSeismoDepth" in subnode.tag:
lower_depth = float(subnode.text)
else:
# Redundant
pass
assert lower_depth > upper_depth
return trace, dip, upper_depth, lower_depth | def node_to_simple_fault_geometry(node) | Reads a simple fault geometry node and returns an OpenQuake representation
:returns:
trace - Trace of fault as instance | 2.895574 | 2.889186 | 1.002211 |
assert "complexFaultGeometry" in node.tag
intermediate_edges = []
for subnode in node.nodes:
if "faultTopEdge" in subnode.tag:
top_edge = linestring_node_to_line(subnode.nodes[0],
with_depth=True)
elif "intermediateEdge" in subnode.tag:
int_edge = linestring_node_to_line(subnode.nodes[0],
with_depth=True)
intermediate_edges.append(int_edge)
elif "faultBottomEdge" in subnode.tag:
bottom_edge = linestring_node_to_line(subnode.nodes[0],
with_depth=True)
else:
# Redundant
pass
return [top_edge] + intermediate_edges + [bottom_edge] | def node_to_complex_fault_geometry(node) | Reads a complex fault geometry node and returns an | 2.683864 | 2.813745 | 0.95384 |
# Parse to float dictionary
if not all([node.attrib[key]
for key in ["minMag", "maxMag", "aValue", "bValue"]]):
return None
tgr = dict((key, float_(node.attrib[key])) for key in node.attrib)
return mfd.truncated_gr.TruncatedGRMFD(min_mag=tgr["minMag"],
max_mag=tgr["maxMag"],
bin_width=bin_width,
a_val=tgr["aValue"],
b_val=tgr["bValue"]) | def node_to_truncated_gr(node, bin_width=0.1) | Parses truncated GR node to an instance of the
:class: openquake.hazardlib.mfd.truncated_gr.TruncatedGRMFD | 3.940663 | 3.100016 | 1.271175 |
if not all([node.attrib["minMag"], node.attrib["binWidth"],
node.nodes[0].text]):
return None
# Text to float
rates = [float(x) for x in node.nodes[0].text.split()]
return mfd.evenly_discretized.EvenlyDiscretizedMFD(
float(node.attrib["minMag"]),
float(node.attrib["binWidth"]),
rates) | def node_to_evenly_discretized(node) | Parses the evenly discretized mfd node to an instance of the
:class: openquake.hazardlib.mfd.evenly_discretized.EvenlyDiscretizedMFD,
or to None if not all parameters are available | 4.678683 | 3.555224 | 1.316002 |
if "incrementalMFD" in taglist:
mfd = node_to_evenly_discretized(
node.nodes[taglist.index("incrementalMFD")])
elif "truncGutenbergRichterMFD" in taglist:
mfd = node_to_truncated_gr(
node.nodes[taglist.index("truncGutenbergRichterMFD")])
else:
mfd = None
return mfd | def node_to_mfd(node, taglist) | Reads the node to return a magnitude frequency distribution | 3.072888 | 3.297345 | 0.931928 |
if not len(node):
return None
npd_pmf = []
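# collect (probability, NodalPlane) pairs for the PMF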
for plane in node.nodes:
if not all(plane.attrib[key] for key in plane.attrib):
# One plane fails - return None
return None
npd = NodalPlane(float(plane.attrib["strike"]),
float(plane.attrib["dip"]),
float(plane.attrib["rake"]))
npd_pmf.append((float(plane.attrib["probability"]), npd))
return PMF(npd_pmf) | def node_to_nodal_planes(node) | Parses the nodal plane distribution to a PMF | 4.594302 | 4.117254 | 1.115866 |
if not len(node):
return None
hdds = []
for subnode in node.nodes:
if not all([subnode.attrib[key] for key in ["depth", "probability"]]):
return None
hdds.append((float(subnode.attrib["probability"]),
float(subnode.attrib["depth"])))
return PMF(hdds) | def node_to_hdd(node) | Parses the node to a hypocentral depth distribution PMF | 4.250048 | 3.486055 | 1.219157 |
assert "pointSource" in node.tag
pnt_taglist = get_taglist(node)
# Get metadata
point_id, name, trt = (node.attrib["id"],
node.attrib["name"],
node.attrib["tectonicRegion"])
assert point_id # Defensive validation!
# Process geometry
location, upper_depth, lower_depth = node_to_point_geometry(
node.nodes[pnt_taglist.index("pointGeometry")])
# Process scaling relation
msr = node_to_scalerel(node.nodes[pnt_taglist.index("magScaleRel")])
# Process aspect ratio
aspect = float_(node.nodes[pnt_taglist.index("ruptAspectRatio")].text)
# Process MFD
mfd = node_to_mfd(node, pnt_taglist)
# Process nodal planes
npds = node_to_nodal_planes(
node.nodes[pnt_taglist.index("nodalPlaneDist")])
# Process hypocentral depths
hdds = node_to_hdd(node.nodes[pnt_taglist.index("hypoDepthDist")])
return mtkPointSource(point_id, name, trt,
geometry=location,
upper_depth=upper_depth,
lower_depth=lower_depth,
mag_scale_rel=msr,
rupt_aspect_ratio=aspect,
mfd=mfd,
nodal_plane_dist=npds,
return mtkPointSource(point_id, name, trt,
geometry=location,
upper_depth=upper_depth,
lower_depth=lower_depth,
mag_scale_rel=msr,
rupt_aspect_ratio=aspect,
mfd=mfd,
nodal_plane_dist=npds,
hypo_depth_dist=hdds) | def parse_point_source_node(node, mfd_spacing=0.1) | Parses a "pointSource" node and returns an instance of the :class:
openquake.hmtk.sources.point_source.mtkPointSource | 3.808866 | 3.541728 | 1.075426 |
assert "areaSource" in node.tag
area_taglist = get_taglist(node)
# Get metadata
area_id, name, trt = (node.attrib["id"],
node.attrib["name"],
node.attrib["tectonicRegion"])
assert area_id # Defensive validation!
# Process geometry
polygon, upper_depth, lower_depth = node_to_area_geometry(
node.nodes[area_taglist.index("areaGeometry")])
# Process scaling relation
msr = node_to_scalerel(node.nodes[area_taglist.index("magScaleRel")])
# Process aspect ratio
aspect = float_(node.nodes[area_taglist.index("ruptAspectRatio")].text)
# Process MFD
mfd = node_to_mfd(node, area_taglist)
# Process nodal planes
npds = node_to_nodal_planes(
node.nodes[area_taglist.index("nodalPlaneDist")])
# Process hypocentral depths
hdds = node_to_hdd(node.nodes[area_taglist.index("hypoDepthDist")])
return mtkAreaSource(area_id, name, trt,
geometry=polygon,
upper_depth=upper_depth,
lower_depth=lower_depth,
mag_scale_rel=msr,
rupt_aspect_ratio=aspect,
mfd=mfd,
nodal_plane_dist=npds,
hypo_depth_dist=hdds) | def parse_area_source_node(node, mfd_spacing=0.1) | Parses an "areaSource" node and returns an instance of the :class:
openquake.hmtk.sources.area.mtkAreaSource | 3.77859 | 3.36793 | 1.121933 |
assert "simpleFaultSource" in node.tag
sf_taglist = get_taglist(node)
# Get metadata
sf_id, name, trt = (node.attrib["id"],
node.attrib["name"],
node.attrib["tectonicRegion"])
# Process geometry
trace, dip, upper_depth, lower_depth = node_to_simple_fault_geometry(
node.nodes[sf_taglist.index("simpleFaultGeometry")])
# Process scaling relation
msr = node_to_scalerel(node.nodes[sf_taglist.index("magScaleRel")])
# Process aspect ratio
aspect = float_(node.nodes[sf_taglist.index("ruptAspectRatio")].text)
# Process MFD
mfd = node_to_mfd(node, sf_taglist)
# Process rake
rake = float_(node.nodes[sf_taglist.index("rake")].text)
simple_fault = mtkSimpleFaultSource(sf_id, name, trt,
geometry=None,
dip=dip,
upper_depth=upper_depth,
lower_depth=lower_depth,
mag_scale_rel=msr,
rupt_aspect_ratio=aspect,
mfd=mfd,
rake=rake)
simple_fault.create_geometry(trace, dip, upper_depth, lower_depth,
mesh_spacing)
return simple_fault | def parse_simple_fault_node(node, mfd_spacing=0.1, mesh_spacing=1.0) | Parses a "simpleFaultSource" node and returns an instance of the :class:
openquake.hmtk.sources.simple_fault.mtkSimpleFaultSource | 3.510737 | 3.122494 | 1.124337 |
assert "complexFaultSource" in node.tag
sf_taglist = get_taglist(node)
# Get metadata
sf_id, name, trt = (node.attrib["id"],
node.attrib["name"],
node.attrib["tectonicRegion"])
# Process geometry
edges = node_to_complex_fault_geometry(
node.nodes[sf_taglist.index("complexFaultGeometry")])
# Process scaling relation
msr = node_to_scalerel(node.nodes[sf_taglist.index("magScaleRel")])
# Process aspect ratio
aspect = float_(node.nodes[sf_taglist.index("ruptAspectRatio")].text)
# Process MFD
mfd = node_to_mfd(node, sf_taglist)
# Process rake
rake = float_(node.nodes[sf_taglist.index("rake")].text)
complex_fault = mtkComplexFaultSource(sf_id, name, trt,
geometry=None,
mag_scale_rel=msr,
rupt_aspect_ratio=aspect,
mfd=mfd,
rake=rake)
complex_fault.create_geometry(edges, mesh_spacing)
return complex_fault | def parse_complex_fault_node(node, mfd_spacing=0.1, mesh_spacing=4.0) | Parses a "complexFaultSource" node and returns an instance of the :class:
openquake.hmtk.sources.complex_fault.mtkComplexFaultSource | 3.893937 | 3.379735 | 1.152143 |
sm_node = node_from_xml(self.input_file)[0]
if sm_node[0].tag.startswith('{http://openquake.org/xmlns/nrml/0.4}'):
node_sets = [sm_node]
sm_name = sm_node.get("name", "")
else: # format NRML 0.5+
node_sets = sm_node
sm_name = sm_node["name"]
source_model = mtkSourceModel(identifier, name=sm_name)
for node_set in node_sets:
for node in node_set:
if "pointSource" in node.tag:
source_model.sources.append(
parse_point_source_node(node, mfd_spacing))
elif "areaSource" in node.tag:
source_model.sources.append(
parse_area_source_node(node, mfd_spacing))
elif "simpleFaultSource" in node.tag:
source_model.sources.append(
parse_simple_fault_node(node, mfd_spacing,
simple_mesh_spacing))
elif "complexFaultSource" in node.tag:
source_model.sources.append(
parse_complex_fault_node(node, mfd_spacing,
complex_mesh_spacing))
# TODO: multiPointSource are not supported
else:
print("Source typology %s not recognised - skipping!"
% node.tag)
return source_model | def read_file(self, identifier, mfd_spacing=0.1, simple_mesh_spacing=1.0,
complex_mesh_spacing=4.0, area_discretization=10.) | Reads in the source model and returns an instance of the :class:
openquake.hmtk.sources.source_model.mtkSourceModel | 2.964745 | 2.600096 | 1.140244 |
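A minimal usage sketch for the reader above; the parser class name (nrmlSourceModelParser) and its constructor taking the path to a NRML file are assumptions, only the read_file signature comes from this section:
parser = nrmlSourceModelParser('source_model.xml')  # hypothetical class name and constructor
source_model = parser.read_file('sm1', mfd_spacing=0.1,
simple_mesh_spacing=1.0, complex_mesh_spacing=4.0)
print(len(source_model.sources))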
C = self.COEFFS[imt]
mag = rup.mag - 6
d = np.sqrt(dists.rjb ** 2 + C['c7'] ** 2)
mean = np.zeros_like(d)
mean += C['c1'] + C['c2'] * mag + C['c3'] * mag ** 2 + C['c6']
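# distance scaling: only the log term within 100 km; beyond, a linear attenuation term is added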
idx = d <= 100.
mean[idx] = mean[idx] + C['c5'] * np.log10(d[idx])
idx = d > 100.
mean[idx] = (mean[idx] + C['c5'] * np.log10(100.) -
np.log10(d[idx] / 100.) + C['c4'] * (d[idx] - 100.))
# convert from log10 to ln and from cm/s**2 to g
mean = np.log((10.0 ** (mean - 2.0)) / g)
stddevs = self._get_stddevs(C, stddev_types, dists.rjb.shape[0])
return mean, stddevs | def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types) | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. | 2.762673 | 2.816004 | 0.981061 |
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS_SINTER[imt]
# cap magnitude values at 8.5, see page 1709
mag = rup.mag
if mag > 8.5:
mag = 8.5
# compute PGA on rock (needed for site amplification calculation)
G = 10 ** (1.2 - 0.18 * mag)
pga_rock = self._compute_mean(self.COEFFS_SINTER[PGA()], G, mag,
rup.hypo_depth, dists.rrup, sites.vs30,
# by passing pga_rock > 500 the soil
# amplification is 0
np.zeros_like(sites.vs30) + 600,
PGA())
pga_rock = 10 ** (pga_rock)
# periods 0.4 s (2.5 Hz) and 0.2 s (5 Hz) need a special case because
# of the erratum. SA for 0.4s and 0.2s is computed and a weighted sum
# is returned
if imt.period in (0.2, 0.4):
C04 = self.COEFFS_SINTER[SA(period=0.4, damping=5.0)]
C02 = self.COEFFS_SINTER[SA(period=0.2, damping=5.0)]
mean04 = self._compute_mean(C04, G, mag, rup.hypo_depth,
dists.rrup, sites.vs30, pga_rock, imt)
mean02 = self._compute_mean(C02, G, mag, rup.hypo_depth,
dists.rrup, sites.vs30, pga_rock, imt)
if imt.period == 0.2:
mean = 0.333 * mean02 + 0.667 * mean04
else:
mean = 0.333 * mean04 + 0.667 * mean02
else:
mean = self._compute_mean(C, G, mag, rup.hypo_depth, dists.rrup,
sites.vs30, pga_rock, imt)
# convert from log10 to ln and units from cm/s**2 to g
mean = np.log((10 ** mean) * 1e-2 / g)
if imt.period == 4.0:
mean /= 0.550
stddevs = self._get_stddevs(C, stddev_types, sites.vs30.shape[0])
return mean, stddevs | def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types) | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. | 3.041957 | 3.054698 | 0.995829 |
if hypo_depth > 100:
hypo_depth = 100
delta = 0.00724 * 10 ** (0.507 * mag)
R = np.sqrt(rrup ** 2 + delta ** 2)
s_amp = self._compute_soil_amplification(C, vs30, pga_rock, imt)
mean = (
# 1st term
C['c1'] + C['c2'] * mag +
# 2nd term
C['c3'] * hypo_depth +
# 3rd term
C['c4'] * R -
# 4th term
g * np.log10(R) +
# 5th, 6th and 7th terms
s_amp
)
return mean | def _compute_mean(self, C, g, mag, hypo_depth, rrup, vs30, pga_rock, imt) | Compute mean according to equation 1, page 1706. | 3.10304 | 3.01601 | 1.028856 |
if imt.period >= 1:
return np.ones_like(pga_rock)
else:
sl = np.zeros_like(pga_rock)
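# the linear factor is built piecewise over PGA-on-rock ranges (<= 100, 100-500, >= 500 cm/s**2)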
pga_between_100_500 = (pga_rock > 100) & (pga_rock < 500)
pga_greater_equal_500 = pga_rock >= 500
is_SA_between_05_1 = 0.5 < imt.period < 1
is_SA_less_equal_05 = imt.period <= 0.5
if is_SA_between_05_1:
sl[pga_between_100_500] = (1 - (1. / imt.period - 1) *
(pga_rock[pga_between_100_500] -
100) / 400)
sl[pga_greater_equal_500] = 1 - (1. / imt.period - 1)
if is_SA_less_equal_05 or imt.period == 0:
sl[pga_between_100_500] = (1 - (pga_rock[pga_between_100_500] -
100) / 400)
sl[pga_rock <= 100] = 1
return sl | def _compute_soil_linear_factor(cls, pga_rock, imt) | Compute soil linear factor as explained in paragraph 'Functional
Form', page 1706. | 2.185077 | 2.179816 | 1.002413 |
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS_SSLAB[imt]
# cap magnitude values at 8.0, see page 1709
mag = rup.mag
if mag >= 8.0:
mag = 8.0
# compute PGA on rock (needed for site amplification calculation)
G = 10 ** (0.301 - 0.01 * mag)
pga_rock = self._compute_mean(self.COEFFS_SSLAB[PGA()], G, mag,
rup.hypo_depth, dists.rrup, sites.vs30,
# by passing pga_rock > 500 the soil
# amplification is 0
np.zeros_like(sites.vs30) + 600,
PGA())
pga_rock = 10 ** (pga_rock)
# compute actual mean and convert from log10 to ln and units from
# cm/s**2 to g
mean = self._compute_mean(C, G, mag, rup.hypo_depth, dists.rrup,
sites.vs30, pga_rock, imt)
mean = np.log((10 ** mean) * 1e-2 / g)
if imt.period == 4.0:
mean /= 0.550
stddevs = self._get_stddevs(C, stddev_types, sites.vs30.shape[0])
return mean, stddevs | def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types) | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. | 4.346504 | 4.350676 | 0.999041 |
# fix hypocentral depth to 20 km. Create new rupture context to avoid
# changing the original one
new_rup = copy.deepcopy(rup)
new_rup.hypo_depth = 20.
mean, stddevs = super().get_mean_and_stddevs(
sites, new_rup, dists, imt, stddev_types)
return mean, stddevs | def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types) | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
Call super class method with hypocentral depth fixed at 20 km | 3.341514 | 2.911119 | 1.147845 |
Sbc, Sc, Sd, Se = cls._compute_site_class_dummy_variables(vs30)
sl = cls._compute_soil_linear_factor(pga_rock, imt)
return (
C['c5'] * sl * Sbc * 0.5 +
C['c5'] * sl * Sc +
C['c6'] * sl * Sd +
C['c7'] * sl * Se
) | def _compute_soil_amplification(cls, C, vs30, pga_rock, imt) | Compute soil amplification (5th, 6th, and 7th terms in equation 1,
page 1706) and add the B/C site condition as implemented by NSHMP. | 4.906426 | 4.371977 | 1.122244 |
Sbc = np.zeros_like(vs30)
Sc = np.zeros_like(vs30)
Sd = np.zeros_like(vs30)
Se = np.zeros_like(vs30)
Sbc[vs30 > 760.] = 1
Sc[(vs30 > 360) & (vs30 <= 760)] = 1
Sd[(vs30 >= 180) & (vs30 <= 360)] = 1
Se[vs30 < 180] = 1
return Sbc, Sc, Sd, Se | def _compute_site_class_dummy_variables(cls, vs30) | Extend
:meth:`AtkinsonBoore2003SInter._compute_site_class_dummy_variables`
and includes dummy variable for B/C site conditions (vs30 > 760.) | 1.984975 | 1.894274 | 1.047882 |
return AtkinsonBoore2003SInterNSHMP2008._compute_soil_amplification(
C, vs30, pga_rock, imt) | def _compute_soil_amplification(cls, C, vs30, pga_rock, imt) | Compute soil amplification (5th, 6th, and 7th terms in equation 1,
page 1706) and add the B/C site condition as implemented by NSHMP.
Call
:meth:`AtkinsonBoore2003SInterNSHMP2008._compute_soil_amplification` | 4.584463 | 2.343098 | 1.956582 |
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
# Compute SA with primed coeffs and PGA with both unprimed and
# primed coeffs
C = self.COEFFS_PRIMED[imt]
C_PGA = self.COEFFS_PRIMED[PGA()]
C_PGA_unprimed = self.COEFFS_UNPRIMED[PGA()]
# Get S term to determine if consider site term is applied
S = self._get_site_class(sites)
# Abrahamson and Silva (1997) hanging wall term. This is not used
# in the latest version of GMPE but is defined in functional form in
# the paper so we keep it here as a placeholder
f4HW = self._compute_f4(C, rup.mag, dists.rrup)
# Flags for rake angles
CN, CR = self._get_fault_mechanism_flags(rup.rake)
# Get volcanic path distance which Rvol=0 for current implementation
# of McVerry2006Asc, but kept here as placeholder for future use
rvol = self._get_volcanic_path_distance(dists.rrup)
# Get delta_C and delta_D terms for site class
delta_C, delta_D = self._get_deltas(sites)
# Compute lnPGA_ABCD primed
lnPGAp_ABCD = self._compute_mean(C_PGA, S, rup.mag, dists.rrup, rvol,
rup.hypo_depth, CN, CR, f4HW,
delta_C, delta_D)
# Compute lnPGA_ABCD unprimed
lnPGA_ABCD = self._compute_mean(C_PGA_unprimed, S, rup.mag, dists.rrup,
rvol, rup.hypo_depth, CN, CR, f4HW,
delta_C, delta_D)
# Compute lnSA_ABCD
lnSAp_ABCD = self._compute_mean(C, S, rup.mag, dists.rrup, rvol,
rup.hypo_depth, CN, CR, f4HW,
delta_C, delta_D)
# Stage 3: Equation 6 SA_ABCD(T). This is lnSA_ABCD
# need to calculate final lnSA_ABCD from non-log values but return log
mean = np.log(np.exp(lnSAp_ABCD) *
(np.exp(lnPGA_ABCD) / np.exp(lnPGAp_ABCD)))
# Compute standard deviations
C_STD = self.COEFFS_STD[imt]
stddevs = self._get_stddevs(
C_STD, rup.mag, stddev_types, sites
)
return mean, stddevs | def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types) | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. | 4.200213 | 4.215809 | 0.9963 |
# Stage 1: compute PGA_ABCD and PGA'_ABCD which are then used in
# equation 6
# Equation 1 PGA unprimed version
lnSA_AB = self._compute_mean_on_rock(C, mag, rrup, rvol, hypo_depth,
CN, CR, f4HW)
# Equation 4 PGA unprimed version
lnSA_ABCD = lnSA_AB + S *\
self._compute_nonlinear_soil_term(C, lnSA_AB, delta_C, delta_D)
return lnSA_ABCD | def _compute_mean(self, C, S, mag, rrup, rvol, hypo_depth, CN, CR, f4HW,
delta_C, delta_D) | Compute mean value on site class A,B,C,D (equation 4)
returns lnSA_ABCD | 7.108525 | 5.671859 | 1.253297 |
lnSA_AB = (
# line 1 of equation 1
C['c1'] + C['c4as'] * (mag - 6) +
# line 2
C['c3as'] * (8.5 - mag) ** 2 +
# line 3
C['c5'] * rrup +
# line 3 and 4
(C['c8'] + C['c6as'] * (mag - 6)) *
np.log((rrup ** 2 + C['c10as'] ** 2) ** 0.5) +
# line 5
C['c46'] * rvol +
# line 6
C['c32'] * CN + C['c33as'] * CR + f4HW
)
return lnSA_AB | def _compute_mean_on_rock(self, C, mag, rrup, rvol, hypo_depth, CN, CR,
f4HW) | Compute mean value on site class A/B (equation 1 on page 22) | 3.708462 | 3.409449 | 1.087701 |
lnSA_CD = (
# line 1 equation 4 without first term (lnSA_AB)
C['c29'] * delta_C +
# line 2 and 3
(C['c30as'] * np.log(np.exp(lnSA_AB) + 0.03) + C['c43']) * delta_D
)
return lnSA_CD | def _compute_nonlinear_soil_term(self, C, lnSA_AB, delta_C, delta_D) | Compute mean value on site class C/D (equation 4 on page 22 without
the first term) | 6.746306 | 5.562162 | 1.212893 |
num_sites = sites.vs30.size
sigma_intra = np.zeros(num_sites)
# interevent stddev
tau = sigma_intra + C['tau']
# intraevent std (equations 8a-8c page 29)
if mag < 5.0:
sigma_intra += C['sigmaM6'] - C['sigSlope']
elif 5.0 <= mag < 7.0:
sigma_intra += C['sigmaM6'] + C['sigSlope'] * (mag - 6)
else:
sigma_intra += C['sigmaM6'] + C['sigSlope']
std = []
for stddev_type in stddev_types:
if stddev_type == const.StdDev.TOTAL:
# equation 9 page 29
std += [np.sqrt(sigma_intra**2 + tau**2)]
elif stddev_type == const.StdDev.INTRA_EVENT:
std.append(sigma_intra)
elif stddev_type == const.StdDev.INTER_EVENT:
std.append(tau)
return std | def _get_stddevs(self, C, mag, stddev_types, sites) | Return standard deviation as defined on page 29 in
equation 8a,b,c and 9. | 3.194693 | 2.933187 | 1.089154 |
vs30 = sites.vs30
S = np.zeros_like(vs30)
S[vs30 <= 760] = 1
return S | def _get_site_class(self, sites) | Return site class flag (0 if vs30 > 760, that is rock, or 1 if vs30 <=
760, that is deep soil) | 4.629903 | 2.784476 | 1.662756 |
CN, CR = 0, 0
# Pure Normal: rake = -90
if rake > -147 and rake < -33:
CN = -1
# Pure Reverse: rake = 90
if rake > 67 and rake < 123:
CR = 1
# Pure Oblique Reverse: rake = 45
if rake > 33 and rake < 66:
CR = 0.5
return CN, CR | def _get_fault_mechanism_flags(self, rake) | Return the fault mechanism flag CN and CR, page 23
CN = -1 for normal (-146<rake<-33), 0 otherwise
CR = 0.5 for reverse-oblique (33<rake<66), 1 for reverse (67<rake<123)
and 0 otherwise | 5.049485 | 2.775882 | 1.819056 |
vs30 = sites.vs30
delta_C = np.zeros(len(vs30))
delta_C[(vs30 >= 360) & (vs30 < 760)] = 1
delta_D = np.zeros(len(vs30))
delta_D[vs30 < 360] = 1
return delta_C, delta_D | def _get_deltas(self, sites) | Return delta's for equation 4
delta_C = 1 for site class C (360<=Vs30<760), 0 otherwise
delta_D = 1 for site class D (Vs30<=360), 0 otherwise | 2.701598 | 1.759435 | 1.535492 |
# Define subduction flag (page 23)
# SI=1 for subduction interface, 0 otherwise
# DS=1 for subduction intraslab, 0 otherwise
SI = 0
DS = 1
lnSA_AB = (
# line 1 and 2 of equation 2
C['c11'] + (C['c12y'] + (C['c15'] - C['c17']) * C['c19y']) *
(mag - 6) +
# line 3
C['c13y'] * (10 - mag) ** 3 +
# line 4
C['c17'] * np.log(rrup + C['c18y'] * np.exp(C['c19y'] * mag)) +
# line 5
C['c20'] * hypo_depth + C['c24'] * SI +
# line 6
C['c46'] * rvol * (1 - DS)
)
return lnSA_AB | def _compute_mean_on_rock(self, C, mag, rrup, rvol, hypo_depth, CN, CR,
f4HW) | Compute mean value on site class A/B (equation 2 on page 22) | 4.613979 | 4.296272 | 1.073949 |
siteclass = sites.siteclass
delta_C = np.zeros_like(siteclass, dtype=np.float)
delta_C[siteclass == b'C'] = 1
delta_D = np.zeros_like(siteclass, dtype=np.float)
delta_D[siteclass == b'D'] = 1
return delta_C, delta_D | def _get_deltas(self, sites) | Return delta's for equation 4
delta_C = 1 for site class C, 0 otherwise
delta_D = 1 for site class D, 0 otherwise | 3.02299 | 2.063847 | 1.464736 |
siteclass = sites.siteclass
S = np.zeros_like(siteclass, dtype=np.float)
S[(siteclass == b'C') | (siteclass == b'D')] = 1
return S | def _get_site_class(self, sites) | Return site class flag (0 if class A or B, that is rock, or 1 if
class C or D). | 5.612051 | 4.431824 | 1.266307 |
d1 = np.sqrt(50. ** 2 + 6. ** 2)
d = np.sqrt(rjb ** 2 + 6 ** 2)
mean = np.zeros_like(rjb)
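# magnitude-dependent terms, applied at all distances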
mean += (
C['a1'] + C['a2'] * (mag - 6.4) +
C['a7'] * (8.5 - mag) ** 2
)
idx = rjb < 50.
mean[idx] += (
C['a3'] * np.log(d[idx]) +
C['a4'] * (mag - 6.4) * np.log(d[idx]) +
C['a5'] * rjb[idx]
)
idx = rjb >= 50.
mean[idx] += (
C['a3'] * np.log(d1) +
C['a4'] * (mag - 6.4) * np.log(d[idx]) +
C['a5'] * rjb[idx] + C['a6'] * (np.log(d[idx]) - np.log(d1))
)
return mean | def _compute_mean(self, C, mag, rjb) | Compute and return mean value (table 8, page 8) | 2.327327 | 2.309294 | 1.007809 |
assets_by_taxo = AccumDict(group_array(assets, 'taxonomy'))
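# indices restoring the original asset ordering (by 'ordinal') when outputs are reassembled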
assets_by_taxo.idxs = numpy.argsort(numpy.concatenate([
a['ordinal'] for a in assets_by_taxo.values()]))
assets_by_taxo.eps = {}
if epspath is None: # no epsilons
return assets_by_taxo
# otherwise read the epsilons and group them by taxonomy
with hdf5.File(epspath, 'r') as h5:
dset = h5['epsilon_matrix']
for taxo, assets in assets_by_taxo.items():
lst = [dset[aid] for aid in assets['ordinal']]
assets_by_taxo.eps[taxo] = numpy.array(lst)
return assets_by_taxo | def get_assets_by_taxo(assets, epspath=None) | :param assets: an array of assets
:param epspath: hdf5 file where the epsilons are (or None)
:returns: assets_by_taxo with attributes eps and idxs | 3.80212 | 3.35174 | 1.134372 |
assets_by_taxo = group_array(asset_array, 'taxonomy')
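# epsilon matrix: one row per asset (indexed by 'ordinal'), one column per sample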
eps = numpy.zeros((len(asset_array), num_samples), numpy.float32)
for taxonomy, assets in assets_by_taxo.items():
shape = (len(assets), num_samples)
logging.info('Building %s epsilons for taxonomy %s', shape, taxonomy)
zeros = numpy.zeros(shape)
epsilons = scientific.make_epsilons(zeros, seed, correlation)
for asset, epsrow in zip(assets, epsilons):
eps[asset['ordinal']] = epsrow
return eps | def make_eps(asset_array, num_samples, seed, correlation) | :param asset_array: an array of assets
:param int num_samples: the number of ruptures
:param int seed: a random seed
:param float correlation: the correlation coefficient
:returns: epsilons matrix of shape (num_assets, num_samples) | 3.702617 | 4.058822 | 0.912239 |
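A small sketch of how make_eps could be exercised; the structured dtype below is illustrative (only the 'taxonomy' and 'ordinal' fields are actually used by the function):
import numpy
assets = numpy.zeros(3, numpy.dtype([('ordinal', numpy.uint32), ('taxonomy', numpy.uint16)]))
assets['ordinal'] = [0, 1, 2]
assets['taxonomy'] = [1, 1, 2]
eps = make_eps(assets, num_samples=10, seed=42, correlation=0.0)  # -> shape (3, 10)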
if oq.ignore_covs or not riskmodel.covs:
return
A = len(assetcol)
hdf5path = dstore.hdf5cache()
logging.info('Storing the epsilon matrix in %s', hdf5path)
if oq.calculation_mode == 'scenario_risk':
eps = make_eps(assetcol.array, E, oq.master_seed, oq.asset_correlation)
else: # event based
if oq.asset_correlation:
numpy.random.seed(oq.master_seed)
eps = numpy.array([numpy.random.normal(size=E)] * A)
else:
seeds = oq.master_seed + numpy.arange(E)
eps = numpy.zeros((A, E), F32)
for i, seed in enumerate(seeds):
numpy.random.seed(seed)
eps[:, i] = numpy.random.normal(size=A)
with hdf5.File(hdf5path) as cache:
cache['epsilon_matrix'] = eps
return hdf5path | def cache_epsilons(dstore, oq, assetcol, riskmodel, E) | Do nothing if there are no coefficients of variation or ignore_covs is
set. Otherwise, generate an epsilon matrix of shape (A, E), save it
in the cache file and return the path to it. | 4.280031 | 3.637666 | 1.176587 |
rlzi, sid, imt = key.split('/')
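# strip the 'rlz-' and 'sid-' prefixes before casting to int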
return int(rlzi[4:]), int(sid[4:]), imt | def str2rsi(key) | Convert a string of the form 'rlz-XXXX/sid-YYYY/ZZZ'
into a triple (XXXX, YYYY, ZZZ) | 11.323089 | 5.522185 | 2.050473 |
oqparam = dstore['oqparam']
tmap = (dstore['taxonomy_mapping'] if 'taxonomy_mapping' in dstore
else {})
crm = dstore.getitem('risk_model')
# building dictionaries riskid -> loss_type -> risk_func
fragdict, vulndict, consdict, retrodict = (
AccumDict(), AccumDict(), AccumDict(), AccumDict())
fragdict.limit_states = crm.attrs['limit_states']
for quoted_id, rm in crm.items():
riskid = unquote_plus(quoted_id)
fragdict[riskid] = {}
vulndict[riskid] = {}
consdict[riskid] = {}
retrodict[riskid] = {}
for lt_kind in rm:
lt, kind = lt_kind.rsplit('-', 1)
rf = dstore['risk_model/%s/%s' % (quoted_id, lt_kind)]
if kind == 'consequence':
consdict[riskid][lt, kind] = rf
elif kind == 'fragility': # rf is a FragilityFunctionList
try:
rf = rf.build(
fragdict.limit_states,
oqparam.continuous_fragility_discretization,
oqparam.steps_per_interval)
except ValueError as err:
raise ValueError('%s: %s' % (riskid, err))
fragdict[riskid][lt, kind] = rf
else: # rf is a vulnerability function
rf.init()
if lt.endswith('_retrofitted'):
# strip _retrofitted, since len('_retrofitted') = 12
retrodict[riskid][lt[:-12], kind] = rf
else:
vulndict[riskid][lt, kind] = rf
return CompositeRiskModel(
oqparam, tmap, fragdict, vulndict, consdict, retrodict) | def read(cls, dstore) | :param dstore: a DataStore instance
:returns: a :class:`CompositeRiskModel` instance | 4.855799 | 4.526121 | 1.072839 |
# .taxonomy must be set by the engine
tdict = {taxo: idx for idx, taxo in enumerate(self.taxonomy)}
return tdict | def taxonomy_dict(self) | :returns: a dict taxonomy string -> taxonomy index | 9.521399 | 7.190211 | 1.324217 |
extra_imts = set()
for taxonomy in self.taxonomies:
for (lt, kind), rf in self[taxonomy].risk_functions.items():
if rf.imt not in imts:
extra_imts.add(rf.imt)
return extra_imts | def get_extra_imts(self, imts) | Returns the extra IMTs in the risk functions, i.e. the ones not in
the `imts` set (the set of IMTs for which there is hazard). | 5.166289 | 4.14899 | 1.245192 |
lst = [('user_provided', numpy.bool)]
for cp in self.curve_params:
lst.append((cp.loss_type, F32, len(cp.ratios)))
loss_ratios = numpy.zeros(1, numpy.dtype(lst))
for cp in self.curve_params:
loss_ratios['user_provided'] = cp.user_provided
loss_ratios[cp.loss_type] = tuple(cp.ratios)
return loss_ratios | def get_loss_ratios(self) | :returns: a 1-dimensional composite array with loss ratios by loss type | 4.442107 | 3.868895 | 1.148159 |
A = sum(len(assets) for assets in assets_by_site)
L = len(self.loss_types)
D = len(self.damage_states)
out = numpy.zeros((A, L, 1, D + 1), F32)
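# slots 0..D-1 hold the expected number of buildings per damage state, slot D the consequence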
for assets, gmv in zip(assets_by_site, gmf):
group = group_array(assets, 'taxonomy')
for taxonomy, assets in group.items():
for l, loss_type in enumerate(self.loss_types):
fracs = self[taxonomy](loss_type, assets, [gmv])
for asset, frac in zip(assets, fracs):
dmg = asset['number'] * frac[0, :D]
csq = asset['value-' + loss_type] * frac[0, D]
out[asset['ordinal'], l, 0, :D] = dmg
out[asset['ordinal'], l, 0, D] = csq
return out | def get_dmg_csq(self, assets_by_site, gmf) | :returns:
an array of shape (A, L, 1, D + 1) with the number of buildings
in each damage state for each asset and loss type | 4.246821 | 3.385078 | 1.254571 |
self.monitor = monitor
hazard_getter = riskinput.hazard_getter
if hazard is None:
with monitor('getting hazard'):
hazard_getter.init()
hazard = hazard_getter.get_hazard()
sids = hazard_getter.sids
assert len(sids) == 1
with monitor('computing risk', measuremem=False):
# this approach is slow for event_based_risk since a lot of
# small arrays are passed (one per realization) instead of
# a long array with all realizations; ebrisk does the right
# thing since it calls get_output directly
assets_by_taxo = get_assets_by_taxo(riskinput.assets, epspath)
for rlzi, haz in sorted(hazard[sids[0]].items()):
out = self.get_output(assets_by_taxo, haz, rlzi)
yield out | def gen_outputs(self, riskinput, monitor, epspath=None, hazard=None) | Group the assets per taxonomy and compute the outputs by using the
underlying riskmodels. Yield one output per realization.
:param riskinput: a RiskInput instance
:param monitor: a monitor object used to measure the performance | 7.487239 | 6.901413 | 1.084885 |
if isinstance(haz, numpy.ndarray):
# NB: in GMF-based calculations the order in which
# the gmfs are stored is random since it depends on
# which hazard task ends first; here we reorder
# the gmfs by event ID; this is convenient in
# general and mandatory for the case of
# VulnerabilityFunctionWithPMF, otherwise the
# sample method would receive the means in random
# order and produce random results even if the
# seed is set correctly; very tricky indeed! (MS)
haz.sort(order='eid')
eids = haz['eid']
data = haz['gmv'] # shape (E, M)
elif not haz: # no hazard for this site
eids = numpy.arange(1)
data = []
else: # classical
eids = []
data = haz # shape M
dic = dict(eids=eids)
if rlzi is not None:
dic['rlzi'] = rlzi
for l, lt in enumerate(self.loss_types):
ls = []
for taxonomy, assets_ in assets_by_taxo.items():
if len(assets_by_taxo.eps):
epsilons = assets_by_taxo.eps[taxonomy][:, eids]
else: # no CoVs
epsilons = ()
rm = self[taxonomy]
if len(data) == 0:
dat = [0]
elif len(eids): # gmfs
dat = data[:, rm.imti[lt]]
else: # hcurves
dat = data[rm.imti[lt]]
ls.append(rm(lt, assets_, dat, eids, epsilons))
arr = numpy.concatenate(ls)
dic[lt] = arr[assets_by_taxo.idxs] if len(arr) else arr
return hdf5.ArrayWrapper((), dic) | def get_output(self, assets_by_taxo, haz, rlzi=None) | :param assets_by_taxo: a dictionary taxonomy index -> assets on a site
:param haz: an array or a dictionary of hazard on that site
:param rlzi: if given, a realization index | 8.091201 | 8.128737 | 0.995382 |
new = copy.copy(self)
new.taxonomies = sorted(taxonomies)
new._riskmodels = {}
for riskid, rm in self._riskmodels.items():
if riskid in taxonomies:
new._riskmodels[riskid] = rm
rm.compositemodel = new
return new | def reduce(self, taxonomies) | :param taxonomies: a set of taxonomies
:returns: a new CompositeRiskModel reduced to the given taxonomies | 4.666321 | 3.439779 | 1.356576 |
'''
Returns an ordered dictionary with the available MFD classes
keyed by class name
'''
mfds = {}
for fname in os.listdir(os.path.dirname(__file__)):
if fname.endswith('.py'):
modname, _ext = os.path.splitext(fname)
mod = importlib.import_module(
'openquake.hmtk.faults.mfd.' + modname)
for cls in mod.__dict__.values():
if inspect.isclass(cls) and issubclass(cls, BaseMFDfromSlip):
mfds[cls.__name__] = cls
return dict((k, mfds[k]) for k in sorted(mfds)) | def get_available_mfds() | Returns an ordered dictionary with the available MFD classes
keyed by class name | 3.094826 | 2.37636 | 1.302339 |
sites.vs30 = 700 * np.ones(len(sites.vs30))
mean, stddevs = super().get_mean_and_stddevs(
sites, rup, dists, imt, stddev_types)
tau_ss = 'tauC'
log_phi_ss = 1.00
C = ZhaoEtAl2006AscSWISS05.COEFFS_ASC
mean, stddevs = _apply_adjustments(
C, self.COEFFS_FS_ROCK[imt], tau_ss,
mean, stddevs, sites, rup, dists.rrup, imt, stddev_types,
log_phi_ss)
return mean, stddevs | def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types) | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. | 5.489582 | 5.911612 | 0.92861 |
'''
Creates the classic Stepp (1972) plots for a completed Stepp analysis,
and exports the figure to a file.
:param model:
Completed Stepp (1972) analysis as instance of :class:
`openquake.hmtk.seismicity.completeness.comp_stepp_1971.Stepp1971`
:param string filename:
Name of output file
:param string filetype:
Type of file (from list supported by matplotlib)
:param int dpi:
Resolution (dots per inch) of output file
'''
if ax is None:
fig, ax = plt.subplots(figsize=figure_size)
else:
fig = ax.get_figure()
if filename and os.path.exists(filename):
raise IOError('File already exists!')
# get colours from the current axes, so the user can configure them before calling
prop_cycler = ax._get_lines.prop_cycler
prop_cyclers = itertools.tee(itertools.cycle(prop_cycler), 3)
marker_cyclers = itertools.tee(itertools.cycle(VALID_MARKERS), 3)
# plot observed Sigma lambda
for i, (min_mag, max_mag) in enumerate(zip(model.magnitude_bin[:-1],
model.magnitude_bin[1:])):
label = '(%g, %g]: %d' % (min_mag, max_mag,
model.completeness_table[i, 0])
colour = next(prop_cyclers[0])['color']
ax.loglog(model.time_values, model.sigma[:, i],
linestyle='none',
marker=next(marker_cyclers[0]),
markersize=3,
markerfacecolor=colour,
markeredgecolor=colour,
label=label)
# plot expected Poisson rate
for i in range(0, len(model.magnitude_bin) - 1):
ax.loglog(model.time_values, model.model_line[:, i],
color=next(prop_cyclers[1])['color'],
linewidth=0.5)
# mark breaks from expected rate
for i in range(0, len(model.magnitude_bin) - 1):
colour = next(prop_cyclers[2])['color']
if np.any(np.isnan(model.model_line[:, i])):
continue
xmarker = model.end_year - model.completeness_table[i, 0]
knee = model.model_line[:, i] > 0.
ymarker = 10.0 ** np.interp(np.log10(xmarker),
np.log10(model.time_values[knee]),
np.log10(model.model_line[knee, i]))
ax.loglog(xmarker, ymarker,
marker=next(marker_cyclers[2]),
markerfacecolor='white',
markeredgecolor=colour)
ax.legend(loc='center left',
bbox_to_anchor=(1, 0.5), frameon=False, fontsize='small')
ax.set_xlabel('Time (years)')
ax.set_ylabel("$\\sigma_{\\lambda} = \\sqrt{\\lambda} / \\sqrt{T}$")
ax.autoscale(enable=True, axis='both', tight=True)
# save figure to file
if filename is not None:
fig.savefig(filename, dpi=dpi, format=filetype) | def create_stepp_plot(model, figure_size=(8, 6),
filename=None, filetype='png', dpi=300, ax=None) | Creates the classic Stepp (1972) plots for a completed Stepp analysis,
and exports the figure to a file.
:param model:
Completed Stepp (1972) analysis as instance of :class:
`openquake.hmtk.seismicity.completeness.comp_stepp_1971.Stepp1971`
:param string filename:
Name of output file
:param string filetype:
Type of file (from list supported by matplotlib)
:param int dpi:
Resolution (dots per inch) of output file | 3.06376 | 2.421699 | 1.265128 |
'''Checks that the config file contains all required parameters
:param dict config:
Configuration file
:returns:
Configuration file with all correct parameters
'''
if 'tolerance' not in config.keys() or not config['tolerance']:
config['tolerance'] = 1E-5
if not config.get('maximum_iterations', None):
config['maximum_iterations'] = 1000
mmin_obs = np.min(data['magnitude'])
if config.get('input_mmin', 0) < mmin_obs:
config['input_mmin'] = mmin_obs
if fabs(config['b-value']) < 1E-7:
config['b-value'] = 1E-7
return config | def check_config(config, data) | Checks that the config file contains all required parameters
:param dict config:
Configuration file
:returns:
Configuration file with all correct parameters | 3.985154 | 3.257587 | 1.223345 |
values = value.replace(',', ' ').split()
for val in values:
if val not in disagg.pmf_map:
raise ValueError('Invalid disagg output: %s' % val)
return values | def disagg_outputs(value) | Validate disaggregation outputs. For instance
>>> disagg_outputs('TRT Mag_Dist')
['TRT', 'Mag_Dist']
>>> disagg_outputs('TRT, Mag_Dist')
['TRT', 'Mag_Dist'] | 5.703255 | 6.172169 | 0.924028 |
if not value.startswith('['): # assume the GSIM name
value = '[%s]' % value
[(gsim_name, kwargs)] = toml.loads(value).items()
minimum_distance = float(kwargs.pop('minimum_distance', 0))
if gsim_name == 'FromFile':
return FromFile()
try:
gsim_class = registry[gsim_name]
except KeyError:
raise ValueError('Unknown GSIM: %s' % gsim_name)
gs = gsim_class(**kwargs)
gs._toml = '\n'.join(line.strip() for line in value.splitlines())
gs.minimum_distance = minimum_distance
return gs | def gsim(value) | Convert a string in TOML format into a GSIM instance
>>> gsim('[BooreAtkinson2011]')
[BooreAtkinson2011] | 4.156522 | 3.762156 | 1.104825 |
def composed_validator(value):
out = value
for validator in reversed(validators):
out = validator(out)
return out
composed_validator.__name__ = 'compose(%s)' % ','.join(
val.__name__ for val in validators)
return composed_validator | def compose(*validators) | Implement composition of validators. For instance
>>> utf8_not_empty = compose(utf8, not_empty) | 2.808859 | 3.284716 | 0.85513 |
if not value:
return ()
try:
return tuple(map(int, value.split(',')))
except Exception:
raise ValueError('Invalid hazard_id %r' % value) | def hazard_id(value) | >>> hazard_id('')
()
>>> hazard_id('-1')
(-1,)
>>> hazard_id('42')
(42,)
>>> hazard_id('42,3')
(42, 3)
>>> hazard_id('42,3,4')
(42, 3, 4)
>>> hazard_id('42:3')
Traceback (most recent call last):
...
ValueError: Invalid hazard_id '42:3' | 3.474792 | 2.94441 | 1.180132 |
try:
if isinstance(value, bytes):
return value.decode('utf-8')
else:
return value
except Exception:
raise ValueError('Not UTF-8: %r' % value) | def utf8(value) | r"""
Check that the string is UTF-8. Returns an encode bytestring.
>>> utf8(b'\xe0') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Not UTF-8: ... | 3.749802 | 3.025699 | 1.239318 |
names = value.replace(',', ' ').split()
for n in names:
try:
name(n)
except ValueError:
raise ValueError('List of names containing an invalid name:'
' %s' % n)
return names | def namelist(value) | :param value: input string
:returns: list of identifiers separated by whitespace or commas
>>> namelist('a,b')
['a', 'b']
>>> namelist('a1 b_2\t_c')
['a1', 'b_2', '_c']
>>> namelist('a1 b_2 1c')
Traceback (most recent call last):
...
ValueError: List of names containing an invalid name: 1c | 4.86324 | 3.926738 | 1.238493 |
lon = round(float_(value), 5)
if lon > 180.:
raise ValueError('longitude %s > 180' % lon)
elif lon < -180.:
raise ValueError('longitude %s < -180' % lon)
return lon | def longitude(value) | :param value: input string
:returns: longitude float, rounded to 5 digits, i.e. 1 meter maximum
>>> longitude('0.123456')
0.12346 | 2.719698 | 3.133595 | 0.867916 |
lat = round(float_(value), 5)
if lat > 90.:
raise ValueError('latitude %s > 90' % lat)
elif lat < -90.:
raise ValueError('latitude %s < -90' % lat)
return lat | def latitude(value) | :param value: input string
:returns: latitude float, rounded to 5 digits, i.e. 1 meter maximum
>>> latitude('-0.123456')
-0.12346 | 2.864146 | 3.452708 | 0.829536 |
lon, lat = value.split()
return longitude(lon), latitude(lat) | def lon_lat(value) | :param value: a pair of coordinates
:returns: a tuple (longitude, latitude)
>>> lon_lat('12 14')
(12.0, 14.0) | 5.417974 | 11.251772 | 0.481522 |
lst = value.split()
dim = len(lst)
if dim == 2:
return longitude(lst[0]), latitude(lst[1]), 0.
elif dim == 3:
return longitude(lst[0]), latitude(lst[1]), depth(lst[2])
else:
raise ValueError('Invalid point format: %s' % value) | def point(value) | :param value: a tuple of coordinates as a string (2D or 3D)
:returns: a validated triple of floats (lon, lat, depth) | 2.638495 | 2.64401 | 0.997914 |
if not value.strip():
raise ValueError('Empty list of coordinates: %r' % value)
points = []
pointset = set()
for i, line in enumerate(value.split(','), 1):
pnt = point(line)
if pnt[:2] in pointset:
raise ValueError("Found overlapping site #%d, %s" % (i, line))
pointset.add(pnt[:2])
points.append(pnt)
return points | def coordinates(value) | Convert a non-empty string into a list of lon-lat coordinates.
>>> coordinates('')
Traceback (most recent call last):
...
ValueError: Empty list of coordinates: ''
>>> coordinates('1.1 1.2')
[(1.1, 1.2, 0.0)]
>>> coordinates('1.1 1.2, 2.2 2.3')
[(1.1, 1.2, 0.0), (2.2, 2.3, 0.0)]
>>> coordinates('1.1 1.2 -0.4, 2.2 2.3 -0.5')
[(1.1, 1.2, -0.4), (2.2, 2.3, -0.5)]
>>> coordinates('0 0 0, 0 0 -1')
Traceback (most recent call last):
...
ValueError: Found overlapping site #2, 0 0 -1 | 4.143063 | 2.806466 | 1.476256 |
points = ['%s %s' % (lon, lat) for lon, lat, dep in coordinates(value)]
# close the linear polygon ring by appending the first coord to the end
points.append(points[0])
return 'POLYGON((%s))' % ', '.join(points) | def wkt_polygon(value) | Convert a string with a comma separated list of coordinates into
a WKT polygon, by closing the ring. | 4.810816 | 4.40069 | 1.093196 |
i = int(not_empty(value))
if i < 0:
raise ValueError('integer %d < 0' % i)
return i | def positiveint(value) | :param value: input string
:returns: positive integer | 4.638084 | 5.210243 | 0.890186 |
f = float(not_empty(value))
if f < 0:
raise ValueError('float %s < 0' % f)
return f | def positivefloat(value) | :param value: input string
:returns: positive float | 4.741292 | 5.129272 | 0.92436 |
value = value.strip().lower()
try:
return _BOOL_DICT[value]
except KeyError:
raise ValueError('Not a boolean: %s' % value) | def boolean(value) | :param value: input string such as '0', '1', 'true', 'false'
:returns: boolean
>>> boolean('')
False
>>> boolean('True')
True
>>> boolean('false')
False
>>> boolean('t')
Traceback (most recent call last):
...
ValueError: Not a boolean: t | 3.626246 | 4.512857 | 0.803537 |
probs = list(map(probability, value.replace(',', ' ').split()))
if rows and cols:
probs = numpy.array(probs).reshape((len(rows), len(cols)))
return probs | def probabilities(value, rows=0, cols=0) | :param value: input string, comma separated or space separated
:param rows: the number of rows if the floats are in a matrix (0 otherwise)
:param cols: the number of columns if the floats are in a matrix (0 otherwise)
:returns: a list of probabilities
>>> probabilities('')
[]
>>> probabilities('1')
[1.0]
>>> probabilities('0.1 0.2')
[0.1, 0.2]
>>> probabilities('0.1, 0.2') # commas are ignored
[0.1, 0.2] | 4.013105 | 5.065287 | 0.792276 |
probs = probabilities(value)
if len(probs) < 2:
raise ValueError('Not enough probabilities, found %r' % value)
elif sorted(probs, reverse=True) != probs:
raise ValueError('The probabilities %s are not in decreasing order'
% value)
return probs | def decreasing_probabilities(value) | :param value: input string, comma separated or space separated
:returns: a list of decreasing probabilities
>>> decreasing_probabilities('1')
Traceback (most recent call last):
...
ValueError: Not enough probabilities, found '1'
>>> decreasing_probabilities('0.2 0.1')
[0.2, 0.1]
>>> decreasing_probabilities('0.1 0.2')
Traceback (most recent call last):
...
ValueError: The probabilities 0.1 0.2 are not in decreasing order | 3.92162 | 2.954002 | 1.327562 |
imts = []
for chunk in value.split(','):
imts.append(imt.from_string(chunk.strip()))
sorted_imts = sorted(imts, key=lambda im: getattr(im, 'period', 1))
if len(distinct(imts)) < len(imts):
raise ValueError('Duplicated IMTs in %s' % value)
if sorted_imts != imts:
raise ValueError('The IMTs are not sorted by period: %s' % value)
return [str(imt) for imt in imts] | def intensity_measure_types(value) | :param value: input string
:returns: non-empty list of Intensity Measure Type objects
>>> intensity_measure_types('PGA')
['PGA']
>>> intensity_measure_types('PGA, SA(1.00)')
['PGA', 'SA(1.0)']
>>> intensity_measure_types('SA(0.1), SA(0.10)')
Traceback (most recent call last):
...
ValueError: Duplicated IMTs in SA(0.1), SA(0.10)
>>> intensity_measure_types('SA(1), PGA')
Traceback (most recent call last):
...
ValueError: The IMTs are not sorted by period: SA(1), PGA | 3.244108 | 2.704994 | 1.199303 |
if len(imls) < 1:
raise ValueError('No imls for %s: %s' % (imt, imls))
elif imls != sorted(imls):
raise ValueError('The imls for %s are not sorted: %s' % (imt, imls))
elif len(distinct(imls)) < len(imls):
raise ValueError("Found duplicated levels for %s: %s" % (imt, imls))
elif imls[0] == 0 and imls[1] <= min_iml: # the cutoff min_iml is not below the second level
raise ValueError("The min_iml %s=%s is larger than the second level "
"for %s" % (imt, min_iml, imls))
elif imls[0] == 0 and imls[1] > min_iml: # apply the cutoff
imls[0] = min_iml | def check_levels(imls, imt, min_iml=1E-10) | Raise a ValueError if the given levels are invalid.
:param imls: a list of intensity measure and levels
:param imt: the intensity measure type
:param min_iml: minimum intensity measure level (default 1E-10)
>>> check_levels([0.1, 0.2], 'PGA') # ok
>>> check_levels([], 'PGA')
Traceback (most recent call last):
...
ValueError: No imls for PGA: []
>>> check_levels([0.2, 0.1], 'PGA')
Traceback (most recent call last):
...
ValueError: The imls for PGA are not sorted: [0.2, 0.1]
>>> check_levels([0.2, 0.2], 'PGA')
Traceback (most recent call last):
...
ValueError: Found duplicated levels for PGA: [0.2, 0.2] | 2.734244 | 2.349917 | 1.163549 |
dic = dictionary(value)
for imt_str, imls in dic.items():
norm_imt = str(imt.from_string(imt_str))
if norm_imt != imt_str:
dic[norm_imt] = imls
del dic[imt_str]
check_levels(imls, imt_str) # ValueError if the levels are invalid
return dic | def intensity_measure_types_and_levels(value) | :param value: input string
:returns: Intensity Measure Type and Levels dictionary
>>> intensity_measure_types_and_levels('{"SA(0.10)": [0.1, 0.2]}')
{'SA(0.1)': [0.1, 0.2]} | 4.33676 | 5.107164 | 0.849152 |
dic = dictionary(value)
for lt, ratios in dic.items():
for ratio in ratios:
if not 0 <= ratio <= 1:
raise ValueError('Loss ratio %f for loss_type %s is not in '
'the range [0, 1]' % (ratio, lt))
check_levels(ratios, lt) # ValueError if the levels are invalid
return dic | def loss_ratios(value) | :param value: input string
:returns: dictionary loss_type -> loss ratios
>>> loss_ratios('{"structural": [0.1, 0.2]}')
{'structural': [0.1, 0.2]} | 5.286747 | 5.613488 | 0.941794 |
if not (isinstance(n, int) and n > 0):
raise ValueError('n must be a positive integer, got %s' % n)
if x_min <= 0:
raise ValueError('x_min must be positive, got %s' % x_min)
if x_max <= x_min:
raise ValueError('x_max (%s) must be bigger than x_min (%s)' %
(x_max, x_min))
delta = numpy.log(x_max / x_min)
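# n values evenly spaced in log space from x_min to x_max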
return numpy.exp(delta * numpy.arange(n) / (n - 1)) * x_min | def logscale(x_min, x_max, n) | :param x_min: minumum value
:param x_max: maximum value
:param n: number of steps
:returns: an array of n values from x_min to x_max | 1.912931 | 2.095616 | 0.912825 |
if not value:
return {}
value = value.replace('logscale(', '("logscale", ') # dirty but quick
try:
dic = dict(ast.literal_eval(value))
except Exception:
raise ValueError('%r is not a valid Python dictionary' % value)
for key, val in dic.items():
try:
has_logscale = (val[0] == 'logscale')
except Exception: # no val[0]
continue
if has_logscale:
dic[key] = list(logscale(*val[1:]))
return dic | def dictionary(value) | :param value:
input string corresponding to a literal Python object
:returns:
the Python object
>>> dictionary('')
{}
>>> dictionary('{}')
{}
>>> dictionary('{"a": 1}')
{'a': 1}
>>> dictionary('"vs30_clustering: true"') # an error really done by a user
Traceback (most recent call last):
...
ValueError: '"vs30_clustering: true"' is not a valid Python dictionary
>>> dictionary('{"ls": logscale(0.01, 2, 5)}')
{'ls': [0.01, 0.03760603093086393, 0.14142135623730948, 0.5318295896944986, 1.9999999999999991]} | 4.485899 | 3.942667 | 1.137783 |
value = ast.literal_eval(value)
if isinstance(value, (int, float, list)):
return {'default': value}
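# otherwise use the value of the first key as the default, then merge in the full dictionary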
dic = {'default': value[next(iter(value))]}
dic.update(value)
return dic | def floatdict(value) | :param value:
input string corresponding to a literal Python number or dictionary
:returns:
a Python dictionary key -> number
>>> floatdict("200")
{'default': 200}
>>> text = "{'active shallow crust': 250., 'default': 200}"
>>> sorted(floatdict(text).items())
[('active shallow crust', 250.0), ('default', 200)] | 4.384219 | 5.603353 | 0.782428 |
dic = floatdict(value)
for trt, magdists in dic.items():
if isinstance(magdists, list): # could be a scalar otherwise
magdists.sort() # make sure the list is sorted by magnitude
for mag, dist in magdists: # validate the magnitudes
magnitude(mag)
return IntegrationDistance(dic) | def maximum_distance(value) | :param value:
input string corresponding to a valid maximum distance
:returns:
a IntegrationDistance mapping | 12.365262 | 10.651634 | 1.160879 |
value = value.strip()
if value not in SCALEREL:
raise ValueError(
"'%s' is not a recognized magnitude-scale relationship" % value)
return value | def mag_scale_rel(value) | :param value:
name of a Magnitude-Scale relationship in hazardlib
:returns:
the corresponding hazardlib object | 6.462636 | 8.064327 | 0.801386 |
probs = probabilities(value)
if abs(1.-sum(map(float, value.split()))) > 1e-12:
raise ValueError('The probabilities %s do not sum up to 1!' % value)
return [(p, i) for i, p in enumerate(probs)] | def pmf(value) | Comvert a string into a Probability Mass Function.
:param value:
a sequence of probabilities summing up to 1 (no commas)
:returns:
a list of pairs [(probability, index), ...] with index starting from 0
>>> pmf("0.157 0.843")
[(0.157, 0), (0.843, 1)] | 4.752439 | 5.033906 | 0.944086 |
weights = [n['weight'] for n in nodes_with_a_weight]
if abs(sum(weights) - 1.) > PRECISION:
raise ValueError('The weights do not sum up to 1: %s' % weights)
return nodes_with_a_weight | def check_weights(nodes_with_a_weight) | Ensure that the sum of the values is 1
:param nodes_with_a_weight: a list of Node objects with a weight attribute | 2.907097 | 3.25369 | 0.893477 |
probs = probabilities(value)
if abs(sum(probs) - 1.) > PRECISION:
raise ValueError('The weights do not sum up to 1: %s' % probs)
return probs | def weights(value) | Space-separated list of weights:
>>> weights('0.1 0.2 0.7')
[0.1, 0.2, 0.7]
>>> weights('0.1 0.2 0.8')
Traceback (most recent call last):
...
ValueError: The weights do not sum up to 1: [0.1, 0.2, 0.8] | 5.318249 | 4.679032 | 1.136613 |
check_weights(nodes)
data = []
for node in nodes:
data.append([node['alongStrike'], node['downDip'], node['weight']])
return numpy.array(data, float) | def hypo_list(nodes) | :param nodes: a hypoList node with N hypocenter nodes
:returns: a numpy array of shape (N, 3) with strike, dip and weight | 7.031784 | 5.667133 | 1.240801 |
check_weights(nodes)
data = []
for node in nodes:
data.append([slip_range(~node), node['weight']])
return numpy.array(data, float) | def slip_list(nodes) | :param nodes: a slipList node with N slip nodes
:returns: a numpy array of shape (N, 2) with slip angle and weight | 8.264439 | 8.34973 | 0.989785 |
values = value.split()
num_values = len(values)
if num_values % 3 and num_values % 2:
raise ValueError('Wrong number: nor pairs not triplets: %s' % values)
try:
return list(map(float_, values))
except Exception as exc:
raise ValueError('Found a non-float in %s: %s' % (value, exc)) | def posList(value) | :param value:
a string with the form `lon1 lat1 [depth1] ... lonN latN [depthN]`
without commas, where the depts are optional.
:returns:
a list of floats without other validations | 5.284957 | 5.468787 | 0.966386 |
return longitude(lon), latitude(lat), positivefloat(depth) | def point3d(value, lon, lat, depth) | This is used to convert nodes of the form
<hypocenter lon="LON" lat="LAT" depth="DEPTH"/>
:param value: None
:param lon: longitude string
:param lat: latitude string
:returns: a validated triple (lon, lat, depth) | 12.730621 | 32.073669 | 0.396918 |
a, b = value.split()
return positivefloat(a), float_(b) | def ab_values(value) | a and b values of the GR magnitude-scaling relation.
a is a positive float, b is just a float. | 15.154474 | 12.231612 | 1.23896 |
if '.' in value:
raise ValueError('There are decimal points in %s' % value)
values = value.replace(',', ' ').split()
if not values:
raise ValueError('Not a list of integers: %r' % value)
try:
ints = [int(float(v)) for v in values]
except Exception:
raise ValueError('Not a list of integers: %r' % value)
return ints | def integers(value) | :param value: input string
:returns: non-empty list of integers
>>> integers('1, 2')
[1, 2]
>>> integers(' ')
Traceback (most recent call last):
...
ValueError: Not a list of integers: ' ' | 2.900363 | 2.551702 | 1.136639 |
ints = integers(value)
for val in ints:
if val < 0:
raise ValueError('%d is negative in %r' % (val, value))
return ints | def positiveints(value) | >>> positiveints('1, -1')
Traceback (most recent call last):
...
ValueError: -1 is negative in '1, -1' | 4.275635 | 3.801983 | 1.12458 |
try:
start, stop = value.split(':')
start = ast.literal_eval(start)
stop = ast.literal_eval(stop)
if start is not None and stop is not None:
assert start < stop
except Exception:
raise ValueError('invalid slice: %s' % value)
return (start, stop) | def simple_slice(value) | >>> simple_slice('2:5')
(2, 5)
>>> simple_slice('0:None')
(0, None) | 2.542004 | 2.514603 | 1.010897 |
new = {}
for name, val in dic.items():
if name == 'vs30Type':
# avoid "Unrecognized parameter vs30Type"
new['vs30measured'] = val == 'measured'
elif name not in site.site_param_dt:
raise ValueError('Unrecognized parameter %s' % name)
else:
new[name] = val
return new | def site_param(dic) | Convert a dictionary site_model_param -> string into a dictionary
of valid casted site parameters. | 4.997629 | 5.095043 | 0.980881 |
res = {}
for name, text in dic.items():
try:
p = getattr(cls, name)
except AttributeError:
logging.warning('Ignored unknown parameter %s', name)
else:
res[name] = p.validator(text)
return res | def check(cls, dic) | Convert a dictionary name->string into a dictionary name->value
by converting the string. If the name does not correspond to a
known parameter, just ignore it and print a warning. | 3.929033 | 3.297664 | 1.19146 |
self = cls.__new__(cls)
for k, v in dic.items():
setattr(self, k, ast.literal_eval(v))
return self | def from_(cls, dic) | Build a new ParamSet from a dictionary of string-valued parameters
which are assumed to be already valid. | 2.763639 | 2.755599 | 1.002918 |
dic = self.__dict__
return [(k, repr(dic[k])) for k in sorted(dic)
if not k.startswith('_')] | def to_params(self) | Convert the instance dictionary into a sorted list of pairs
(name, valrepr) where valrepr is the string representation of
the underlying value. | 5.677846 | 4.061359 | 1.398016 |
# it is important to have the validator applied in a fixed order
valids = [getattr(self, valid)
for valid in sorted(dir(self.__class__))
if valid.startswith('is_valid_')]
for is_valid in valids:
if not is_valid():
docstring = '\n'.join(
line.strip() for line in is_valid.__doc__.splitlines())
doc = docstring.format(**vars(self))
raise ValueError(doc) | def validate(self) | Apply the `is_valid` methods to self and possibly raise a ValueError. | 4.798325 | 4.207634 | 1.140385 |
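The four rows above (check, from_, to_params, validate) belong to the same parameter-set pattern: validators named is_valid_* are applied in alphabetical order and a failing validator's docstring, formatted with the instance attributes, becomes the error message. The class below is a simplified stand-in written for illustration, not the original.

```python
class DemoParamSet:
    def __init__(self, a, b):
        self.a, self.b = a, b

    def is_valid_a_lt_b(self):
        """a={a} must be smaller than b={b}"""
        return self.a < self.b

    def validate(self):
        # collect validators in a fixed (alphabetical) order
        valids = [getattr(self, name) for name in sorted(dir(self.__class__))
                  if name.startswith('is_valid_')]
        for is_valid in valids:
            if not is_valid():
                doc = '\n'.join(line.strip()
                                for line in is_valid.__doc__.splitlines())
                raise ValueError(doc.format(**vars(self)))

DemoParamSet(1, 2).validate()    # passes silently
# DemoParamSet(3, 2).validate()  # would raise: a=3 must be smaller than b=2
```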
mag_idx = numpy.abs(mag - self.mags).argmin()
dists = []
for dist in repi:
repi_idx = numpy.abs(dist - self.repi).argmin()
dists.append(self.reqv[repi_idx, mag_idx])
return numpy.array(dists) | def get(self, repi, mag) | :param repi: an array of epicentral distances in the range self.repi
:param mag: a magnitude in the range self.mags
:returns: an array of equivalent distances | 3.310907 | 3.084985 | 1.073233 |
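A self-contained sketch of the nearest-neighbour lookup in get(); the mags/repi/reqv arrays are invented for the example, with reqv shaped (n_repi, n_mags) as the reqv[repi_idx, mag_idx] indexing above implies.

```python
import numpy

# Invented lookup table for illustration only.
mags = numpy.array([5.0, 6.0, 7.0])
repi = numpy.array([0.0, 10.0, 20.0, 30.0])
reqv = numpy.arange(12, dtype=float).reshape(4, 3)  # (n_repi, n_mags)

def get(repi_values, mag):
    # pick the closest magnitude column, then the closest distance row per site
    mag_idx = numpy.abs(mag - mags).argmin()
    return numpy.array([reqv[numpy.abs(dist - repi).argmin(), mag_idx]
                        for dist in repi_values])

print(get([4.0, 26.0], 6.2))  # [ 1. 10.] -> rows 0 and 3, column 1
```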
'''
Input core configuration parameters as specified in the
configuration file
:param dict mfd_conf:
Configuration dictionary containing the following attributes:
* 'Model_Weight' - Logic tree weight of model type (float)
* 'MFD_spacing' - Width of MFD bin (float)
* 'Minimum_Magnitude' - Minimum magnitude of activity rates (float)
* 'Maximum_Magnitude' - Characteristic magnitude (float)
(if not defined will use scaling relation)
* 'Maximum_Magnitude_Uncertainty' - Uncertainty on
maximum magnitude
(If not defined and the MSR has a sigma term then this will be
taken from sigma)
* 'Lower_Bound' - Lower bound in terms of number of sigma (float)
* 'Upper_Bound' - Upper bound in terms of number of sigma (float)
* 'Sigma' - Standard deviation (in magnitude units) of distribution
'''
self.mfd_model = 'Characteristic'
self.mfd_weight = mfd_conf['Model_Weight']
self.bin_width = mfd_conf['MFD_spacing']
self.mmin = None
self.mmax = None
self.mmax_sigma = None
self.lower_bound = mfd_conf['Lower_Bound']
self.upper_bound = mfd_conf['Upper_Bound']
self.sigma = mfd_conf['Sigma']
self.occurrence_rate = None | def setUp(self, mfd_conf) | Input core configuration parameters as specified in the
configuration file
:param dict mfd_conf:
Configuration dictionary containing the following attributes:
* 'Model_Weight' - Logic tree weight of model type (float)
* 'MFD_spacing' - Width of MFD bin (float)
* 'Minimum_Magnitude' - Minimum magnitude of activity rates (float)
* 'Maximum_Magnitude' - Characteristic magnitude (float)
(if not defined will use scaling relation)
* 'Maximum_Magnitude_Uncertainty' - Uncertainty on
maximum magnitude
(If not defined and the MSR has a sigma term then this will be
taken from sigma)
* 'Lower_Bound' - Lower bound in terms of number of sigma (float)
* 'Upper_Bound' - Upper bound in terms of number of sigma (float)
* 'Sigma' - Standard deviation (in magnitude units) of distribution | 5.154822 | 1.441876 | 3.575081 |
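For reference, an illustrative configuration dictionary carrying the keys that setUp reads; the numerical values are invented.

```python
mfd_conf = {
    'Model_Weight': 1.0,   # logic-tree weight of the model
    'MFD_spacing': 0.1,    # bin width in magnitude units
    'Lower_Bound': -3.0,   # lower bound in number of sigma
    'Upper_Bound': 3.0,    # upper bound in number of sigma
    'Sigma': 0.12,         # standard deviation in magnitude units
}
```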
'''
Calculates activity rate on the fault
:param float slip:
Slip rate in mm/yr
:param float area:
Area of the fault (km ** 2)
:param float shear_modulus:
Shear modulus of the fault (GPa)
:returns:
* Minimum Magnitude (float)
* Bin width (float)
* Occurrence Rates (numpy.ndarray)
'''
# Working in N-m, so convert: shear_modulus - GPa -> Pa
# area - km ** 2. -> m ** 2.
# slip - mm/yr -> m/yr
moment_rate = (shear_modulus * 1.E9) * (area * 1.E6) * (slip / 1000.)
moment_mag = _scale_moment(self.mmax, in_nm=True)
characteristic_rate = moment_rate / moment_mag
if self.sigma and (fabs(self.sigma) > 1E-5):
self.mmin = self.mmax + (self.lower_bound * self.sigma)
mag_upper = self.mmax + (self.upper_bound * self.sigma)
mag_range = np.arange(self.mmin,
mag_upper + self.bin_width,
self.bin_width)
self.occurrence_rate = characteristic_rate * (
truncnorm.cdf(mag_range + (self.bin_width / 2.),
self.lower_bound, self.upper_bound,
loc=self.mmax, scale=self.sigma) -
truncnorm.cdf(mag_range - (self.bin_width / 2.),
self.lower_bound, self.upper_bound,
loc=self.mmax, scale=self.sigma))
else:
# Returns only a single rate
self.mmin = self.mmax
self.occurrence_rate = np.array([characteristic_rate], dtype=float)
return self.mmin, self.bin_width, self.occurrence_rate | def get_mfd(self, slip, area, shear_modulus=30.0) | Calculates activity rate on the fault
:param float slip:
Slip rate in mm/yr
:param float area:
Area of the fault (km ** 2)
:param float shear_modulus:
Shear modulus of the fault (GPa)
:returns:
* Minimum Magnitude (float)
* Bin width (float)
* Occurrence Rates (numpy.ndarray) | 3.552244 | 2.51512 | 1.412356 |
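A back-of-the-envelope check of the moment balance in get_mfd, assuming _scale_moment follows the Hanks and Kanamori relation M0 = 10 ** (1.5 * Mw + 9.05) in N-m (an assumption, since that helper is not shown in this row); the slip, area and mmax values are invented.

```python
# Invented inputs for the check.
shear_modulus = 30.0   # GPa
area = 1000.0          # km ** 2
slip = 5.0             # mm/yr
mmax = 7.0

moment_rate = (shear_modulus * 1.E9) * (area * 1.E6) * (slip / 1000.)  # N-m/yr
moment_mag = 10.0 ** (1.5 * mmax + 9.05)                               # N-m
characteristic_rate = moment_rate / moment_mag
print(characteristic_rate)  # ~4.2e-3 events per year
```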
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
mean = (self._get_magnitude_scaling_term(C, rup.mag) +
self._get_distance_scaling_term(C, rup.mag, dists.rrup) +
self._get_style_of_faulting_term(C, rup.rake) +
self._get_site_scaling_term(C, sites.vs30))
stddevs = self._get_stddevs(imt,
rup.mag,
len(dists.rrup),
stddev_types)
return mean, stddevs | def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types) | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. | 2.723328 | 2.675046 | 1.018049 |
if mag < 6.75:
return C["a1_lo"] + C["a2_lo"] * mag + C["a3"] *\
((8.5 - mag) ** 2.0)
else:
return C["a1_hi"] + C["a2_hi"] * mag + C["a3"] *\
((8.5 - mag) ** 2.0) | def _get_magnitude_scaling_term(self, C, mag) | Returns the magnitude scaling term defined in equation 3 | 2.87762 | 2.794783 | 1.02964 |
if mag < 6.75:
mag_factor = -(C["b1_lo"] + C["b2_lo"] * mag)
else:
mag_factor = -(C["b1_hi"] + C["b2_hi"] * mag)
return mag_factor * np.log(rrup + 10.0) + (C["gamma"] * rrup) | def _get_distance_scaling_term(self, C, mag, rrup) | Returns the magnitude dependent distance scaling term | 3.536628 | 3.41016 | 1.037086 |
site_amp = C["xi"] * np.log(1200.0) * np.ones(len(vs30))
idx = vs30 < 1200.0
site_amp[idx] = C["xi"] * np.log(vs30[idx])
return site_amp | def _get_site_scaling_term(self, C, vs30) | Returns the site scaling. For sites with Vs30 > 1200 m/s the site
amplification for Vs30 = 1200 is used | 3.435722 | 2.937588 | 1.169572 |
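A short numpy sketch of the Vs30 cap described above: sites stiffer than 1200 m/s all receive the amplification computed for Vs30 = 1200. The xi coefficient is invented for the example.

```python
import numpy as np

xi = -0.5  # invented coefficient
vs30 = np.array([300.0, 760.0, 2000.0])
site_amp = xi * np.log(1200.0) * np.ones(len(vs30))
idx = vs30 < 1200.0
site_amp[idx] = xi * np.log(vs30[idx])
print(site_amp)  # only the first two sites use their own Vs30
```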
if mag < 5.0:
stddev_mag = 5.0
else:
stddev_mag = mag
if imt.name == "PGA" or imt.period < 0.05:
total_sigma = 1.18 + 0.035 * np.log(0.05) - 0.06 * stddev_mag
elif imt.period > 3.0:
total_sigma = 1.18 + 0.035 * np.log(3.0) - 0.06 * stddev_mag
else:
total_sigma = 1.18 + 0.035 * np.log(imt.period) - 0.06 * stddev_mag
stddevs = []
for stddev_type in stddev_types:
assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
if stddev_type == const.StdDev.TOTAL:
stddevs.append(total_sigma + np.zeros(n_sites, dtype=float))
return stddevs | def _get_stddevs(self, imt, mag, n_sites, stddev_types) | The standard error (assumed equivalent to total standard deviation)
is defined as a function of magnitude and period (equation 4,
page 1168). For magnitudes lower than 5.0 the standard deviation is
equal to that for the case in which magnitude is 5.0. For short
periods (T < 0.05), including PGA, the standard deviation is
assumed to be equal to the case in which T = 0.05, whilst for long
periods (T > 3.0) it is assumed to be equal to the case in which
T = 3.0 | 2.112568 | 1.945588 | 1.085825 |
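A worked check of the clamping rules in _get_stddevs, written as a standalone helper with magnitude floored at 5.0 and period clipped to [0.05, 3.0]; this is a simplified sketch, not the GMPE implementation (which dispatches on imt.name for PGA instead of clipping a numeric period).

```python
import numpy as np

def total_sigma(period, mag):
    mag = max(mag, 5.0)                       # magnitude floor
    period = min(max(period, 0.05), 3.0)      # period clipping
    return 1.18 + 0.035 * np.log(period) - 0.06 * mag

print(round(total_sigma(0.01, 4.5), 4))  # 0.7751, same as T=0.05, M=5.0
print(round(total_sigma(1.00, 6.0), 4))  # 0.82
```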