code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
# extract dictionaries of coefficients specific to required # intensity measure type and for PGA C = self.COEFFS[imt] C_PGA = self.COEFFS[PGA()] # compute median pga on rock (vs30=1100), needed for site response # term calculation pga1100 = np.exp(self._compute_imt1100(PGA(), sites, rup, dists)) mean = (self._compute_base_term(C, rup, dists) + self._compute_faulting_style_term(C, rup) + self._compute_site_response_term(C, imt, sites, pga1100) + self._compute_hanging_wall_term(C, dists, rup) + self._compute_top_of_rupture_depth_term(C, rup) + self._compute_large_distance_term(C, dists, rup) + self._compute_soil_depth_term(C, imt, sites.z1pt0, sites.vs30)) stddevs = self._get_stddevs(C, C_PGA, pga1100, rup, sites, stddev_types) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
3.264071
3.233761
1.009373
c1 = self.CONSTS['c1'] R = np.sqrt(dists.rrup ** 2 + self.CONSTS['c4'] ** 2) base_term = (C['a1'] + C['a8'] * ((8.5 - rup.mag) ** 2) + (C['a2'] + self.CONSTS['a3'] * (rup.mag - c1)) * np.log(R)) if rup.mag <= c1: return base_term + self.CONSTS['a4'] * (rup.mag - c1) else: return base_term + self.CONSTS['a5'] * (rup.mag - c1)
def _compute_base_term(self, C, rup, dists)
Compute and return base model term, that is the first term in equation 1, page 74. The calculation of this term is explained in paragraph 'Base Model', page 75.
2.846616
2.826943
1.006959
# ranges of rake values for each faulting mechanism are specified in # table 2, page 75 return (C['a12'] * float(rup.rake > 30 and rup.rake < 150) + C['a13'] * float(rup.rake > -120 and rup.rake < -60))
def _compute_faulting_style_term(self, C, rup)
Compute and return faulting style term, that is the sum of the second and third terms in equation 1, page 74.
5.056424
4.503536
1.122768
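A compact restatement of the code above (my transcription of the implemented logic, with the rake ranges taken from the in-code comment referring to table 2, page 75):

    F_{RV} = \begin{cases} 1 & 30^\circ < \text{rake} < 150^\circ \\ 0 & \text{otherwise} \end{cases}, \qquad
    F_{NM} = \begin{cases} 1 & -120^\circ < \text{rake} < -60^\circ \\ 0 & \text{otherwise} \end{cases}

    \text{faulting style term} = a_{12}\, F_{RV} + a_{13}\, F_{NM}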
site_resp_term = np.zeros_like(sites.vs30) vs30_star, _ = self._compute_vs30_star_factor(imt, sites.vs30) vlin, c, n = C['VLIN'], self.CONSTS['c'], self.CONSTS['n'] a10, b = C['a10'], C['b'] idx = sites.vs30 < vlin arg = vs30_star[idx] / vlin site_resp_term[idx] = (a10 * np.log(arg) - b * np.log(pga1100[idx] + c) + b * np.log(pga1100[idx] + c * (arg ** n))) idx = sites.vs30 >= vlin site_resp_term[idx] = (a10 + b * n) * np.log(vs30_star[idx] / vlin) return site_resp_term
def _compute_site_response_term(self, C, imt, sites, pga1100)
Compute and return site response model term, that is the fifth term in equation 1, page 74.
2.952821
2.920726
1.010989
if rup.dip == 90.0: return np.zeros_like(dists.rx) else: idx = dists.rx > 0 Fhw = np.zeros_like(dists.rx) Fhw[idx] = 1 # equation 8, page 77 T1 = np.zeros_like(dists.rx) idx1 = (dists.rjb < 30.0) & (idx) T1[idx1] = 1.0 - dists.rjb[idx1] / 30.0 # equation 9, page 77 T2 = np.ones_like(dists.rx) idx2 = ((dists.rx <= rup.width * np.cos(np.radians(rup.dip))) & (idx)) T2[idx2] = (0.5 + dists.rx[idx2] / (2 * rup.width * np.cos(np.radians(rup.dip)))) # equation 10, page 78 T3 = np.ones_like(dists.rx) idx3 = (dists.rx < rup.ztor) & (idx) T3[idx3] = dists.rx[idx3] / rup.ztor # equation 11, page 78 if rup.mag <= 6.0: T4 = 0.0 elif rup.mag > 6 and rup.mag < 7: T4 = rup.mag - 6 else: T4 = 1.0 # equation 5, in AS08_NGA_errata.pdf if rup.dip >= 30: T5 = 1.0 - (rup.dip - 30.0) / 60.0 else: T5 = 1.0 return Fhw * C['a14'] * T1 * T2 * T3 * T4 * T5
def _compute_hanging_wall_term(self, C, dists, rup)
Compute and return hanging wall model term, that is the sixth term in equation 1, page 74. The calculation of this term is explained in paragraph 'Hanging-Wall Model', page 77.
2.404232
2.356169
1.020399
if rup.ztor >= 10.0: return C['a16'] else: return C['a16'] * rup.ztor / 10.0
def _compute_top_of_rupture_depth_term(self, C, rup)
Compute and return top of rupture depth term, that is the seventh term in equation 1, page 74. The calculation of this term is explained in paragraph 'Depth-to-Top of Rupture Model', page 78.
4.835664
4.239858
1.140525
# equation 15, page 79 if rup.mag < 5.5: T6 = 1.0 elif rup.mag >= 5.5 and rup.mag <= 6.5: T6 = 0.5 * (6.5 - rup.mag) + 0.5 else: T6 = 0.5 # equation 14, page 79 large_distance_term = np.zeros_like(dists.rrup) idx = dists.rrup >= 100.0 large_distance_term[idx] = C['a18'] * (dists.rrup[idx] - 100.0) * T6 return large_distance_term
def _compute_large_distance_term(self, C, dists, rup)
Compute and return large distance model term, that is the eighth term in equation 1, page 74. The calculation of this term is explained in paragraph 'Large Distance Model', page 78.
2.780558
2.621315
1.06075
a21 = self._compute_a21_factor(C, imt, z1pt0, vs30) a22 = self._compute_a22_factor(imt) median_z1pt0 = self._compute_median_z1pt0(vs30) soil_depth_term = a21 * np.log((z1pt0 + self.CONSTS['c2']) / (median_z1pt0 + self.CONSTS['c2'])) idx = z1pt0 >= 200 soil_depth_term[idx] += a22 * np.log(z1pt0[idx] / 200) return soil_depth_term
def _compute_soil_depth_term(self, C, imt, z1pt0, vs30)
Compute and return soil depth model term, that is the ninth term in equation 1, page 74. The calculation of this term is explained in paragraph 'Soil Depth Model', page 79.
2.422168
2.413678
1.003518
vs30_1100 = np.zeros_like(sites.vs30) + 1100 vs30_star, _ = self._compute_vs30_star_factor(imt, vs30_1100) C = self.COEFFS[imt] mean = (self._compute_base_term(C, rup, dists) + self._compute_faulting_style_term(C, rup) + self._compute_hanging_wall_term(C, dists, rup) + self._compute_top_of_rupture_depth_term(C, rup) + self._compute_large_distance_term(C, dists, rup) + self._compute_soil_depth_term(C, imt, sites.z1pt0, vs30_1100) + # this is the site response term in case of vs30=1100 ((C['a10'] + C['b'] * self.CONSTS['n']) * np.log(vs30_star / C['VLIN']))) return mean
def _compute_imt1100(self, imt, sites, rup, dists)
Compute and return mean imt value for rock conditions (vs30 = 1100 m/s)
3.708989
3.597712
1.03093
std_intra = self._compute_intra_event_std(C, C_PGA, pga1100, rup.mag, sites.vs30, sites.vs30measured) std_inter = self._compute_inter_event_std(C, C_PGA, pga1100, rup.mag, sites.vs30) stddevs = [] for stddev_type in stddev_types: assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES if stddev_type == const.StdDev.TOTAL: stddevs.append(np.sqrt(std_intra ** 2 + std_inter ** 2)) elif stddev_type == const.StdDev.INTRA_EVENT: stddevs.append(std_intra) elif stddev_type == const.StdDev.INTER_EVENT: stddevs.append(std_inter) return stddevs
def _get_stddevs(self, C, C_PGA, pga1100, rup, sites, stddev_types)
Return standard deviations as described in paragraph 'Equations for standard deviation', page 81.
1.659505
1.581989
1.048999
sigma_b = self._compute_sigma_b(C, mag, vs30measured) sigma_b_pga = self._compute_sigma_b(C_PGA, mag, vs30measured) delta_amp = self._compute_partial_derivative_site_amp(C, pga1100, vs30) std_intra = np.sqrt(sigma_b ** 2 + self.CONSTS['sigma_amp'] ** 2 + (delta_amp ** 2) * (sigma_b_pga ** 2) + 2 * delta_amp * sigma_b * sigma_b_pga * C['rho']) return std_intra
def _compute_intra_event_std(self, C, C_PGA, pga1100, mag, vs30, vs30measured)
Compute the intra-event standard deviation (equation 24) as described in the errata, not in the original paper.
3.025318
2.980466
1.015049
tau_0 = self._compute_std_0(C['s3'], C['s4'], mag) tau_b_pga = self._compute_std_0(C_PGA['s3'], C_PGA['s4'], mag) delta_amp = self._compute_partial_derivative_site_amp(C, pga1100, vs30) std_inter = np.sqrt(tau_0 ** 2 + (delta_amp ** 2) * (tau_b_pga ** 2) + 2 * delta_amp * tau_0 * tau_b_pga * C['rho']) return std_inter
def _compute_inter_event_std(self, C, C_PGA, pga1100, mag, vs30)
Compute the inter-event standard deviation, equation 25, page 82.
3.449772
3.345113
1.031287
sigma_0 = self._compute_sigma_0(C, mag, vs30measured) sigma_amp = self.CONSTS['sigma_amp'] return np.sqrt(sigma_0 ** 2 - sigma_amp ** 2)
def _compute_sigma_b(self, C, mag, vs30measured)
Equation 23, page 81.
3.46076
3.217681
1.075545
s1 = np.zeros_like(vs30measured, dtype=float) s2 = np.zeros_like(vs30measured, dtype=float) idx = vs30measured == 1 s1[idx] = C['s1mea'] s2[idx] = C['s2mea'] idx = vs30measured == 0 s1[idx] = C['s1est'] s2[idx] = C['s2est'] return self._compute_std_0(s1, s2, mag)
def _compute_sigma_0(self, C, mag, vs30measured)
Equation 27, page 82.
2.329428
2.283804
1.019977
if mag < 5: return c1 elif mag >= 5 and mag <= 7: return c1 + (c2 - c1) * (mag - 5) / 2 else: return c2
def _compute_std_0(self, c1, c2, mag)
Common part of equations 27 and 28, page 82.
2.533193
2.393905
1.058185
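Transcribed from the branches above (a restatement of the code, where c1 and c2 are the s-type coefficients passed in):

    \sigma_0(M) = \begin{cases} c_1 & M < 5 \\ c_1 + (c_2 - c_1)\,\dfrac{M - 5}{2} & 5 \le M \le 7 \\ c_2 & M > 7 \end{cases}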
delta_amp = np.zeros_like(vs30) vlin = C['VLIN'] c = self.CONSTS['c'] b = C['b'] n = self.CONSTS['n'] idx = vs30 < vlin delta_amp[idx] = (- b * pga1100[idx] / (pga1100[idx] + c) + b * pga1100[idx] / (pga1100[idx] + c * ((vs30[idx] / vlin) ** n))) return delta_amp
def _compute_partial_derivative_site_amp(self, C, pga1100, vs30)
Partial derivative of site amplification term with respect to PGA on rock (equation 26), as described in the errata and not in the original paper.
3.359183
3.359813
0.999812
e2 = self._compute_e2_factor(imt, vs30) a21 = e2.copy() vs30_star, v1 = self._compute_vs30_star_factor(imt, vs30) median_z1pt0 = self._compute_median_z1pt0(vs30) numerator = ((C['a10'] + C['b'] * self.CONSTS['n']) * np.log(vs30_star / np.min([v1, 1000]))) denominator = np.log((z1pt0 + self.CONSTS['c2']) / (median_z1pt0 + self.CONSTS['c2'])) idx = numerator + e2 * denominator < 0 a21[idx] = - numerator[idx] / denominator[idx] idx = vs30 >= 1000 a21[idx] = 0.0 return a21
def _compute_a21_factor(self, C, imt, z1pt0, vs30)
Compute and return a21 factor, equation 18, page 80.
3.299549
3.253516
1.014149
v1 = self._compute_v1_factor(imt) vs30_star = vs30.copy() vs30_star[vs30_star >= v1] = v1 return vs30_star, v1
def _compute_vs30_star_factor(self, imt, vs30)
Compute and return vs30 star factor, equation 5, page 77.
3.179237
3.000199
1.059676
if imt.name == "SA": t = imt.period if t <= 0.50: v1 = 1500.0 elif t > 0.50 and t <= 1.0: v1 = np.exp(8.0 - 0.795 * np.log(t / 0.21)) elif t > 1.0 and t < 2.0: v1 = np.exp(6.76 - 0.297 * np.log(t)) else: v1 = 700.0 elif imt.name == "PGA": v1 = 1500.0 else: # this is for PGV v1 = 862.0 return v1
def _compute_v1_factor(self, imt)
Compute and return v1 factor, equation 6, page 77.
2.811533
2.713024
1.03631
e2 = np.zeros_like(vs30) if imt.name == "PGV": period = 1 elif imt.name == "PGA": period = 0 else: period = imt.period if period < 0.35: return e2 else: idx = vs30 <= 1000 if period >= 0.35 and period <= 2.0: e2[idx] = (-0.25 * np.log(vs30[idx] / 1000) * np.log(period / 0.35)) elif period > 2.0: e2[idx] = (-0.25 * np.log(vs30[idx] / 1000) * np.log(2.0 / 0.35)) return e2
def _compute_e2_factor(self, imt, vs30)
Compute and return e2 factor, equation 19, page 80.
2.133896
2.114032
1.009397
z1pt0_median = np.zeros_like(vs30) + 6.745 idx = np.where((vs30 >= 180.0) & (vs30 <= 500.0)) z1pt0_median[idx] = 6.745 - 1.35 * np.log(vs30[idx] / 180.0) idx = vs30 > 500.0 z1pt0_median[idx] = 5.394 - 4.48 * np.log(vs30[idx] / 500.0) return np.exp(z1pt0_median)
def _compute_median_z1pt0(self, vs30)
Compute and return median z1pt0 (in m), equation 17, page 79.
2.236836
2.151472
1.039677
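The piecewise relation implemented above can be written as (a transcription of the code; the returned value is the exponential of the right-hand side, i.e. the median z1.0 in metres):

    \ln \hat{z}_{1.0} = \begin{cases} 6.745 & V_{S30} < 180 \\ 6.745 - 1.35\,\ln(V_{S30}/180) & 180 \le V_{S30} \le 500 \\ 5.394 - 4.48\,\ln(V_{S30}/500) & V_{S30} > 500 \end{cases}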
if imt.name == 'PGV': return 0.0 period = imt.period if period < 2.0: return 0.0 else: return 0.0625 * (period - 2.0)
def _compute_a22_factor(self, imt)
Compute and return the a22 factor, equation 20, page 80.
3.627638
3.298905
1.099649
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types) mean = np.zeros_like(sites.vs30) stddevs = [np.zeros_like(sites.vs30) for _ in stddev_types] idx_rock = sites.vs30 >= self.ROCK_VS30 idx_soil = sites.vs30 < self.ROCK_VS30 if idx_rock.any(): C = self.COEFFS_ROCK[imt] self._compute_mean(C, self.CONSTS['A1_rock'], self.CONSTS['A2_rock'], self.CONSTS['A3_rock'], self.CONSTS['A4_rock'], self.CONSTS['A5_rock'], self.CONSTS['A6_rock'], rup.mag, rup.hypo_depth, dists.rrup, mean, idx_rock) self._compute_std(C, rup.mag, stddevs, idx_rock) if imt == SA(period=4.0, damping=5.0): mean = mean / 0.399 if idx_soil.any(): C = self.COEFFS_SOIL[imt] self._compute_mean(C, self.CONSTS['A1_soil'], self.CONSTS['A2_soil'], self.CONSTS['A3_soil'], self.CONSTS['A4_soil'], self.CONSTS['A5_soil'], self.CONSTS['A6_soil'], rup.mag, rup.hypo_depth, dists.rrup, mean, idx_soil) self._compute_std(C, rup.mag, stddevs, idx_soil) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
1.765536
1.778346
0.992796
mean[idx] = (A1 + A2 * mag + C['C1'] + C['C2'] * (A3 - mag) ** 3 + C['C3'] * np.log(rrup[idx] + A4 * np.exp(A5 * mag)) + A6 * hypo_depth)
def _compute_mean(self, C, A1, A2, A3, A4, A5, A6, mag, hypo_depth, rrup, mean, idx)
Compute mean for subduction interface events, as explained in table 2, page 67.
3.947626
3.985352
0.990534
if mag > 8.0: mag = 8.0 for stddev in stddevs: stddev[idx] += C['C4'] + C['C5'] * mag
def _compute_std(self, C, mag, stddevs, idx)
Compute total standard deviation, as explained in table 2, page 67.
5.626661
5.187365
1.084686
mean, stddevs = super().get_mean_and_stddevs( sites, rup, dists, imt, stddev_types) # this is the firm ground adjustment mean += np.log(1.162) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
4.228964
4.907959
0.861654
''' Creates the map according to the input configuration ''' if self.config['min_lon'] >= self.config['max_lon']: raise ValueError('Upper limit of long is smaller than lower limit') if self.config['min_lat'] >= self.config['max_lat']: raise ValueError('Upper limit of lat is smaller than lower limit') # Corners of the map lowcrnrlat = self.config['min_lat'] lowcrnrlon = self.config['min_lon'] uppcrnrlat = self.config['max_lat'] uppcrnrlon = self.config['max_lon'] if 'resolution' not in self.config.keys(): self.config['resolution'] = 'l' lat0 = lowcrnrlat + ((uppcrnrlat - lowcrnrlat) / 2) lon0 = lowcrnrlon + ((uppcrnrlon - lowcrnrlon) / 2) if (uppcrnrlat - lowcrnrlat) >= (uppcrnrlon - lowcrnrlon): fig_aspect = PORTRAIT_ASPECT else: fig_aspect = LANDSCAPE_ASPECT if self.ax is None: self.fig, self.ax = plt.subplots(figsize=fig_aspect, facecolor='w', edgecolor='k') else: self.fig = self.ax.get_figure() if self.title: self.ax.set_title(self.title, fontsize=16) parallels = np.arange(-90., 90., self.lat_lon_spacing) meridians = np.arange(0., 360., self.lat_lon_spacing) # Build Map # Do not import Basemap at top level since it's an optional feature # and it would break doctests from mpl_toolkits.basemap import Basemap self.m = Basemap( llcrnrlon=lowcrnrlon, llcrnrlat=lowcrnrlat, urcrnrlon=uppcrnrlon, urcrnrlat=uppcrnrlat, projection='stere', resolution=self.config['resolution'], area_thresh=1000.0, lat_0=lat0, lon_0=lon0, ax=self.ax) self.m.drawcountries() self.m.drawmapboundary() self.m.drawcoastlines() self.m.drawstates() self.m.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=12) self.m.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=12) self.m.fillcontinents(color='wheat')
def _build_basemap(self)
Creates the map according to the input configuration
2.094482
2.020851
1.036436
self.fig.savefig(filename, dpi=self.dpi, format=filetype, papertype=papertype)
def savemap(self, filename, filetype='png', papertype="a4")
Save the figure
2.78257
2.767502
1.005444
''' :param catalogue: Earthquake catalogue as instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` :param dict config: Configuration parameters of the algorithm, containing the following information: 'min_lat' Minimum value of latitude (in degrees, float) 'max_lat' Minimum value of longitude (in degrees, float) (min_lat, min_lon) Defines the inferior corner of the map 'min_lon' Maximum value of latitude (in degrees, float) 'max_lon' Maximum value of longitude (in degrees, float) (min_lon, max_lon) Defines the upper corner of the map :returns: Figure with the spatial distribution of the events. ''' # Magnitudes bins and minimum marrker size # min_mag = np.min(catalogue.data['magnitude']) # max_mag = np.max(catalogue.data['magnitude']) con_min = np.where(np.array([symb[0] for symb in DEFAULT_SYMBOLOGY]) < np.min(catalogue.data['magnitude']))[0] con_max = np.where(np.array([symb[1] for symb in DEFAULT_SYMBOLOGY]) > np.max(catalogue.data['magnitude']))[0] if len(con_min) == 1: min_loc = con_min[0] else: min_loc = con_min[-1] if len(con_max) == 1: max_loc = con_max[0] else: max_loc = con_max[1] # min_loc = np.where(np.array([symb[0] for symb in DEFAULT_SYMBOLOGY]) # < np.min(catalogue.data['magnitude']))[0][-1] # max_loc = np.where(np.array([symb[1] for symb in DEFAULT_SYMBOLOGY]) # > np.max(catalogue.data['magnitude']))[0][1] symbology = DEFAULT_SYMBOLOGY[min_loc:max_loc] for sym in symbology: # Create legend string if np.isinf(sym[0]): leg_str = 'M < %5.2f' % sym[1] elif np.isinf(sym[1]): leg_str = 'M >= %5.2f' % sym[0] else: leg_str = '%5.2f <= M < %5.2f' % (sym[0], sym[1]) idx = np.logical_and(catalogue.data['magnitude'] >= sym[0], catalogue.data['magnitude'] < sym[1]) mag_size = 1.2 * np.min([sym[0] + 0.5, sym[1] - 0.5]) x, y = self.m(catalogue.data['longitude'][idx], catalogue.data['latitude'][idx]) self.m.plot(x, y, sym[2], markersize=mag_size, label=leg_str) self.ax.legend(bbox_to_anchor=LEGEND_OFFSET) if self.title: self.ax.set_title(self.title, fontsize=16) if not overlay: plt.show()
def add_catalogue(self, catalogue, overlay=False)
:param catalogue: Earthquake catalogue as instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` :param dict config: Configuration parameters of the algorithm, containing the following information: 'min_lat' Minimum value of latitude (in degrees, float) 'max_lat' Maximum value of latitude (in degrees, float) 'min_lon' Minimum value of longitude (in degrees, float) 'max_lon' Maximum value of longitude (in degrees, float) (min_lat, min_lon) Defines the lower-left corner of the map (max_lat, max_lon) Defines the upper-right corner of the map :returns: Figure with the spatial distribution of the events.
2.411877
1.841227
1.309929
lons = np.hstack([source.geometry.lons, source.geometry.lons[0]]) lats = np.hstack([source.geometry.lats, source.geometry.lats[0]]) x, y = self.m(lons, lats) self.m.plot(x, y, border, linewidth=border_width)
def _plot_area_source(self, source, border='k-', border_width=1.0)
Plots the area source :param source: Area source as instance of :class: mtkAreaSource :param str border: Line properties of border (see matplotlib documentation for detail) :param float border_width: Line width of border (see matplotlib documentation for detail)
2.150619
2.471701
0.870097
x, y = self.m(source.geometry.longitude, source.geometry.latitude) self.m.plot(x, y, point_marker, markersize=point_size)
def _plot_point_source(self, source, point_marker='ks', point_size=2.0)
Plots the point source :param source: Point source as instance of :class: mtkPointSource :param str point_marker: Marker style for point (see matplotlib documentation for detail) :param float point_size: Marker size for point (see matplotlib documentation for detail)
3.707345
4.039147
0.917853
# Get the trace trace_lons = np.array([pnt.longitude for pnt in source.fault_trace.points]) trace_lats = np.array([pnt.latitude for pnt in source.fault_trace.points]) surface_projection = _fault_polygon_from_mesh(source) # Plot surface projection first x, y = self.m(surface_projection[:, 0], surface_projection[:, 1]) self.m.plot(x, y, border, linewidth=border_width) # Plot fault trace x, y = self.m(trace_lons, trace_lats) self.m.plot(x, y, border, linewidth=1.3 * border_width)
def _plot_simple_fault(self, source, border='k-', border_width=1.0)
Plots the simple fault source as a composite of the fault trace and the surface projection of the fault. :param source: Fault source as instance of :class: mtkSimpleFaultSource :param str border: Line properties of border (see matplotlib documentation for detail) :param float border_width: Line width of border (see matplotlib documentation for detail)
3.102494
2.786546
1.113383
if not max_depth: max_depth = 70. # Get outline top_edge = np.column_stack([source.geometry.mesh.lons[0], source.geometry.mesh.lats[0]]) bottom_edge = np.column_stack([source.geometry.mesh.lons[-1][::-1], source.geometry.mesh.lats[-1][::-1]]) outline = np.vstack([top_edge, bottom_edge, top_edge[0, :]]) lons = source.geometry.mesh.lons.flatten() lats = source.geometry.mesh.lats.flatten() depths = source.geometry.mesh.depths.flatten() norm = Normalize(vmin=min_depth, vmax=max_depth) x1, y1 = self.m(lons, lats) self.m.scatter(x1, y1, marker=".", s=20, c=depths, norm=norm, cmap="jet_r", alpha=alpha, linewidths=0.0, zorder=4) # Plot border x2, y2 = self.m(outline[:, 0], outline[:, 1]) self.m.plot(x2, y2, border, linewidth=border_width)
def _plot_complex_fault(self, source, border='k-', border_width=1.0, min_depth=0., max_depth=None, alpha=1.0)
Plots the complex fault source as a composite of the fault surface mesh (points coloured by depth) and the border of the surface projection. :param source: Fault source as instance of :class: mtkComplexFaultSource :param str border: Line properties of border (see matplotlib documentation for detail) :param float border_width: Line width of border (see matplotlib documentation for detail)
2.280967
2.376998
0.9596
for source in model.sources: if isinstance(source, mtkAreaSource): self._plot_area_source(source, area_border, border_width) elif isinstance(source, mtkPointSource): self._plot_point_source(source, point_marker, point_size) elif isinstance(source, mtkComplexFaultSource): self._plot_complex_fault(source, area_border, border_width, min_depth, max_depth, alpha) elif isinstance(source, mtkSimpleFaultSource): self._plot_simple_fault(source, area_border, border_width) else: pass if not overlay: plt.show()
def add_source_model( self, model, area_border='k-', border_width=1.0, point_marker='ks', point_size=2.0, overlay=False, min_depth=0., max_depth=None, alpha=1.0)
Adds a source model to the map :param model: Source model of mixed typologies as instance of :class: openquake.hmtk.sources.source_model.mtkSourceModel
2.190603
2.007307
1.091314
if not norm: norm = Normalize(vmin=np.min(data), vmax=np.max(data)) x, y, = self.m(longitude, latitude) mappable = self.m.scatter(x, y, marker=shape, s=size, c=data, norm=norm, alpha=alpha, linewidths=0.0, zorder=4) self.m.colorbar(mappable=mappable, fig=self.fig, ax=self.ax) if not overlay: plt.show()
def add_colour_scaled_points(self, longitude, latitude, data, shape='s', alpha=1.0, size=20, norm=None, overlay=False)
Overlays a set of points on a map with a fixed size but colour scaled according to the data :param np.ndarray longitude: Longitude :param np.ndarray latitude: Latitude :param np.ndarray data: Data for plotting :param str shape: Marker style :param float alpha: Sets the transparency of the marker (0 for transparent, 1 opaque) :param int size: Marker size :param norm: Normalisation as instance of :class: matplotlib.colors.Normalize
2.34788
2.53197
0.927294
if logplot: data = np.log10(data.copy()) x, y, = self.m(longitude, latitude) self.m.scatter(x, y, marker=shape, s=(smin + data ** sscale), c=colour, alpha=alpha, zorder=2) if not overlay: plt.show()
def add_size_scaled_points( self, longitude, latitude, data, shape='o', logplot=False, alpha=1.0, colour='b', smin=2.0, sscale=2.0, overlay=False)
Plots a set of points with size scaled according to the data :param bool logplot: Choose to scale according to the logarithm (base 10) of the data :param float smin: Minimum scale size :param float sscale: Scaling factor
3.079197
3.875603
0.794508
longitude = catalogue.data['longitude'] latitude = catalogue.data['latitude'] strike = catalogue.data['strike1'] dip = catalogue.data['dip1'] rake = catalogue.data['rake1'] if not magnitude or (magnitude < 0): magnitude = catalogue.data['magnitude'] for i, mag in enumerate(magnitude): color = self._select_color_mag(mag) focal_mechanism = [strike[i], dip[i], rake[i]] x, y = self.m(longitude[i], latitude[i]) self.m.plot(x, y) size = mag * 10000 beach = Beach(focal_mechanism, linewidth=1, xy=(x, y), width=size, zorder=size, facecolor=color) self.ax.add_collection(beach) if not overlay: plt.show() else: for i in range(0, catalogue.get_number_tensors()): x, y = self.m(longitude[i], latitude[i]) self.m.plot(x, y) focal_mechanism = [strike[i], dip[i], rake[i]] size = magnitude * 10000. beach = Beach(focal_mechanism, linewidth=1, xy=(x, y), width=size, zorder=size, facecolor='r') self.ax.add_collection(beach) if not overlay: plt.show()
def add_focal_mechanism(self, catalogue, magnitude=None, overlay=True)
Plots the focal mechanisms using the beachball representation. Each focal mechanism must contain: strike, dip, rake.
2.448153
2.347771
1.042756
# Create simple magnitude scaled point basemap self.add_size_scaled_points(catalogue.data['longitude'], catalogue.data['latitude'], catalogue.data['magnitude'], shape="o", alpha=0.8, colour=(0.5, 0.5, 0.5), smin=1.0, sscale=1.5, overlay=True) # If cluster ID is not specified just show mainshocks if cluster_id is None: idx = flagvector == 0 self.add_size_scaled_points(catalogue.data['longitude'][idx], catalogue.data['latitude'][idx], catalogue.data['magnitude'][idx], shape="o", colour="r", smin=1.0, sscale=1.5, overlay=overlay) return if not isinstance(cluster_id, collections.Iterable): cluster_id = [cluster_id] for iloc, clid in enumerate(cluster_id): if iloc == (len(cluster_id) - 1): # On last iteration set overlay to function overlay temp_overlay = overlay else: temp_overlay = True idx = vcl == clid self.add_size_scaled_points( catalogue.data["longitude"][idx], catalogue.data["latitude"][idx], catalogue.data["magnitude"][idx], shape="o", colour=DISSIMILAR_COLOURLIST[(iloc + 1) % NCOLS], smin=1.0, sscale=1.5, overlay=temp_overlay)
def add_catalogue_cluster(self, catalogue, vcl, flagvector, cluster_id=None, overlay=True)
Creates a plot of a catalogue showing where particular clusters exist
3.028002
3.10589
0.974923
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES for stddev_type in stddev_types) C = self.COEFFS[imt] mean = self._get_mean( C, rup.mag, rup.rake, rup.dip, dists.rrup, dists.rjb ) stddevs = self._get_stddevs(C, rup.mag, stddev_types, dists.rrup.size) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
2.211419
2.242059
0.986334
f1 = self._compute_magnitude_scaling(C, mag) f2 = self._compute_distance_scaling(C, mag, rrup) f3 = self._compute_faulting_mechanism(C, rake, dip) f4 = self._compute_far_source_soil_effect(C) f5 = self._compute_hanging_wall_effect(C, rjb, rrup, dip, mag) mean = ( C['c1'] + f1 + C['c4'] * np.log(np.sqrt(f2)) + f3 + f4 + f5 ) return mean
def _get_mean(self, C, mag, rake, dip, rrup, rjb)
Return mean value (eq. 1, page 319).
3.366467
3.257702
1.033387
std = C['c16'] + np.zeros(num_sites) if mag < 7.4: std -= 0.07 * mag else: std -= 0.518 # only the 'total' standard deviation is supported, therefore the # std is always the same for all types stddevs = [std for _ in stddev_types] return stddevs
def _get_stddevs(self, C, mag, stddev_types, num_sites)
Return standard deviation as defined in eq. 11, page 319.
6.884762
6.540948
1.052563
g = C['c5'] + C['c6'] * 0.5 + C['c7'] * 0.5 return ( rrup ** 2 + (np.exp(C['c8'] * mag + C['c9'] * (8.5 - mag) ** 2) * g) ** 2 )
def _compute_distance_scaling(self, C, mag, rrup)
Compute distance scaling term (eq. 3, page 319). The distance scaling assumes the near-source effect of local site conditions is due to 50% very firm soil and soft rock and 50% firm rock.
4.695154
4.986691
0.941537
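Reading the code above, the distance-scaling quantity returned (the f2 that _get_mean later wraps in c4 * log(sqrt(f2))) is, with g = c5 + 0.5 c6 + 0.5 c7 encoding the 50/50 site mix mentioned in the docstring:

    f_2 = R_{rup}^2 + \left[\exp\bigl(c_8 M + c_9 (8.5 - M)^2\bigr)\, g\right]^2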
# flag for reverse faulting frv = float((dip > 45) and (22.5 <= rake <= 157.5)) # flag for thrust faulting fth = float((dip <= 45) and (22.5 <= rake <= 157.5)) return C['c10'] * frv + C['c11'] * fth
def _compute_faulting_mechanism(self, C, rake, dip)
Compute faulting mechanism term (see eq. 5, page 319). Reverse faulting is defined as occurring on steep faults (dip > 45) and rake in (22.5, 157.5). Thrust faulting is defined as occurring on shallow dipping faults (dip <= 45) and rake in (22.5, 157.5).
4.55581
2.916337
1.562169
# eq. 8 (to be noticed that the USGS-NSHMP implementation defines # the hanging-wall term for all rjb distances, while in the original # manuscript, hw is computed only for rjb < 5). Again the 'firm rock' # is considered hw = np.zeros_like(rjb) if dip <= 70.: hw = (5. - rjb) / 5. # eq. 9 f_m = 1 if mag > 6.5 else mag - 5.5 # # eq. 10 f_rrup = C['c15'] + np.zeros_like(rrup) idx = rrup < 8 f_rrup[idx] *= rrup[idx] / 8 # eq. 7 (to be noticed that the f3 factor is not included # while this is defined in the original manuscript) f_hw = hw * f_m * f_rrup return f_hw
def _compute_hanging_wall_effect(self, C, rjb, rrup, dip, mag)
Compute the hanging-wall effect (see eq. 7, 8, 9 and 10, page 319). Considers the correct version of equation 8 as given in the erratum, not in the original paper.
7.293806
6.899342
1.057174
name = method.__name__ def newmethod(self): try: val = self.__dict__[name] except KeyError: val = method(self) self.__dict__[name] = val return val newmethod.__name__ = method.__name__ newmethod.__doc__ = method.__doc__ return property(newmethod)
def cached_property(method)
:param method: a method without arguments except self :returns: a cached property
1.870785
2.10667
0.888029
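A minimal usage sketch of the decorator above; the Circle class and its numbers are illustrative, not part of the codebase:

    class Circle:
        def __init__(self, radius):
            self.radius = radius

        @cached_property
        def area(self):
            # computed on first access, then read back from the instance __dict__
            print('computing area')
            return 3.141592653589793 * self.radius ** 2

    c = Circle(2.0)
    print(c.area)  # prints 'computing area', then the value
    print(c.area)  # cached: the method is not called again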
known = set() outlist = [] for key in keys: if key not in known: outlist.append(key) known.add(key) return outlist
def distinct(keys)
Return the distinct keys in order.
2.706611
2.517574
1.075087
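A doctest-style example of the behaviour read off the code above (first occurrence wins, order preserved):

    >>> distinct(['b', 'a', 'b', 'c', 'a'])
    ['b', 'a', 'c']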
assert b > 0, b return int(math.ceil(float(a) / b))
def ceil(a, b)
Divide a by b and return the ceiling of the quotient, i.e. the smallest integer greater than or equal to a / b. :param a: a number :param b: a positive number :returns: the smallest integer greater than or equal to the quotient
4.078199
5.860004
0.695938
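For instance, following the implementation above:

    >>> ceil(7, 2)
    4
    >>> ceil(6, 2)
    3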
if max_weight <= 0: raise ValueError('max_weight=%s' % max_weight) ws = WeightedSequence([]) prev_key = 'Unspecified' for item in items: w = weight(item) k = key(item) if w < 0: # error raise ValueError('The item %r got a negative weight %s!' % (item, w)) elif ws.weight + w > max_weight or k != prev_key: new_ws = WeightedSequence([(item, w)]) if ws: yield ws ws = new_ws elif w > 0: # ignore items with 0 weight ws.append((item, w)) prev_key = k if ws: yield ws
def block_splitter(items, max_weight, weight=lambda item: 1, key=nokey)
:param items: an iterator over items :param max_weight: the max weight to split on :param weight: a function returning the weight of a given item :param key: a function returning the kind of a given item Group together items of the same kind until the total weight exceeds the `max_weight` and yield `WeightedSequence` instances. Items with weight zero are ignored. For instance >>> items = 'ABCDE' >>> list(block_splitter(items, 3)) [<WeightedSequence ['A', 'B', 'C'], weight=3>, <WeightedSequence ['D', 'E'], weight=2>] The default weight is 1 for all items. Here is an example leveraging the key to group together results: >>> items = ['A1', 'C2', 'D2', 'E2'] >>> list(block_splitter(items, 2, key=operator.itemgetter(1))) [<WeightedSequence ['A1'], weight=1>, <WeightedSequence ['C2', 'D2'], weight=2>, <WeightedSequence ['E2'], weight=1>]
3.302151
3.461693
0.953912
assert number > 0, number assert num_slices > 0, num_slices blocksize = int(math.ceil(number / num_slices)) slices = [] start = 0 while True: stop = min(start + blocksize, number) slices.append(slice(start, stop)) if stop == number: break start += blocksize return slices
def split_in_slices(number, num_slices)
:param number: a positive number to split in slices :param num_slices: the number of slices to return (at most) :returns: a list of slices >>> split_in_slices(4, 2) [slice(0, 2, None), slice(2, 4, None)] >>> split_in_slices(5, 1) [slice(0, 5, None)] >>> split_in_slices(5, 2) [slice(0, 3, None), slice(3, 5, None)] >>> split_in_slices(2, 4) [slice(0, 1, None), slice(1, 2, None)]
2.087048
2.364756
0.882564
if isinstance(sequence, int): return split_in_slices(sequence, hint) elif hint in (0, 1) and key is nokey: # do not split return [sequence] elif hint in (0, 1): # split by key blocks = [] for k, group in groupby(sequence, key).items(): blocks.append(group) return blocks items = sorted(sequence, key=lambda item: (key(item), weight(item))) assert hint > 0, hint assert len(items) > 0, len(items) total_weight = float(sum(weight(item) for item in items)) return block_splitter(items, math.ceil(total_weight / hint), weight, key)
def split_in_blocks(sequence, hint, weight=lambda item: 1, key=nokey)
Split the `sequence` in a number of WeightedSequences close to `hint`. :param sequence: a finite sequence of items :param hint: an integer suggesting the number of subsequences to generate :param weight: a function returning the weight of a given item :param key: a function returning the key of a given item The WeightedSequences are of homogeneous key and they try to be balanced in weight. For instance >>> items = 'ABCDE' >>> list(split_in_blocks(items, 3)) [<WeightedSequence ['A', 'B'], weight=2>, <WeightedSequence ['C', 'D'], weight=2>, <WeightedSequence ['E'], weight=1>]
3.406066
3.870628
0.879978
if isinstance(a, float) or isinstance(a, numpy.ndarray) and a.shape: # shortcut numpy.testing.assert_allclose(a, b, rtol, atol) return if isinstance(a, (str, bytes, int)): # another shortcut assert a == b, (a, b) return if hasattr(a, '_slots_'): # record-like objects assert a._slots_ == b._slots_ for x in a._slots_: assert_close(getattr(a, x), getattr(b, x), rtol, atol, x) return if hasattr(a, 'keys'): # dict-like objects assert a.keys() == b.keys() for x in a: if x != '__geom__': assert_close(a[x], b[x], rtol, atol, x) return if hasattr(a, '__dict__'): # objects with an attribute dictionary assert_close(vars(a), vars(b), context=a) return if hasattr(a, '__iter__'): # iterable objects xs, ys = list(a), list(b) assert len(xs) == len(ys), ('Lists of different lengths: %d != %d' % (len(xs), len(ys))) for x, y in zip(xs, ys): assert_close(x, y, rtol, atol, x) return if a == b: # last attempt to avoid raising the exception return ctx = '' if context is None else 'in context ' + repr(context) raise AssertionError('%r != %r %s' % (a, b, ctx))
def assert_close(a, b, rtol=1e-07, atol=0, context=None)
Compare for equality up to a given precision two composite objects which may contain floats. NB: if the objects are or contain generators, they are exhausted. :param a: an object :param b: another object :param rtol: relative tolerance :param atol: absolute tolerance
2.583319
2.564836
1.007206
if dir is not None: if not os.path.exists(dir): os.makedirs(dir) fh, path = tempfile.mkstemp(dir=dir, prefix=prefix, suffix=suffix) _tmp_paths.append(path) if content: fh = os.fdopen(fh, "wb") if hasattr(content, 'encode'): content = content.encode('utf8') fh.write(content) fh.close() return path
def gettemp(content=None, dir=None, prefix="tmp", suffix="tmp")
Create temporary file with the given content. Please note: the temporary file must be deleted by the caller. :param string content: the content to write to the temporary file. :param string dir: directory where the file should be created :param string prefix: file name prefix :param string suffix: file name suffix :returns: a string with the path to the temporary file
2.008547
2.224368
0.902975
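A small usage sketch based on the code above (string content is encoded as UTF-8 and written to a fresh temporary file; the caller removes it, e.g. via removetmp):

    path = gettemp('some content')
    with open(path) as f:
        assert f.read() == 'some content'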
for path in _tmp_paths: if os.path.exists(path): # not removed yet try: os.remove(path) except PermissionError: pass
def removetmp()
Remove the temporary files created by gettemp
4.272044
4.698182
0.909297
# we assume that the .git folder is two levels above any package # i.e. openquake/engine/../../.git git_path = os.path.join(os.path.dirname(fname), '..', '..', '.git') # macOS complains if we try to execute git and it's not available. # Code will run, but a pop-up offering to install bloatware (Xcode) # is raised. This is annoying in end-users installations, so we check # if .git exists before trying to execute the git executable if os.path.isdir(git_path): try: gh = subprocess.check_output( ['git', 'rev-parse', '--short', 'HEAD'], stderr=open(os.devnull, 'w'), cwd=os.path.dirname(git_path)).strip() gh = "-git" + decode(gh) if gh else '' return gh except Exception: # trapping everything on purpose; git may not be installed or it # may not work properly pass return ''
def git_suffix(fname)
:returns: `<short git hash>` if Git repository found
7.173479
7.125527
1.006729
if args: code %= args try: out = subprocess.check_output([sys.executable, '-c', code]) except subprocess.CalledProcessError as exc: print(exc.cmd[-1], file=sys.stderr) raise if out: return eval(out, {}, {})
def run_in_process(code, *args)
Run in an external process the given Python code and return the output as a Python object. If there are arguments, then code is taken as a template and traditional string interpolation is performed. :param code: string or template describing Python code :param args: arguments to be used for interpolation :returns: the output of the process, as a Python object
3.366219
3.416717
0.98522
already_imported = set(sys.modules) mod_or_pkg = importlib.import_module(module_or_package) if not hasattr(mod_or_pkg, '__path__'): # is a simple module return set(sys.modules) - already_imported # else import all modules contained in the package [pkg_path] = mod_or_pkg.__path__ n = len(pkg_path) for cwd, dirs, files in os.walk(pkg_path): if all(os.path.basename(f) != '__init__.py' for f in files): # the current working directory is not a subpackage continue for f in files: if f.endswith('.py'): # convert PKGPATH/subpackage/module.py -> subpackage.module # works at any level of nesting modname = (module_or_package + cwd[n:].replace(os.sep, '.') + '.' + os.path.basename(f[:-3])) importlib.import_module(modname) return set(sys.modules) - already_imported
def import_all(module_or_package)
If `module_or_package` is a module, just import it; if it is a package, recursively imports all the modules it contains. Returns the names of the modules that were imported as a set. The set can be empty if the modules were already in sys.modules.
3.219366
3.283817
0.980373
assert packages, 'At least one package must be specified' import_package = 'from openquake.baselib.general import import_all\n' \ 'print(import_all("%s"))' % package imported_modules = run_in_process(import_package) for mod in imported_modules: for pkg in packages: if mod.startswith(pkg): raise CodeDependencyError('%s depends on %s' % (package, pkg))
def assert_independent(package, *packages)
:param package: Python name of a module/package :param packages: Python names of modules/packages Make sure the `package` does not depend on the `packages`.
4.278147
4.093308
1.045156
lst = module.split(".") pkg, submodule = lst[0], ".".join(lst[1:]) try: fileobj, filepath, descr = imp.find_module(pkg, syspath) except ImportError: return if submodule: # recursive search return search_module(submodule, [filepath]) return filepath
def search_module(module, syspath=sys.path)
Given a module name (possibly with dots) returns the corresponding filepath, or None, if the module cannot be found. :param module: (dotted) name of the Python module to look for :param syspath: a list of directories to search (default sys.path)
4.113458
4.323189
0.951487
kgroups = itertools.groupby(sorted(objects, key=key), key) return {k: reducegroup(group) for k, group in kgroups}
def groupby(objects, key, reducegroup=list)
:param objects: a sequence of objects with a key value :param key: the key function to extract the key value :param reducegroup: the function to apply to each group :returns: a dict {key value: map(reducegroup, group)} >>> groupby(['A1', 'A2', 'B1', 'B2', 'B3'], lambda x: x[0], ... lambda group: ''.join(x[1] for x in group)) {'A': '12', 'B': '123'}
3.757102
5.61622
0.668973
if isinstance(kfield, tuple): kgetter = operator.itemgetter(*kfield) else: kgetter = operator.itemgetter(kfield) if isinstance(vfield, tuple): vgetter = operator.itemgetter(*vfield) else: vgetter = operator.itemgetter(vfield) dic = groupby(records, kgetter, lambda rows: [vgetter(r) for r in rows]) return list(dic.items())
def groupby2(records, kfield, vfield)
:param records: a sequence of records with positional or named fields :param kfield: the index/name/tuple specifying the field to use as a key :param vfield: the index/name/tuple specifying the field to use as a value :returns: a list of pairs of the form (key, [value, ...]). >>> groupby2(['A1', 'A2', 'B1', 'B2', 'B3'], 0, 1) [('A', ['1', '2']), ('B', ['1', '2', '3'])] Here is an example where the keyfield is a tuple of integers: >>> groupby2(['A11', 'A12', 'B11', 'B21'], (0, 1), 2) [(('A', '1'), ['1', '2']), (('B', '1'), ['1']), (('B', '2'), ['1'])]
2.113641
2.542819
0.831219
for name, value in kw.items(): array = array[array[name] == value] return array
def get_array(array, **kw)
Extract a subarray by filtering on the given keyword arguments
3.981772
2.601238
1.530722
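A sketch with a hypothetical structured array showing the keyword filtering performed above:

    import numpy
    arr = numpy.array([(1, 0.5), (2, 0.7), (2, 0.9)],
                      dtype=[('id', int), ('value', float)])
    subset = get_array(arr, id=2)   # keeps only the records with id == 2
    assert len(subset) == 2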
if array_or_none1 is None and array_or_none2 is None: return False elif array_or_none1 is None and array_or_none2 is not None: return True elif array_or_none1 is not None and array_or_none2 is None: return True if array_or_none1.shape != array_or_none2.shape: return True return (array_or_none1 != array_or_none2).any()
def not_equal(array_or_none1, array_or_none2)
Compare two arrays that can also be None or have different shapes and return a boolean. >>> a1 = numpy.array([1]) >>> a2 = numpy.array([2]) >>> a3 = numpy.array([2, 3]) >>> not_equal(a1, a2) True >>> not_equal(a1, a3) True >>> not_equal(a1, None) True
1.38388
1.52682
0.906381
if nbytes == 0: return '0 B' i = 0 while nbytes >= 1024 and i < len(suffixes) - 1: nbytes /= 1024. i += 1 f = ('%.2f' % nbytes).rstrip('0').rstrip('.') return '%s %s' % (f, suffixes[i])
def humansize(nbytes, suffixes=('B', 'KB', 'MB', 'GB', 'TB', 'PB'))
Return file size in a human-friendly format
1.332913
1.283436
1.03855
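Following the implementation above, for example:

    >>> humansize(0)
    '0 B'
    >>> humansize(2048)
    '2 KB'
    >>> humansize(3 * 1024 ** 3)
    '3 GB'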
msg = '%s.%s has been deprecated. %s' % ( func.__module__, func.__name__, msg) if not hasattr(func, 'called'): warnings.warn(msg, DeprecationWarning, stacklevel=2) func.called = 0 func.called += 1 return func(*args, **kw)
def deprecated(func, msg='', *args, **kw)
A family of decorators to mark deprecated functions. :param msg: the message to print the first time the deprecated function is used. Here is an example of usage: >>> @deprecated(msg='Use new_function instead') ... def old_function(): ... 'Do something' Notice that if the function is called several times, the deprecation warning will be displayed only the first time.
2.362278
3.358503
0.703372
assert 0 < reduction_factor <= 1, reduction_factor rnd = random.Random(seed) out = [] for obj in objects: if rnd.random() <= reduction_factor: out.append(obj) return out
def random_filter(objects, reduction_factor, seed=42)
Given a list of objects, returns a sublist obtained by randomly extracting some of its elements. The reduction factor (< 1) tells how small the extracted list is compared to the original one.
2.758647
2.704845
1.019891
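A usage sketch consistent with the code above; with the default seed the extraction is reproducible, and the order of the surviving elements is preserved:

    objects = list(range(1000))
    sample = random_filter(objects, 0.1)          # roughly 100 elements
    assert set(sample) <= set(objects)
    assert random_filter(objects, 0.1) == sample  # same seed, same result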
numpy.random.seed(seed) return numpy.histogram(numpy.random.random(counts), nbins, (0, 1))[0]
def random_histogram(counts, nbins, seed)
Distribute a total number of counts on a set of bins homogeneously. >>> random_histogram(1, 2, 42) array([1, 0]) >>> random_histogram(100, 5, 42) array([28, 18, 17, 19, 18]) >>> random_histogram(10000, 5, 42) array([2043, 2015, 2050, 1930, 1962])
2.946641
6.63715
0.443962
indices = AccumDict(accum=[]) # idx -> [(start, stop), ...] start = 0 for i, vals in itertools.groupby(integers): n = sum(1 for val in vals) indices[i].append((start, start + n)) start += n return indices
def get_indices(integers)
:param integers: a sequence of integers (with repetitions) :returns: a dict integer -> [(start, stop), ...] >>> get_indices([0, 0, 3, 3, 3, 2, 2, 0]) {0: [(0, 2), (7, 8)], 3: [(2, 5)], 2: [(5, 7)]}
5.336184
5.915383
0.902086
new_args = [] # when stdout is redirected to a file, python 2 uses ascii for the writer; # python 3 uses what is configured in the system (i.e. 'utf-8') # if sys.stdout is replaced by a StringIO instance, Python 2 does not # have an attribute 'encoding', and we assume ascii in that case str_encoding = getattr(sys.stdout, 'encoding', None) or 'ascii' for s in args: new_args.append(s.encode('utf-8').decode(str_encoding, 'ignore')) return print(*new_args, **kwargs)
def safeprint(*args, **kwargs)
Convert and print characters using the proper encoding
5.199837
4.997266
1.040536
if hasattr(hostport, 'startswith'): # string representation of the hostport combination if hostport.startswith('tcp://'): hostport = hostport[6:] # strip tcp:// host, port = hostport.split(':') hostport = (host, int(port)) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: exc = sock.connect_ex(hostport) finally: sock.close() return False if exc else True
def socket_ready(hostport)
:param hostport: a pair (host, port) or a string (tcp://)host:port :returns: True if the socket is ready and False otherwise
2.628654
2.606596
1.008463
prefix = len(os.path.commonprefix([os.path.dirname(f) for f in fnames])) with zipfile.ZipFile( archive, mode, zipfile.ZIP_DEFLATED, allowZip64=True) as z: for f in fnames: log('Archiving %s' % f) z.write(f, f[prefix:]) if cleanup: # remove the zipped file os.remove(f) log('Generated %s' % archive) return archive
def zipfiles(fnames, archive, mode='w', log=lambda msg: None, cleanup=False)
Build a zip archive from the given file names. :param fnames: list of path names :param archive: path of the archive
2.499738
3.192151
0.783089
# see https://pagure.io/python-daemon/blob/master/f/daemon/daemon.py and # https://stackoverflow.com/questions/45911705/why-use-os-setsid-in-python def fork_then_exit_parent(): pid = os.fork() if pid: # in parent os._exit(0) fork_then_exit_parent() os.setsid() fork_then_exit_parent()
def detach_process()
Detach the current process from the controlling terminal by using a double fork. Can be used only on platforms with fork (no Windows).
4.043463
4.139364
0.976832
sys.stdout.write(msg) sys.stdout.flush() sys.stdout.write('\x08' * len(msg)) sys.stdout.flush()
def println(msg)
Convenience function to print messages on a single line in the terminal
2.284684
2.422348
0.943169
msg = templ % args if args else templ tmp = tempfile.gettempdir() with open(os.path.join(tmp, 'debug.txt'), 'a', encoding='utf8') as f: f.write(msg + '\n')
def debug(templ, *args)
Append a debug line to the file /tmp/debug.txt
2.685175
2.386333
1.12523
if not args: sys.stderr.write('WARNING: ' + msg) else: sys.stderr.write('WARNING: ' + msg % args)
def warn(msg, *args)
Print a warning on stderr
2.448314
2.222998
1.101357
''' Find the memory footprint of a Python object recursively, see https://code.tutsplus.com/tutorials/understand-how-much-memory-your-python-objects-use--cms-25609 :param o: the object :returns: the size in bytes ''' ids = ids or set() if id(o) in ids: return 0 nbytes = sys.getsizeof(o) ids.add(id(o)) if isinstance(o, Mapping): return nbytes + sum(getsizeof(k, ids) + getsizeof(v, ids) for k, v in o.items()) elif isinstance(o, Container): return nbytes + sum(getsizeof(x, ids) for x in o) return nbytes
def getsizeof(o, ids=None)
Find the memory footprint of a Python object recursively, see https://code.tutsplus.com/tutorials/understand-how-much-memory-your-python-objects-use--cms-25609 :param o: the object :returns: the size in bytes
2.663966
1.72855
1.541157
item, weight = item_weight self._seq.insert(i, item) self.weight += weight
def insert(self, i, item_weight)
Insert an item with the given weight in the sequence
6.081522
5.181479
1.173704
def decorator(func): for key in keys: self[key] = func return func return decorator
def add(self, *keys)
Return a decorator registering a new implementation for the CallableDict for the given keys.
3.951791
2.839437
1.391752
return self.__class__({key: func(value, *extras) for key, value in self.items()})
def apply(self, func, *extras)
>> a = AccumDict({'a': 1, 'b': 2}) >> a.apply(lambda x, y: 2 * x + y, 1) {'a': 3, 'b': 5}
4.193492
3.797237
1.104354
assert len(self.array) == len(array) arr = object.__new__(self.__class__) arr.dt = self.dt arr.slicedic = self.slicedic arr.array = array return arr
def new(self, array)
Convert an array of compatible length into a DictArray: >>> d = DictArray({'PGA': [0.01, 0.02, 0.04], 'PGV': [0.1, 0.2]}) >>> d.new(numpy.arange(0, 5, 1)) # array of length 5 = 3 + 2 <DictArray PGA: [0 1 2] PGV: [3 4]>
5.010815
6.040902
0.829481
# extracting dictionary of coefficients specific to required # intensity measure type. C = self.COEFFS[imt] imean = self._get_mean(C, rup, dists, sites) if imt.name in "SA PGA": # Convert units to g, # but only for PGA and SA (not PGV): mean = np.log((10.0 ** (imean - 2.0)) / g) else: # PGV: mean = np.log(10.0 ** imean) istddevs = self._get_stddevs(C, stddev_types, len(sites.vs30)) stddevs = np.log(10.0 ** np.array(istddevs)) return mean + self.adjustment_factor, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
4.262465
4.340137
0.982104
dmag = mag - self.CONSTS["Mh"] if mag < self.CONSTS["Mh"]: return C["e1"] + (C["b1"] * dmag) + (C["b2"] * (dmag ** 2.0)) else: return C["e1"] + (C["b3"] * dmag)
def _get_magnitude_scaling_term(self, C, mag)
Returns the magnitude scaling term of the GMPE described in equation 3
3.674091
3.46122
1.061502
r_adj = np.sqrt(rval ** 2.0 + C["h"] ** 2.0) return ( (C["c1"] + C["c2"] * (mag - self.CONSTS["Mref"])) * np.log10(r_adj / self.CONSTS["Rref"]) - (C["c3"] * (r_adj - self.CONSTS["Rref"])))
def _get_distance_scaling_term(self, C, rval, mag)
Returns the distance scaling term of the GMPE described in equation 2
3.704178
3.427003
1.08088
SS, NS, RS = 0.0, 0.0, 0.0 if np.abs(rup.rake) <= 30.0 or (180.0 - np.abs(rup.rake)) <= 30.0: # strike-slip SS = 1.0 elif rup.rake > 30.0 and rup.rake < 150.0: # reverse RS = 1.0 else: # normal NS = 1.0 return (C["sofN"] * NS) + (C["sofR"] * RS) + (C["sofS"] * SS)
def _get_style_of_faulting_term(self, C, rup)
Returns the style-of-faulting term. Fault type (Strike-slip, Normal, Thrust/reverse) is derived from the rake angle. Rake angles within 30 degrees of horizontal are strike-slip, angles from 30 to 150 are reverse, and angles from -30 to -150 are normal. Note that the 'Unspecified' case is not considered in this class as rake is required as an input variable.
2.579466
2.222566
1.16058
return C["gamma"] * np.log10(vs30 / self.CONSTS["Vref"])
def _get_site_amplification_term(self, C, vs30)
Returns the site amplification term for the case in which Vs30 is used directly
11.840969
13.735375
0.862078
f_s = np.zeros_like(vs30) # Site class B idx = np.logical_and(vs30 < 800.0, vs30 >= 360.0) f_s[idx] = C["eB"] # Site Class C idx = np.logical_and(vs30 < 360.0, vs30 >= 180.0) f_s[idx] = C["eC"] # Site Class D idx = vs30 < 180.0 f_s[idx] = C["eD"] return f_s
def _get_site_amplification_term(self, C, vs30)
Returns the site amplification given Eurocode 8 site classification
2.240484
2.095175
1.069354
return (self._get_magnitude_scaling_term(C, rup.mag) + self._get_distance_scaling_term(C, dists.rjb, rup.mag) + self._get_site_amplification_term(C, sites.vs30))
def _get_mean(self, C, rup, dists, sites)
Returns the mean value of ground motion - noting that in this case the style-of-faulting term is neglected
2.907911
2.863605
1.015472
oq = dstore['oqparam'] L = len(oq.loss_dt().names) R = dstore['csm_info'].get_num_rlzs() serials = dstore['ruptures']['serial'] idx_by_ser = dict(zip(serials, range(len(serials)))) tbl = numpy.zeros((len(serials), L), F32) lbr = numpy.zeros((R, L), F32) # losses by rlz for rec in dstore['losses_by_event'].value: # call .value for speed idx = idx_by_ser[rec['eid'] // TWO32] tbl[idx] += rec['loss'] lbr[rec['rlzi']] += rec['loss'] return tbl, lbr
def build_loss_tables(dstore)
Compute the total losses by rupture and losses by rlzi.
7.626108
6.376886
1.195898
L = len(riskmodel.lti) epspath = param['epspath'] for ri in riskinputs: with monitor('getting hazard'): ri.hazard_getter.init() hazard = ri.hazard_getter.get_hazard() mon = monitor('build risk curves', measuremem=False) A = len(ri.aids) R = ri.hazard_getter.num_rlzs try: avg = numpy.zeros((A, R, L), F32) except MemoryError: raise MemoryError( 'Building array avg of shape (%d, %d, %d)' % (A, R, L)) result = dict(aids=ri.aids, avglosses=avg) acc = AccumDict() # accumulator eidx -> agglosses aid2idx = {aid: idx for idx, aid in enumerate(ri.aids)} if 'builder' in param: builder = param['builder'] P = len(builder.return_periods) all_curves = numpy.zeros((A, R, P), builder.loss_dt) # update the result dictionary and the agg array with each output for out in riskmodel.gen_outputs(ri, monitor, epspath, hazard): if len(out.eids) == 0: # this happens for sites with no events continue r = out.rlzi agglosses = numpy.zeros((len(out.eids), L), F32) for l, loss_type in enumerate(riskmodel.loss_types): loss_ratios = out[loss_type] if loss_ratios is None: # for GMFs below the minimum_intensity continue avalues = riskmodels.get_values(loss_type, ri.assets) for a, asset in enumerate(ri.assets): aval = avalues[a] aid = asset['ordinal'] idx = aid2idx[aid] ratios = loss_ratios[a] # length E # average losses avg[idx, r, l] = ( ratios.sum(axis=0) * param['ses_ratio'] * aval) # agglosses agglosses[:, l] += ratios * aval if 'builder' in param: with mon: # this is the heaviest part all_curves[idx, r][loss_type] = ( builder.build_curve(aval, ratios, r)) # NB: I could yield the agglosses per output, but then I would # have millions of small outputs with big data transfer and slow # saving time acc += dict(zip(out.eids, agglosses)) if 'builder' in param: clp = param['conditional_loss_poes'] result['curves-rlzs'], result['curves-stats'] = builder.pair( all_curves, param['stats']) if R > 1 and param['individual_curves'] is False: del result['curves-rlzs'] if clp: result['loss_maps-rlzs'], result['loss_maps-stats'] = ( builder.build_maps(all_curves, clp, param['stats'])) if R > 1 and param['individual_curves'] is False: del result['loss_maps-rlzs'] # store info about the GMFs, must be done at the end result['agglosses'] = (numpy.array(list(acc)), numpy.array(list(acc.values()))) yield result
def event_based_risk(riskinputs, riskmodel, param, monitor)
:param riskinputs: :class:`openquake.risklib.riskinput.RiskInput` objects :param riskmodel: a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance :param param: a dictionary of parameters :param monitor: :class:`openquake.baselib.performance.Monitor` instance :returns: a dictionary of numpy arrays of shape (L, R)
5.993564
5.905738
1.014871
name = 'table%d' % next(tablecounter) return HtmlTable([map(str, row) for row in header_rows], name).render()
def html(header_rows)
Convert a list of tuples describing a table into an HTML string
8.0272
7.58498
1.058302
templ = ''' <div id="tabs"> <ul> %s </ul> %s </div>''' lis = [] contents = [] for i, (tag_id, status, tag_content) in enumerate( zip(tag_ids, tag_status, tag_contents), 1): mark = '.' if status == 'complete' else '!' lis.append('<li><a href="#tabs-%d">%s%s</a></li>' % (i, tag_id, mark)) contents.append('<div id="tabs-%d">%s</div>' % ( i, tag_content)) return templ % ('\n'.join(lis), '\n'.join(contents))
def make_tabs(tag_ids, tag_status, tag_contents)
Return an HTML string containing all the tabs we want to display
2.167095
2.139637
1.012833
if isodate == 'today': isodate = date.today() else: isodate = date(*time.strptime(isodate, '%Y-%m-%d')[:3]) isodate1 = isodate + timedelta(1) # +1 day tag_ids = [] tag_status = [] tag_contents = [] # the fetcher returns an header which is stripped with [1:] jobs = dbcmd( 'fetch', ALL_JOBS, isodate.isoformat(), isodate1.isoformat()) page = '<h2>%d job(s) finished before midnight of %s</h2>' % ( len(jobs), isodate) for job_id, user, status, ds_calc in jobs: tag_ids.append(job_id) tag_status.append(status) [stats] = dbcmd('fetch', JOB_STATS, job_id) (job_id, user, start_time, stop_time, status, duration) = stats try: ds = read(job_id, datadir=os.path.dirname(ds_calc)) txt = view_fullreport('fullreport', ds) report = html_parts(txt) except Exception as exc: report = dict( html_title='Could not generate report: %s' % cgi.escape( str(exc), quote=True), fragment='') page = report['html_title'] page += html([stats._fields, stats]) page += report['fragment'] tag_contents.append(page) page = make_tabs(tag_ids, tag_status, tag_contents) + ( 'Report last updated: %s' % datetime.now()) fname = 'jobs-%s.html' % isodate with open(fname, 'w') as f: f.write(PAGE_TEMPLATE % page) return fname
def make_report(isodate='today')
Build an HTML report with the computations performed at the given isodate. Return the name of the report, which is saved in the current directory.
4.999781
4.85687
1.029424
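A hypothetical invocation; it needs a running engine database with jobs finished on the given day, so this is a sketch rather than a self-contained snippet.

fname = make_report('2018-05-01')   # writes and returns 'jobs-2018-05-01.html'
fname = make_report()               # same, for today's date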
E = param['E'] L = len(riskmodel.loss_types) result = dict(agg=numpy.zeros((E, L), F32), avg=[], all_losses=AccumDict(accum={})) for ri in riskinputs: for out in riskmodel.gen_outputs(ri, monitor, param['epspath']): r = out.rlzi weight = param['weights'][r] slc = param['event_slice'](r) for l, loss_type in enumerate(riskmodel.loss_types): losses = out[loss_type] if numpy.product(losses.shape) == 0: # happens for all NaNs continue stats = numpy.zeros(len(ri.assets), stat_dt) # mean, stddev for a, asset in enumerate(ri.assets): stats['mean'][a] = losses[a].mean() stats['stddev'][a] = losses[a].std(ddof=1) result['avg'].append((l, r, asset['ordinal'], stats[a])) agglosses = losses.sum(axis=0) # shape num_gmfs result['agg'][slc, l] += agglosses * weight if param['asset_loss_table']: aids = ri.assets['ordinal'] result['all_losses'][l, r] += AccumDict(zip(aids, losses)) return result
def scenario_risk(riskinputs, riskmodel, param, monitor)
Core function for a scenario computation.

:param riskinputs:
    a list of :class:`openquake.risklib.riskinput.RiskInput` objects
:param riskmodel:
    a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
:param param:
    dictionary of extra parameters
:param monitor:
    :class:`openquake.baselib.performance.Monitor` instance
:returns:
    a dictionary {
    'agg': array of aggregate losses of shape (E, L),
    'avg': list of tuples (lt_idx, rlz_idx, asset_ordinal, statistics)
    }
    where E is the number of simulated events, L the number of loss types
    and statistics is a per-asset record with fields 'mean' and 'stddev'
6.38099
5.257714
1.213643
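A toy illustration (hypothetical numbers) of the per-asset statistics computed above: the losses over the simulated GMFs are reduced to a mean and a sample standard deviation for each asset.

import numpy as np

# rows are assets, columns are simulated ground-motion fields
losses = np.array([[1200., 900., 1500.],
                   [300., 450., 150.]])
print(losses.mean(axis=1))           # per-asset mean loss
print(losses.std(axis=1, ddof=1))    # per-asset sample standard deviation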
if config.dbserver.multi_user and getpass.getuser() != 'openquake': sys.exit('oq workers only works in single user mode') master = workerpool.WorkerMaster(config.dbserver.host, **config.zworkers) print(getattr(master, cmd)())
def workers(cmd)
start/stop/restart the workers, or return their status
14.841114
13.976057
1.061896
return EvenlyDiscretizedMFD(self.mmin + self.bin_width / 2., self.bin_width, self.occurrence_rate.tolist())
def to_evenly_discretized_mfd(self)
Returns the activity rate as an instance of the :class:`openquake.hazardlib.mfd.evenly_discretized.EvenlyDiscretizedMFD`
4.952557
4.662654
1.062175
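A small sketch of the conversion, building the same kind of EvenlyDiscretizedMFD directly from hypothetical mmin, bin_width and occurrence_rate values mirroring the attributes used by the method above.

import numpy as np
from openquake.hazardlib.mfd import EvenlyDiscretizedMFD

mmin, bin_width = 5.0, 0.1                      # hypothetical values
occurrence_rate = np.array([1e-2, 5e-3, 1e-3])  # annual rates per bin
# the first bin is centred at mmin + bin_width / 2, as in the method
mfd = EvenlyDiscretizedMFD(mmin + bin_width / 2., bin_width,
                           occurrence_rate.tolist())
print(mfd.get_annual_occurrence_rates())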
'''Returns the default upper and lower depth values if not in dictionary :param input_dict: Dictionary corresponding to the kwargs dictionary of calling function :returns: 'upper_depth': Upper seismogenic depth (float) 'lower_depth': Lower seismogenic depth (float) ''' if ('upper_depth' in input_dict.keys()) and input_dict['upper_depth']: if input_dict['upper_depth'] < 0.: raise ValueError('Upper seismogenic depth must be positive') else: upper_depth = input_dict['upper_depth'] else: upper_depth = 0.0 if ('lower_depth' in input_dict.keys()) and input_dict['lower_depth']: if input_dict['lower_depth'] < upper_depth: raise ValueError('Lower depth must take a greater value than' ' upper depth!') else: lower_depth = input_dict['lower_depth'] else: lower_depth = np.inf return upper_depth, lower_depth
def _check_depth_limits(input_dict)
Returns the default upper and lower depth values if not in dictionary :param input_dict: Dictionary corresponding to the kwargs dictionary of calling function :returns: 'upper_depth': Upper seismogenic depth (float) 'lower_depth': Lower seismogenic depth (float)
2.613053
1.751053
1.492276
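A minimal sketch of the defaulting behaviour, assuming _check_depth_limits from the entry above is importable.

print(_check_depth_limits({}))                     # (0.0, inf)
print(_check_depth_limits({'upper_depth': 5.0}))   # (5.0, inf)
print(_check_depth_limits({'upper_depth': 5.0,
                           'lower_depth': 30.0}))  # (5.0, 30.0)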
'''
As the decimal time function requires inputs in the form of numpy
arrays, each value in the datetime object needs to be converted to a
single-element numpy array
'''
# Get decimal seconds from seconds + microseconds
temp_seconds = float(time.second) + (float(time.microsecond) / 1.0E6)
return decimal_time(np.array([time.year], dtype=int),
                    np.array([time.month], dtype=int),
                    np.array([time.day], dtype=int),
                    np.array([time.hour], dtype=int),
                    np.array([time.minute], dtype=int),
                    np.array([temp_seconds], dtype=float))
def _get_decimal_from_datetime(time)
As the decimal time function requires inputs in the form of numpy arrays, each value in the datetime object needs to be converted to a single-element numpy array
3.654263
2.096639
1.742915
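A hypothetical usage sketch, assuming _get_decimal_from_datetime and the decimal_time helper it wraps are importable.

import datetime

event_time = datetime.datetime(2010, 7, 1, 12, 30, 15, 500000)
decimal_year = _get_decimal_from_datetime(event_time)
print(decimal_year)   # single-element array, roughly 2010.5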
''' Method to post-process the catalogue based on the selection options :param numpy.ndarray valid_id: Boolean vector indicating whether each event is selected (True) or not (False) :returns: Catalogue of selected events as instance of openquake.hmtk.seismicity.catalogue.Catalogue class ''' if not np.any(valid_id): # No events selected - create clean instance of class output = Catalogue() output.processes = self.catalogue.processes elif np.all(valid_id): if self.copycat: output = deepcopy(self.catalogue) else: output = self.catalogue else: if self.copycat: output = deepcopy(self.catalogue) else: output = self.catalogue output.purge_catalogue(valid_id) return output
def select_catalogue(self, valid_id)
Method to post-process the catalogue based on the selection options :param numpy.ndarray valid_id: Boolean vector indicating whether each event is selected (True) or not (False) :returns: Catalogue of selected events as instance of openquake.hmtk.seismicity.catalogue.Catalogue class
3.816234
2.149209
1.775646
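A hedged usage sketch: assuming the hmtk selector class (CatalogueSelector) wraps an already loaded Catalogue instance `cat`, and that create_copy maps onto the copycat behaviour above, any boolean vector over the events can drive the selection.

import numpy as np
from openquake.hmtk.seismicity.selector import CatalogueSelector

# `cat` is assumed to be an already loaded openquake.hmtk Catalogue
selector = CatalogueSelector(cat, create_copy=True)
valid_id = cat.data['magnitude'] >= 5.0   # keep only M >= 5 events
subset = selector.select_catalogue(valid_id)
print(subset.get_number_events())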
'''
Select earthquakes within polygon

:param polygon:
    Polygon as instance of nhlib.geo.polygon.Polygon class
:param float distance:
    Buffer distance (km) (can take negative values)
:returns:
    Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
    containing only selected events
'''
if distance:
    # If a distance is specified then dilate the polygon by distance
    zone_polygon = polygon.dilate(distance)
else:
    zone_polygon = polygon

# Mark as valid all events inside the depth range
upper_depth, lower_depth = _check_depth_limits(kwargs)
valid_depth = np.logical_and(
    self.catalogue.data['depth'] >= upper_depth,
    self.catalogue.data['depth'] < lower_depth)

# Events outside the polygon are marked invalid
catalogue_mesh = Mesh(self.catalogue.data['longitude'],
                      self.catalogue.data['latitude'],
                      self.catalogue.data['depth'])
valid_id = np.logical_and(valid_depth,
                          zone_polygon.intersects(catalogue_mesh))
return self.select_catalogue(valid_id)
def within_polygon(self, polygon, distance=None, **kwargs)
Select earthquakes within polygon

:param polygon:
    Polygon as instance of nhlib.geo.polygon.Polygon class
:param float distance:
    Buffer distance (km) (can take negative values)
:returns:
    Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
    containing only selected events
5.294781
3.124508
1.694597
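Continuing the selector sketch above (same assumed `selector` object), a buffered polygon selection might look like this; the coordinates and distances are hypothetical.

from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.geo.polygon import Polygon

zone = Polygon([Point(29.0, 39.0), Point(31.0, 39.0),
                Point(31.0, 41.0), Point(29.0, 41.0)])
# dilate the zone by a 20 km buffer and keep only events above 50 km depth
zone_events = selector.within_polygon(zone, distance=20.0, lower_depth=50.0)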
'''
Select earthquakes within a distance from a Point

:param point:
    Centre point as instance of nhlib.geo.point.Point class
:param float distance:
    Distance (km)
:returns:
    Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
    containing only selected events
'''
if kwargs['distance_type'] == 'epicentral':
    # epicentral distance: collapse all events onto the surface
    locations = Mesh(
        self.catalogue.data['longitude'],
        self.catalogue.data['latitude'],
        np.zeros(len(self.catalogue.data['longitude']), dtype=float))
    point = Point(point.longitude, point.latitude, 0.0)
else:
    # hypocentral distance: use the full 3D hypocentre locations
    locations = self.catalogue.hypocentres_as_mesh()
is_close = point.closer_than(locations, distance)
return self.select_catalogue(is_close)
def circular_distance_from_point(self, point, distance, **kwargs)
Select earthquakes within a distance from a Point :param point: Centre point as instance of nhlib.geo.point.Point class :param float distance: Distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` containing only selected events
5.106774
2.608767
1.957544
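Same assumed `selector` as in the earlier sketches: an epicentral search within 50 km of a hypothetical point.

from openquake.hazardlib.geo.point import Point

centre = Point(30.0, 40.0)
# 'epicentral' ignores depth; 'hypocentral' would use the 3D hypocentres
near_events = selector.circular_distance_from_point(
    centre, 50.0, distance_type='epicentral')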
'''
Select earthquakes from within a square centered on a point

:param point:
    Centre point as instance of nhlib.geo.point.Point class
:param distance:
    Distance (km)
:returns:
    Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
    class containing only selected events
'''
point_surface = Point(point.longitude, point.latitude, 0.)
# distance is the half-width of the square: locate the bounding points
# due north, east, south and west of the centre
north_point = point_surface.point_at(distance, 0., 0.)
east_point = point_surface.point_at(distance, 0., 90.)
south_point = point_surface.point_at(distance, 0., 180.)
west_point = point_surface.point_at(distance, 0., 270.)
is_long = np.logical_and(
    self.catalogue.data['longitude'] >= west_point.longitude,
    self.catalogue.data['longitude'] < east_point.longitude)
# np.logical_and combines only two arrays (a third positional argument
# is taken as the output array), so reduce over the full condition list
is_surface = np.logical_and.reduce([
    is_long,
    self.catalogue.data['latitude'] >= south_point.latitude,
    self.catalogue.data['latitude'] < north_point.latitude])
upper_depth, lower_depth = _check_depth_limits(kwargs)
is_valid = np.logical_and.reduce([
    is_surface,
    self.catalogue.data['depth'] >= upper_depth,
    self.catalogue.data['depth'] < lower_depth])
return self.select_catalogue(is_valid)
def cartesian_square_centred_on_point(self, point, distance, **kwargs)
Select earthquakes from within a square centered on a point :param point: Centre point as instance of nhlib.geo.point.Point class :param distance: Distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` class containing only selected events
2.870654
2.040558
1.406799
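Same assumed `selector`: events inside a square of 100 km half-width centred on a hypothetical point, restricted to the top 40 km of the crust.

from openquake.hazardlib.geo.point import Point

square_events = selector.cartesian_square_centred_on_point(
    Point(30.0, 40.0), 100.0, upper_depth=0.0, lower_depth=40.0)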