code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
'''
Select events within a Joyner-Boore distance of a fault

:param surface:
    Fault surface as instance of
    nhlib.geo.surface.base.SimpleFaultSurface or as instance of
    nhlib.geo.surface.ComplexFaultSurface
:param float distance:
    Rupture distance (km)
:returns:
    Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
    containing only selected events
'''
upper_depth, lower_depth = _check_depth_limits(kwargs)
rjb = surface.get_joyner_boore_distance(
    self.catalogue.hypocentres_as_mesh())
is_valid = np.logical_and(
    rjb <= distance,
    np.logical_and(self.catalogue.data['depth'] >= upper_depth,
                   self.catalogue.data['depth'] < lower_depth))
return self.select_catalogue(is_valid)
def within_joyner_boore_distance(self, surface, distance, **kwargs)
Select events within a Joyner-Boore distance of a fault :param surface: Fault surface as instance of nhlib.geo.surface.base.SimpleFaultSurface or as instance of nhlib.geo.surface.ComplexFaultSurface :param float distance: Rupture distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` containing only selected events
4.965473
2.377896
2.088179
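A self-contained sketch of the selection mask built above; the distance and depth values are made-up numbers standing in for the catalogue arrays, not data from this listing:

import numpy as np
rjb = np.array([12.0, 48.0, 75.0])     # Joyner-Boore distances (km)
depth = np.array([5.0, 12.0, 40.0])    # hypocentral depths (km)
is_valid = np.logical_and(
    rjb <= 50.0,
    np.logical_and(depth >= 0.0, depth < 30.0))
print(is_valid)   # [ True  True False]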
'''
Select events within a rupture distance from a fault surface

:param surface:
    Fault surface as instance of nhlib.geo.surface.base.BaseSurface
:param float distance:
    Rupture distance (km)
:returns:
    Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
    containing only selected events
'''
# Check for upper and lower depths
upper_depth, lower_depth = _check_depth_limits(kwargs)
rrupt = surface.get_min_distance(self.catalogue.hypocentres_as_mesh())
is_valid = np.logical_and(
    rrupt <= distance,
    np.logical_and(self.catalogue.data['depth'] >= upper_depth,
                   self.catalogue.data['depth'] < lower_depth))
return self.select_catalogue(is_valid)
def within_rupture_distance(self, surface, distance, **kwargs)
Select events within a rupture distance from a fault surface :param surface: Fault surface as instance of nhlib.geo.surface.base.BaseSurface :param float distance: Rupture distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` containing only selected events
4.708744
2.81672
1.671711
'''
Select earthquakes occurring within a given time period

:param start_time:
    Earliest time (as datetime.datetime object)
:param end_time:
    Latest time (as datetime.datetime object)
:returns:
    Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
    containing only selected events
'''
if not start_time:
    if not end_time:
        # No times input, therefore skip everything and return catalogue
        return self.catalogue
    else:
        start_time = np.min(self.catalogue.data['year'])
else:
    start_time = _get_decimal_from_datetime(start_time)

if not end_time:
    end_time = _get_decimal_from_datetime(datetime.now())
else:
    end_time = _get_decimal_from_datetime(end_time)

# Get decimal time values
time_value = self.catalogue.get_decimal_time()
is_valid = np.logical_and(time_value >= start_time,
                          time_value < end_time)
return self.select_catalogue(is_valid)
def within_time_period(self, start_time=None, end_time=None)
Select earthquakes occurring within a given time period :param start_time: Earliest time (as datetime.datetime object) :param end_time: Latest time (as datetime.datetime object) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` containing only selected events
2.93198
2.222681
1.319119
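The comparison happens on decimal years; a sketch of the mask with made-up values standing in for get_decimal_time() and for the decimal forms of the two datetimes:

import numpy as np
time_value = np.array([1987.5, 1995.2, 2012.9])
start_time, end_time = 1990.0, 2010.0
is_valid = np.logical_and(time_value >= start_time, time_value < end_time)
print(is_valid)   # [False  True False]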
'''
Selects events within a specified depth range

:param float lower_depth:
    Lower depth for consideration
:param float upper_depth:
    Upper depth for consideration
:returns:
    Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
    containing only selected events
'''
if not lower_depth:
    if not upper_depth:
        # No limiting depths defined - so return entire catalogue!
        return self.catalogue
    else:
        lower_depth = np.inf

if not upper_depth:
    upper_depth = 0.0

is_valid = np.logical_and(self.catalogue.data['depth'] >= upper_depth,
                          self.catalogue.data['depth'] < lower_depth)
return self.select_catalogue(is_valid)
def within_depth_range(self, lower_depth=None, upper_depth=None)
Selects events within a specified depth range :param float lower_depth: Lower depth for consideration :param float upper_depth: Upper depth for consideration :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` containing only selected events
3.272588
2.316366
1.412811
'''
:param float lower_mag:
    Lower magnitude for consideration
:param float upper_mag:
    Upper magnitude for consideration
:returns:
    Instance of openquake.hmtk.seismicity.catalogue.Catalogue class
    containing only selected events
'''
if not lower_mag:
    if not upper_mag:
        # No limiting magnitudes defined - return entire catalogue!
        return self.catalogue
    else:
        lower_mag = -np.inf

if not upper_mag:
    upper_mag = np.inf

is_valid = np.logical_and(
    self.catalogue.data['magnitude'] >= lower_mag,
    self.catalogue.data['magnitude'] < upper_mag)
return self.select_catalogue(is_valid)
def within_magnitude_range(self, lower_mag=None, upper_mag=None)
:param float lower_mag: Lower magnitude for consideration :param float upper_mag: Upper magnitude for consideration :returns: Instance of openquake.hmtk.seismicity.catalogue.Catalogue class containing only selected events
3.196357
2.201318
1.452019
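Both the depth and magnitude selectors reduce to simple boolean masks over catalogue arrays; a runnable illustration with made-up magnitudes:

import numpy as np
magnitude = np.array([3.9, 4.7, 5.5, 6.2])
is_valid = np.logical_and(magnitude >= 4.5, magnitude < 6.0)
print(is_valid)   # [False  True  True False]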
num_clust = np.max(vcl)
cluster_set = []
for clid in range(0, num_clust + 1):
    idx = np.where(vcl == clid)[0]
    cluster_cat = deepcopy(self.catalogue)
    cluster_cat.select_catalogue_events(idx)
    cluster_set.append((clid, cluster_cat))
return dict(cluster_set)
def create_cluster_set(self, vcl)
For a given catalogue and list of cluster IDs this function splits the catalogue into a dictionary containing an individual catalogue of events within each cluster :param numpy.ndarray vcl: Cluster ID list :returns: Dictionary of instances of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`, where each instance is the catalogue of the corresponding cluster
3.304651
3.114629
1.06101
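A sketch of the grouping logic alone, with a made-up cluster-ID array (each event index is assigned to the cluster whose ID it carries):

import numpy as np
vcl = np.array([0, 1, 1, 0, 2])
for clid in range(np.max(vcl) + 1):
    idx = np.where(vcl == clid)[0]
    print(clid, idx)   # prints: 0 [0 3] / 1 [1 2] / 2 [4]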
is_valid = np.logical_and(
    self.catalogue.data['longitude'] >= limits[0],
    np.logical_and(
        self.catalogue.data['longitude'] <= limits[2],
        np.logical_and(
            self.catalogue.data['latitude'] >= limits[1],
            self.catalogue.data['latitude'] <= limits[3])))
return self.select_catalogue(is_valid)
def within_bounding_box(self, limits)
Selects the earthquakes within a bounding box. :parameter limits: A list or a numpy array with four elements in the following order: - min x (longitude) - min y (latitude) - max x (longitude) - max y (latitude) :returns: A :class:`openquake.hmtk.seismicity.catalogue.Catalogue` instance
2.53593
2.302256
1.101498
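The bounding-box test on bare arrays, with limits in the documented order [min_lon, min_lat, max_lon, max_lat] (values here are made up):

import numpy as np
lon = np.array([9.5, 11.2, 15.0])
lat = np.array([43.0, 44.5, 45.0])
limits = [10.0, 42.0, 14.0, 46.0]
is_valid = np.logical_and(
    lon >= limits[0],
    np.logical_and(lon <= limits[2],
                   np.logical_and(lat >= limits[1], lat <= limits[3])))
print(is_valid)   # [False  True False]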
datadir = os.environ.get('OQ_DATADIR')
if not datadir:
    shared_dir = config.directory.shared_dir
    if shared_dir:
        datadir = os.path.join(shared_dir, getpass.getuser(), 'oqdata')
    else:  # use the home of the user
        datadir = os.path.join(os.path.expanduser('~'), 'oqdata')
return datadir
def get_datadir()
Extracts the path of the directory where the openquake data are stored from the environment ($OQ_DATADIR) or from the shared_dir in the configuration file.
3.116903
2.418192
1.288939
datadir = datadir or get_datadir()
if not os.path.exists(datadir):
    return []
calc_ids = set()
for f in os.listdir(datadir):
    mo = re.match(CALC_REGEX, f)
    if mo:
        calc_ids.add(int(mo.group(2)))
return sorted(calc_ids)
def get_calc_ids(datadir=None)
Extract the available calculation IDs from the datadir, in order.
2.152604
1.982213
1.08596
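The code reads mo.group(2), so CALC_REGEX evidently captures the numeric ID in its second group; a pattern consistent with that usage (an assumption, not taken from this listing) behaves like this:

import re
CALC_REGEX = r'(calc|cache)_(\d+)\.hdf5'   # assumed definition
mo = re.match(CALC_REGEX, 'calc_42.hdf5')
print(int(mo.group(2)))   # 42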
datadir = datadir or get_datadir()
calcs = get_calc_ids(datadir)
if not calcs:
    return 0
return calcs[-1]
def get_last_calc_id(datadir=None)
Extract the latest calculation ID from the given directory. If none is found, return 0.
2.949256
2.708882
1.088735
datadir = datadir or get_datadir()
calc_id = get_last_calc_id(datadir) + 1
fname = os.path.join(datadir, 'calc_%d.hdf5' % calc_id)
new = hdf5.File(fname, 'w')
new.path = fname
return new
def hdf5new(datadir=None)
Return a new `hdf5.File` instance whose name is determined by the last calculation ID in the datadir (plus one). Set the .path attribute to the generated filename.
3.084782
2.552432
1.208566
datadir = datadir or get_datadir()
try:
    calc_id = int(filename)
except ValueError:
    filename = os.path.abspath(filename)
    datadir = os.path.dirname(filename)
    mo = re.match(CALC_REGEX, os.path.basename(filename))
    if mo is None:
        raise ValueError('Cannot extract calc_id from %s' % filename)
    calc_id = int(mo.group(2))
return calc_id, datadir
def extract_calc_id_datadir(filename, datadir=None)
Extract the calculation ID from the given filename or integer:

>>> extract_calc_id_datadir('/mnt/ssd/oqdata/calc_25.hdf5')
(25, '/mnt/ssd/oqdata')
>>> extract_calc_id_datadir('/mnt/ssd/oqdata/wrong_name.hdf5')
Traceback (most recent call last):
  ...
ValueError: Cannot extract calc_id from /mnt/ssd/oqdata/wrong_name.hdf5
2.209175
2.469721
0.894504
datadir = datadir or get_datadir()
dstore = DataStore(calc_id, datadir, mode=mode)
try:
    hc_id = dstore['oqparam'].hazard_calculation_id
except KeyError:  # no oqparam
    hc_id = None
if hc_id:
    dstore.parent = read(hc_id,
                         datadir=os.path.dirname(dstore.filename))
return dstore
def read(calc_id, mode='r', datadir=None)
Read the datastore, if it exists and is accessible. :param calc_id: calculation ID or filename :param mode: 'r' or 'w' :param datadir: the directory where to look :returns: the corresponding DataStore instance
4.606495
4.504827
1.022569
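A minimal usage sketch, assuming calculation 42 exists in the default datadir (the ID is hypothetical); the parent chain is resolved automatically as shown above:

dstore = read(42)          # opens calc_42.hdf5 read-only
print(dstore.filename)     # e.g. ~/oqdata/calc_42.hdf5
dstore.close()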
if self.hdf5 == ():  # not already open
    kw = dict(mode=mode, libver='latest')
    if mode == 'r':
        kw['swmr'] = True
    try:
        self.hdf5 = hdf5.File(self.filename, **kw)
    except OSError as exc:
        raise OSError('%s in %s' % (exc, self.filename))
def open(self, mode)
Open the underlying .hdf5 file and the parent, if any
3.674115
3.342254
1.099293
try:
    obj = h5py.File.__getitem__(self.hdf5, key)
except KeyError:
    if self.parent != ():
        return self.parent.get_attr(key, name, default)
    else:
        raise
try:
    return obj.attrs[name]
except KeyError:
    if default is None:
        raise
    return default
def get_attr(self, key, name, default=None)
:param key: dataset path :param name: name of the attribute :param default: value to return if the attribute is missing
3.284578
3.21676
1.021083
try:
    dset = h5py.File.__getitem__(self.hdf5, key)
except KeyError:
    if self.parent != ():
        dset = h5py.File.__getitem__(self.parent.hdf5, key)
    else:
        raise
return dict(dset.attrs)
def get_attrs(self, key)
:param key: dataset path :returns: dictionary of attributes for that path
3.850148
3.535444
1.089014
return hdf5.create(
    self.hdf5, key, dtype, shape, compression, fillvalue, attrs)
def create_dset(self, key, dtype, shape=(None,), compression=None, fillvalue=0, attrs=None)
Create a one-dimensional HDF5 dataset. :param key: name of the dataset :param dtype: dtype of the dataset (usually composite) :param shape: shape of the dataset, possibly extendable :param compression: the kind of HDF5 compression to use :param attrs: dictionary of attributes of the dataset :returns: a HDF5 dataset
4.431398
6.953101
0.637327
try:
    dset = self.hdf5[key]
except KeyError:
    dset = hdf5.create(self.hdf5, key, array.dtype,
                       shape=(None,) + array.shape[1:])
hdf5.extend(dset, array)
for k, v in attrs.items():
    dset.attrs[k] = v
return dset
def extend(self, key, array, **attrs)
Extend the dataset associated to the given key; create it if needed :param key: name of the dataset :param array: array to store :param attrs: a dictionary of attributes
2.628594
2.99429
0.877869
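A usage sketch, given an open DataStore `dstore`; the dataset name 'my_data' and the 'units' attribute are invented here for illustration:

import numpy
arr = numpy.array([(1, 2.5), (2, 3.5)], dtype=[('a', int), ('b', float)])
dset = dstore.extend('my_data', arr, units='m')   # creates, then appends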
if key not in self:
    obj = hdf5.LiteralAttrs()
else:
    obj = self[key]
vars(obj).update(kw)
self[key] = obj
self.flush()
def save(self, key, kw)
Update the object associated to `key` with the `kw` dictionary; works for LiteralAttrs objects and automatically flushes.
5.745552
3.495337
1.643776
# removing inner slashes to avoid creating intermediate directories
name, ext = relname.replace('/', '-').rsplit('.', 1)
newname = '%s_%s.%s' % (name, self.calc_id, ext)
if export_dir is None:
    export_dir = self.export_dir
return os.path.join(export_dir, newname)
def export_path(self, relname, export_dir=None)
Return the path of the exported file, with the export_dir prepended and the calculation ID appended. :param relname: relative file name :param export_dir: export directory (if None use .export_dir)
4.189471
4.043695
1.03605
if hasattr(postfix, 'sm_lt_path'):  # is a realization
    fname = '%s-rlz-%03d.%s' % (prefix, postfix.ordinal, fmt)
else:
    fname = prefix + ('-%s' % postfix if postfix else '') + '.' + fmt
return self.export_path(fname, export_dir)
def build_fname(self, prefix, postfix, fmt, export_dir=None)
Build a file name from a realization, by using prefix and extension. :param prefix: the prefix to use :param postfix: the postfix to use (can be a realization object) :param fmt: the extension ('csv', 'xml', etc) :param export_dir: export directory (if None use .export_dir) :returns: relative pathname including the extension
7.166985
5.939987
1.206566
if self.parent != ():
    self.parent.flush()
if self.hdf5:  # is open
    self.hdf5.flush()
def flush(self)
Flush the underlying hdf5 file
10.432878
7.916738
1.317825
if self.parent != ():
    self.parent.flush()
    self.parent.close()
if self.hdf5:  # is open
    self.hdf5.flush()
    self.hdf5.close()
    self.hdf5 = ()
def close(self)
Close the underlying hdf5 file
5.442894
4.5593
1.1938
if key is None:
    return os.path.getsize(self.filename)
return hdf5.ByteCounter.get_nbytes(
    h5py.File.__getitem__(self.hdf5, key))
def getsize(self, key=None)
Return the size in bytes of the output associated to the given key. If no key is given, returns the total size of all files.
6.509826
6.258111
1.040222
if isinstance(value, (list, tuple)) and isinstance(value[0], str):
    return encode(value)
return value
def maybe_encode(value)
If value is a sequence of strings, encode it
3.715611
3.071349
1.209765
if shape[0] is None:  # extendable dataset
    dset = hdf5.create_dataset(
        name, (0,) + shape[1:], dtype, chunks=True, maxshape=shape,
        compression=compression)
else:  # fixed-shape dataset
    dset = hdf5.create_dataset(name, shape, dtype, fillvalue=fillvalue,
                               compression=compression)
if attrs:
    for k, v in attrs.items():
        dset.attrs[k] = maybe_encode(v)
return dset
def create(hdf5, name, dtype, shape=(None,), compression=None, fillvalue=0, attrs=None)
:param hdf5: a h5py.File object :param name: an hdf5 key string :param dtype: dtype of the dataset (usually composite) :param shape: shape of the dataset (can be extendable) :param compression: None or 'gzip' are recommended :param attrs: dictionary of attributes of the dataset :returns: a HDF5 dataset
2.360765
2.4301
0.971468
length = len(dset)
if len(array) == 0:
    return length
newlength = length + len(array)
if array.dtype.name == 'object':  # vlen array
    shape = (newlength,) + preshape(array[0])
else:
    shape = (newlength,) + array.shape[1:]
dset.resize(shape)
dset[length:newlength] = array
for key, val in attrs.items():
    dset.attrs[key] = val
return newlength
def extend(dset, array, **attrs)
Extend an extensible dataset with an array of a compatible dtype. :param dset: an h5py dataset :param array: an array of length L :returns: the total length of the dataset (i.e. initial length + L)
3.025543
3.139657
0.963654
with h5py.File(filename) as h5:
    try:
        dset = h5[key]
    except KeyError:
        if array.dtype.name == 'object':  # vlen array
            shape = (None,) + preshape(array[0])
        else:
            shape = (None,) + array.shape[1:]
        dset = create(h5, key, array.dtype, shape)
    length = extend(dset, array)
    for key, val in attrs.items():
        dset.attrs[key] = val
    h5.flush()
return length
def extend3(filename, key, array, **attrs)
Extend an HDF5 file dataset with the given array
3.122638
3.164783
0.986683
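A sketch of repeated calls growing the same dataset; it assumes extend3 is importable from this module and that h5py opens the file in its legacy default append mode, as the code above relies on:

import numpy
n1 = extend3('/tmp/demo.hdf5', 'values', numpy.zeros(10))
n2 = extend3('/tmp/demo.hdf5', 'values', numpy.zeros(5))
print(n1, n2)   # 10 15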
modname, clsname = dotname.rsplit('.', 1)
return getattr(importlib.import_module(modname), clsname)
def dotname2cls(dotname)
The class associated to the given dotname (i.e. `pkg.subpkg.mod.cls`)
2.29221
2.597623
0.882426
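Since dotname2cls resolves the module via importlib, any importable dotted name works; a standard-library check:

cls = dotname2cls('collections.OrderedDict')
print(cls)   # <class 'collections.OrderedDict'>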
if 'nbytes' in dset.attrs:
    # look if the dataset has an attribute nbytes
    return dset.attrs['nbytes']
elif hasattr(dset, 'dtype'):
    # else extract nbytes from the underlying array
    return dset.size * numpy.zeros(1, dset.dtype).nbytes
def get_nbytes(dset)
If the dataset has an attribute 'nbytes', return it. Otherwise get the size of the underlying array. Returns None if the dataset is actually a group.
4.564229
3.868791
1.179756
ls = []
for el in lst:
    try:
        ls.append(el.encode('utf-8'))
    except AttributeError:
        ls.append(el)
return numpy.array(ls, vstr)
def array_of_vstr(lst)
:param lst: a list of strings or bytes :returns: an array of variable length ASCII strings
3.12429
3.443279
0.907359
out = []
for val in values:
    try:
        out.append(val.decode('utf8'))
    except AttributeError:
        out.append(val)
return out
def decode_array(values)
Decode the values which are bytestrings.
2.48258
2.258223
1.099351
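Non-bytes entries simply pass through (the decode raises AttributeError and the value is kept as-is):

print(decode_array([b'alpha', 'beta', b'gamma']))
# ['alpha', 'beta', 'gamma']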
shp = list(dset.shape)
if len(shp) != len(d_slices):
    raise ValueError('Array with %d dimensions but %d slices' %
                     (len(shp), len(d_slices)))
sizes = []
slices = []
for i, slc in enumerate(d_slices):
    if slc == slice(None):
        size = shp[i]
        slices.append([slice(None)])
    elif hasattr(slc, 'start'):
        size = slc.stop - slc.start
        slices.append([slice(slc.start, slc.stop, 0)])
    elif isinstance(slc, list):
        size = len(slc)
        slices.append([slice(s, s + 1, j) for j, s in enumerate(slc)])
    elif isinstance(slc, Number):
        size = 1
        slices.append([slice(slc, slc + 1, 0)])
    else:
        size = shp[i]
        slices.append([slc])
    sizes.append(size)
array = numpy.zeros(sizes, dset.dtype)
for tup in itertools.product(*slices):
    aidx = tuple(s if s.step is None
                 else slice(s.step, s.step + s.stop - s.start)
                 for s in tup)
    sel = tuple(s if s.step is None else slice(s.start, s.stop)
                for s in tup)
    array[aidx] = dset[sel]
return array
def extract(dset, *d_slices)
:param dset: a D-dimensional dataset or array
:param d_slices: D slice objects (or similar)
:returns: a reduced D-dimensional array

>>> a = numpy.array([[1, 2, 3], [4, 5, 6]])  # shape (2, 3)
>>> extract(a, slice(None), 1)
array([[2],
       [5]])
>>> extract(a, [0, 1], slice(1, 3))
array([[2, 3],
       [5, 6]])
2.133955
2.219102
0.96163
fh, path = tempfile.mkstemp(suffix='.hdf5')
os.close(fh)
self = cls(path, 'w')
self.path = path
return self
def temporary(cls)
Returns a temporary hdf5 file, open for writing. The temporary name is stored in the .path attribute. It is the user's responsibility to remove the file when closed.
4.040938
2.973662
1.35891
shape = (None,) + data[0].shape[:-1]
try:
    dset = self[key]
except KeyError:
    vdt = h5py.special_dtype(vlen=data[0].dtype)
    dset = create(self, key, vdt, shape, fillvalue=None)
nbytes = dset.attrs.get('nbytes', 0)
totlen = dset.attrs.get('totlen', 0)
for i, val in enumerate(data):
    nbytes += val.nbytes
    totlen += len(val)
length = len(dset)
dset.resize((length + len(data),) + shape[1:])
for i, arr in enumerate(data):
    dset[length + i] = arr
dset.attrs['nbytes'] = nbytes
dset.attrs['totlen'] = totlen
def save_vlen(self, key, data)
Save a sequence of variable-length arrays :param key: name of the dataset :param data: data to store as a list of arrays
2.467288
2.645107
0.932775
obj = super().__getitem__(key)
if nbytes is not None:  # size set from outside
    obj.attrs['nbytes'] = nbytes
else:  # recursively determine the size of the datagroup
    obj.attrs['nbytes'] = nbytes = ByteCounter.get_nbytes(obj)
return nbytes
def set_nbytes(self, key, nbytes=None)
Set the `nbytes` attribute on the HDF5 object identified by `key`.
6.245479
5.526163
1.130165
setitem = super().__setitem__
getitem = super().__getitem__
tag = nodedict['tag']
text = nodedict.get('text', None)
if hasattr(text, 'strip'):
    text = text.strip()
attrib = nodedict.get('attrib', {})
path = '/'.join([root, tag])
nodes = nodedict.get('nodes', [])
if text not in ('', None):  # text=0 is stored
    try:
        setitem(path, text)
    except Exception as exc:
        sys.stderr.write('%s: %s\n' % (path, exc))
        raise
elif attrib and not nodes:
    setitem(path, numpy.nan)
for subdict in _resolve_duplicates(nodes):
    self.save(subdict, path)
if attrib:
    dset = getitem(path)
    for k, v in attrib.items():
        dset.attrs[k] = maybe_encode(v)
def save(self, nodedict, root='')
Save a node dictionary in the .hdf5 file, starting from the root dataset. A common application is to convert XML files into .hdf5 files, see the usage in :mod:`openquake.commands.to_hdf5`. :param nodedict: a dictionary with keys 'tag', 'attrib', 'text', 'nodes'
3.69443
3.534039
1.045384
with File(path, 'w') as f:
    for key, val in vars(self).items():
        assert val is not None, key  # sanity check
        try:
            f[key] = maybe_encode(val)
        except ValueError as err:
            if 'Object header message is too large' in str(err):
                logging.error(str(err))
    for k, v in extra.items():
        f.attrs[k] = maybe_encode(v)
def save(self, path, **extra)
:param path: an .hdf5 pathname :param extra: extra attributes to be saved in the file
4.254668
3.874105
1.098232
shape = self.shape
# the tagnames are bytestrings so they must be decoded
tagnames = decode_array(self.tagnames)
if len(shape) == len(tagnames):
    return [tagnames + ['value']] + self._to_table()
elif len(shape) == len(tagnames) + 1:  # there is an extra field
    tbl = [tagnames + [self.extra[0], 'value']]
    return tbl + self._to_table(self.extra[1:])
else:
    raise TypeError(
        'There are %d dimensions but only %d tagnames' %
        (len(shape), len(tagnames)))
def to_table(self)
Convert an ArrayWrapper with shape (D1, ..., DN) and attributes
T1, ..., TN, which are lists of tags of lengths D1, ..., DN, into a table
with rows (tag1, ..., tagN, value) of maximum length D1 * ... * DN.
Zero values are discarded.

>>> from pprint import pprint
>>> dic = dict(tagnames=['taxonomy', 'occupancy'],
...            taxonomy=['?', 'RC', 'WOOD'],
...            occupancy=['?', 'RES', 'IND', 'COM'])
>>> arr = numpy.zeros((2, 3))
>>> arr[0, 0] = 2000
>>> arr[0, 1] = 5000
>>> arr[1, 0] = 500
>>> pprint(ArrayWrapper(arr, dic).to_table())
[['taxonomy', 'occupancy', 'value'],
 ['RC', 'RES', 2000.0],
 ['RC', 'IND', 5000.0],
 ['WOOD', 'RES', 500.0]]
4.447888
4.515378
0.985053
mean, stds = self._get_mean_and_stddevs(sites, rup, dists, imt,
                                        stddev_types)
stddevs = [np.ones(len(dists.repi)) * get_sigma(imt)]
return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See documentation for method `get_mean_and_stddevs` in :class:`~openquake.hazardlib.gsim.base.GroundShakingIntensityModel`
3.979118
3.851596
1.033109
delta = np.maximum((0.1 - 0.001 * dists.repi),
                   np.zeros_like(dists.repi))
return delta
def _get_delta(self, stds, dists)
Computes the additional delta to be used for the computation of the upper and lower models
10.067355
8.98277
1.120741
# distances
distsl = copy.copy(dists)
distsl.rjb, distsl.rrup = \
    utils.get_equivalent_distances_east(rup.mag, dists.repi)
#
# Pezeshk et al. 2011 - Rrup
mean1, stds1 = super().get_mean_and_stddevs(sites, rup, distsl, imt,
                                            stddev_types)
mean1 = self.apply_correction_to_BC(mean1, imt, distsl)
#
# Atkinson 2008 - Rjb
gmpe = Atkinson2008prime()
mean2, stds2 = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt,
                                         stddev_types)
#
# Silva et al. 2002 - Rjb, single-corner saturation
gmpe = SilvaEtAl2002SingleCornerSaturation()
mean4, stds4 = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt,
                                         stddev_types)
mean4 = self.apply_correction_to_BC(mean4, imt, distsl)
#
# Silva et al. 2002 - Rjb, double-corner saturation
gmpe = SilvaEtAl2002DoubleCornerSaturation()
mean5, stds5 = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt,
                                         stddev_types)
mean5 = self.apply_correction_to_BC(mean5, imt, distsl)
#
# distances
distsl.rjb, distsl.rrup = \
    utils.get_equivalent_distances_east(rup.mag, dists.repi, ab06=True)
#
# Atkinson and Boore 2006 - Rrup
gmpe = AtkinsonBoore2006Modified2011()
mean3, stds3 = gmpe.get_mean_and_stddevs(sites, rup, distsl, imt,
                                         stddev_types)
# Computing adjusted mean and stds
mean_adj = mean1*0.2 + mean2*0.2 + mean3*0.2 + mean4*0.2 + mean5*0.2
# Note that in this case we do not apply a triangular smoothing on
# distance as explained at page 996 of Atkinson and Adams (2013)
# for the calculation of the standard deviation
stds_adj = np.log(np.exp(stds1)*0.2 + np.exp(stds2)*0.2 +
                  np.exp(stds3)*0.2 + np.exp(stds4)*0.2 +
                  np.exp(stds5)*0.2)
#
return mean_adj, stds_adj
def _get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
Returns only the mean values. See documentation for method `get_mean_and_stddevs` in :class:`~openquake.hazardlib.gsim.base.GroundShakingIntensityModel`
2.643734
2.648829
0.998077
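The adjusted sigma above combines the five models in linear (exponentiated) space rather than averaging the log-space sigmas directly; a small numeric check of that formula with made-up per-model sigmas:

import numpy as np
stds = np.array([0.60, 0.70, 0.65, 0.70, 0.68])
sigma_adj = np.log(np.sum(0.2 * np.exp(stds)))
print(round(float(sigma_adj), 4))   # ~0.6667, close to but not equal to stds.mean()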
# This is just used for testing purposes
if len(stddev_types) == 0:
    stddev_types = [StdDev.TOTAL]
mean, stds = self._get_mean_and_stddevs(sites, rup, dists, imt,
                                        stddev_types)
stddevs = [np.ones(len(dists.repi)) * get_sigma(imt)]
delta = self._get_delta(stds, dists)
mean = mean + stds + delta
mean = np.squeeze(mean)
return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See documentation for method `get_mean_and_stddevs` in :class:`~openquake.hazardlib.gsim.base.GroundShakingIntensityModel`
4.185468
4.152385
1.007967
fmt = ekey[-1]
oq = dstore['oqparam']
num_ses = oq.ses_per_logic_tree_path
mesh = get_mesh(dstore['sitecol'])
ruptures_by_grp = {}
for rgetter in gen_rupture_getters(dstore):
    ebrs = [ebr.export(mesh, rgetter.rlzs_by_gsim, num_ses)
            for ebr in rgetter.get_ruptures()]
    if ebrs:
        ruptures_by_grp[rgetter.grp_id] = ebrs
dest = dstore.export_path('ses.' + fmt)
writer = hazard_writers.SESXMLWriter(dest)
writer.serialize(ruptures_by_grp, oq.investigation_time)
return [dest]
def export_ruptures_xml(ekey, dstore)
:param ekey: export key, i.e. a pair (datastore key, fmt) :param dstore: datastore object
7.417582
7.761577
0.95568