code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
for obj in args: if obj is not None and hasattr(obj, 'cleanup'): try: obj.cleanup() except NotImplementedError: pass except Exception: logger.exception("Unable to cleanup %s object", obj)
def cleanup(logger, *args)
Environment's cleanup routine.
3.292773
3.094443
1.064092
self.logger.debug("Allocating environment.") self._allocate() self.logger.debug("Environment successfully allocated.")
def allocate(self)
Builds the context and the Hooks.
6.433357
5.417366
1.187543
self.logger.debug("Deallocating environment.") self._deallocate() self.logger.debug("Environment successfully deallocated.")
def deallocate(self)
Cleans up the context and the Hooks.
4.968068
4.524926
1.097934
domain = etree.fromstring(xml) subelement(domain, './/name', 'name', identifier) subelement(domain, './/uuid', 'uuid', identifier) devices = subelement(domain, './/devices', 'devices', None) for mount in mounts: filesystem = etree.SubElement(devices, 'filesystem', type='mount') etree.SubElement(filesystem, 'source', dir=mount[0]) etree.SubElement(filesystem, 'target', dir=mount[1]) if network_name is not None: network = subelement(devices, './/interface[@type="network"]', 'interface', None, type='network') subelement(network, './/source', 'source', None, network=network_name) return etree.tostring(domain).decode('utf-8')
def domain_xml(identifier, xml, mounts, network_name=None)
Fills the domain XML with the required fields. @param identifier: (str) UUID of the Environment. @param xml: (str) XML configuration of the domain. @param mounts: (tuple) ((source, target), (source, target)) filesystem mount points. @param network_name: (str) name of the libvirt network, if any. Fills the following elements: * name * uuid * devices * network * filesystem
2.437577
2.699233
0.903063
mounts = [] with open(configuration['configuration']) as config_file: domain_config = config_file.read() if 'filesystem' in configuration: if isinstance(configuration['filesystem'], (list, tuple)): for mount in configuration['filesystem']: mounts.append(mountpoint(mount, identifier)) else: mounts.append(mountpoint(configuration['filesystem'], identifier)) xml_config = domain_xml(identifier, domain_config, tuple(mounts), network_name=network_name) return hypervisor.defineXML(xml_config)
def domain_create(hypervisor, identifier, configuration, network_name=None)
libvirt Domain definition. @raise: ConfigError, IOError, libvirt.libvirtError.
2.995647
2.996699
0.999649
if domain is not None: try: if domain.isActive(): domain.destroy() except libvirt.libvirtError: logger.exception("Unable to destroy the domain.") try: domain.undefine() except libvirt.libvirtError: logger.exception("Unable to undefine the domain.") try: if filesystem is not None and os.path.exists(filesystem): shutil.rmtree(filesystem) except Exception: logger.exception("Unable to remove the shared folder.")
def domain_delete(domain, logger, filesystem)
libvirt domain undefinition. @raise: libvirt.libvirtError.
2.410115
2.204248
1.093395
network_name = None self._hypervisor = libvirt.open( self.configuration.get('hypervisor', 'lxc:///')) if 'network' in self.configuration: self._network = network.create(self._hypervisor, self.identifier, self.configuration['network']) network_name = self._network.name() self._domain = domain_create(self._hypervisor, self.identifier, self.configuration['domain'], network_name=network_name) if self._network is None: self._network = network.lookup(self._domain)
def allocate(self)
Initializes libvirt resources.
3.878826
3.407068
1.138465
if self._domain is not None: self._domain_delete() if self._network is not None and 'network' in self.configuration: self._network_delete() if self._hypervisor is not None: self._hypervisor_delete()
def deallocate(self)
Releases all resources.
4.152195
3.878409
1.070593
manager = HookManager(identifier, configuration) manager.load_hooks(context) return manager
def hooks_factory(identifier, configuration, context)
Returns the initialized hooks.
5.521143
5.089566
1.084796
for hook in self.configuration.get('hooks', ()): config = hook.get('configuration', {}) config.update(self.configuration.get('configuration', {})) try: self._load_hook(hook['name'], config, context) except KeyError: self.logger.exception('Provided hook has no name: %s.', hook)
def load_hooks(self, context)
Initializes the Hooks and loads them within the Environment.
3.959594
3.817969
1.037094
subelm = element.find(xpath) if subelm is None: subelm = etree.SubElement(element, tag) else: subelm.tag = tag subelm.text = text for attr, value in kwargs.items(): subelm.set(attr, value) return subelm
def subelement(element, xpath, tag, text, **kwargs)
Searches for the element matching *xpath* within *element* and replaces its *tag*, *text* and *kwargs* attributes. If no element matches *xpath*, a new child element is created with the *kwargs* attributes and appended. Returns the found/created element.
2.003437
2.617007
0.765545
group = group if group else cmod.PairingGroup(PAIRING_GROUP) h_challenge = sha256() serialedArgs = [group.serialize(arg) if isGroupElement(arg) else cmod.Conversion.IP2OS(arg) for arg in args] for arg in sorted(serialedArgs): h_challenge.update(arg) return bytes_to_int(h_challenge.digest())
def get_hash_as_int(*args, group: cmod.PairingGroup = None)
Enumerates over the input arguments and generates a SHA-256 hash from their serialized values :param args: sequence of either group or integer elements :param group: pairing group used to serialize group elements :return: the hash digest as an integer
6.663049
5.95246
1.119377
return ''.join(sample(chars, size))
def randomString(size: int = 20, chars: str = string.ascii_letters + string.digits) -> str
Generate a random string of the specified size. Ensure that the size is not greater than the length of chars, as this function uses random.sample, which samples without replacement. :param size: size of the random string to generate :param chars: the set of characters to use to generate the random string. Uses alphanumerics by default. :return: the random string generated
15.234615
18.286118
0.833125
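A short standard-library check of the without-replacement behaviour noted in the randomString docstring above.

import string
from random import sample

def randomString(size=20, chars=string.ascii_letters + string.digits):
    return ''.join(sample(chars, size))

print(randomString(8))    # e.g. 'kZ3qBv9D' -- 8 distinct characters
# random.sample draws without replacement, so asking for more characters
# than chars contains fails:
# randomString(100)       # raises ValueError (only 62 alphanumerics available)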
prime = cmod.randomPrime(LARGE_PRIME) i = 0 while not cmod.isPrime(2 * prime + 1): prime = cmod.randomPrime(LARGE_PRIME) i += 1 return prime
def genPrime()
Generate a large prime (`p_prime` or `q_prime`) such that `2 * prime + 1` is also prime; two such primes are used to build the 1024-bit primes `p` and `q`
3.618021
4.035852
0.89647
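A hedged sketch of the same retry loop in genPrime above, using sympy as a stand-in for the charm-crypto helpers (cmod.randomPrime / cmod.isPrime); the bit size is kept small purely for illustration.

from sympy import isprime, randprime

def gen_prime_with_safe_double(bits=32):
    # keep drawing random primes until 2*p + 1 is also prime,
    # mirroring the loop in genPrime above
    p = randprime(2 ** (bits - 1), 2 ** bits)
    while not isprime(2 * p + 1):
        p = randprime(2 ** (bits - 1), 2 ** bits)
    return p

p = gen_prime_with_safe_double()
assert isprime(p) and isprime(2 * p + 1)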
encoded = {} for i in range(len(self.credType.names)): self.credType.names[i] attr_types = self.credType.attrTypes[i] for at in attr_types: attrName = at.name if attrName in self._vals: if at.encode: encoded[attrName] = encodeAttr(self._vals[attrName]) else: encoded[attrName] = self._vals[at.name] return encoded
def encoded(self)
Encodes all the attributes that require encoding to 256-bit integers :return: a dict mapping attribute names to their (encoded or raw) values
4.037981
3.917125
1.030853
schema = Schema(name, version, attrNames, self.issuerId) return await self.wallet.submitSchema(schema)
async def genSchema(self, name, version, attrNames) -> Schema
Generates and submits Schema. :param name: schema name :param version: schema version :param attrNames: a list of attributes the schema contains :return: submitted Schema
8.006314
8.859216
0.903727
pk, sk = await self._primaryIssuer.genKeys(schemaId, p_prime, q_prime) pkR, skR = await self._nonRevocationIssuer.genRevocationKeys() pk = await self.wallet.submitPublicKeys(schemaId=schemaId, pk=pk, pkR=pkR) pkR = await self.wallet.submitSecretKeys(schemaId=schemaId, sk=sk, skR=skR) return pk, pkR
async def genKeys(self, schemaId: ID, p_prime=None, q_prime=None) -> ( PublicKey, RevocationPublicKey)
Generates and submits keys (both public and secret, primary and non-revocation). :param schemaId: The schema ID (reference to claim definition schema) :param p_prime: optional p_prime parameter :param q_prime: optional q_prime parameter :return: Submitted Public keys (both primary and non-revocation)
3.806715
3.126334
1.217629
accum, tails, accPK, accSK = await self._nonRevocationIssuer.issueAccumulator( schemaId, iA, L) accPK = await self.wallet.submitAccumPublic(schemaId=schemaId, accumPK=accPK, accum=accum, tails=tails) await self.wallet.submitAccumSecret(schemaId=schemaId, accumSK=accSK) return accPK
async def issueAccumulator(self, schemaId: ID, iA, L) -> AccumulatorPublicKey
Issues and submits an accumulator used for non-revocation proof. :param schemaId: The schema ID (reference to claim definition schema) :param iA: accumulator ID :param L: maximum number of claims within accumulator. :return: Submitted accumulator public key
5.371327
5.215873
1.029804
acc, ts = await self._nonRevocationIssuer.revoke(schemaId, i) await self.wallet.submitAccumUpdate(schemaId=schemaId, accum=acc, timestampMs=ts)
async def revoke(self, schemaId: ID, i)
Performs revocation of a Claim. :param schemaId: The schema ID (reference to claim definition schema) :param i: claim's sequence number within accumulator
17.687687
15.526985
1.139158
schemaKey = (await self.wallet.getSchema(schemaId)).getKey() attributes = self._attrRepo.getAttributes(schemaKey, claimRequest.userId) # TODO re-enable when revocation registry is implemented # iA = iA if iA else (await self.wallet.getAccumulator(schemaId)).iA # TODO this has un-obvious side-effects await self._genContxt(schemaId, iA, claimRequest.userId) (c1, claim) = await self._issuePrimaryClaim(schemaId, attributes, claimRequest.U) # TODO re-enable when revocation registry is fully implemented c2 = await self._issueNonRevocationClaim(schemaId, claimRequest.Ur, iA, i) if claimRequest.Ur else None signature = Claims(primaryClaim=c1, nonRevocClaim=c2) return (signature, claim)
async def issueClaim(self, schemaId: ID, claimRequest: ClaimRequest, iA=None, i=None) -> (Claims, Dict[str, ClaimAttributeValues])
Issue a claim for the given user and schema. :param schemaId: The schema ID (reference to claim definition schema) :param claimRequest: A claim request containing prover ID and prover-generated values :param iA: accumulator ID :param i: claim's sequence number within accumulator :return: The claim (both primary and non-revocation)
6.670876
6.039009
1.104631
res = {} for schemaId, claimReq in allClaimRequest.items(): res[schemaId] = await self.issueClaim(schemaId, claimReq) return res
async def issueClaims(self, allClaimRequest: Dict[ID, ClaimRequest]) -> \ Dict[ID, Claims]
Issue claims for the given users and schemas. :param allClaimRequest: a map of schema ID to a claim request containing prover ID and prover-generated values :return: The claims (both primary and non-revocation)
2.996399
2.965393
1.010456
if proofRequest.verifiableAttributes.keys() != proof.requestedProof.revealed_attrs.keys(): raise ValueError('Received attributes ={} do not correspond to requested={}'.format( proof.requestedProof.revealed_attrs.keys(), proofRequest.verifiableAttributes.keys())) if proofRequest.predicates.keys() != proof.requestedProof.predicates.keys(): raise ValueError('Received predicates ={} do not correspond to requested={}'.format( proof.requestedProof.predicates.keys(), proofRequest.predicates.keys())) TauList = [] for (uuid, proofItem) in proof.proofs.items(): if proofItem.proof.nonRevocProof: TauList += await self._nonRevocVerifier.verifyNonRevocation( proofRequest, proofItem.schema_seq_no, proof.aggregatedProof.cHash, proofItem.proof.nonRevocProof) if proofItem.proof.primaryProof: TauList += await self._primaryVerifier.verify(proofItem.schema_seq_no, proof.aggregatedProof.cHash, proofItem.proof.primaryProof) CHver = self._get_hash(proof.aggregatedProof.CList, self._prepare_collection(TauList), cmod.integer(proofRequest.nonce)) return CHver == proof.aggregatedProof.cHash
async def verify(self, proofRequest: ProofRequest, proof: FullProof)
Verifies a proof from the prover. :param proofRequest: description of a proof to be presented (revealed attributes, predicates, timestamps for non-revocation) :param proof: a proof :return: True if verified successfully and false otherwise.
4.017373
3.728319
1.077529
await self._genMasterSecret(schemaId) U = await self._genU(schemaId) Ur = None if not reqNonRevoc else await self._genUr(schemaId) proverId = proverId if proverId else self.proverId return ClaimRequest(userId=proverId, U=U, Ur=Ur)
async def createClaimRequest(self, schemaId: ID, proverId=None, reqNonRevoc=True) -> ClaimRequest
Creates a claim request to the issuer. :param schemaId: The schema ID (reference to claim definition schema) :param proverId: a prover ID to request a claim for (if None then the current prover default ID is used) :param reqNonRevoc: whether to request a non-revocation claim :return: Claim Request
4.370374
4.301028
1.016123
res = {} for schemaId in schemaIds: res[schemaId] = await self.createClaimRequest(schemaId, proverId, reqNonRevoc) return res
async def createClaimRequests(self, schemaIds: Sequence[ID], proverId=None, reqNonRevoc=True) -> Dict[ID, ClaimRequest]
Creates claim requests to the issuer, one per schema. :param schemaIds: The schema IDs (references to claim definition schemas) :param proverId: a prover ID to request a claim for (if None then the current prover default ID is used) :param reqNonRevoc: whether to request a non-revocation claim :return: a dictionary of Claim Requests for each Schema.
2.164969
2.55313
0.847966
await self.wallet.submitContextAttr(schemaId, signature.primaryClaim.m2) await self.wallet.submitClaimAttributes(schemaId, claimAttributes) await self._initPrimaryClaim(schemaId, signature.primaryClaim) if signature.nonRevocClaim: await self._initNonRevocationClaim(schemaId, signature.nonRevocClaim)
async def processClaim(self, schemaId: ID, claimAttributes: Dict[str, ClaimAttributeValues], signature: Claims)
Processes and saves a received Claim for the given Schema. :param schemaId: The schema ID (reference to claim definition schema) :param claimAttributes: the claim attribute values to be processed and saved :param signature: the claim signatures (primary and non-revocation) to be processed and saved
5.381391
5.631796
0.955537
res = [] for schemaId, (claim_signature, claim_attributes) in allClaims.items(): res.append(await self.processClaim(schemaId, claim_attributes, claim_signature)) return res
async def processClaims(self, allClaims: Dict[ID, Claims])
Processes and saves received Claims. :param allClaims: a map of schema ID to the claims to be processed and saved for each claim definition.
5.527894
5.497355
1.005555
claims, requestedProof = await self._findClaims(proofRequest) proof = await self._prepareProof(claims, proofRequest.nonce, requestedProof) return proof
async def presentProof(self, proofRequest: ProofRequest) -> FullProof
Presents a proof to the verifier. :param proofRequest: description of a proof to be presented (revealed attributes, predicates, timestamps for non-revocation) :return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values)
6.369228
7.991984
0.796952
v = struct.unpack( 'q', struct.pack('Q', int(hex_string, 16)))[0] # type: int return v
def unsigned_hex_to_signed_int(hex_string: str) -> int
Converts a 64-bit hex string to a signed int value. This is due to the fact that Apache Thrift only has signed values. Examples: '17133d482ba4f605' => 1662740067609015813 'b6dbb1c2b362bf51' => -5270423489115668655 :param hex_string: the string representation of a zipkin ID :returns: signed int representation
5.980719
5.950161
1.005136
hex_string = hex(struct.unpack('Q', struct.pack('q', signed_int))[0])[2:] if hex_string.endswith('L'): return hex_string[:-1] return hex_string
def signed_int_to_unsigned_hex(signed_int: int) -> str
Converts a signed int value to a 64-bit hex string. Examples: 1662740067609015813 => '17133d482ba4f605' -5270423489115668655 => 'b6dbb1c2b362bf51' :param signed_int: an int to convert :returns: unsigned hex string
3.067513
3.604851
0.85094
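A standard-library round-trip check of the two conversion helpers above, using the example values quoted in their docstrings.

import struct

def unsigned_hex_to_signed_int(hex_string):
    # reinterpret the 64-bit unsigned value as a signed one ('Q' -> 'q')
    return struct.unpack('q', struct.pack('Q', int(hex_string, 16)))[0]

def signed_int_to_unsigned_hex(signed_int):
    # reinterpret the signed value as unsigned ('q' -> 'Q') and format as hex
    hex_string = hex(struct.unpack('Q', struct.pack('q', signed_int))[0])[2:]
    return hex_string[:-1] if hex_string.endswith('L') else hex_string

assert unsigned_hex_to_signed_int('17133d482ba4f605') == 1662740067609015813
assert unsigned_hex_to_signed_int('b6dbb1c2b362bf51') == -5270423489115668655
assert signed_int_to_unsigned_hex(-5270423489115668655) == 'b6dbb1c2b362bf51'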
app[tracer_key] = tracer m = middleware_maker(skip_routes=skip_routes, tracer_key=tracer_key, request_key=request_key) app.middlewares.append(m) # register cleanup signal to close zipkin transport connections async def close_aiozipkin(app: Application) -> None: await app[tracer_key].close() app.on_cleanup.append(close_aiozipkin) return app
def setup(app: Application, tracer: Tracer, *, skip_routes: Optional[AbstractRoute] = None, tracer_key: str = APP_AIOZIPKIN_KEY, request_key: str = REQUEST_AIOZIPKIN_KEY) -> Application
Sets the required parameters in an aiohttp application for aiozipkin. The tracer is added to the application context and cleaned up after application shutdown. You can provide a custom tracer_key if the default name is not suitable.
3.136774
3.369557
0.930916
return cast(Tracer, app[tracer_key])
def get_tracer( app: Application, tracer_key: str = APP_AIOZIPKIN_KEY) -> Tracer
Returns the tracer object from the application context. By default the tracer is stored under APP_AIOZIPKIN_KEY in the aiohttp application context; you can provide your own key if the default one is not suitable.
11.965389
12.937226
0.924881
return cast(SpanAbc, request[request_key])
def request_span(request: Request, request_key: str = REQUEST_AIOZIPKIN_KEY) -> SpanAbc
Returns the span created by the middleware from the request context; you can use it as the parent of the next child span.
8.546787
8.985882
0.951135
trace_config = aiohttp.TraceConfig() zipkin = ZipkinClientSignals(tracer) trace_config.on_request_start.append(zipkin.on_request_start) trace_config.on_request_end.append(zipkin.on_request_end) trace_config.on_request_exception.append(zipkin.on_request_exception) return trace_config
def make_trace_config(tracer: Tracer) -> aiohttp.TraceConfig
Creates an aiohttp.TraceConfig with aiozipkin instrumentation enabled for the aiohttp client.
2.206369
1.974051
1.117686
return Endpoint(service_name, ipv4, ipv6, port)
def create_endpoint(service_name: str, *, ipv4: OptStr = None, ipv6: OptStr = None, port: OptInt = None) -> Endpoint
Factory function to create Endpoint object.
4.178916
3.892404
1.073608
ts = ts if ts is not None else time.time() return int(ts * 1000 * 1000)
def make_timestamp(ts: OptTs = None) -> int
Create a zipkin timestamp in microseconds, or convert an available one from seconds. Useful when the user supplies ts from a time.time() call.
3.053584
2.226197
1.371659
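A minimal standard-library sketch of make_timestamp's seconds-to-microseconds conversion; the assertion value is plain arithmetic, not library output.

import time

def make_timestamp(ts=None):
    # default to "now", then convert seconds (possibly float) to int microseconds
    ts = ts if ts is not None else time.time()
    return int(ts * 1000 * 1000)

assert make_timestamp(1.5) == 1_500_000
now_us = make_timestamp()          # current time in microseconds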
headers = { TRACE_ID_HEADER: context.trace_id, SPAN_ID_HEADER: context.span_id, FLAGS_HEADER: '0', SAMPLED_ID_HEADER: '1' if context.sampled else '0', } if context.parent_id is not None: headers[PARENT_ID_HEADER] = context.parent_id return headers
def make_headers(context: TraceContext) -> Headers
Creates a dict with zipkin headers from the supplied trace context.
2.115369
1.977107
1.069932
# b3={TraceId}-{SpanId}-{SamplingState}-{ParentSpanId} c = context # encode sampled flag if c.debug: sampled = 'd' elif c.sampled: sampled = '1' else: sampled = '0' params = [c.trace_id, c.span_id, sampled] # type: List[str] if c.parent_id is not None: params.append(c.parent_id) h = DELIMITER.join(params) headers = {SINGLE_HEADER: h} return headers
def make_single_header(context: TraceContext) -> Headers
Creates a dict with the zipkin single-header ('b3') format.
4.188604
3.990451
1.049657
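A hedged sketch of the single 'b3' header layout built above (b3={TraceId}-{SpanId}-{SamplingState}-{ParentSpanId}); plain strings stand in for the aiozipkin TraceContext and header constants, and the IDs are made up for illustration.

def make_single_b3_header(trace_id, span_id, sampled, debug=False, parent_id=None):
    # 'd' marks debug, otherwise '1'/'0' for the sampling decision
    sampled_flag = 'd' if debug else ('1' if sampled else '0')
    params = [trace_id, span_id, sampled_flag]
    if parent_id is not None:
        params.append(parent_id)
    return {'b3': '-'.join(params)}

print(make_single_b3_header('80f198ee56343ba8', '05e3ac9a4f6e3b90', True,
                            parent_id='e457b5a2e4d86bd1'))
# {'b3': '80f198ee56343ba8-05e3ac9a4f6e3b90-1-e457b5a2e4d86bd1'}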
# TODO: add validation for trace_id/span_id/parent_id # normalize header names just in case someone passed regular dict # instead dict with case insensitive keys headers = {k.lower(): v for k, v in headers.items()} required = (TRACE_ID_HEADER.lower(), SPAN_ID_HEADER.lower()) has_b3 = all(h in headers for h in required) has_b3_single = SINGLE_HEADER in headers if not(has_b3_single or has_b3): return None if has_b3: debug = parse_debug_header(headers) sampled = debug if debug else parse_sampled_header(headers) context = TraceContext( trace_id=headers[TRACE_ID_HEADER.lower()], parent_id=headers.get(PARENT_ID_HEADER.lower()), span_id=headers[SPAN_ID_HEADER.lower()], sampled=sampled, debug=debug, shared=False, ) return context return _parse_single_header(headers)
def make_context(headers: Headers) -> Optional[TraceContext]
Converts the available headers to a TraceContext; if the headers mapping does not contain zipkin headers, the function returns None.
3.053705
3.138375
0.973021
def limited_filter(k: str, v: Any) -> bool: return k not in keys or v is not None # type: ignore def full_filter(k: str, v: Any) -> bool: return v is not None f = limited_filter if keys is not None else full_filter return {k: v for k, v in data.items() if f(k, v)}
def filter_none(data: Dict[str, Any], keys: OptKeys = None) -> Dict[str, Any]
Filters keys with None values out of a dict. The check occurs only at the root level. If a list of keys is specified, the filter applies only to those keys.
2.696413
2.67354
1.008555
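A short usage sketch of filter_none showing both modes (no key list vs. an explicit key list); the helper is re-declared locally so the snippet runs on its own.

from typing import Any, Dict, Optional, Sequence

def filter_none(data: Dict[str, Any],
                keys: Optional[Sequence[str]] = None) -> Dict[str, Any]:
    def limited_filter(k, v):
        return k not in keys or v is not None

    def full_filter(k, v):
        return v is not None

    f = limited_filter if keys is not None else full_filter
    return {k: v for k, v in data.items() if f(k, v)}

print(filter_none({'a': 1, 'b': None}))                 # {'a': 1}
print(filter_none({'a': None, 'b': None}, keys=['a']))  # {'b': None} -- only 'a' is checked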
d = rhypo d[d <= 15.0] = 15.0 return C['a3'] * np.log10(d)
def _compute_term_3(self, C, rhypo)
Compute term 3 in equation 2, page 462. Distances are clipped at 15 km (as per Ezio Faccioli's personal communication).
7.046542
4.836496
1.456952
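A hedged numeric sketch of the clipping described above: hypocentral distances below 15 km are treated as 15 km before taking the logarithm. The coefficient value is made up for illustration.

import numpy as np

def term_3(a3, rhypo):
    d = np.asarray(rhypo, dtype=float).copy()
    d[d <= 15.0] = 15.0        # clip short distances at 15 km
    return a3 * np.log10(d)

print(term_3(-1.0, [5.0, 15.0, 50.0]))   # first two entries are identical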
# for rock values the site term is zero site_term = np.zeros_like(vs30) # hard soil site_term[(vs30 >= 360) & (vs30 < 800)] = C['aB'] # medium soil site_term[(vs30 >= 180) & (vs30 < 360)] = C['aC'] # soft soil site_term[vs30 < 180] = C['aD'] return site_term
def _compute_site_term(self, C, vs30)
Compute site term as a function of vs30: 4th, 5th and 6th terms in equation 2 page 462.
2.911491
2.922841
0.996117
if rake > -120.0 and rake <= -60.0: return C['aN'] elif rake > 30.0 and rake <= 150.0: return C['aR'] else: return C['aS']
def _compute_faulting_style_term(self, C, rake)
Compute faulting style term as a function of rake angle value as given in equation 5 page 465.
3.686196
3.693901
0.997914
mean = (self._compute_term_1_2(C, mag) + self._compute_term_3(C, dists.rhypo) + self._compute_site_term(C, vs30) + self._compute_faulting_style_term(C, rake)) # convert from cm/s**2 to g for SA and from m/s**2 to g for PGA (PGV # is already in cm/s) and also convert from base 10 to base e. if imt.name == "PGA": mean = np.log((10 ** mean) / g) elif imt.name == "SA": mean = np.log((10 ** mean) * ((2 * np.pi / imt.period) ** 2) * 1e-2 / g) else: mean = np.log(10 ** mean) return mean
def _compute_mean(self, C, mag, dists, vs30, rake, imt)
Compute the mean value for PGV, PGA and displacement response spectrum, as given in equation 2, page 462, with the addition of the faulting style term as given in equation 5, page 465. Also converts displacement response spectrum values to SA.
3.85306
3.586135
1.074433
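A hedged arithmetic sketch of the unit handling described above: a displacement response value predicted in log10(cm) becomes spectral acceleration via SA = D * (2*pi/T)**2, then is converted from cm/s**2 to g and from base-10 to natural logarithms. The numbers are illustrative, not model output.

import numpy as np
from scipy.constants import g

log10_drs_cm = 0.5                 # log10 of the displacement response, in cm
period = 1.0                       # spectral period, s
sa_cgs = (10 ** log10_drs_cm) * (2 * np.pi / period) ** 2   # cm/s**2
ln_sa_g = np.log(sa_cgs * 1e-2 / g)                          # natural log of SA in g
print(ln_sa_g)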
stddevs = [] for stddev_type in stddev_types: assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES stddevs.append(np.log(10 ** C['sigma']) + np.zeros(num_sites)) return stddevs
def _get_stddevs(self, C, stddev_types, num_sites)
Return total standard deviation.
3.235297
3.267241
0.990223
cmaker = ContextMaker(rupture.tectonic_region_type, [gsim]) gc = GmfComputer(rupture, sites, [str(imt) for imt in imts], cmaker, truncation_level, correlation_model) res, _sig, _eps = gc.compute(gsim, realizations, seed) return {imt: res[imti] for imti, imt in enumerate(gc.imts)}
def ground_motion_fields(rupture, sites, imts, gsim, truncation_level, realizations, correlation_model=None, seed=None)
Given an earthquake rupture, the ground motion field calculator computes ground shaking over a set of sites, by randomly sampling a ground shaking intensity model. A ground motion field represents a possible 'realization' of the ground shaking due to an earthquake rupture. .. note:: This calculator is using random numbers. In order to reproduce the same results numpy random numbers generator needs to be seeded, see http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.seed.html :param openquake.hazardlib.source.rupture.Rupture rupture: Rupture to calculate ground motion fields radiated from. :param openquake.hazardlib.site.SiteCollection sites: Sites of interest to calculate GMFs. :param imts: List of intensity measure type objects (see :mod:`openquake.hazardlib.imt`). :param gsim: Ground-shaking intensity model, instance of subclass of either :class:`~openquake.hazardlib.gsim.base.GMPE` or :class:`~openquake.hazardlib.gsim.base.IPE`. :param truncation_level: Float, number of standard deviations for truncation of the intensity distribution, or ``None``. :param realizations: Integer number of GMF realizations to compute. :param correlation_model: Instance of correlation model object. See :mod:`openquake.hazardlib.correlation`. Can be ``None``, in which case non-correlated ground motion fields are calculated. Correlation model is not used if ``truncation_level`` is zero. :param int seed: The seed used in the numpy random number generator :returns: Dictionary mapping intensity measure type objects (same as in parameter ``imts``) to 2d numpy arrays of floats, representing different realizations of ground shaking intensity for all sites in the collection. First dimension represents sites and second one is for realizations.
5.306162
5.995108
0.885082
try: # read the seed from self.rupture.serial seed = seed or self.rupture.serial except AttributeError: pass if seed is not None: numpy.random.seed(seed) result = numpy.zeros((len(self.imts), len(self.sids), num_events), F32) sig = numpy.zeros((len(self.imts), num_events), F32) eps = numpy.zeros((len(self.imts), num_events), F32) for imti, imt in enumerate(self.imts): if isinstance(gsim, MultiGMPE): gs = gsim[str(imt)] # MultiGMPE else: gs = gsim # regular GMPE try: result[imti], sig[imti], eps[imti] = self._compute( None, gs, num_events, imt) except Exception as exc: raise exc.__class__( '%s for %s, %s, srcidx=%s' % (exc, gs, imt, self.srcidx) ).with_traceback(exc.__traceback__) return result, sig, eps
def compute(self, gsim, num_events, seed=None)
:param gsim: a GSIM instance :param num_events: the number of seismic events :param seed: a random seed or None :returns: a 32 bit array of shape (num_imts, num_sites, num_events) and two arrays with shape (num_imts, num_events): sig for stddev_inter and eps for the random part
3.34285
2.946965
1.134337
rctx = getattr(self.rupture, 'rupture', self.rupture) if seed is not None: numpy.random.seed(seed) dctx = self.dctx.roundup(gsim.minimum_distance) if self.truncation_level == 0: assert self.correlation_model is None mean, _stddevs = gsim.get_mean_and_stddevs( self.sctx, rctx, dctx, imt, stddev_types=[]) mean = gsim.to_imt_unit_values(mean) mean.shape += (1, ) mean = mean.repeat(num_events, axis=1) return (mean, numpy.zeros(num_events, F32), numpy.zeros(num_events, F32)) elif self.truncation_level is None: distribution = scipy.stats.norm() else: assert self.truncation_level > 0 distribution = scipy.stats.truncnorm( - self.truncation_level, self.truncation_level) num_sids = len(self.sids) if gsim.DEFINED_FOR_STANDARD_DEVIATION_TYPES == {StdDev.TOTAL}: # If the GSIM provides only total standard deviation, we need # to compute mean and total standard deviation at the sites # of interest. # In this case, we also assume no correlation model is used. if self.correlation_model: raise CorrelationButNoInterIntraStdDevs( self.correlation_model, gsim) mean, [stddev_total] = gsim.get_mean_and_stddevs( self.sctx, rctx, dctx, imt, [StdDev.TOTAL]) stddev_total = stddev_total.reshape(stddev_total.shape + (1, )) mean = mean.reshape(mean.shape + (1, )) total_residual = stddev_total * rvs( distribution, num_sids, num_events) gmf = gsim.to_imt_unit_values(mean + total_residual) stddev_inter = numpy.empty(num_events, F32) stddev_inter.fill(numpy.nan) epsilons = numpy.empty(num_events, F32) epsilons.fill(numpy.nan) else: mean, [stddev_inter, stddev_intra] = gsim.get_mean_and_stddevs( self.sctx, rctx, dctx, imt, [StdDev.INTER_EVENT, StdDev.INTRA_EVENT]) stddev_intra = stddev_intra.reshape(stddev_intra.shape + (1, )) stddev_inter = stddev_inter.reshape(stddev_inter.shape + (1, )) mean = mean.reshape(mean.shape + (1, )) intra_residual = stddev_intra * rvs( distribution, num_sids, num_events) if self.correlation_model is not None: ir = self.correlation_model.apply_correlation( self.sites, imt, intra_residual, stddev_intra) # this fixes a mysterious bug: ir[row] is actually # a matrix of shape (E, 1) and not a vector of size E intra_residual = numpy.zeros(ir.shape) for i, val in numpy.ndenumerate(ir): intra_residual[i] = val epsilons = rvs(distribution, num_events) inter_residual = stddev_inter * epsilons gmf = gsim.to_imt_unit_values( mean + intra_residual + inter_residual) return gmf, stddev_inter.max(axis=0), epsilons
def _compute(self, seed, gsim, num_events, imt)
:param seed: a random seed or None if the seed is already set :param gsim: a GSIM instance :param num_events: the number of seismic events :param imt: an IMT instance :returns: (gmf(num_sites, num_events), stddev_inter(num_events), epsilons(num_events))
2.822967
2.692907
1.048297
if os.path.exists(oqdata): sys.exit('%s exists already' % oqdata) if '://' in archive: # get the zip archive from an URL resp = requests.get(archive) _, archive = archive.rsplit('/', 1) with open(archive, 'wb') as f: f.write(resp.content) if not os.path.exists(archive): sys.exit('%s does not exist' % archive) t0 = time.time() oqdata = os.path.abspath(oqdata) assert archive.endswith('.zip'), archive os.mkdir(oqdata) zipfile.ZipFile(archive).extractall(oqdata) dbpath = os.path.join(oqdata, 'db.sqlite3') db = Db(sqlite3.connect, dbpath, isolation_level=None, detect_types=sqlite3.PARSE_DECLTYPES) n = 0 for fname in os.listdir(oqdata): mo = re.match('calc_(\d+)\.hdf5', fname) if mo: job_id = int(mo.group(1)) fullname = os.path.join(oqdata, fname)[:-5] # strip .hdf5 db("UPDATE job SET user_name=?x, ds_calc_dir=?x WHERE id=?x", getpass.getuser(), fullname, job_id) safeprint('Restoring ' + fname) n += 1 dt = time.time() - t0 safeprint('Extracted %d calculations into %s in %d seconds' % (n, oqdata, dt))
def restore(archive, oqdata)
Build a new oqdata directory from the data contained in the zip archive
3.225163
3.14822
1.02444
return (C["c2"] * rhypo) + (C["c3"] * np.log10(rhypo))
def _compute_distance_term(self, C, rhypo)
Returns the distance scaling term
5.40274
5.098066
1.059763
epsilon = rhypo - (4.853 + 1.347E-6 * (mag ** 8.163)) rjb = np.zeros_like(rhypo) idx = epsilon >= 3. rjb[idx] = np.sqrt((epsilon[idx] ** 2.) - 9.0) rjb[rjb < 0.0] = 0.0 return rjb
def rhypo_to_rjb(rhypo, mag)
Converts hypocentral distance to an equivalent Joyner-Boore distance dependent on the magnitude
4.577103
4.881083
0.937723
# Convert rhypo to rrup rrup = rhypo_to_rrup(dists.rhypo, rup.mag) mean = (self._get_magnitude_scaling_term(C, rup.mag) + self._get_distance_scaling_term(C, rup.mag, rrup) + self._get_style_of_faulting_term(C, rup.rake) + self._get_site_amplification_term(C, sites.vs30)) # convert from cm/s**2 to g for SA and from cm/s**2 to g for PGA (PGV # is already in cm/s) and also convert from base 10 to base e. if isinstance(imt, PGA): mean = np.log((10 ** mean) * ((2 * np.pi / 0.01) ** 2) * 1e-2 / g) elif isinstance(imt, SA): mean = np.log((10 ** mean) * ((2 * np.pi / imt.period) ** 2) * 1e-2 / g) else: mean = np.log(10 ** mean) return mean + self.adjustment_factor
def _compute_mean(self, C, rup, dists, sites, imt)
Returns the mean ground motion acceleration and velocity
3.45773
3.499865
0.987961
# List must be in following order p_n = [] # Rjb # Note that Rjb must be clipped at 0.1 km rjb = rhypo_to_rjb(dists.rhypo, rup.mag) rjb[rjb < 0.1] = 0.1 p_n.append(self._get_normalised_term(np.log10(rjb), self.CONSTANTS["logMaxR"], self.CONSTANTS["logMinR"])) # Magnitude p_n.append(self._get_normalised_term(rup.mag, self.CONSTANTS["maxMw"], self.CONSTANTS["minMw"])) # Vs30 p_n.append(self._get_normalised_term(np.log10(sites.vs30), self.CONSTANTS["logMaxVs30"], self.CONSTANTS["logMinVs30"])) # Depth p_n.append(self._get_normalised_term(rup.hypo_depth, self.CONSTANTS["maxD"], self.CONSTANTS["minD"])) # Style of Faulting p_n.append(self._get_normalised_term(sof, self.CONSTANTS["maxFM"], self.CONSTANTS["minFM"])) return p_n
def get_pn(self, rup, sites, dists, sof)
Normalise the input parameters within their upper and lower defined range
2.643449
2.587291
1.021705
dtlist = [(imt, numpy.float32) for imt in sorted_imts] imt_dt = numpy.dtype(dtlist) return numpy.dtype([(str(gsim), imt_dt) for gsim in sorted_gsims])
def gsim_imt_dt(sorted_gsims, sorted_imts)
Build a numpy dtype as a nested record keyed by GSIM name, with an inner record keyed by IMT. :param sorted_gsims: a list of GSIM instances, sorted lexicographically :param sorted_imts: a list of intensity measure type strings
2.992019
3.973702
0.752955
# notation from http://en.wikipedia.org/wiki/Truncated_normal_distribution. # given that mu = 0 and sigma = 1, we have alpha = a and beta = b. # "CDF" in comments refers to cumulative distribution function # of non-truncated distribution with that mu and sigma values. # assume symmetric truncation, that is ``a = - truncation_level`` # and ``b = + truncation_level``. # calculate CDF of b phi_b = ndtr(truncation_level) # calculate Z as ``Z = CDF(b) - CDF(a)``, here we assume that # ``CDF(a) == CDF(- truncation_level) == 1 - CDF(b)`` z = phi_b * 2 - 1 # calculate the result of survival function of ``values``, # and restrict it to the interval where probability is defined -- # 0..1. here we use some transformations of the original formula # that is ``SF(x) = 1 - (CDF(x) - CDF(a)) / Z`` in order to minimize # number of arithmetic operations and function calls: # ``SF(x) = (Z - CDF(x) + CDF(a)) / Z``, # ``SF(x) = (CDF(b) - CDF(a) - CDF(x) + CDF(a)) / Z``, # ``SF(x) = (CDF(b) - CDF(x)) / Z``. return ((phi_b - ndtr(values)) / z).clip(0.0, 1.0)
def _truncnorm_sf(truncation_level, values)
Survival function for truncated normal distribution. Assumes zero mean, standard deviation equal to one and symmetric truncation. :param truncation_level: Positive float number representing the truncation on both sides around the mean, in units of sigma. :param values: Numpy array of values as input to a survival function for the given distribution. :returns: Numpy array of survival function results in a range between 0 and 1. >>> from scipy.stats import truncnorm >>> truncnorm(-3, 3).sf(0.12345) == _truncnorm_sf(3, 0.12345) True
5.294886
5.302577
0.998549
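A numeric cross-check of the closed form used above, SF(x) = (Phi(b) - Phi(x)) / (2*Phi(b) - 1) for symmetric truncation at +/- b, against scipy's truncnorm; the function is re-declared locally so the snippet runs on its own.

import numpy as np
from scipy.special import ndtr
from scipy.stats import truncnorm

def _truncnorm_sf(truncation_level, values):
    phi_b = ndtr(truncation_level)          # CDF of the untruncated normal at +b
    z = phi_b * 2 - 1                       # normalisation Z = CDF(b) - CDF(a)
    return ((phi_b - ndtr(values)) / z).clip(0.0, 1.0)

x = np.array([-1.0, 0.0, 0.12345, 2.5])
assert np.allclose(_truncnorm_sf(3, x), truncnorm(-3, 3).sf(x))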
with warnings.catch_warnings(): warnings.simplefilter("ignore") # avoid RuntimeWarning: divide by zero encountered in log return numpy.log(values)
def to_distribution_values(self, values)
Returns numpy array of natural logarithms of ``values``.
4.40253
3.576656
1.230907
for key in (ADMITTED_STR_PARAMETERS + ADMITTED_FLOAT_PARAMETERS + ADMITTED_SET_PARAMETERS): try: val = getattr(self.gmpe, key) except AttributeError: pass else: setattr(self, key, val)
def set_parameters(self)
Combines the parameters of the GMPE provided at the construction level with the ones originally assigned to the backbone modified GMPE.
5.15016
4.331088
1.189115
table = table.strip().splitlines() header = table.pop(0).split() if not header[0].upper() == "IMT": raise ValueError('first column in a table must be IMT') coeff_names = header[1:] for row in table: row = row.split() imt_name = row[0].upper() if imt_name == 'SA': raise ValueError('specify period as float value ' 'to declare SA IMT') imt_coeffs = dict(zip(coeff_names, map(float, row[1:]))) try: sa_period = float(imt_name) except Exception: if imt_name not in imt_module.registry: raise ValueError('unknown IMT %r' % imt_name) imt = imt_module.registry[imt_name]() self.non_sa_coeffs[imt] = imt_coeffs else: if sa_damping is None: raise TypeError('attribute "sa_damping" is required ' 'for tables defining SA') imt = imt_module.SA(sa_period, sa_damping) self.sa_coeffs[imt] = imt_coeffs
def _setup_table_from_str(self, table, sa_damping)
Builds the input tables from a string definition
3.103283
3.08874
1.004708
return (C["r1"] + C["r2"] * mag) *\ np.log(np.sqrt(rrup ** 2. + C["h1"] ** 2.))
def get_distance_term(self, C, rrup, mag)
Returns distance scaling term
4.900184
4.884715
1.003167
stddevs = [] zeros_array = np.zeros(nsites) for stddev in stddev_types: assert stddev in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES if stddev == const.StdDev.TOTAL: stddevs.append(np.sqrt(C["tau"] ** 2. + C["phi"] ** 2.) + zeros_array) elif stddev == const.StdDev.INTER_EVENT: stddevs.append(C["tau"] + zeros_array) elif stddev == const.StdDev.INTRA_EVENT: stddevs.append(C["phi"] + zeros_array) return stddevs
def get_stddevs(self, C, nsites, stddev_types)
Returns the standard deviations
2.208638
2.244793
0.983894
if imt.period < 0.2: return np.log(10**0.23) elif imt.period > 1.0: return np.log(10**0.27) else: return np.log(10**(0.23 + (imt.period - 0.2)/0.8 * 0.04))
def get_sigma(imt)
Return the value of the total sigma. :param imt: An :class:`openquake.hazardlib.imt.IMT` instance :returns: A float representing the total sigma value
2.899735
3.179808
0.911921
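A hedged re-statement of the period interpolation in get_sigma above, taking the period as a plain float instead of an IMT instance; the printed values are simple arithmetic on the coefficients already shown.

import numpy as np

def total_sigma(period):
    if period < 0.2:
        return np.log(10 ** 0.23)
    elif period > 1.0:
        return np.log(10 ** 0.27)
    # linear interpolation between 0.23 (at 0.2 s) and 0.27 (at 1.0 s)
    return np.log(10 ** (0.23 + (period - 0.2) / 0.8 * 0.04))

print(round(total_sigma(0.1), 4), round(total_sigma(0.6), 4), round(total_sigma(2.0), 4))
# 0.5296 0.5756 0.6217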
for field, type_info in fields_spec.items(): has_default = not isinstance(type_info, type) if field not in config and not has_default: raise RuntimeError( "Configuration not complete. %s missing" % field)
def check_config(self, config, fields_spec)
Check that `config` has each field in `fields_spec` if a default has not been provided.
4.711301
4.303918
1.094654
defaults = dict([(f, d) for f, d in fields_spec.items() if not isinstance(d, type)]) for field, default_value in defaults.items(): if field not in config: config[field] = default_value
def set_defaults(self, config, fields_spec)
Set default values got from `fields_spec` into the `config` dictionary
3.001233
3.015491
0.995272
def class_decorator(class_obj): original_method = getattr(class_obj, method_name) if sys.version[0] == '2': # Python 2 original_method = original_method.im_func def caller(fn, obj, catalogue, config=None, *args, **kwargs): config = config or {} self.set_defaults(config, fields) self.check_config(config, fields) return fn(obj, catalogue, config, *args, **kwargs) new_method = decorator(caller, original_method) setattr(class_obj, method_name, new_method) instance = class_obj() func = functools.partial(new_method, instance) func.fields = fields func.model = instance func.completeness = completeness functools.update_wrapper(func, new_method) self[class_obj.__name__] = func return class_obj return class_decorator
def add(self, method_name, completeness=False, **fields)
Class decorator. Decorate `method_name` by adding a call to `set_defaults` and `check_config`. Then, save into the registry a callable function with the same signature of the original method. :param str method_name: the method to decorate :param bool completeness: True if the method accepts in input an optional parameter for the completeness table :param fields: a dictionary of field spec corresponding to the keys expected to be present in the config dictionary for the decorated method, e.g. time_bin=numpy.float, b_value=1E-6
3.322329
3.111795
1.067657
def dec(fn): if completeness: def fn_with_config_and_c( catalogue, config, completeness_table=None): return fn(catalogue, completeness_table, **config) fn_with_config = fn_with_config_and_c else: def fn_with_config_without_c(catalogue, config): return fn(catalogue, **config) fn_with_config = fn_with_config_without_c fn_with_config.fields = fields fn_with_config.completeness = completeness fn.fields = fields self[fn.__name__] = fn_with_config return fn return dec
def add_function(self, completeness=False, **fields)
Function decorator. Decorate a function by adding a call to `set_defaults` and `check_config`. Then, save into the registry a callable function with the same signature of the original method :param fields: a dictionary of field spec, e.g. time_bin=numpy.float, b_value=1E-6
2.636609
2.695635
0.978103
C = self.COEFFS[imt] mean = (np.log(self.get_magnitude_term(C, rup) + self.get_distance_term(C, dists.rrup)) + self.get_site_amplification(C, sites)) stddevs = self.get_stddevs(C, sites.vs30.shape, rup.mag, stddev_types) return mean, stddevs
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types)
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
3.139919
3.177233
0.988256
b0, stress_drop = self._get_sof_terms(C, rup.rake) if rup.mag <= C["m1"]: return b0 else: # Calculate moment (equation 5) m_0 = 10.0 ** (1.5 * rup.mag + 16.05) # Get stress-drop scaling (equation 6) if rup.mag > C["m2"]: stress_drop += (C["b2"] * (C["m2"] - self.CONSTANTS["mstar"]) + (C["b3"] * (rup.mag - C["m2"]))) else: stress_drop += (C["b2"] * (rup.mag - self.CONSTANTS["mstar"])) stress_drop = np.exp(stress_drop) # Get corner frequency (equation 4) f0 = 4.9 * 1.0E6 * 3.2 * ((stress_drop / m_0) ** (1. / 3.)) return 1. / f0
def get_magnitude_term(self, C, rup)
Returns the magnitude scaling term in equation 3
4.569621
4.376541
1.044117
f_p = C["c1"] * rrup idx = np.logical_and(rrup > self.CONSTANTS["r1"], rrup <= self.CONSTANTS["r2"]) f_p[idx] = (C["c1"] * self.CONSTANTS["r1"]) +\ C["c2"] * (rrup[idx] - self.CONSTANTS["r1"]) idx = rrup > self.CONSTANTS["r2"] f_p[idx] = C["c1"] * self.CONSTANTS["r1"] +\ C["c2"] * (self.CONSTANTS["r2"] - self.CONSTANTS["r1"]) +\ C["c3"] * (rrup[idx] - self.CONSTANTS["r2"]) return f_p
def get_distance_term(self, C, rrup)
Returns the distance scaling term in equation 7
2.016462
1.985432
1.015629
if rake >= 45.0 and rake <= 135.0: # Reverse faulting return C["b0R"], C["b1R"] elif rake <= -45. and rake >= -135.0: # Normal faulting return C["b0N"], C["b1N"] else: # Strike slip return C["b0SS"], C["b1SS"]
def _get_sof_terms(self, C, rake)
Returns the style-of-faulting scaling parameters
3.212344
2.922731
1.09909
# Gets delta normalised z1 dz1 = sites.z1pt0 - np.exp(self._get_lnmu_z1(sites.vs30)) f_s = C["c5"] * dz1 # Calculates site amplification term f_s[dz1 > self.CONSTANTS["dz1ref"]] = (C["c5"] * self.CONSTANTS["dz1ref"]) idx = sites.vs30 > self.CONSTANTS["v1"] f_s[idx] += (C["c4"] * np.log(self.CONSTANTS["v1"] / C["vref"])) idx = np.logical_not(idx) f_s[idx] += (C["c4"] * np.log(sites.vs30[idx] / C["vref"])) return f_s
def get_site_amplification(self, C, sites)
Returns the site amplification term
4.469441
4.34065
1.029671
tau = self._get_tau(C, mag) + np.zeros(nsites) phi = self._get_phi(C, mag) + np.zeros(nsites) stddevs = [] for stddev in stddev_types: assert stddev in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES if stddev == const.StdDev.TOTAL: stddevs.append(np.sqrt(tau ** 2. + phi ** 2.)) elif stddev == const.StdDev.INTER_EVENT: stddevs.append(tau) elif stddev == const.StdDev.INTRA_EVENT: stddevs.append(phi) return stddevs
def get_stddevs(self, C, nsites, mag, stddev_types)
Returns the standard deviations
1.958251
1.97443
0.991806
if mag < 6.5: return C["tau1"] elif mag < 7.: return C["tau1"] + (C["tau2"] - C["tau1"]) * ((mag - 6.5) / 0.5) else: return C["tau2"]
def _get_tau(self, C, mag)
Returns magnitude dependent inter-event standard deviation (tau) (equation 14)
2.42675
2.277428
1.065566
if mag < 5.5: return C["phi1"] elif mag < 5.75: return C["phi1"] + (C["phi2"] - C["phi1"]) * ((mag - 5.5) / 0.25) else: return C["phi2"]
def _get_phi(self, C, mag)
Returns the magnitude dependent intra-event standard deviation (phi) (equation 15)
2.250347
2.155106
1.044193
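A hedged sketch tying _get_tau and _get_phi above to the total standard deviation in get_stddevs: tau and phi are interpolated from the magnitude and then combined in quadrature. The coefficient values are made up for illustration only.

import numpy as np

C = {'tau1': 0.40, 'tau2': 0.45, 'phi1': 0.60, 'phi2': 0.65}   # illustrative only

def get_tau(C, mag):
    if mag < 6.5:
        return C['tau1']
    elif mag < 7.0:
        return C['tau1'] + (C['tau2'] - C['tau1']) * ((mag - 6.5) / 0.5)
    return C['tau2']

def get_phi(C, mag):
    if mag < 5.5:
        return C['phi1']
    elif mag < 5.75:
        return C['phi1'] + (C['phi2'] - C['phi1']) * ((mag - 5.5) / 0.25)
    return C['phi2']

mag = 6.75
tau, phi = get_tau(C, mag), get_phi(C, mag)
sigma_total = np.sqrt(tau ** 2 + phi ** 2)       # quadrature sum, as in get_stddevs
print(tau, phi, round(float(sigma_total), 4))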
delta = 0.00750 * 10 ** (0.507 * mag) # computing R for different values of mag if mag < 6.5: R = np.sqrt(dists.rhypo ** 2 + delta ** 2) else: R = np.sqrt(dists.rrup ** 2 + delta ** 2) mean = ( # 1st term C['c1'] + C['c2'] * mag + # 2nd term C['c3'] * R - # 3rd term C['c4'] * np.log10(R) + # 4th term C['c5'] * hypo_depth ) # convert from base 10 to base e if imt == PGV(): mean = np.log(10 ** mean) else: # convert from cm/s**2 to g mean = np.log((10 ** mean) * 1e-2 / g) return mean
def _compute_mean(self, C, g, mag, hypo_depth, dists, imt)
Compute the mean according to the equation in Table 2, page 2275.
3.378587
3.270968
1.032901
with tempfile.TemporaryFile(mode='w+') as stream: ps = pstats.Stats(pstatfile, stream=stream) ps.sort_stats('cumtime') ps.print_stats(n) stream.seek(0) lines = list(stream) for i, line in enumerate(lines): if line.startswith(' ncalls'): break data = [] for line in lines[i + 2:]: columns = line.split() if len(columns) == 6: data.append(PStatData(*columns)) rows = [(rec.ncalls, rec.cumtime, rec.path) for rec in data] # here is an example of the expected output table: # ====== ======= ======================================================== # ncalls cumtime path # ====== ======= ======================================================== # 1 33.502 commands/run.py:77(_run) # 1 33.483 calculators/base.py:110(run) # 1 25.166 calculators/classical.py:115(execute) # 1 25.104 baselib.parallel.py:249(apply_reduce) # 1 25.099 calculators/classical.py:41(classical) # 1 25.099 hazardlib/calc/hazard_curve.py:164(classical) return views.rst_table(rows, header='ncalls cumtime path'.split())
def get_pstats(pstatfile, n)
Return profiling information as an RST table. :param pstatfile: path to a .pstat file :param n: the maximum number of stats to retrieve
4.277205
4.296143
0.995592
hcalc = base.calculators(readinput.get_oqparam(job_haz), calc_id) hcalc.run(concurrent_tasks=concurrent_tasks, pdb=pdb, exports=exports, **params) hc_id = hcalc.datastore.calc_id rcalc_id = logs.init(level=getattr(logging, loglevel.upper())) oq = readinput.get_oqparam(job_risk, hc_id=hc_id) rcalc = base.calculators(oq, rcalc_id) rcalc.run(pdb=pdb, exports=exports, **params) return rcalc
def run2(job_haz, job_risk, calc_id, concurrent_tasks, pdb, loglevel, exports, params)
Run both hazard and risk, one after the other
4.420427
4.192934
1.054256
dbserver.ensure_on() if param: params = oqvalidation.OqParam.check( dict(p.split('=', 1) for p in param.split(','))) else: params = {} if slowest: prof = cProfile.Profile() stmt = ('_run(job_ini, concurrent_tasks, pdb, loglevel, hc, ' 'exports, params)') prof.runctx(stmt, globals(), locals()) pstat = calc_path + '.pstat' prof.dump_stats(pstat) print('Saved profiling info in %s' % pstat) print(get_pstats(pstat, slowest)) else: _run(job_ini, concurrent_tasks, pdb, loglevel, hc, exports, params)
def run(job_ini, slowest=False, hc=None, param='', concurrent_tasks=None, exports='', loglevel='info', pdb=None)
Run a calculation bypassing the database layer
4.468775
4.507727
0.991359
''' Return an ordered dictionary with the available classes in the scalerel submodule with classes that derives from `base_class`, keyed by class name. ''' gsims = {} for fname in os.listdir(os.path.dirname(__file__)): if fname.endswith('.py'): modname, _ext = os.path.splitext(fname) mod = importlib.import_module( 'openquake.hazardlib.scalerel.' + modname) for cls in mod.__dict__.values(): if inspect.isclass(cls) and issubclass(cls, base_class) \ and cls != base_class \ and not inspect.isabstract(cls): gsims[cls.__name__] = cls return dict((k, gsims[k]) for k in sorted(gsims))
def _get_available_class(base_class)
Return an ordered dictionary of the available classes in the scalerel submodule that derive from `base_class`, keyed by class name.
3.17078
1.977244
1.603636
if host is None: host_cores = self.host_cores else: host_cores = [hc for hc in self.host_cores if hc[0] == host] lst = [] for host, _ in host_cores: ready = general.socket_ready((host, self.ctrl_port)) lst.append((host, 'running' if ready else 'not-running')) return lst
def status(self, host=None)
:returns: a list of pairs (hostname, 'running'|'not-running')
4.331424
3.486223
1.24244
if streamer and not general.socket_ready(self.task_in_url): # started self.streamer = multiprocessing.Process( target=_streamer, args=(self.master_host, self.task_in_port, self.task_out_port)) self.streamer.start() starting = [] for host, cores in self.host_cores: if self.status(host)[0][1] == 'running': print('%s:%s already running' % (host, self.ctrl_port)) continue ctrl_url = 'tcp://%s:%s' % (host, self.ctrl_port) if host == '127.0.0.1': # localhost args = [sys.executable] else: args = ['ssh', host, self.remote_python] args += ['-m', 'openquake.baselib.workerpool', ctrl_url, self.task_out_url, cores] starting.append(' '.join(args)) po = subprocess.Popen(args) self.pids.append(po.pid) return 'starting %s' % starting
def start(self, streamer=False)
Start multiple workerpools, possibly on remote servers via ssh, and possibly a streamer, depending on the `streamer` flag. :param streamer: if True, starts a streamer with multiprocessing.Process
4.401027
4.478606
0.982678
stopped = [] for host, _ in self.host_cores: if self.status(host)[0][1] == 'not-running': print('%s not running' % host) continue ctrl_url = 'tcp://%s:%s' % (host, self.ctrl_port) with z.Socket(ctrl_url, z.zmq.REQ, 'connect') as sock: sock.send('stop') stopped.append(host) if hasattr(self, 'streamer'): self.streamer.terminate() return 'stopped %s' % stopped
def stop(self)
Send a "stop" command to all worker pools
5.24889
4.934224
1.063772
setproctitle('oq-zworker') with sock: for cmd, args, mon in sock: parallel.safely_call(cmd, args, mon)
def worker(self, sock)
:param sock: a zeromq.Socket of kind PULL receiving (cmd, args)
22.181215
17.620947
1.258798
setproctitle('oq-zworkerpool %s' % self.ctrl_url[6:]) # strip tcp:// # start workers self.workers = [] for _ in range(self.num_workers): sock = z.Socket(self.task_out_port, z.zmq.PULL, 'connect') proc = multiprocessing.Process(target=self.worker, args=(sock,)) proc.start() sock.pid = proc.pid self.workers.append(sock) # start control loop accepting the commands stop and kill with z.Socket(self.ctrl_url, z.zmq.REP, 'bind') as ctrlsock: for cmd in ctrlsock: if cmd in ('stop', 'kill'): msg = getattr(self, cmd)() ctrlsock.send(msg) break elif cmd == 'getpid': ctrlsock.send(self.pid) elif cmd == 'get_num_workers': ctrlsock.send(self.num_workers)
def start(self)
Start worker processes and a control loop
4.592849
4.239015
1.083471
for sock in self.workers: os.kill(sock.pid, signal.SIGTERM) return 'WorkerPool %s stopped' % self.ctrl_url
def stop(self)
Send a SIGTERM to all worker processes
9.125393
7.830828
1.165316
for sock in self.workers: os.kill(sock.pid, signal.SIGKILL) return 'WorkerPool %s killed' % self.ctrl_url
def kill(self)
Send a SIGKILL to all worker processes
8.73343
7.444025
1.173213
address = address or (config.dbserver.host, DBSERVER_PORT) return 'running' if socket_ready(address) else 'not-running'
def get_status(address=None)
Check if the DbServer is up. :param address: pair (hostname, port) :returns: 'running' or 'not-running'
10.392028
7.95271
1.306728
if not config.dbserver.multi_user: remote_server_path = logs.dbcmd('get_path') if different_paths(server_path, remote_server_path): return('You are trying to contact a DbServer from another' ' instance (got %s, expected %s)\n' 'Check the configuration or stop the foreign' ' DbServer instance') % (remote_server_path, server_path)
def check_foreign()
Check that the DbServer we are talking to is the right one
10.174126
8.537409
1.191711
if get_status() == 'not-running': if config.dbserver.multi_user: sys.exit('Please start the DbServer: ' 'see the documentation for details') # otherwise start the DbServer automatically; NB: I tried to use # multiprocessing.Process(target=run_server).start() and apparently # it works, but then run-demos.sh hangs after the end of the first # calculation, but only if the DbServer is started by oq engine (!?) subprocess.Popen([sys.executable, '-m', 'openquake.server.dbserver', '-l', 'INFO']) # wait for the dbserver to start waiting_seconds = 30 while get_status() == 'not-running': if waiting_seconds == 0: sys.exit('The DbServer cannot be started after 30 seconds. ' 'Please check the configuration') time.sleep(1) waiting_seconds -= 1
def ensure_on()
Start the DbServer if it is off
7.379638
6.568725
1.123451
if dbhostport: # assume a string of the form "dbhost:port" dbhost, port = dbhostport.split(':') addr = (dbhost, int(port)) else: addr = (config.dbserver.listen, DBSERVER_PORT) # create the db directory if needed dirname = os.path.dirname(dbpath) if not os.path.exists(dirname): os.makedirs(dirname) # create and upgrade the db if needed db('PRAGMA foreign_keys = ON') # honor ON DELETE CASCADE actions.upgrade_db(db) # the line below is needed to work around a very subtle bug of sqlite; # we need new connections, see https://github.com/gem/oq-engine/pull/3002 db.close() # reset any computation left in the 'executing' state actions.reset_is_running(db) # configure logging and start the server logging.basicConfig(level=getattr(logging, loglevel)) DbServer(db, addr).start()
def run_server(dbpath=os.path.expanduser(config.dbserver.file), dbhostport=None, loglevel='WARN')
Run the DbServer on the given database file and port. If not given, use the settings in openquake.cfg.
5.676618
5.706903
0.994693
# give a nice name to the process w.setproctitle('oq-dbserver') dworkers = [] for _ in range(self.num_workers): sock = z.Socket(self.backend, z.zmq.REP, 'connect') threading.Thread(target=self.dworker, args=(sock,)).start() dworkers.append(sock) logging.warning('DB server started with %s on %s, pid %d', sys.executable, self.frontend, self.pid) if ZMQ: # start task_in->task_out streamer thread c = config.zworkers threading.Thread( target=w._streamer, args=(self.master_host, c.task_in_port, c.task_out_port) ).start() logging.warning('Task streamer started from %s -> %s', c.task_in_port, c.task_out_port) # start zworkers and wait a bit for them msg = self.master.start() logging.warning(msg) time.sleep(1) # start frontend->backend proxy for the database workers try: z.zmq.proxy(z.bind(self.frontend, z.zmq.ROUTER), z.bind(self.backend, z.zmq.DEALER)) except (KeyboardInterrupt, z.zmq.ZMQError): for sock in dworkers: sock.running = False sock.zsocket.close() logging.warning('DB server stopped') finally: self.stop()
def start(self)
Start database worker threads
5.58385
5.494311
1.016297
if ZMQ: logging.warning(self.master.stop()) z.context.term() self.db.close()
def stop(self)
Stop the DbServer and the zworkers if any
19.93317
14.152339
1.408472
mean = (C['c1'] + self._compute_magnitude_term(C, rup) + self._compute_distance_term(C, rup, rjb)) return mean
def _compute_mean(self, C, rup, rjb)
Compute mean value according to equation 30, page 1021.
3.822392
3.587911
1.065353
return C['c2'] * (rup.mag - 8.0) + C['c3'] * (rup.mag - 8.0) ** 2
def _compute_magnitude_term(self, C, rup)
This computes the term f1 in equation 8 of Drouet & Cotton (2015)
3.634901
3.206273
1.133684
return (C['c4'] + C['c5'] * rup.mag) * np.log( np.sqrt(rjb ** 2. + C['c6'] ** 2.)) + C['c7'] * rjb
def _compute_distance_term(self, C, rup, rjb)
This computes the term f2 in equation 8 of Drouet & Cotton (2015)
3.206475
3.131918
1.023805
cutoff = 6.056877878 rhypo = dists.rhypo.copy() rhypo[rhypo <= cutoff] = cutoff return C['c3'] * np.log(rhypo) + C['c4'] * rhypo
def _compute_term_3_4(self, dists, C)
Compute terms 3 and 4 in equation 1, page 1.
6.203712
5.828004
1.064466
S = self._get_site_type_dummy_variables(sites) return (C['c5'] * S)
def _get_site_amplification(self, sites, imt, C)
Compute the fifth term of equation (1), p. 1: ``c5 * S``
18.062935
9.11095
1.982552
S = np.zeros_like(sites.vs30) # S=0 for rock sites, S=1 otherwise pag 1. idxS = (sites.vs30 < 760.0) S[idxS] = 1 return S
def _get_site_type_dummy_variables(self, sites)
Get site type dummy variables, ``S`` (for rock and soil sites)
8.147133
6.186584
1.316903
mean = (self._compute_term_1_2(rup, C) + self._compute_term_3_4(dists, C) + self._get_site_amplification(sites, imt, C)) # convert from m/s**2 to g for PGA and from m/s to g for PSV # and divided this value for the ratio(SA_larger/SA_geo_mean) if imt.name == "PGA": mean = (np.exp(mean) / g) / C['r_SA'] else: W = (2. * np.pi)/imt.period mean = ((np.exp(mean) * W) / g) / C['r_SA'] return np.log(mean)
def _compute_mean(self, C, rup, dists, sites, imt)
Compute the mean value for PGA and pseudo-velocity response spectrum, as given in equation 1. Also converts pseudo-velocity response spectrum values to SA, using: SA = (PSV * W) / ratio(SA_larger/SA_geo_mean), where W = 2 * pi / T and T = period (sec).
6.297293
4.357153
1.445277
return C["c"] * np.log10(np.sqrt((rhypo ** 2.) + (C["h"] ** 2.))) +\ (C["d"] * rhypo)
def _compute_distance_scaling(self, C, rhypo)
Returns the distance scaling term accounting for geometric and anelastic attenuation
5.363792
5.256295
1.020451
site_term = np.zeros(len(vs30), dtype=float) # For soil sites add on the site coefficient site_term[vs30 < 760.0] = C["e"] return site_term
def _compute_site_scaling(self, C, vs30)
Returns the site scaling term as a simple coefficient
6.409783
5.925246
1.081775
if param == 'rrup': dist = rupture.surface.get_min_distance(mesh) elif param == 'rx': dist = rupture.surface.get_rx_distance(mesh) elif param == 'ry0': dist = rupture.surface.get_ry0_distance(mesh) elif param == 'rjb': dist = rupture.surface.get_joyner_boore_distance(mesh) elif param == 'rhypo': dist = rupture.hypocenter.distance_to_mesh(mesh) elif param == 'repi': dist = rupture.hypocenter.distance_to_mesh(mesh, with_depths=False) elif param == 'rcdpp': dist = rupture.get_cdppvalue(mesh) elif param == 'azimuth': dist = rupture.surface.get_azimuth(mesh) elif param == "rvolc": # Volcanic distance not yet supported, defaulting to zero dist = numpy.zeros_like(mesh.lons) else: raise ValueError('Unknown distance measure %r' % param) return dist
def get_distances(rupture, mesh, param)
:param rupture: a rupture :param mesh: a mesh of points or a site collection :param param: the kind of distance to compute (default rjb) :returns: an array of distances from the given mesh
2.634681
2.628232
1.002454