| column | type | values |
|---|---|---|
| body | string | lengths 26 to 98.2k |
| body_hash | int64 | -9,222,864,604,528,158,000 to 9,221,803,474B |
| docstring | string | lengths 1 to 16.8k |
| path | string | lengths 5 to 230 |
| name | string | lengths 1 to 96 |
| repository_name | string | lengths 7 to 89 |
| lang | string | 1 value |
| body_without_docstring | string | lengths 20 to 98.2k |
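If this preview corresponds to a Hugging Face-style dataset, the columns above can be loaded and inspected with the `datasets` library. A minimal sketch, assuming a hypothetical dataset identifier (the real one is not given in this preview):

```python
# Sketch only: load a dataset with the columns listed above and inspect one record.
from datasets import load_dataset

ds = load_dataset("org/python-functions-with-docstrings", split="train")  # placeholder ID
print(ds.column_names)  # body, body_hash, docstring, path, name, repository_name, lang, body_without_docstring

row = ds[0]
print(row["repository_name"], row["path"], row["name"])
print(row["docstring"])
print(row["body_without_docstring"][:200])
```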
def test_simple_grad():
    """Test the use of jax.grad"""
    dev = qml.device('default.mixed', wires=2)

    @qml.qnode(dev, interface='jax', diff_method='parameter-shift')
    def circuit(weights):
        qml.RX(weights[0], wires=0)
        qml.RZ(weights[1], wires=1)
        return qml.expval((qml.PauliZ(0) @ qml.PauliZ(1)))

    weights = jnp.array([0.1, 0.2])
    val = jax.grad(circuit)(weights)
    assert ('DeviceArray' in val.__repr__())
8,963,306,386,412,158,000
Test the use of jax.grad
tests/tape/interfaces/test_qnode_jax.py
test_simple_grad
PritishSehzpaul/pennylane
python
def test_simple_grad():
    dev = qml.device('default.mixed', wires=2)

    @qml.qnode(dev, interface='jax', diff_method='parameter-shift')
    def circuit(weights):
        qml.RX(weights[0], wires=0)
        qml.RZ(weights[1], wires=1)
        return qml.expval((qml.PauliZ(0) @ qml.PauliZ(1)))

    weights = jnp.array([0.1, 0.2])
    val = jax.grad(circuit)(weights)
    assert ('DeviceArray' in val.__repr__())
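The first record shows how `body` and `body_without_docstring` relate: the leading docstring is dropped and the rest of the function is kept. A minimal sketch of one way such a column could be derived with the standard `ast` module (illustrative only; this is not necessarily how the dataset was actually built):

```python
import ast

def strip_docstring(source: str) -> str:
    # Remove a function's leading docstring, keeping the rest of the code.
    # Illustrative helper, not part of the dataset's documented tooling.
    tree = ast.parse(source)
    func = tree.body[0]  # assumes the source contains a single function definition
    first = func.body[0] if func.body else None
    if isinstance(first, ast.Expr) and isinstance(first.value, ast.Constant) and isinstance(first.value.value, str):
        func.body = func.body[1:] or [ast.Pass()]
    return ast.unparse(tree)  # requires Python 3.9+
```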
@pytest.mark.parametrize('diff_method', ['parameter-shift', 'finite-diff'])
def test_differentiable_expand(diff_method):
    """Test that operation and nested tapes expansion
    is differentiable"""

    class U3(qml.U3):

        def expand(self):
            (theta, phi, lam) = self.data
            wires = self.wires
            with JacobianTape() as tape:
                qml.Rot(lam, theta, (- lam), wires=wires)
                qml.PhaseShift((phi + lam), wires=wires)
            return tape

    dev = qml.device('default.mixed', wires=1)
    a = jnp.array(0.1)
    p = jnp.array([0.1, 0.2, 0.3])

    @qnode(dev, diff_method=diff_method, interface='jax')
    def circuit(a, p):
        qml.RX(a, wires=0)
        U3(p[0], p[1], p[2], wires=0)
        return qml.expval(qml.PauliX(0))

    res = circuit(a, p)
    expected = (((np.cos(a) * np.cos(p[1])) * np.sin(p[0])) + (np.sin(a) * ((np.cos(p[2]) * np.sin(p[1])) + ((np.cos(p[0]) * np.cos(p[1])) * np.sin(p[2])))))
    tol = 1e-05
    assert np.allclose(res, expected, atol=tol, rtol=0)

    res = jax.grad(circuit, argnums=1)(a, p)
    expected = np.array([
        (np.cos(p[1]) * ((np.cos(a) * np.cos(p[0])) - ((np.sin(a) * np.sin(p[0])) * np.sin(p[2])))),
        (((np.cos(p[1]) * np.cos(p[2])) * np.sin(a)) - (np.sin(p[1]) * ((np.cos(a) * np.sin(p[0])) + ((np.cos(p[0]) * np.sin(a)) * np.sin(p[2]))))),
        (np.sin(a) * (((np.cos(p[0]) * np.cos(p[1])) * np.cos(p[2])) - (np.sin(p[1]) * np.sin(p[2])))),
    ])
    assert np.allclose(res, expected, atol=tol, rtol=0)
-6,303,920,968,199,098,000
Test that operation and nested tapes expansion is differentiable
tests/tape/interfaces/test_qnode_jax.py
test_differentiable_expand
PritishSehzpaul/pennylane
python
@pytest.mark.parametrize('diff_method', ['parameter-shift', 'finite-diff'])
def test_differentiable_expand(diff_method):
    """Test that operation and nested tapes expansion
    is differentiable"""

    class U3(qml.U3):

        def expand(self):
            (theta, phi, lam) = self.data
            wires = self.wires
            with JacobianTape() as tape:
                qml.Rot(lam, theta, (- lam), wires=wires)
                qml.PhaseShift((phi + lam), wires=wires)
            return tape

    dev = qml.device('default.mixed', wires=1)
    a = jnp.array(0.1)
    p = jnp.array([0.1, 0.2, 0.3])

    @qnode(dev, diff_method=diff_method, interface='jax')
    def circuit(a, p):
        qml.RX(a, wires=0)
        U3(p[0], p[1], p[2], wires=0)
        return qml.expval(qml.PauliX(0))

    res = circuit(a, p)
    expected = (((np.cos(a) * np.cos(p[1])) * np.sin(p[0])) + (np.sin(a) * ((np.cos(p[2]) * np.sin(p[1])) + ((np.cos(p[0]) * np.cos(p[1])) * np.sin(p[2])))))
    tol = 1e-05
    assert np.allclose(res, expected, atol=tol, rtol=0)

    res = jax.grad(circuit, argnums=1)(a, p)
    expected = np.array([
        (np.cos(p[1]) * ((np.cos(a) * np.cos(p[0])) - ((np.sin(a) * np.sin(p[0])) * np.sin(p[2])))),
        (((np.cos(p[1]) * np.cos(p[2])) * np.sin(a)) - (np.sin(p[1]) * ((np.cos(a) * np.sin(p[0])) + ((np.cos(p[0]) * np.sin(a)) * np.sin(p[2]))))),
        (np.sin(a) * (((np.cos(p[0]) * np.cos(p[1])) * np.cos(p[2])) - (np.sin(p[1]) * np.sin(p[2])))),
    ])
    assert np.allclose(res, expected, atol=tol, rtol=0)
def qtransform(qnode, a, framework=jnp):
    """Transforms every RY(y) gate in a circuit to RX(-a*cos(y))"""

    def construct(self, args, kwargs):
        """New quantum tape construct method, that performs
        the transform on the tape in a define-by-run manner"""
        t_op = []
        QNode.construct(self, args, kwargs)
        new_ops = []
        for o in self.qtape.operations:
            if isinstance(o, qml.RY):
                t_op.append(qml.RX(((- a) * framework.cos(o.data[0])), wires=o.wires))
                new_ops.append(t_op[(- 1)])
            else:
                new_ops.append(o)
        self.qtape._ops = new_ops
        self.qtape._update()

    import copy
    new_qnode = copy.deepcopy(qnode)
    new_qnode.construct = construct.__get__(new_qnode, QNode)
    return new_qnode
7,020,693,661,897,376,000
Transforms every RY(y) gate in a circuit to RX(-a*cos(y))
tests/tape/interfaces/test_qnode_jax.py
qtransform
PritishSehzpaul/pennylane
python
def qtransform(qnode, a, framework=jnp):

    def construct(self, args, kwargs):
        """New quantum tape construct method, that performs
        the transform on the tape in a define-by-run manner"""
        t_op = []
        QNode.construct(self, args, kwargs)
        new_ops = []
        for o in self.qtape.operations:
            if isinstance(o, qml.RY):
                t_op.append(qml.RX(((- a) * framework.cos(o.data[0])), wires=o.wires))
                new_ops.append(t_op[(- 1)])
            else:
                new_ops.append(o)
        self.qtape._ops = new_ops
        self.qtape._update()

    import copy
    new_qnode = copy.deepcopy(qnode)
    new_qnode.construct = construct.__get__(new_qnode, QNode)
    return new_qnode
@pytest.mark.parametrize('dev_name,diff_method', [('default.mixed', 'finite-diff'), ('default.qubit.autograd', 'parameter-shift')])
def test_transform(dev_name, diff_method, monkeypatch, tol):
    """Test an example transform"""
    monkeypatch.setattr(qml.operation.Operation, 'do_check_domain', False)
    dev = qml.device(dev_name, wires=1)

    @qnode(dev, interface='jax', diff_method=diff_method)
    def circuit(weights):
        op1 = qml.RY(weights[0], wires=0)
        op2 = qml.RX(weights[1], wires=0)
        return qml.expval(qml.PauliZ(wires=0))

    weights = np.array([0.32, 0.543])
    a = np.array(0.5)

    def loss(weights, a):
        new_circuit = qtransform(circuit, a)
        res = new_circuit(weights)
        res2 = circuit(jnp.sin(weights))
        return (res + res2)

    res = loss(weights, a)
    grad = jax.grad(loss, argnums=[0, 1])(weights, a)
    assert (len(grad) == 2)
    assert (grad[0].shape == weights.shape)
    assert (grad[1].shape == a.shape)
    tol = 1e-05
    assert np.allclose(res, 1.8244501889992706, atol=tol, rtol=0)
    assert np.allclose(grad[0], [(- 0.26610258), (- 0.47053553)], atol=tol, rtol=0)
    assert np.allclose(grad[1], 0.06486032, atol=tol, rtol=0)
3,371,618,771,618,339,000
Test an example transform
tests/tape/interfaces/test_qnode_jax.py
test_transform
PritishSehzpaul/pennylane
python
@pytest.mark.parametrize('dev_name,diff_method', [('default.mixed', 'finite-diff'), ('default.qubit.autograd', 'parameter-shift')])
def test_transform(dev_name, diff_method, monkeypatch, tol):
    monkeypatch.setattr(qml.operation.Operation, 'do_check_domain', False)
    dev = qml.device(dev_name, wires=1)

    @qnode(dev, interface='jax', diff_method=diff_method)
    def circuit(weights):
        op1 = qml.RY(weights[0], wires=0)
        op2 = qml.RX(weights[1], wires=0)
        return qml.expval(qml.PauliZ(wires=0))

    weights = np.array([0.32, 0.543])
    a = np.array(0.5)

    def loss(weights, a):
        new_circuit = qtransform(circuit, a)
        res = new_circuit(weights)
        res2 = circuit(jnp.sin(weights))
        return (res + res2)

    res = loss(weights, a)
    grad = jax.grad(loss, argnums=[0, 1])(weights, a)
    assert (len(grad) == 2)
    assert (grad[0].shape == weights.shape)
    assert (grad[1].shape == a.shape)
    tol = 1e-05
    assert np.allclose(res, 1.8244501889992706, atol=tol, rtol=0)
    assert np.allclose(grad[0], [(- 0.26610258), (- 0.47053553)], atol=tol, rtol=0)
    assert np.allclose(grad[1], 0.06486032, atol=tol, rtol=0)
def construct(self, args, kwargs):
    """New quantum tape construct method, that performs
    the transform on the tape in a define-by-run manner"""
    t_op = []
    QNode.construct(self, args, kwargs)
    new_ops = []
    for o in self.qtape.operations:
        if isinstance(o, qml.RY):
            t_op.append(qml.RX(((- a) * framework.cos(o.data[0])), wires=o.wires))
            new_ops.append(t_op[(- 1)])
        else:
            new_ops.append(o)
    self.qtape._ops = new_ops
    self.qtape._update()
5,238,125,990,119,009,000
New quantum tape construct method, that performs the transform on the tape in a define-by-run manner
tests/tape/interfaces/test_qnode_jax.py
construct
PritishSehzpaul/pennylane
python
def construct(self, args, kwargs):
    """New quantum tape construct method, that performs
    the transform on the tape in a define-by-run manner"""
    t_op = []
    QNode.construct(self, args, kwargs)
    new_ops = []
    for o in self.qtape.operations:
        if isinstance(o, qml.RY):
            t_op.append(qml.RX(((- a) * framework.cos(o.data[0])), wires=o.wires))
            new_ops.append(t_op[(- 1)])
        else:
            new_ops.append(o)
    self.qtape._ops = new_ops
    self.qtape._update()
def testDocxSetHeaderRequest(self):
    """Test DocxSetHeaderRequest"""
    pass
-4,734,426,132,462,906,000
Test DocxSetHeaderRequest
test/test_docx_set_header_request.py
testDocxSetHeaderRequest
Cloudmersive/Cloudmersive.APIClient.Python.Convert
python
def testDocxSetHeaderRequest(self):
    pass
@staticmethod
def updateCouplings(connection):
    """
    The shape has changed, which means couplings might have to change, be added or removed.
    To be sure all couplings in this connection are deleted and then build up from scratch.
    """
    'Remove all old couplings.'
    for coupling in connection.couplings():
        connection.Document.removeObject(coupling.Name)
    'Add couplings for every shape.'
    connection.addCouplings()
1,902,094,766,423,032,800
The shape has changed, which means couplings might have to change, be added or removed. To be sure all couplings in this connection are deleted and then build up from scratch.
Sea/adapter/connections/Connection.py
updateCouplings
FRidh/Sea
python
@staticmethod
def updateCouplings(connection):
    """
    The shape has changed, which means couplings might have to change, be added or removed.
    To be sure all couplings in this connection are deleted and then build up from scratch.
    """
    'Remove all old couplings.'
    for coupling in connection.couplings():
        connection.Document.removeObject(coupling.Name)
    'Add couplings for every shape.'
    connection.addCouplings()
@staticmethod
def addCouplings(connection):
    """
    Add couplings to the :attr:`connection`.

    :param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
    """
    for (comp_from, comp_to) in itertools.permutations(connection.Components, 2):
        coupling_sort = Connection.determineCouplingType(connection.ClassName, comp_from, comp_to)
        if (not coupling_sort):
            App.Console.PrintWarning('Cannot add coupling.\n')
            return
        for (sub_from, sub_to) in itertools.product(comp_from.subsystems(), comp_to.subsystems()):
            connection.makeCoupling(sub_from, sub_to, coupling_sort)
-1,349,375,267,989,642,800
Add couplings to the :attr:`connection`. :param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
Sea/adapter/connections/Connection.py
addCouplings
FRidh/Sea
python
@staticmethod
def addCouplings(connection):
    """
    Add couplings to the :attr:`connection`.

    :param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
    """
    for (comp_from, comp_to) in itertools.permutations(connection.Components, 2):
        coupling_sort = Connection.determineCouplingType(connection.ClassName, comp_from, comp_to)
        if (not coupling_sort):
            App.Console.PrintWarning('Cannot add coupling.\n')
            return
        for (sub_from, sub_to) in itertools.product(comp_from.subsystems(), comp_to.subsystems()):
            connection.makeCoupling(sub_from, sub_to, coupling_sort)
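In `addCouplings`, `itertools.permutations(connection.Components, 2)` yields every ordered pair of components (so both directions of a connection are covered), and `itertools.product(...)` then pairs every subsystem of the source with every subsystem of the target. A small standalone illustration of that iteration pattern, using placeholder component and subsystem names:

```python
import itertools

# Placeholder data: component name -> list of subsystem names.
components = {"beam": ["bending", "longitudinal"], "plate": ["bending"]}

# Ordered pairs: ('beam', 'plate') and ('plate', 'beam') are both visited.
for comp_from, comp_to in itertools.permutations(components, 2):
    # Every (subsystem_from, subsystem_to) combination for this ordered pair.
    for sub_from, sub_to in itertools.product(components[comp_from], components[comp_to]):
        print(f"coupling {comp_from}.{sub_from} -> {comp_to}.{sub_to}")
```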
@staticmethod
def determineCouplingType(connection_type, component_from, component_to):
    """
    Determine the type of coupling. Detects what type of connection the components have.
    Based on the type of connection and on the types of components a coupling is returned.

    :param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
    :param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
    """
    if connection_type:
        item = (connection_type, component_from.ClassName, component_to.ClassName)
        try:
            return Connection.coupling_options[item]
        except KeyError:
            txt = (((((('Could not determine the type of coupling for ' + component_from.ClassName) + ' to ') + component_to.ClassName) + ' with ') + connection_type) + '.\n')
            App.Console.PrintWarning(txt)
            return None
-5,089,945,891,643,414,000
Determine the type of coupling. Detects what type of connection the components have. Based on the type of connection and on the types of components a coupling is returned. :param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component` :param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
Sea/adapter/connections/Connection.py
determineCouplingType
FRidh/Sea
python
@staticmethod
def determineCouplingType(connection_type, component_from, component_to):
    """
    Determine the type of coupling. Detects what type of connection the components have.
    Based on the type of connection and on the types of components a coupling is returned.

    :param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
    :param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
    """
    if connection_type:
        item = (connection_type, component_from.ClassName, component_to.ClassName)
        try:
            return Connection.coupling_options[item]
        except KeyError:
            txt = (((((('Could not determine the type of coupling for ' + component_from.ClassName) + ' to ') + component_to.ClassName) + ' with ') + connection_type) + '.\n')
            App.Console.PrintWarning(txt)
            return None
@staticmethod
def makeCoupling(connection, subsystem_from, subsystem_to, sort):
    """
    Add a coupling to system.

    :param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
    :param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
    :param subsystem_from: string representing the type of subsystem
    :param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
    :param subsystem_to: string representing the type of subsystem
    :param sort: sort of coupling as specified in :class:`Sea.adapter.couplings.couplings_map`
    """
    from Sea.adapter.object_maps import couplings_map
    obj = connection.Document.addObject('App::FeaturePython', 'Coupling')
    couplings_map[sort](obj, connection, subsystem_from, subsystem_to)
    try:
        Sea.adapter.couplings.ViewProviderCoupling(obj.ViewObject)
    except AttributeError:
        pass
    obj.Label = ((((obj.ClassName + '_') + subsystem_from.ClassName.replace('Subsystem', '')) + '_to_') + subsystem_to.ClassName.replace('Subsystem', ''))
    logging.info('Sea: Created %s.', obj.Name)
    obj.Document.recompute()
    return obj
-7,819,181,587,509,591,000
Add a coupling to system. :param connection: an instance of :class:`Sea.adapter.baseclasses.Connection` :param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component` :param subsystem_from: string representing the type of subsystem :param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component` :param subsystem_to: string representing the type of subsystem :param sort: sort of coupling as specified in :class:`Sea.adapter.couplings.couplings_map`
Sea/adapter/connections/Connection.py
makeCoupling
FRidh/Sea
python
@staticmethod
def makeCoupling(connection, subsystem_from, subsystem_to, sort):
    """
    Add a coupling to system.

    :param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
    :param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
    :param subsystem_from: string representing the type of subsystem
    :param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
    :param subsystem_to: string representing the type of subsystem
    :param sort: sort of coupling as specified in :class:`Sea.adapter.couplings.couplings_map`
    """
    from Sea.adapter.object_maps import couplings_map
    obj = connection.Document.addObject('App::FeaturePython', 'Coupling')
    couplings_map[sort](obj, connection, subsystem_from, subsystem_to)
    try:
        Sea.adapter.couplings.ViewProviderCoupling(obj.ViewObject)
    except AttributeError:
        pass
    obj.Label = ((((obj.ClassName + '_') + subsystem_from.ClassName.replace('Subsystem', '')) + '_to_') + subsystem_to.ClassName.replace('Subsystem', ''))
    logging.info('Sea: Created %s.', obj.Name)
    obj.Document.recompute()
    return obj
@test_util.run_v1_only('b/120545219')
def testControlFlowInitialization(self):
    """Expects an error if an initializer is in a control-flow scope."""

    def cond(i, _):
        return (i < 10)

    def body(i, _):
        zero = array_ops.zeros([], dtype=dtypes.int32)
        v = variables.Variable(initial_value=zero)
        return ((i + 1), v.read_value())

    with self.assertRaisesRegex(ValueError, 'inside a control-flow'):
        control_flow_ops.while_loop(cond, body, [0, 0])
-3,174,679,683,478,967,000
Expects an error if an initializer is in a control-flow scope.
tensorflow/python/kernel_tests/variables_test.py
testControlFlowInitialization
ArnovanHilten/tensorflow
python
@test_util.run_v1_only('b/120545219')
def testControlFlowInitialization(self):

    def cond(i, _):
        return (i < 10)

    def body(i, _):
        zero = array_ops.zeros([], dtype=dtypes.int32)
        v = variables.Variable(initial_value=zero)
        return ((i + 1), v.read_value())

    with self.assertRaisesRegex(ValueError, 'inside a control-flow'):
        control_flow_ops.while_loop(cond, body, [0, 0])
def search(taxonKey=None, repatriated=None, kingdomKey=None, phylumKey=None, classKey=None, orderKey=None, familyKey=None, genusKey=None, subgenusKey=None, scientificName=None, country=None, publishingCountry=None, hasCoordinate=None, typeStatus=None, recordNumber=None, lastInterpreted=None, continent=None, geometry=None, recordedBy=None, recordedByID=None, identifiedByID=None, basisOfRecord=None, datasetKey=None, eventDate=None, catalogNumber=None, year=None, month=None, decimalLatitude=None, decimalLongitude=None, elevation=None, depth=None, institutionCode=None, collectionCode=None, hasGeospatialIssue=None, issue=None, q=None, spellCheck=None, mediatype=None, limit=300, offset=0, establishmentMeans=None, facet=None, facetMincount=None, facetMultiselect=None, timeout=60, **kwargs): '\n Search GBIF occurrences\n\n :param taxonKey: [int] A GBIF occurrence identifier\n :param q: [str] Simple search parameter. The value for this parameter can be a simple word or a phrase.\n :param spellCheck: [bool] If ``True`` ask GBIF to check your spelling of the value passed to the ``search`` parameter.\n IMPORTANT: This only checks the input to the ``search`` parameter, and no others. Default: ``False``\n :param repatriated: [str] Searches for records whose publishing country is different to the country where the record was recorded in\n :param kingdomKey: [int] Kingdom classification key\n :param phylumKey: [int] Phylum classification key\n :param classKey: [int] Class classification key\n :param orderKey: [int] Order classification key\n :param familyKey: [int] Family classification key\n :param genusKey: [int] Genus classification key\n :param subgenusKey: [int] Subgenus classification key\n :param scientificName: [str] A scientific name from the GBIF backbone. All included and synonym taxa are included in the search.\n :param datasetKey: [str] The occurrence dataset key (a uuid)\n :param catalogNumber: [str] An identifier of any form assigned by the source within a physical collection or digital dataset for the record which may not unique, but should be fairly unique in combination with the institution and collection code.\n :param recordedBy: [str] The person who recorded the occurrence.\n :param recordedByID: [str] Identifier (e.g. ORCID) for the person who recorded the occurrence\n :param identifiedByID: [str] Identifier (e.g. ORCID) for the person who provided the taxonomic identification of the occurrence.\n :param collectionCode: [str] An identifier of any form assigned by the source to identify the physical collection or digital dataset uniquely within the text of an institution.\n :param institutionCode: [str] An identifier of any form assigned by the source to identify the institution the record belongs to. Not guaranteed to be que.\n :param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. 
See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2\n :param basisOfRecord: [str] Basis of record, as defined in our BasisOfRecord enum here http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are:\n\n - ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.\n - ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.\n - ``LIVING_SPECIMEN`` An occurrence record describing a living specimen.\n - ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.\n - ``MATERIAL_CITATION`` An occurrence record based on a reference to a scholarly publication.\n - ``OBSERVATION`` An occurrence record describing an observation.\n - ``OCCURRENCE`` An existence of an organism at a particular place and time. No more specific basis.\n - ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.\n\n :param eventDate: [date] Occurrence date in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or\n MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990``\n wouldn\'t work)\n :param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries,\n smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn\'t work)\n :param month: [int] The month of the year, starting with 1 for January. Supports range queries,\n smaller,larger (e.g., ``1,2``, whereas ``2,1`` wouldn\'t work)\n :param decimalLatitude: [float] Latitude in decimals between -90 and 90 based on WGS 84.\n Supports range queries, smaller,larger (e.g., ``25,30``, whereas ``30,25`` wouldn\'t work)\n :param decimalLongitude: [float] Longitude in decimals between -180 and 180 based on WGS 84.\n Supports range queries (e.g., ``-0.4,-0.2``, whereas ``-0.2,-0.4`` wouldn\'t work).\n :param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the\n country in which the occurrence was recorded.\n :param elevation: [int/str] Elevation in meters above sea level. Supports range queries, smaller,larger\n (e.g., ``5,30``, whereas ``30,5`` wouldn\'t work)\n :param depth: [int/str] Depth in meters relative to elevation. For example 10 meters below a\n lake surface with given elevation. Supports range queries, smaller,larger (e.g., ``5,30``,\n whereas ``30,5`` wouldn\'t work)\n :param geometry: [str] Searches for occurrences inside a polygon described in Well Known\n Text (WKT) format. A WKT shape written as either POINT, LINESTRING, LINEARRING\n POLYGON, or MULTIPOLYGON. Example of a polygon: ``((30.1 10.1, 20, 20 40, 40 40, 30.1 10.1))`` would be queried as http://bit.ly/1BzNwDq.\n Polygons must have counter-clockwise ordering of points.\n :param hasGeospatialIssue: [bool] Includes/excludes occurrence records which contain spatial\n issues (as determined in our record interpretation), i.e. ``hasGeospatialIssue=TRUE``\n returns only those records with spatial issues while ``hasGeospatialIssue=FALSE`` includes\n only records without spatial issues. The absence of this parameter returns any\n record with or without spatial issues.\n :param issue: [str] One or more of many possible issues with each occurrence record. See\n Details. Issues passed to this parameter filter results by the issue.\n :param hasCoordinate: [bool] Return only occurence records with lat/long data (``True``) or\n all records (``False``, default).\n :param typeStatus: [str] Type status of the specimen. One of many options. 
See ?typestatus\n :param recordNumber: [int] Number recorded by collector of the data, different from GBIF record\n number. See http://rs.tdwg.org/dwc/terms/#recordNumber} for more info\n :param lastInterpreted: [date] Date the record was last modified in GBIF, in ISO 8601 format:\n yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g.,\n ``1990,1991``, whereas ``1991,1990`` wouldn\'t work)\n :param continent: [str] Continent. One of ``africa``, ``antarctica``, ``asia``, ``europe``, ``north_america``\n (North America includes the Caribbean and reachies down and includes Panama), ``oceania``,\n or ``south_america``\n :param fields: [str] Default (``all``) returns all fields. ``minimal`` returns just taxon name,\n key, latitude, and longitude. Or specify each field you want returned by name, e.g.\n ``fields = c(\'name\',\'latitude\',\'elevation\')``.\n :param mediatype: [str] Media type. Default is ``NULL``, so no filtering on mediatype. Options:\n ``NULL``, ``MovingImage``, ``Sound``, and ``StillImage``\n :param limit: [int] Number of results to return. Default: ``300``\n :param offset: [int] Record to start at. Default: ``0``\n :param facet: [str] a character vector of length 1 or greater\n :param establishmentMeans: [str] EstablishmentMeans, possible values include: INTRODUCED,\n INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN\n :param facetMincount: [int] minimum number of records to be included in the faceting results\n :param facetMultiselect: [bool] Set to ``True`` to still return counts for values that are not currently\n filtered. See examples. Default: ``False``\n\n :return: A dictionary\n\n Usage::\n\n from pygbif import occurrences\n occurrences.search(taxonKey = 3329049)\n\n # Return 2 results, this is the default by the way\n occurrences.search(taxonKey=3329049, limit=2)\n\n # Instead of getting a taxon key first, you can search for a name directly\n # However, note that using this approach (with `scientificName="..."`)\n # you are getting synonyms too. The results for using `scientifcName` and\n # `taxonKey` parameters are the same in this case, but I wouldn\'t be surprised if for some\n # names they return different results\n occurrences.search(scientificName = \'Ursus americanus\')\n from pygbif import species\n key = species.name_backbone(name = \'Ursus americanus\', rank=\'species\')[\'usageKey\']\n occurrences.search(taxonKey = key)\n\n # Search by dataset key\n occurrences.search(datasetKey=\'7b5d6a48-f762-11e1-a439-00145eb45e9a\', limit=20)\n\n # Search by catalog number\n occurrences.search(catalogNumber="49366", limit=20)\n # occurrences.search(catalogNumber=["49366","Bird.27847588"], limit=20)\n\n # Use paging parameters (limit and offset) to page. 
Note the different results\n # for the two queries below.\n occurrences.search(datasetKey=\'7b5d6a48-f762-11e1-a439-00145eb45e9a\', offset=10, limit=5)\n occurrences.search(datasetKey=\'7b5d6a48-f762-11e1-a439-00145eb45e9a\', offset=20, limit=5)\n\n # Many dataset keys\n # occurrences.search(datasetKey=["50c9509d-22c7-4a22-a47d-8c48425ef4a7", "7b5d6a48-f762-11e1-a439-00145eb45e9a"], limit=20)\n\n # Search by collector name\n res = occurrences.search(recordedBy="smith", limit=20)\n [ x[\'recordedBy\'] for x in res[\'results\'] ]\n\n # Many collector names\n # occurrences.search(recordedBy=["smith","BJ Stacey"], limit=20)\n \n # recordedByID\n occurrences.search(recordedByID="https://orcid.org/0000-0003-1691-239X", limit = 3)\n\n # identifiedByID\n occurrences.search(identifiedByID="https://orcid.org/0000-0003-1691-239X", limit = 3)\n\n # Search for many species\n splist = [\'Cyanocitta stelleri\', \'Junco hyemalis\', \'Aix sponsa\']\n keys = [ species.name_suggest(x)[0][\'key\'] for x in splist ]\n out = [ occurrences.search(taxonKey = x, limit=1) for x in keys ]\n [ x[\'results\'][0][\'speciesKey\'] for x in out ]\n\n # Search - q parameter\n occurrences.search(q = "kingfisher", limit=20)\n ## spell check - only works with the `search` parameter\n ### spelled correctly - same result as above call\n occurrences.search(q = "kingfisher", limit=20, spellCheck = True)\n ### spelled incorrectly - stops with suggested spelling\n occurrences.search(q = "kajsdkla", limit=20, spellCheck = True)\n ### spelled incorrectly - stops with many suggested spellings\n ### and number of results for each\n occurrences.search(q = "helir", limit=20, spellCheck = True)\n\n # Search on latitidue and longitude\n occurrences.search(decimalLatitude=50, decimalLongitude=10, limit=2)\n\n # Search on a bounding box\n ## in well known text format\n occurrences.search(geometry=\'POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))\', limit=20)\n from pygbif import species\n key = species.name_suggest(q=\'Aesculus hippocastanum\')[0][\'key\']\n occurrences.search(taxonKey=key, geometry=\'POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))\', limit=20)\n ## multipolygon\n wkt = \'MULTIPOLYGON(((-123 38, -123 43, -116 43, -116 38, -123 38)),((-97 41, -97 45, -93 45, -93 41, -97 41)))\'\n occurrences.search(geometry = wkt, limit = 20)\n\n # Search on country\n occurrences.search(country=\'US\', limit=20)\n occurrences.search(country=\'FR\', limit=20)\n occurrences.search(country=\'DE\', limit=20)\n\n # Get only occurrences with lat/long data\n occurrences.search(taxonKey=key, hasCoordinate=True, limit=20)\n\n # Get only occurrences that were recorded as living specimens\n occurrences.search(taxonKey=key, basisOfRecord="LIVING_SPECIMEN", hasCoordinate=True, limit=20)\n\n # Get occurrences for a particular eventDate\n occurrences.search(taxonKey=key, eventDate="2013", limit=20)\n occurrences.search(taxonKey=key, year="2013", limit=20)\n occurrences.search(taxonKey=key, month="6", limit=20)\n\n # Get occurrences based on depth\n key = species.name_backbone(name=\'Salmo salar\', kingdom=\'animals\')[\'usageKey\']\n occurrences.search(taxonKey=key, depth="5", limit=20)\n\n # Get occurrences based on elevation\n key = species.name_backbone(name=\'Puma concolor\', kingdom=\'animals\')[\'usageKey\']\n occurrences.search(taxonKey=key, elevation=50, hasCoordinate=True, limit=20)\n\n # Get occurrences based on institutionCode\n occurrences.search(institutionCode="TLMF", limit=20)\n\n # Get occurrences based on collectionCode\n 
occurrences.search(collectionCode="Floristic Databases MV - Higher Plants", limit=20)\n\n # Get only those occurrences with spatial issues\n occurrences.search(taxonKey=key, hasGeospatialIssue=True, limit=20)\n\n # Search using a query string\n occurrences.search(q="kingfisher", limit=20)\n\n # Range queries\n ## See Detail for parameters that support range queries\n ### this is a range depth, with lower/upper limits in character string\n occurrences.search(depth=\'50,100\')\n\n ## Range search with year\n occurrences.search(year=\'1999,2000\', limit=20)\n\n ## Range search with latitude\n occurrences.search(decimalLatitude=\'29.59,29.6\')\n\n # Search by specimen type status\n ## Look for possible values of the typeStatus parameter looking at the typestatus dataset\n occurrences.search(typeStatus = \'allotype\')\n\n # Search by specimen record number\n ## This is the record number of the person/group that submitted the data, not GBIF\'s numbers\n ## You can see that many different groups have record number 1, so not super helpful\n occurrences.search(recordNumber = 1)\n\n # Search by last time interpreted: Date the record was last modified in GBIF\n ## The lastInterpreted parameter accepts ISO 8601 format dates, including\n ## yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Range queries are accepted for lastInterpreted\n occurrences.search(lastInterpreted = \'2014-04-01\')\n\n # Search by continent\n ## One of africa, antarctica, asia, europe, north_america, oceania, or south_america\n occurrences.search(continent = \'south_america\')\n occurrences.search(continent = \'africa\')\n occurrences.search(continent = \'oceania\')\n occurrences.search(continent = \'antarctica\')\n\n # Search for occurrences with images\n occurrences.search(mediatype = \'StillImage\')\n occurrences.search(mediatype = \'MovingImage\')\n x = occurrences.search(mediatype = \'Sound\')\n [z[\'media\'] for z in x[\'results\']]\n\n # Query based on issues\n occurrences.search(taxonKey=1, issue=\'DEPTH_UNLIKELY\')\n occurrences.search(taxonKey=1, issue=[\'DEPTH_UNLIKELY\',\'COORDINATE_ROUNDED\'])\n # Show all records in the Arizona State Lichen Collection that cant be matched to the GBIF\n # backbone properly:\n occurrences.search(datasetKey=\'84c0e1a0-f762-11e1-a439-00145eb45e9a\', issue=[\'TAXON_MATCH_NONE\',\'TAXON_MATCH_HIGHERRANK\'])\n\n # If you pass in an invalid polygon you get hopefully informative errors\n ### the WKT string is fine, but GBIF says bad polygon\n wkt = \'POLYGON((-178.59375 64.83258989321493,-165.9375 59.24622380205539,\n -147.3046875 59.065977905449806,-130.78125 51.04484764446178,-125.859375 36.70806354647625,\n -112.1484375 23.367471303759686,-105.1171875 16.093320185359257,-86.8359375 9.23767076398516,\n -82.96875 2.9485268155066175,-82.6171875 -14.812060061226388,-74.8828125 -18.849111862023985,\n -77.34375 -47.661687803329166,-84.375 -49.975955187343295,174.7265625 -50.649460483096114,\n 179.296875 -42.19189902447192,-176.8359375 -35.634976650677295,176.8359375 -31.835565983656227,\n 163.4765625 -6.528187613695323,152.578125 1.894796132058301,135.703125 4.702353722559447,\n 127.96875 15.077427674847987,127.96875 23.689804541429606,139.921875 32.06861069132688,\n 149.4140625 42.65416193033991,159.2578125 48.3160811030533,168.3984375 57.019804336633165,\n 178.2421875 59.95776046458139,-179.6484375 61.16708631440347,-178.59375 64.83258989321493))\'\n occurrences.search(geometry = wkt)\n\n # Faceting\n ## return no occurrence records with limit=0\n x = occurrences.search(facet = "country", limit = 0)\n 
x[\'facets\']\n\n ## also return occurrence records\n x = occurrences.search(facet = "establishmentMeans", limit = 10)\n x[\'facets\']\n x[\'results\']\n\n ## multiple facet variables\n x = occurrences.search(facet = ["country", "basisOfRecord"], limit = 10)\n x[\'results\']\n x[\'facets\']\n x[\'facets\'][\'country\']\n x[\'facets\'][\'basisOfRecord\']\n x[\'facets\'][\'basisOfRecord\'][\'count\']\n\n ## set a minimum facet count\n x = occurrences.search(facet = "country", facetMincount = 30000000L, limit = 0)\n x[\'facets\']\n\n ## paging per each faceted variable\n ### do so by passing in variables like "country" + "_facetLimit" = "country_facetLimit"\n ### or "country" + "_facetOffset" = "country_facetOffset"\n x = occurrences.search(\n facet = ["country", "basisOfRecord", "hasCoordinate"],\n country_facetLimit = 3,\n basisOfRecord_facetLimit = 6,\n limit = 0\n )\n x[\'facets\']\n\n # requests package options\n ## There\'s an acceptable set of requests options ([\'timeout\', \'cookies\', \'auth\',\n ## \'allow_redirects\', \'proxies\', \'verify\', \'stream\', \'cert\']) you can pass\n ## in via **kwargs, e.g., set a timeout. Default timeout set to 60 seconds.\n x = occurrences.search(timeout = 1)\n ' url = (gbif_baseurl + 'occurrence/search') args = {'taxonKey': taxonKey, 'repatriated': repatriated, 'kingdomKey': kingdomKey, 'phylumKey': phylumKey, 'classKey': classKey, 'orderKey': orderKey, 'familyKey': familyKey, 'genusKey': genusKey, 'subgenusKey': subgenusKey, 'scientificName': scientificName, 'country': country, 'publishingCountry': publishingCountry, 'hasCoordinate': bool2str(hasCoordinate), 'typeStatus': typeStatus, 'recordNumber': recordNumber, 'lastInterpreted': lastInterpreted, 'continent': continent, 'geometry': geometry, 'recordedBy': recordedBy, 'recordedByID': recordedByID, 'identifiedByID': identifiedByID, 'basisOfRecord': basisOfRecord, 'datasetKey': datasetKey, 'eventDate': eventDate, 'catalogNumber': catalogNumber, 'year': year, 'month': month, 'decimalLatitude': decimalLatitude, 'decimalLongitude': decimalLongitude, 'elevation': elevation, 'depth': depth, 'institutionCode': institutionCode, 'collectionCode': collectionCode, 'hasGeospatialIssue': bool2str(hasGeospatialIssue), 'issue': issue, 'q': q, 'spellCheck': bool2str(spellCheck), 'mediatype': mediatype, 'limit': limit, 'offset': offset, 'establishmentMeans': establishmentMeans, 'facetMincount': facetMincount, 'facet': facet, 'facetMultiselect': bool2str(facetMultiselect)} gbif_kwargs = {key: kwargs[key] for key in kwargs if (key not in requests_argset)} if (gbif_kwargs is not None): xx = dict(zip([re.sub('_', '.', x) for x in gbif_kwargs.keys()], gbif_kwargs.values())) args.update(xx) kwargs = {key: kwargs[key] for key in kwargs if (key in requests_argset)} out = gbif_GET(url, args, **kwargs) return out
2,407,195,931,292,612,000
Search GBIF occurrences :param taxonKey: [int] A GBIF occurrence identifier :param q: [str] Simple search parameter. The value for this parameter can be a simple word or a phrase. :param spellCheck: [bool] If ``True`` ask GBIF to check your spelling of the value passed to the ``search`` parameter. IMPORTANT: This only checks the input to the ``search`` parameter, and no others. Default: ``False`` :param repatriated: [str] Searches for records whose publishing country is different to the country where the record was recorded in :param kingdomKey: [int] Kingdom classification key :param phylumKey: [int] Phylum classification key :param classKey: [int] Class classification key :param orderKey: [int] Order classification key :param familyKey: [int] Family classification key :param genusKey: [int] Genus classification key :param subgenusKey: [int] Subgenus classification key :param scientificName: [str] A scientific name from the GBIF backbone. All included and synonym taxa are included in the search. :param datasetKey: [str] The occurrence dataset key (a uuid) :param catalogNumber: [str] An identifier of any form assigned by the source within a physical collection or digital dataset for the record which may not unique, but should be fairly unique in combination with the institution and collection code. :param recordedBy: [str] The person who recorded the occurrence. :param recordedByID: [str] Identifier (e.g. ORCID) for the person who recorded the occurrence :param identifiedByID: [str] Identifier (e.g. ORCID) for the person who provided the taxonomic identification of the occurrence. :param collectionCode: [str] An identifier of any form assigned by the source to identify the physical collection or digital dataset uniquely within the text of an institution. :param institutionCode: [str] An identifier of any form assigned by the source to identify the institution the record belongs to. Not guaranteed to be que. :param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 :param basisOfRecord: [str] Basis of record, as defined in our BasisOfRecord enum here http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are: - ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen. - ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people. - ``LIVING_SPECIMEN`` An occurrence record describing a living specimen. - ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine. - ``MATERIAL_CITATION`` An occurrence record based on a reference to a scholarly publication. - ``OBSERVATION`` An occurrence record describing an observation. - ``OCCURRENCE`` An existence of an organism at a particular place and time. No more specific basis. - ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen. :param eventDate: [date] Occurrence date in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work) :param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work) :param month: [int] The month of the year, starting with 1 for January. 
Supports range queries, smaller,larger (e.g., ``1,2``, whereas ``2,1`` wouldn't work) :param decimalLatitude: [float] Latitude in decimals between -90 and 90 based on WGS 84. Supports range queries, smaller,larger (e.g., ``25,30``, whereas ``30,25`` wouldn't work) :param decimalLongitude: [float] Longitude in decimals between -180 and 180 based on WGS 84. Supports range queries (e.g., ``-0.4,-0.2``, whereas ``-0.2,-0.4`` wouldn't work). :param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. :param elevation: [int/str] Elevation in meters above sea level. Supports range queries, smaller,larger (e.g., ``5,30``, whereas ``30,5`` wouldn't work) :param depth: [int/str] Depth in meters relative to elevation. For example 10 meters below a lake surface with given elevation. Supports range queries, smaller,larger (e.g., ``5,30``, whereas ``30,5`` wouldn't work) :param geometry: [str] Searches for occurrences inside a polygon described in Well Known Text (WKT) format. A WKT shape written as either POINT, LINESTRING, LINEARRING POLYGON, or MULTIPOLYGON. Example of a polygon: ``((30.1 10.1, 20, 20 40, 40 40, 30.1 10.1))`` would be queried as http://bit.ly/1BzNwDq. Polygons must have counter-clockwise ordering of points. :param hasGeospatialIssue: [bool] Includes/excludes occurrence records which contain spatial issues (as determined in our record interpretation), i.e. ``hasGeospatialIssue=TRUE`` returns only those records with spatial issues while ``hasGeospatialIssue=FALSE`` includes only records without spatial issues. The absence of this parameter returns any record with or without spatial issues. :param issue: [str] One or more of many possible issues with each occurrence record. See Details. Issues passed to this parameter filter results by the issue. :param hasCoordinate: [bool] Return only occurence records with lat/long data (``True``) or all records (``False``, default). :param typeStatus: [str] Type status of the specimen. One of many options. See ?typestatus :param recordNumber: [int] Number recorded by collector of the data, different from GBIF record number. See http://rs.tdwg.org/dwc/terms/#recordNumber} for more info :param lastInterpreted: [date] Date the record was last modified in GBIF, in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work) :param continent: [str] Continent. One of ``africa``, ``antarctica``, ``asia``, ``europe``, ``north_america`` (North America includes the Caribbean and reachies down and includes Panama), ``oceania``, or ``south_america`` :param fields: [str] Default (``all``) returns all fields. ``minimal`` returns just taxon name, key, latitude, and longitude. Or specify each field you want returned by name, e.g. ``fields = c('name','latitude','elevation')``. :param mediatype: [str] Media type. Default is ``NULL``, so no filtering on mediatype. Options: ``NULL``, ``MovingImage``, ``Sound``, and ``StillImage`` :param limit: [int] Number of results to return. Default: ``300`` :param offset: [int] Record to start at. 
Default: ``0`` :param facet: [str] a character vector of length 1 or greater :param establishmentMeans: [str] EstablishmentMeans, possible values include: INTRODUCED, INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN :param facetMincount: [int] minimum number of records to be included in the faceting results :param facetMultiselect: [bool] Set to ``True`` to still return counts for values that are not currently filtered. See examples. Default: ``False`` :return: A dictionary Usage:: from pygbif import occurrences occurrences.search(taxonKey = 3329049) # Return 2 results, this is the default by the way occurrences.search(taxonKey=3329049, limit=2) # Instead of getting a taxon key first, you can search for a name directly # However, note that using this approach (with `scientificName="..."`) # you are getting synonyms too. The results for using `scientifcName` and # `taxonKey` parameters are the same in this case, but I wouldn't be surprised if for some # names they return different results occurrences.search(scientificName = 'Ursus americanus') from pygbif import species key = species.name_backbone(name = 'Ursus americanus', rank='species')['usageKey'] occurrences.search(taxonKey = key) # Search by dataset key occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', limit=20) # Search by catalog number occurrences.search(catalogNumber="49366", limit=20) # occurrences.search(catalogNumber=["49366","Bird.27847588"], limit=20) # Use paging parameters (limit and offset) to page. Note the different results # for the two queries below. occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=10, limit=5) occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=20, limit=5) # Many dataset keys # occurrences.search(datasetKey=["50c9509d-22c7-4a22-a47d-8c48425ef4a7", "7b5d6a48-f762-11e1-a439-00145eb45e9a"], limit=20) # Search by collector name res = occurrences.search(recordedBy="smith", limit=20) [ x['recordedBy'] for x in res['results'] ] # Many collector names # occurrences.search(recordedBy=["smith","BJ Stacey"], limit=20) # recordedByID occurrences.search(recordedByID="https://orcid.org/0000-0003-1691-239X", limit = 3) # identifiedByID occurrences.search(identifiedByID="https://orcid.org/0000-0003-1691-239X", limit = 3) # Search for many species splist = ['Cyanocitta stelleri', 'Junco hyemalis', 'Aix sponsa'] keys = [ species.name_suggest(x)[0]['key'] for x in splist ] out = [ occurrences.search(taxonKey = x, limit=1) for x in keys ] [ x['results'][0]['speciesKey'] for x in out ] # Search - q parameter occurrences.search(q = "kingfisher", limit=20) ## spell check - only works with the `search` parameter ### spelled correctly - same result as above call occurrences.search(q = "kingfisher", limit=20, spellCheck = True) ### spelled incorrectly - stops with suggested spelling occurrences.search(q = "kajsdkla", limit=20, spellCheck = True) ### spelled incorrectly - stops with many suggested spellings ### and number of results for each occurrences.search(q = "helir", limit=20, spellCheck = True) # Search on latitidue and longitude occurrences.search(decimalLatitude=50, decimalLongitude=10, limit=2) # Search on a bounding box ## in well known text format occurrences.search(geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20) from pygbif import species key = species.name_suggest(q='Aesculus hippocastanum')[0]['key'] occurrences.search(taxonKey=key, geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20) ## 
multipolygon wkt = 'MULTIPOLYGON(((-123 38, -123 43, -116 43, -116 38, -123 38)),((-97 41, -97 45, -93 45, -93 41, -97 41)))' occurrences.search(geometry = wkt, limit = 20) # Search on country occurrences.search(country='US', limit=20) occurrences.search(country='FR', limit=20) occurrences.search(country='DE', limit=20) # Get only occurrences with lat/long data occurrences.search(taxonKey=key, hasCoordinate=True, limit=20) # Get only occurrences that were recorded as living specimens occurrences.search(taxonKey=key, basisOfRecord="LIVING_SPECIMEN", hasCoordinate=True, limit=20) # Get occurrences for a particular eventDate occurrences.search(taxonKey=key, eventDate="2013", limit=20) occurrences.search(taxonKey=key, year="2013", limit=20) occurrences.search(taxonKey=key, month="6", limit=20) # Get occurrences based on depth key = species.name_backbone(name='Salmo salar', kingdom='animals')['usageKey'] occurrences.search(taxonKey=key, depth="5", limit=20) # Get occurrences based on elevation key = species.name_backbone(name='Puma concolor', kingdom='animals')['usageKey'] occurrences.search(taxonKey=key, elevation=50, hasCoordinate=True, limit=20) # Get occurrences based on institutionCode occurrences.search(institutionCode="TLMF", limit=20) # Get occurrences based on collectionCode occurrences.search(collectionCode="Floristic Databases MV - Higher Plants", limit=20) # Get only those occurrences with spatial issues occurrences.search(taxonKey=key, hasGeospatialIssue=True, limit=20) # Search using a query string occurrences.search(q="kingfisher", limit=20) # Range queries ## See Detail for parameters that support range queries ### this is a range depth, with lower/upper limits in character string occurrences.search(depth='50,100') ## Range search with year occurrences.search(year='1999,2000', limit=20) ## Range search with latitude occurrences.search(decimalLatitude='29.59,29.6') # Search by specimen type status ## Look for possible values of the typeStatus parameter looking at the typestatus dataset occurrences.search(typeStatus = 'allotype') # Search by specimen record number ## This is the record number of the person/group that submitted the data, not GBIF's numbers ## You can see that many different groups have record number 1, so not super helpful occurrences.search(recordNumber = 1) # Search by last time interpreted: Date the record was last modified in GBIF ## The lastInterpreted parameter accepts ISO 8601 format dates, including ## yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. 
Range queries are accepted for lastInterpreted occurrences.search(lastInterpreted = '2014-04-01') # Search by continent ## One of africa, antarctica, asia, europe, north_america, oceania, or south_america occurrences.search(continent = 'south_america') occurrences.search(continent = 'africa') occurrences.search(continent = 'oceania') occurrences.search(continent = 'antarctica') # Search for occurrences with images occurrences.search(mediatype = 'StillImage') occurrences.search(mediatype = 'MovingImage') x = occurrences.search(mediatype = 'Sound') [z['media'] for z in x['results']] # Query based on issues occurrences.search(taxonKey=1, issue='DEPTH_UNLIKELY') occurrences.search(taxonKey=1, issue=['DEPTH_UNLIKELY','COORDINATE_ROUNDED']) # Show all records in the Arizona State Lichen Collection that cant be matched to the GBIF # backbone properly: occurrences.search(datasetKey='84c0e1a0-f762-11e1-a439-00145eb45e9a', issue=['TAXON_MATCH_NONE','TAXON_MATCH_HIGHERRANK']) # If you pass in an invalid polygon you get hopefully informative errors ### the WKT string is fine, but GBIF says bad polygon wkt = 'POLYGON((-178.59375 64.83258989321493,-165.9375 59.24622380205539, -147.3046875 59.065977905449806,-130.78125 51.04484764446178,-125.859375 36.70806354647625, -112.1484375 23.367471303759686,-105.1171875 16.093320185359257,-86.8359375 9.23767076398516, -82.96875 2.9485268155066175,-82.6171875 -14.812060061226388,-74.8828125 -18.849111862023985, -77.34375 -47.661687803329166,-84.375 -49.975955187343295,174.7265625 -50.649460483096114, 179.296875 -42.19189902447192,-176.8359375 -35.634976650677295,176.8359375 -31.835565983656227, 163.4765625 -6.528187613695323,152.578125 1.894796132058301,135.703125 4.702353722559447, 127.96875 15.077427674847987,127.96875 23.689804541429606,139.921875 32.06861069132688, 149.4140625 42.65416193033991,159.2578125 48.3160811030533,168.3984375 57.019804336633165, 178.2421875 59.95776046458139,-179.6484375 61.16708631440347,-178.59375 64.83258989321493))' occurrences.search(geometry = wkt) # Faceting ## return no occurrence records with limit=0 x = occurrences.search(facet = "country", limit = 0) x['facets'] ## also return occurrence records x = occurrences.search(facet = "establishmentMeans", limit = 10) x['facets'] x['results'] ## multiple facet variables x = occurrences.search(facet = ["country", "basisOfRecord"], limit = 10) x['results'] x['facets'] x['facets']['country'] x['facets']['basisOfRecord'] x['facets']['basisOfRecord']['count'] ## set a minimum facet count x = occurrences.search(facet = "country", facetMincount = 30000000L, limit = 0) x['facets'] ## paging per each faceted variable ### do so by passing in variables like "country" + "_facetLimit" = "country_facetLimit" ### or "country" + "_facetOffset" = "country_facetOffset" x = occurrences.search( facet = ["country", "basisOfRecord", "hasCoordinate"], country_facetLimit = 3, basisOfRecord_facetLimit = 6, limit = 0 ) x['facets'] # requests package options ## There's an acceptable set of requests options (['timeout', 'cookies', 'auth', ## 'allow_redirects', 'proxies', 'verify', 'stream', 'cert']) you can pass ## in via **kwargs, e.g., set a timeout. Default timeout set to 60 seconds. x = occurrences.search(timeout = 1)
pygbif/occurrences/search.py
search
livatras/pygbif
python
def search(taxonKey=None, repatriated=None, kingdomKey=None, phylumKey=None, classKey=None, orderKey=None, familyKey=None, genusKey=None, subgenusKey=None, scientificName=None, country=None, publishingCountry=None, hasCoordinate=None, typeStatus=None, recordNumber=None, lastInterpreted=None, continent=None, geometry=None, recordedBy=None, recordedByID=None, identifiedByID=None, basisOfRecord=None, datasetKey=None, eventDate=None, catalogNumber=None, year=None, month=None, decimalLatitude=None, decimalLongitude=None, elevation=None, depth=None, institutionCode=None, collectionCode=None, hasGeospatialIssue=None, issue=None, q=None, spellCheck=None, mediatype=None, limit=300, offset=0, establishmentMeans=None, facet=None, facetMincount=None, facetMultiselect=None, timeout=60, **kwargs): '\n Search GBIF occurrences\n\n :param taxonKey: [int] A GBIF occurrence identifier\n :param q: [str] Simple search parameter. The value for this parameter can be a simple word or a phrase.\n :param spellCheck: [bool] If ``True`` ask GBIF to check your spelling of the value passed to the ``search`` parameter.\n IMPORTANT: This only checks the input to the ``search`` parameter, and no others. Default: ``False``\n :param repatriated: [str] Searches for records whose publishing country is different to the country where the record was recorded in\n :param kingdomKey: [int] Kingdom classification key\n :param phylumKey: [int] Phylum classification key\n :param classKey: [int] Class classification key\n :param orderKey: [int] Order classification key\n :param familyKey: [int] Family classification key\n :param genusKey: [int] Genus classification key\n :param subgenusKey: [int] Subgenus classification key\n :param scientificName: [str] A scientific name from the GBIF backbone. All included and synonym taxa are included in the search.\n :param datasetKey: [str] The occurrence dataset key (a uuid)\n :param catalogNumber: [str] An identifier of any form assigned by the source within a physical collection or digital dataset for the record which may not unique, but should be fairly unique in combination with the institution and collection code.\n :param recordedBy: [str] The person who recorded the occurrence.\n :param recordedByID: [str] Identifier (e.g. ORCID) for the person who recorded the occurrence\n :param identifiedByID: [str] Identifier (e.g. ORCID) for the person who provided the taxonomic identification of the occurrence.\n :param collectionCode: [str] An identifier of any form assigned by the source to identify the physical collection or digital dataset uniquely within the text of an institution.\n :param institutionCode: [str] An identifier of any form assigned by the source to identify the institution the record belongs to. Not guaranteed to be que.\n :param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. 
See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2\n :param basisOfRecord: [str] Basis of record, as defined in our BasisOfRecord enum here http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are:\n\n - ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.\n - ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.\n - ``LIVING_SPECIMEN`` An occurrence record describing a living specimen.\n - ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.\n - ``MATERIAL_CITATION`` An occurrence record based on a reference to a scholarly publication.\n - ``OBSERVATION`` An occurrence record describing an observation.\n - ``OCCURRENCE`` An existence of an organism at a particular place and time. No more specific basis.\n - ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.\n\n :param eventDate: [date] Occurrence date in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or\n MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990``\n wouldn\'t work)\n :param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries,\n smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn\'t work)\n :param month: [int] The month of the year, starting with 1 for January. Supports range queries,\n smaller,larger (e.g., ``1,2``, whereas ``2,1`` wouldn\'t work)\n :param decimalLatitude: [float] Latitude in decimals between -90 and 90 based on WGS 84.\n Supports range queries, smaller,larger (e.g., ``25,30``, whereas ``30,25`` wouldn\'t work)\n :param decimalLongitude: [float] Longitude in decimals between -180 and 180 based on WGS 84.\n Supports range queries (e.g., ``-0.4,-0.2``, whereas ``-0.2,-0.4`` wouldn\'t work).\n :param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the\n country in which the occurrence was recorded.\n :param elevation: [int/str] Elevation in meters above sea level. Supports range queries, smaller,larger\n (e.g., ``5,30``, whereas ``30,5`` wouldn\'t work)\n :param depth: [int/str] Depth in meters relative to elevation. For example 10 meters below a\n lake surface with given elevation. Supports range queries, smaller,larger (e.g., ``5,30``,\n whereas ``30,5`` wouldn\'t work)\n :param geometry: [str] Searches for occurrences inside a polygon described in Well Known\n Text (WKT) format. A WKT shape written as either POINT, LINESTRING, LINEARRING\n POLYGON, or MULTIPOLYGON. Example of a polygon: ``((30.1 10.1, 20, 20 40, 40 40, 30.1 10.1))`` would be queried as http://bit.ly/1BzNwDq.\n Polygons must have counter-clockwise ordering of points.\n :param hasGeospatialIssue: [bool] Includes/excludes occurrence records which contain spatial\n issues (as determined in our record interpretation), i.e. ``hasGeospatialIssue=TRUE``\n returns only those records with spatial issues while ``hasGeospatialIssue=FALSE`` includes\n only records without spatial issues. The absence of this parameter returns any\n record with or without spatial issues.\n :param issue: [str] One or more of many possible issues with each occurrence record. See\n Details. Issues passed to this parameter filter results by the issue.\n :param hasCoordinate: [bool] Return only occurence records with lat/long data (``True``) or\n all records (``False``, default).\n :param typeStatus: [str] Type status of the specimen. One of many options. 
See ?typestatus\n :param recordNumber: [int] Number recorded by collector of the data, different from GBIF record\n number. See http://rs.tdwg.org/dwc/terms/#recordNumber} for more info\n :param lastInterpreted: [date] Date the record was last modified in GBIF, in ISO 8601 format:\n yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g.,\n ``1990,1991``, whereas ``1991,1990`` wouldn\'t work)\n :param continent: [str] Continent. One of ``africa``, ``antarctica``, ``asia``, ``europe``, ``north_america``\n (North America includes the Caribbean and reachies down and includes Panama), ``oceania``,\n or ``south_america``\n :param fields: [str] Default (``all``) returns all fields. ``minimal`` returns just taxon name,\n key, latitude, and longitude. Or specify each field you want returned by name, e.g.\n ``fields = c(\'name\',\'latitude\',\'elevation\')``.\n :param mediatype: [str] Media type. Default is ``NULL``, so no filtering on mediatype. Options:\n ``NULL``, ``MovingImage``, ``Sound``, and ``StillImage``\n :param limit: [int] Number of results to return. Default: ``300``\n :param offset: [int] Record to start at. Default: ``0``\n :param facet: [str] a character vector of length 1 or greater\n :param establishmentMeans: [str] EstablishmentMeans, possible values include: INTRODUCED,\n INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN\n :param facetMincount: [int] minimum number of records to be included in the faceting results\n :param facetMultiselect: [bool] Set to ``True`` to still return counts for values that are not currently\n filtered. See examples. Default: ``False``\n\n :return: A dictionary\n\n Usage::\n\n from pygbif import occurrences\n occurrences.search(taxonKey = 3329049)\n\n # Return 2 results, this is the default by the way\n occurrences.search(taxonKey=3329049, limit=2)\n\n # Instead of getting a taxon key first, you can search for a name directly\n # However, note that using this approach (with `scientificName="..."`)\n # you are getting synonyms too. The results for using `scientifcName` and\n # `taxonKey` parameters are the same in this case, but I wouldn\'t be surprised if for some\n # names they return different results\n occurrences.search(scientificName = \'Ursus americanus\')\n from pygbif import species\n key = species.name_backbone(name = \'Ursus americanus\', rank=\'species\')[\'usageKey\']\n occurrences.search(taxonKey = key)\n\n # Search by dataset key\n occurrences.search(datasetKey=\'7b5d6a48-f762-11e1-a439-00145eb45e9a\', limit=20)\n\n # Search by catalog number\n occurrences.search(catalogNumber="49366", limit=20)\n # occurrences.search(catalogNumber=["49366","Bird.27847588"], limit=20)\n\n # Use paging parameters (limit and offset) to page. 
Note the different results\n # for the two queries below.\n occurrences.search(datasetKey=\'7b5d6a48-f762-11e1-a439-00145eb45e9a\', offset=10, limit=5)\n occurrences.search(datasetKey=\'7b5d6a48-f762-11e1-a439-00145eb45e9a\', offset=20, limit=5)\n\n # Many dataset keys\n # occurrences.search(datasetKey=["50c9509d-22c7-4a22-a47d-8c48425ef4a7", "7b5d6a48-f762-11e1-a439-00145eb45e9a"], limit=20)\n\n # Search by collector name\n res = occurrences.search(recordedBy="smith", limit=20)\n [ x[\'recordedBy\'] for x in res[\'results\'] ]\n\n # Many collector names\n # occurrences.search(recordedBy=["smith","BJ Stacey"], limit=20)\n \n # recordedByID\n occurrences.search(recordedByID="https://orcid.org/0000-0003-1691-239X", limit = 3)\n\n # identifiedByID\n occurrences.search(identifiedByID="https://orcid.org/0000-0003-1691-239X", limit = 3)\n\n # Search for many species\n splist = [\'Cyanocitta stelleri\', \'Junco hyemalis\', \'Aix sponsa\']\n keys = [ species.name_suggest(x)[0][\'key\'] for x in splist ]\n out = [ occurrences.search(taxonKey = x, limit=1) for x in keys ]\n [ x[\'results\'][0][\'speciesKey\'] for x in out ]\n\n # Search - q parameter\n occurrences.search(q = "kingfisher", limit=20)\n ## spell check - only works with the `search` parameter\n ### spelled correctly - same result as above call\n occurrences.search(q = "kingfisher", limit=20, spellCheck = True)\n ### spelled incorrectly - stops with suggested spelling\n occurrences.search(q = "kajsdkla", limit=20, spellCheck = True)\n ### spelled incorrectly - stops with many suggested spellings\n ### and number of results for each\n occurrences.search(q = "helir", limit=20, spellCheck = True)\n\n # Search on latitidue and longitude\n occurrences.search(decimalLatitude=50, decimalLongitude=10, limit=2)\n\n # Search on a bounding box\n ## in well known text format\n occurrences.search(geometry=\'POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))\', limit=20)\n from pygbif import species\n key = species.name_suggest(q=\'Aesculus hippocastanum\')[0][\'key\']\n occurrences.search(taxonKey=key, geometry=\'POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))\', limit=20)\n ## multipolygon\n wkt = \'MULTIPOLYGON(((-123 38, -123 43, -116 43, -116 38, -123 38)),((-97 41, -97 45, -93 45, -93 41, -97 41)))\'\n occurrences.search(geometry = wkt, limit = 20)\n\n # Search on country\n occurrences.search(country=\'US\', limit=20)\n occurrences.search(country=\'FR\', limit=20)\n occurrences.search(country=\'DE\', limit=20)\n\n # Get only occurrences with lat/long data\n occurrences.search(taxonKey=key, hasCoordinate=True, limit=20)\n\n # Get only occurrences that were recorded as living specimens\n occurrences.search(taxonKey=key, basisOfRecord="LIVING_SPECIMEN", hasCoordinate=True, limit=20)\n\n # Get occurrences for a particular eventDate\n occurrences.search(taxonKey=key, eventDate="2013", limit=20)\n occurrences.search(taxonKey=key, year="2013", limit=20)\n occurrences.search(taxonKey=key, month="6", limit=20)\n\n # Get occurrences based on depth\n key = species.name_backbone(name=\'Salmo salar\', kingdom=\'animals\')[\'usageKey\']\n occurrences.search(taxonKey=key, depth="5", limit=20)\n\n # Get occurrences based on elevation\n key = species.name_backbone(name=\'Puma concolor\', kingdom=\'animals\')[\'usageKey\']\n occurrences.search(taxonKey=key, elevation=50, hasCoordinate=True, limit=20)\n\n # Get occurrences based on institutionCode\n occurrences.search(institutionCode="TLMF", limit=20)\n\n # Get occurrences based on collectionCode\n 
occurrences.search(collectionCode="Floristic Databases MV - Higher Plants", limit=20)\n\n # Get only those occurrences with spatial issues\n occurrences.search(taxonKey=key, hasGeospatialIssue=True, limit=20)\n\n # Search using a query string\n occurrences.search(q="kingfisher", limit=20)\n\n # Range queries\n ## See Detail for parameters that support range queries\n ### this is a range depth, with lower/upper limits in character string\n occurrences.search(depth=\'50,100\')\n\n ## Range search with year\n occurrences.search(year=\'1999,2000\', limit=20)\n\n ## Range search with latitude\n occurrences.search(decimalLatitude=\'29.59,29.6\')\n\n # Search by specimen type status\n ## Look for possible values of the typeStatus parameter looking at the typestatus dataset\n occurrences.search(typeStatus = \'allotype\')\n\n # Search by specimen record number\n ## This is the record number of the person/group that submitted the data, not GBIF\'s numbers\n ## You can see that many different groups have record number 1, so not super helpful\n occurrences.search(recordNumber = 1)\n\n # Search by last time interpreted: Date the record was last modified in GBIF\n ## The lastInterpreted parameter accepts ISO 8601 format dates, including\n ## yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Range queries are accepted for lastInterpreted\n occurrences.search(lastInterpreted = \'2014-04-01\')\n\n # Search by continent\n ## One of africa, antarctica, asia, europe, north_america, oceania, or south_america\n occurrences.search(continent = \'south_america\')\n occurrences.search(continent = \'africa\')\n occurrences.search(continent = \'oceania\')\n occurrences.search(continent = \'antarctica\')\n\n # Search for occurrences with images\n occurrences.search(mediatype = \'StillImage\')\n occurrences.search(mediatype = \'MovingImage\')\n x = occurrences.search(mediatype = \'Sound\')\n [z[\'media\'] for z in x[\'results\']]\n\n # Query based on issues\n occurrences.search(taxonKey=1, issue=\'DEPTH_UNLIKELY\')\n occurrences.search(taxonKey=1, issue=[\'DEPTH_UNLIKELY\',\'COORDINATE_ROUNDED\'])\n # Show all records in the Arizona State Lichen Collection that cant be matched to the GBIF\n # backbone properly:\n occurrences.search(datasetKey=\'84c0e1a0-f762-11e1-a439-00145eb45e9a\', issue=[\'TAXON_MATCH_NONE\',\'TAXON_MATCH_HIGHERRANK\'])\n\n # If you pass in an invalid polygon you get hopefully informative errors\n ### the WKT string is fine, but GBIF says bad polygon\n wkt = \'POLYGON((-178.59375 64.83258989321493,-165.9375 59.24622380205539,\n -147.3046875 59.065977905449806,-130.78125 51.04484764446178,-125.859375 36.70806354647625,\n -112.1484375 23.367471303759686,-105.1171875 16.093320185359257,-86.8359375 9.23767076398516,\n -82.96875 2.9485268155066175,-82.6171875 -14.812060061226388,-74.8828125 -18.849111862023985,\n -77.34375 -47.661687803329166,-84.375 -49.975955187343295,174.7265625 -50.649460483096114,\n 179.296875 -42.19189902447192,-176.8359375 -35.634976650677295,176.8359375 -31.835565983656227,\n 163.4765625 -6.528187613695323,152.578125 1.894796132058301,135.703125 4.702353722559447,\n 127.96875 15.077427674847987,127.96875 23.689804541429606,139.921875 32.06861069132688,\n 149.4140625 42.65416193033991,159.2578125 48.3160811030533,168.3984375 57.019804336633165,\n 178.2421875 59.95776046458139,-179.6484375 61.16708631440347,-178.59375 64.83258989321493))\'\n occurrences.search(geometry = wkt)\n\n # Faceting\n ## return no occurrence records with limit=0\n x = occurrences.search(facet = "country", limit = 0)\n 
x[\'facets\']\n\n ## also return occurrence records\n x = occurrences.search(facet = "establishmentMeans", limit = 10)\n x[\'facets\']\n x[\'results\']\n\n ## multiple facet variables\n x = occurrences.search(facet = ["country", "basisOfRecord"], limit = 10)\n x[\'results\']\n x[\'facets\']\n x[\'facets\'][\'country\']\n x[\'facets\'][\'basisOfRecord\']\n x[\'facets\'][\'basisOfRecord\'][\'count\']\n\n ## set a minimum facet count\n x = occurrences.search(facet = "country", facetMincount = 30000000L, limit = 0)\n x[\'facets\']\n\n ## paging per each faceted variable\n ### do so by passing in variables like "country" + "_facetLimit" = "country_facetLimit"\n ### or "country" + "_facetOffset" = "country_facetOffset"\n x = occurrences.search(\n facet = ["country", "basisOfRecord", "hasCoordinate"],\n country_facetLimit = 3,\n basisOfRecord_facetLimit = 6,\n limit = 0\n )\n x[\'facets\']\n\n # requests package options\n ## There\'s an acceptable set of requests options ([\'timeout\', \'cookies\', \'auth\',\n ## \'allow_redirects\', \'proxies\', \'verify\', \'stream\', \'cert\']) you can pass\n ## in via **kwargs, e.g., set a timeout. Default timeout set to 60 seconds.\n x = occurrences.search(timeout = 1)\n ' url = (gbif_baseurl + 'occurrence/search') args = {'taxonKey': taxonKey, 'repatriated': repatriated, 'kingdomKey': kingdomKey, 'phylumKey': phylumKey, 'classKey': classKey, 'orderKey': orderKey, 'familyKey': familyKey, 'genusKey': genusKey, 'subgenusKey': subgenusKey, 'scientificName': scientificName, 'country': country, 'publishingCountry': publishingCountry, 'hasCoordinate': bool2str(hasCoordinate), 'typeStatus': typeStatus, 'recordNumber': recordNumber, 'lastInterpreted': lastInterpreted, 'continent': continent, 'geometry': geometry, 'recordedBy': recordedBy, 'recordedByID': recordedByID, 'identifiedByID': identifiedByID, 'basisOfRecord': basisOfRecord, 'datasetKey': datasetKey, 'eventDate': eventDate, 'catalogNumber': catalogNumber, 'year': year, 'month': month, 'decimalLatitude': decimalLatitude, 'decimalLongitude': decimalLongitude, 'elevation': elevation, 'depth': depth, 'institutionCode': institutionCode, 'collectionCode': collectionCode, 'hasGeospatialIssue': bool2str(hasGeospatialIssue), 'issue': issue, 'q': q, 'spellCheck': bool2str(spellCheck), 'mediatype': mediatype, 'limit': limit, 'offset': offset, 'establishmentMeans': establishmentMeans, 'facetMincount': facetMincount, 'facet': facet, 'facetMultiselect': bool2str(facetMultiselect)} gbif_kwargs = {key: kwargs[key] for key in kwargs if (key not in requests_argset)} if (gbif_kwargs is not None): xx = dict(zip([re.sub('_', '.', x) for x in gbif_kwargs.keys()], gbif_kwargs.values())) args.update(xx) kwargs = {key: kwargs[key] for key in kwargs if (key in requests_argset)} out = gbif_GET(url, args, **kwargs) return out
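The record above is pygbif's occurrences.search, whose docstring already carries most of the usage surface. As a small complement, here is a minimal paging sketch, assuming pygbif is installed and the GBIF API is reachable; the taxonKey reuses the docstring's own example, and the endOfRecords flag in the response is an assumption about the GBIF payload rather than something stated in the record.

from pygbif import occurrences

def fetch_occurrences(taxon_key, wanted=900, page_size=300):
    # Page through results with the limit/offset parameters described above.
    records = []
    offset = 0
    while len(records) < wanted:
        resp = occurrences.search(taxonKey=taxon_key, limit=page_size, offset=offset)
        records.extend(resp["results"])
        if resp.get("endOfRecords") or not resp["results"]:
            break  # no further pages
        offset += page_size
    return records[:wanted]

hits = fetch_occurrences(3329049, wanted=600)
print(len(hits), "records fetched")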
def __init__(self, id=None, data=None): '\n DestinationIdSchema - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n ' self.swagger_types = {'id': 'str', 'data': 'DestinationSchema'} self.attribute_map = {'id': 'id', 'data': 'data'} self._id = id self._data = data
-1,036,892,525,717,189,400
DestinationIdSchema - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition.
rustici_software_cloud_v2/models/destination_id_schema.py
__init__
ryanhope2/scormcloud-api-v2-client-python
python
def __init__(self, id=None, data=None): '\n DestinationIdSchema - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n ' self.swagger_types = {'id': 'str', 'data': 'DestinationSchema'} self.attribute_map = {'id': 'id', 'data': 'data'} self._id = id self._data = data
@property def id(self): '\n Gets the id of this DestinationIdSchema.\n \n\n :return: The id of this DestinationIdSchema.\n :rtype: str\n ' return self._id
-4,756,714,808,268,342,000
Gets the id of this DestinationIdSchema. :return: The id of this DestinationIdSchema. :rtype: str
rustici_software_cloud_v2/models/destination_id_schema.py
id
ryanhope2/scormcloud-api-v2-client-python
python
@property def id(self): '\n Gets the id of this DestinationIdSchema.\n \n\n :return: The id of this DestinationIdSchema.\n :rtype: str\n ' return self._id
@id.setter def id(self, id): '\n Sets the id of this DestinationIdSchema.\n \n\n :param id: The id of this DestinationIdSchema.\n :type: str\n ' self._id = id
-2,075,493,263,701,627,100
Sets the id of this DestinationIdSchema. :param id: The id of this DestinationIdSchema. :type: str
rustici_software_cloud_v2/models/destination_id_schema.py
id
ryanhope2/scormcloud-api-v2-client-python
python
@id.setter def id(self, id): '\n Sets the id of this DestinationIdSchema.\n \n\n :param id: The id of this DestinationIdSchema.\n :type: str\n ' self._id = id
@property def data(self): '\n Gets the data of this DestinationIdSchema.\n\n :return: The data of this DestinationIdSchema.\n :rtype: DestinationSchema\n ' return self._data
6,491,617,717,676,747,000
Gets the data of this DestinationIdSchema. :return: The data of this DestinationIdSchema. :rtype: DestinationSchema
rustici_software_cloud_v2/models/destination_id_schema.py
data
ryanhope2/scormcloud-api-v2-client-python
python
@property def data(self): '\n Gets the data of this DestinationIdSchema.\n\n :return: The data of this DestinationIdSchema.\n :rtype: DestinationSchema\n ' return self._data
@data.setter def data(self, data): '\n Sets the data of this DestinationIdSchema.\n\n :param data: The data of this DestinationIdSchema.\n :type: DestinationSchema\n ' self._data = data
-7,656,567,686,224,259,000
Sets the data of this DestinationIdSchema. :param data: The data of this DestinationIdSchema. :type: DestinationSchema
rustici_software_cloud_v2/models/destination_id_schema.py
data
ryanhope2/scormcloud-api-v2-client-python
python
@data.setter def data(self, data): '\n Sets the data of this DestinationIdSchema.\n\n :param data: The data of this DestinationIdSchema.\n :type: DestinationSchema\n ' self._data = data
def to_dict(self): '\n Returns the model properties as a dict\n ' result = {} for (attr, _) in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
2,191,974,537,531,847,000
Returns the model properties as a dict
rustici_software_cloud_v2/models/destination_id_schema.py
to_dict
ryanhope2/scormcloud-api-v2-client-python
python
def to_dict(self): '\n \n ' result = {} for (attr, _) in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
def to_str(self): '\n Returns the string representation of the model\n ' return pformat(self.to_dict())
-3,531,024,894,346,511,000
Returns the string representation of the model
rustici_software_cloud_v2/models/destination_id_schema.py
to_str
ryanhope2/scormcloud-api-v2-client-python
python
def to_str(self): '\n \n ' return pformat(self.to_dict())
def __repr__(self): '\n For `print` and `pprint`\n ' return self.to_str()
5,853,962,500,611,353,000
For `print` and `pprint`
rustici_software_cloud_v2/models/destination_id_schema.py
__repr__
ryanhope2/scormcloud-api-v2-client-python
python
def __repr__(self): '\n \n ' return self.to_str()
def __eq__(self, other): '\n Returns true if both objects are equal\n ' if (not isinstance(other, DestinationIdSchema)): return False return (self.__dict__ == other.__dict__)
5,572,630,834,681,360,000
Returns true if both objects are equal
rustici_software_cloud_v2/models/destination_id_schema.py
__eq__
ryanhope2/scormcloud-api-v2-client-python
python
def __eq__(self, other): '\n \n ' if (not isinstance(other, DestinationIdSchema)): return False return (self.__dict__ == other.__dict__)
def __ne__(self, other): '\n Returns true if both objects are not equal\n ' return (not (self == other))
3,600,423,175,817,510,400
Returns true if both objects are not equal
rustici_software_cloud_v2/models/destination_id_schema.py
__ne__
ryanhope2/scormcloud-api-v2-client-python
python
def __ne__(self, other): '\n \n ' return (not (self == other))
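Taken together, the records above define the Swagger-generated DestinationIdSchema model: constructor, id/data properties, to_dict, to_str, __repr__, and the equality pair. A short usage sketch, assuming the generated rustici_software_cloud_v2 package is installed; the data attribute is left as None because the DestinationSchema constructor is not shown in these records.

from rustici_software_cloud_v2.models.destination_id_schema import DestinationIdSchema

a = DestinationIdSchema(id="dest-1")            # 'data' defaults to None
b = DestinationIdSchema()
b.id = "dest-1"                                 # property setter shown above

print(a.to_dict())                              # {'id': 'dest-1', 'data': None}
print(a == b)                                   # True: __eq__ compares __dict__
print(a != DestinationIdSchema(id="dest-2"))    # True via __ne__
print(repr(a))                                  # __repr__ delegates to to_str()/pformat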
def load_data(data_dir): 'Load the train/val data.' data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'val': transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])} image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} class_names = image_datasets['train'].classes return (dataloaders, dataset_sizes, class_names)
2,688,968,184,329,373,000
Load the train/val data.
azure-ml-pipelines/pytorch/training-folder/pytorch_train.py
load_data
hudua/azureml
python
def load_data(data_dir): data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'val': transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])} image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} class_names = image_datasets['train'].classes return (dataloaders, dataset_sizes, class_names)
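load_data above follows torchvision's ImageFolder convention, so it expects train/ and val/ sub-folders each containing one directory per class. A hedged call sketch, assuming the function is imported from the training script and that a dataset exists at the hypothetical path shown.

# Expected layout (hypothetical path):
#   data/hymenoptera_data/train/<class_name>/*.jpg
#   data/hymenoptera_data/val/<class_name>/*.jpg
dataloaders, dataset_sizes, class_names = load_data("data/hymenoptera_data")
print(class_names, dataset_sizes)                 # e.g. ['ants', 'bees'] {'train': ..., 'val': ...}
inputs, labels = next(iter(dataloaders["train"]))
print(inputs.shape)                               # torch.Size([4, 3, 224, 224]) with batch_size=4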
def train_model(model, criterion, optimizer, scheduler, num_epochs, data_dir): 'Train the model.' (dataloaders, dataset_sizes, class_names) = load_data(data_dir) device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu')) since = time.time() best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, (num_epochs - 1))) print(('-' * 10)) for phase in ['train', 'val']: if (phase == 'train'): scheduler.step() model.train() else: model.eval() running_loss = 0.0 running_corrects = 0 for (inputs, labels) in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) optimizer.zero_grad() with torch.set_grad_enabled((phase == 'train')): outputs = model(inputs) (_, preds) = torch.max(outputs, 1) loss = criterion(outputs, labels) if (phase == 'train'): loss.backward() optimizer.step() running_loss += (loss.item() * inputs.size(0)) running_corrects += torch.sum((preds == labels.data)) epoch_loss = (running_loss / dataset_sizes[phase]) epoch_acc = (running_corrects.double() / dataset_sizes[phase]) print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) if ((phase == 'val') and (epoch_acc > best_acc)): best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) run.log('best_val_acc', np.float(best_acc)) print() time_elapsed = (time.time() - since) print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60))) print('Best val Acc: {:4f}'.format(best_acc)) model.load_state_dict(best_model_wts) return model
-1,306,633,351,554,523,000
Train the model.
azure-ml-pipelines/pytorch/training-folder/pytorch_train.py
train_model
hudua/azureml
python
def train_model(model, criterion, optimizer, scheduler, num_epochs, data_dir): (dataloaders, dataset_sizes, class_names) = load_data(data_dir) device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu')) since = time.time() best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, (num_epochs - 1))) print(('-' * 10)) for phase in ['train', 'val']: if (phase == 'train'): scheduler.step() model.train() else: model.eval() running_loss = 0.0 running_corrects = 0 for (inputs, labels) in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) optimizer.zero_grad() with torch.set_grad_enabled((phase == 'train')): outputs = model(inputs) (_, preds) = torch.max(outputs, 1) loss = criterion(outputs, labels) if (phase == 'train'): loss.backward() optimizer.step() running_loss += (loss.item() * inputs.size(0)) running_corrects += torch.sum((preds == labels.data)) epoch_loss = (running_loss / dataset_sizes[phase]) epoch_acc = (running_corrects.double() / dataset_sizes[phase]) print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) if ((phase == 'val') and (epoch_acc > best_acc)): best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) run.log('best_val_acc', np.float(best_acc)) print() time_elapsed = (time.time() - since) print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60))) print('Best val Acc: {:4f}'.format(best_acc)) model.load_state_dict(best_model_wts) return model
def fine_tune_model(num_epochs, data_dir, learning_rate, momentum): 'Load a pretrained model and reset the final fully connected layer.' run.log('lr', np.float(learning_rate)) run.log('momentum', np.float(momentum)) model_ft = models.resnet18(pretrained=True) num_ftrs = model_ft.fc.in_features model_ft.fc = nn.Linear(num_ftrs, 2) device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu')) model_ft = model_ft.to(device) criterion = nn.CrossEntropyLoss() optimizer_ft = optim.SGD(model_ft.parameters(), lr=learning_rate, momentum=momentum) exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1) model = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs, data_dir) return model
7,668,973,453,640,657,000
Load a pretrained model and reset the final fully connected layer.
azure-ml-pipelines/pytorch/training-folder/pytorch_train.py
fine_tune_model
hudua/azureml
python
def fine_tune_model(num_epochs, data_dir, learning_rate, momentum): run.log('lr', np.float(learning_rate)) run.log('momentum', np.float(momentum)) model_ft = models.resnet18(pretrained=True) num_ftrs = model_ft.fc.in_features model_ft.fc = nn.Linear(num_ftrs, 2) device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu')) model_ft = model_ft.to(device) criterion = nn.CrossEntropyLoss() optimizer_ft = optim.SGD(model_ft.parameters(), lr=learning_rate, momentum=momentum) exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1) model = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs, data_dir) return model
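fine_tune_model ties the previous two records together: it swaps the ResNet-18 head for a two-class layer, builds the optimizer and scheduler, then hands everything to train_model. A hypothetical invocation; in the original script `run` is an Azure ML Run handle, and the data directory below is a placeholder for an ImageFolder-style dataset.

import os
import torch

model = fine_tune_model(
    num_epochs=25,
    data_dir="data/hymenoptera_data",   # hypothetical dataset path
    learning_rate=0.001,
    momentum=0.9,
)
os.makedirs("outputs", exist_ok=True)
torch.save(model.state_dict(), "outputs/model.pt")  # persist the best weights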
def load_model(model_path: str) -> None: '\n 模型加载\n @param model_path: 模型文件夹路径\n @return:\n ' global kge_model, entity2id, id2entity, relation2id, all_true_triples, args args = DotMap(json.load(codecs.open(os.path.join(model_path, 'config.json'), 'r', encoding='utf-8'))) (entity2id, id2entity, relation2id, id2relation, all_true_triples) = get_entity_relation_with_id(args.data_path) kge_model = KGEModel(model_name=args.model, nentity=args.nentity, nrelation=args.nrelation, hidden_dim=args.hidden_dim, gamma=args.gamma, double_entity_embedding=args.double_entity_embedding, double_relation_embedding=args.double_relation_embedding) if args.cuda: kge_model = kge_model.cuda() checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint')) kge_model.load_state_dict(checkpoint['model_state_dict'])
-1,381,956,851,112,199,000
Model loading @param model_path: path to the model folder @return:
project/knowledge_graph_embedding/project_distmult_rotate_transe/service.py
load_model
Jianhan-Liu/solid_ai_waddle
python
def load_model(model_path: str) -> None: '\n 模型加载\n @param model_path: 模型文件夹路径\n @return:\n ' global kge_model, entity2id, id2entity, relation2id, all_true_triples, args args = DotMap(json.load(codecs.open(os.path.join(model_path, 'config.json'), 'r', encoding='utf-8'))) (entity2id, id2entity, relation2id, id2relation, all_true_triples) = get_entity_relation_with_id(args.data_path) kge_model = KGEModel(model_name=args.model, nentity=args.nentity, nrelation=args.nrelation, hidden_dim=args.hidden_dim, gamma=args.gamma, double_entity_embedding=args.double_entity_embedding, double_relation_embedding=args.double_relation_embedding) if args.cuda: kge_model = kge_model.cuda() checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint')) kge_model.load_state_dict(checkpoint['model_state_dict'])
def inference(target_triple: str) -> Dict: "\n 推理函数\n @param target_triple: 目标需预测三元组:'头实体 关系 尾实体'\n @return: 头尾实体的10个预测结果\n " if (kge_model is None): return {'预测结果': '提醒:模型未加载'} try: target_triple = target_triple.split() head = entity2id[target_triple[0]] tail = entity2id[target_triple[2]] relation = relation2id[target_triple[1]] target_triple = [(head, relation, tail)] except KeyError as e: return {'预测结果': f'实体或者关系 <{e}> 不存在,请确保输入的实体或者关系已存在。'} prediction = kge_model.test_step(kge_model, target_triple, all_true_triples, args, True) head_entity_prediction = [id2entity[str(idx)] for idx in prediction['head_predict']] tail_entity_prediction = [id2entity[str(idx)] for idx in prediction['tail_predict']] result = {'头实体预测结果': head_entity_prediction, '尾实体预测结果': tail_entity_prediction} return result
-4,453,249,678,169,298,400
Inference function @param target_triple: target triple to predict, given as 'head_entity relation tail_entity' @return: top-10 predictions for the head and tail entities
project/knowledge_graph_embedding/project_distmult_rotate_transe/service.py
inference
Jianhan-Liu/solid_ai_waddle
python
def inference(target_triple: str) -> Dict: "\n 推理函数\n @param target_triple: 目标需预测三元组:'头实体 关系 尾实体'\n @return: 头尾实体的10个预测结果\n " if (kge_model is None): return {'预测结果': '提醒:模型未加载'} try: target_triple = target_triple.split() head = entity2id[target_triple[0]] tail = entity2id[target_triple[2]] relation = relation2id[target_triple[1]] target_triple = [(head, relation, tail)] except KeyError as e: return {'预测结果': f'实体或者关系 <{e}> 不存在,请确保输入的实体或者关系已存在。'} prediction = kge_model.test_step(kge_model, target_triple, all_true_triples, args, True) head_entity_prediction = [id2entity[str(idx)] for idx in prediction['head_predict']] tail_entity_prediction = [id2entity[str(idx)] for idx in prediction['tail_predict']] result = {'头实体预测结果': head_entity_prediction, '尾实体预测结果': tail_entity_prediction} return result
def _looks_like_asgi3(app): '\n Try to figure out if an application object supports ASGI3.\n\n This is how uvicorn figures out the application version as well.\n ' if inspect.isclass(app): return hasattr(app, '__await__') elif inspect.isfunction(app): return asyncio.iscoroutinefunction(app) else: call = getattr(app, '__call__', None) return asyncio.iscoroutinefunction(call)
-1,719,280,184,835,323,600
Try to figure out if an application object supports ASGI3. This is how uvicorn figures out the application version as well.
sentry_sdk/integrations/asgi.py
_looks_like_asgi3
cuenca-mx/sentry-python
python
def _looks_like_asgi3(app): '\n Try to figure out if an application object supports ASGI3.\n\n This is how uvicorn figures out the application version as well.\n ' if inspect.isclass(app): return hasattr(app, '__await__') elif inspect.isfunction(app): return asyncio.iscoroutinefunction(app) else: call = getattr(app, '__call__', None) return asyncio.iscoroutinefunction(call)
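_looks_like_asgi3 distinguishes ASGI3 apps (a single awaitable callable) from ASGI2 factories (a plain callable returning a coroutine). A self-contained sketch that mirrors the record's logic on two hypothetical apps.

import asyncio
import inspect

def _looks_like_asgi3(app):
    # same checks as the record above
    if inspect.isclass(app):
        return hasattr(app, "__await__")
    elif inspect.isfunction(app):
        return asyncio.iscoroutinefunction(app)
    call = getattr(app, "__call__", None)
    return asyncio.iscoroutinefunction(call)

async def asgi3_app(scope, receive, send):   # single coroutine callable -> ASGI3
    ...

def asgi2_app(scope):                        # factory returning a coroutine -> ASGI2
    async def instance(receive, send):
        ...
    return instance

print(_looks_like_asgi3(asgi3_app))   # True
print(_looks_like_asgi3(asgi2_app))   # False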
def __init__(self, app, unsafe_context_data=False): '\n Instrument an ASGI application with Sentry. Provides HTTP/websocket\n data to sent events and basic handling for exceptions bubbling up\n through the middleware.\n\n :param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.\n ' if ((not unsafe_context_data) and (not HAS_REAL_CONTEXTVARS)): raise RuntimeError(('The ASGI middleware for Sentry requires Python 3.7+ or the aiocontextvars package.' + CONTEXTVARS_ERROR_MESSAGE)) self.app = app if _looks_like_asgi3(app): self.__call__ = self._run_asgi3 else: self.__call__ = self._run_asgi2
4,878,474,847,215,512,000
Instrument an ASGI application with Sentry. Provides HTTP/websocket data to sent events and basic handling for exceptions bubbling up through the middleware. :param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.
sentry_sdk/integrations/asgi.py
__init__
cuenca-mx/sentry-python
python
def __init__(self, app, unsafe_context_data=False): '\n Instrument an ASGI application with Sentry. Provides HTTP/websocket\n data to sent events and basic handling for exceptions bubbling up\n through the middleware.\n\n :param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.\n ' if ((not unsafe_context_data) and (not HAS_REAL_CONTEXTVARS)): raise RuntimeError(('The ASGI middleware for Sentry requires Python 3.7+ or the aiocontextvars package.' + CONTEXTVARS_ERROR_MESSAGE)) self.app = app if _looks_like_asgi3(app): self.__call__ = self._run_asgi3 else: self.__call__ = self._run_asgi2
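In sentry_sdk this constructor belongs to the ASGI middleware class in sentry_sdk.integrations.asgi; the class name is not shown in the record, so SentryAsgiMiddleware below is an assumption, and the DSN is a placeholder. A minimal wrapping sketch with a bare ASGI3 app.

import sentry_sdk
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware  # assumed class name

sentry_sdk.init(dsn="https://publicKey@o0.ingest.sentry.io/0")  # placeholder DSN

async def app(scope, receive, send):   # minimal ASGI3 app
    await send({"type": "http.response.start", "status": 200, "headers": []})
    await send({"type": "http.response.body", "body": b"ok"})

app = SentryAsgiMiddleware(app)        # __init__ above selects the ASGI3 code path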
def _get_url(self, scope, default_scheme, host): '\n Extract URL from the ASGI scope, without also including the querystring.\n ' scheme = scope.get('scheme', default_scheme) server = scope.get('server', None) path = (scope.get('root_path', '') + scope.get('path', '')) if host: return ('%s://%s%s' % (scheme, host, path)) if (server is not None): (host, port) = server default_port = {'http': 80, 'https': 443, 'ws': 80, 'wss': 443}[scheme] if (port != default_port): return ('%s://%s:%s%s' % (scheme, host, port, path)) return ('%s://%s%s' % (scheme, host, path)) return path
456,696,316,022,871,940
Extract URL from the ASGI scope, without also including the querystring.
sentry_sdk/integrations/asgi.py
_get_url
cuenca-mx/sentry-python
python
def _get_url(self, scope, default_scheme, host): '\n \n ' scheme = scope.get('scheme', default_scheme) server = scope.get('server', None) path = (scope.get('root_path', ) + scope.get('path', )) if host: return ('%s://%s%s' % (scheme, host, path)) if (server is not None): (host, port) = server default_port = {'http': 80, 'https': 443, 'ws': 80, 'wss': 443}[scheme] if (port != default_port): return ('%s://%s:%s%s' % (scheme, host, port, path)) return ('%s://%s%s' % (scheme, host, path)) return path
def _get_query(self, scope): '\n Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.\n ' qs = scope.get('query_string') if (not qs): return None return urllib.parse.unquote(qs.decode('latin-1'))
-3,749,750,348,190,675,000
Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.
sentry_sdk/integrations/asgi.py
_get_query
cuenca-mx/sentry-python
python
def _get_query(self, scope): '\n \n ' qs = scope.get('query_string') if (not qs): return None return urllib.parse.unquote(qs.decode('latin-1'))
def _get_headers(self, scope): '\n Extract headers from the ASGI scope, in the format that the Sentry protocol expects.\n ' headers = {} for (raw_key, raw_value) in scope['headers']: key = raw_key.decode('latin-1') value = raw_value.decode('latin-1') if (key in headers): headers[key] = ((headers[key] + ', ') + value) else: headers[key] = value return headers
-2,763,036,242,865,239,000
Extract headers from the ASGI scope, in the format that the Sentry protocol expects.
sentry_sdk/integrations/asgi.py
_get_headers
cuenca-mx/sentry-python
python
def _get_headers(self, scope): '\n \n ' headers = {} for (raw_key, raw_value) in scope['headers']: key = raw_key.decode('latin-1') value = raw_value.decode('latin-1') if (key in headers): headers[key] = ((headers[key] + ', ') + value) else: headers[key] = value return headers
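The three helpers above (_get_url, _get_query, _get_headers) only read plain fields out of the ASGI scope dict. A standalone sketch with a hand-built scope, inlining the same query and header parsing so it runs without the middleware class.

import urllib.parse

scope = {
    "type": "http",
    "scheme": "https",
    "server": ("example.org", 443),
    "root_path": "/api",
    "path": "/users",
    "query_string": b"q=kingfisher&limit=20",
    "headers": [(b"host", b"example.org"), (b"x-token", b"abc"), (b"x-token", b"def")],
}

# _get_url would yield 'https://example.org/api/users' (the default port is dropped).
query = urllib.parse.unquote(scope["query_string"].decode("latin-1"))

headers = {}
for raw_key, raw_value in scope["headers"]:
    key, value = raw_key.decode("latin-1"), raw_value.decode("latin-1")
    headers[key] = f"{headers[key]}, {value}" if key in headers else value  # duplicates joined with ', '

print(query)     # q=kingfisher&limit=20
print(headers)   # {'host': 'example.org', 'x-token': 'abc, def'}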
def text2phone(text, language): '\n Convert graphemes to phonemes.\n ' seperator = phonemizer.separator.Separator(' |', '', '|') punctuations = re.findall(PHONEME_PUNCTUATION_PATTERN, text) if (version.parse(phonemizer.__version__) < version.parse('2.1')): ph = phonemize(text, separator=seperator, strip=False, njobs=1, backend='espeak', language=language) ph = ph[:(- 1)].strip() if punctuations: if (text[(- 1)] == punctuations[(- 1)]): for punct in punctuations[:(- 1)]: ph = ph.replace('| |\n', (('|' + punct) + '| |'), 1) ph = (ph + punctuations[(- 1)]) else: for punct in punctuations: ph = ph.replace('| |\n', (('|' + punct) + '| |'), 1) elif (version.parse(phonemizer.__version__) >= version.parse('2.1')): ph = phonemize(text, separator=seperator, strip=False, njobs=1, backend='espeak', language=language, preserve_punctuation=True) if punctuations: for punctuation in punctuations: ph = ph.replace(f'| |{punctuation} ', f'|{punctuation}| |').replace(f'| |{punctuation}', f'|{punctuation}| |') ph = ph[:(- 3)] else: raise RuntimeError(" [!] Use 'phonemizer' version 2.1 or older.") return ph
-8,971,153,413,979,716,000
Convert graphemes to phonemes.
utils/text/__init__.py
text2phone
DanBmh/TTS
python
def text2phone(text, language): '\n \n ' seperator = phonemizer.separator.Separator(' |', , '|') punctuations = re.findall(PHONEME_PUNCTUATION_PATTERN, text) if (version.parse(phonemizer.__version__) < version.parse('2.1')): ph = phonemize(text, separator=seperator, strip=False, njobs=1, backend='espeak', language=language) ph = ph[:(- 1)].strip() if punctuations: if (text[(- 1)] == punctuations[(- 1)]): for punct in punctuations[:(- 1)]: ph = ph.replace('| |\n', (('|' + punct) + '| |'), 1) ph = (ph + punctuations[(- 1)]) else: for punct in punctuations: ph = ph.replace('| |\n', (('|' + punct) + '| |'), 1) elif (version.parse(phonemizer.__version__) >= version.parse('2.1')): ph = phonemize(text, separator=seperator, strip=False, njobs=1, backend='espeak', language=language, preserve_punctuation=True) if punctuations: for punctuation in punctuations: ph = ph.replace(f'| |{punctuation} ', f'|{punctuation}| |').replace(f'| |{punctuation}', f'|{punctuation}| |') ph = ph[:(- 3)] else: raise RuntimeError(" [!] Use 'phonemizer' version 2.1 or older.") return ph
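text2phone delegates grapheme-to-phoneme conversion to the phonemizer package with the espeak backend, so it only runs where both are installed. A hedged usage sketch; the import path follows the record's utils/text package and 'en-us' is a standard espeak language code.

from utils.text import text2phone

ph = text2phone("Hello, world!", "en-us")
print(ph)   # phonemes separated by '|', word boundaries as '| |', punctuation preserved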
def sequence_to_phoneme(sequence, tp=None): 'Converts a sequence of IDs back to a string' global _id_to_phonemes result = '' if tp: (_, _phonemes) = make_symbols(**tp) _id_to_phonemes = {i: s for (i, s) in enumerate(_phonemes)} for symbol_id in sequence: if (symbol_id in _id_to_phonemes): s = _id_to_phonemes[symbol_id] result += s return result.replace('}{', ' ')
6,434,912,201,758,388,000
Converts a sequence of IDs back to a string
utils/text/__init__.py
sequence_to_phoneme
DanBmh/TTS
python
def sequence_to_phoneme(sequence, tp=None): global _id_to_phonemes result = if tp: (_, _phonemes) = make_symbols(**tp) _id_to_phonemes = {i: s for (i, s) in enumerate(_phonemes)} for symbol_id in sequence: if (symbol_id in _id_to_phonemes): s = _id_to_phonemes[symbol_id] result += s return result.replace('}{', ' ')
def text_to_sequence(text, cleaner_names, tp=None): 'Converts a string of text to a sequence of IDs corresponding to the symbols in the text.\n\n The text can optionally have ARPAbet sequences enclosed in curly braces embedded\n in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."\n\n Args:\n text: string to convert to a sequence\n cleaner_names: names of the cleaner functions to run the text through\n\n Returns:\n List of integers corresponding to the symbols in the text\n ' global _symbol_to_id if tp: (_symbols, _) = make_symbols(**tp) _symbol_to_id = {s: i for (i, s) in enumerate(_symbols)} sequence = [] while text: m = _CURLY_RE.match(text) if (not m): sequence += _symbols_to_sequence(_clean_text(text, cleaner_names)) break sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names)) sequence += _arpabet_to_sequence(m.group(2)) text = m.group(3) return sequence
-863,534,234,504,303,600
Converts a string of text to a sequence of IDs corresponding to the symbols in the text. The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street." Args: text: string to convert to a sequence cleaner_names: names of the cleaner functions to run the text through Returns: List of integers corresponding to the symbols in the text
utils/text/__init__.py
text_to_sequence
DanBmh/TTS
python
def text_to_sequence(text, cleaner_names, tp=None): 'Converts a string of text to a sequence of IDs corresponding to the symbols in the text.\n\n The text can optionally have ARPAbet sequences enclosed in curly braces embedded\n in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."\n\n Args:\n text: string to convert to a sequence\n cleaner_names: names of the cleaner functions to run the text through\n\n Returns:\n List of integers corresponding to the symbols in the text\n ' global _symbol_to_id if tp: (_symbols, _) = make_symbols(**tp) _symbol_to_id = {s: i for (i, s) in enumerate(_symbols)} sequence = [] while text: m = _CURLY_RE.match(text) if (not m): sequence += _symbols_to_sequence(_clean_text(text, cleaner_names)) break sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names)) sequence += _arpabet_to_sequence(m.group(2)) text = m.group(3) return sequence
def sequence_to_text(sequence, tp=None): 'Converts a sequence of IDs back to a string' global _id_to_symbol if tp: (_symbols, _) = make_symbols(**tp) _id_to_symbol = {i: s for (i, s) in enumerate(_symbols)} result = '' for symbol_id in sequence: if (symbol_id in _id_to_symbol): s = _id_to_symbol[symbol_id] if ((len(s) > 1) and (s[0] == '@')): s = ('{%s}' % s[1:]) result += s return result.replace('}{', ' ')
-3,614,943,435,538,793,500
Converts a sequence of IDs back to a string
utils/text/__init__.py
sequence_to_text
DanBmh/TTS
python
def sequence_to_text(sequence, tp=None): global _id_to_symbol if tp: (_symbols, _) = make_symbols(**tp) _id_to_symbol = {i: s for (i, s) in enumerate(_symbols)} result = for symbol_id in sequence: if (symbol_id in _id_to_symbol): s = _id_to_symbol[symbol_id] if ((len(s) > 1) and (s[0] == '@')): s = ('{%s}' % s[1:]) result += s return result.replace('}{', ' ')
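text_to_sequence and sequence_to_text are inverse mappings over the module's symbol table, with ARPAbet chunks in curly braces handled separately. A round-trip sketch; 'basic_cleaners' is assumed to be one of the cleaner functions shipped with this package.

from utils.text import text_to_sequence, sequence_to_text

ids = text_to_sequence("Turn left on {HH AW1 S S T AH0 N} Street.", ["basic_cleaners"])
print(ids)                    # integer symbol ids; the braced ARPAbet chunk bypasses the cleaners
print(sequence_to_text(ids))  # roughly 'turn left on {HH AW1 S S T AH0 N} street.'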
def validate_subdirectory_string(subdirectory_str): ' Validate subdirectory string ' if (not subdirectory_str.isascii()): raise argparse.ArgumentTypeError(('%s contains non ascii characters' % subdirectory_str)) if subdirectory_str.startswith('/'): subdirectory_str = subdirectory_str[1:] if subdirectory_str.endswith('/'): subdirectory_str = subdirectory_str[:(- 1)] site_config.set_subdirectory(subdirectory_str) return subdirectory_str
9,133,025,839,659,418,000
Validate subdirectory string
update-attack.py
validate_subdirectory_string
Alexander-RB/attack-website
python
def validate_subdirectory_string(subdirectory_str): ' ' if (not subdirectory_str.isascii()): raise argparse.ArgumentTypeError(('%s contains non ascii characters' % subdirectory_str)) if subdirectory_str.startswith('/'): subdirectory_str = subdirectory_str[1:] if subdirectory_str.endswith('/'): subdirectory_str = subdirectory_str[:(- 1)] site_config.set_subdirectory(subdirectory_str) return subdirectory_str
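validate_subdirectory_string normalizes a user-supplied sub-directory (ASCII check, strip leading and trailing slashes) before recording it in site_config. A standalone sketch of the same normalization so it can be tried without the website build environment; the site_config side effect is deliberately omitted.

import argparse

def normalize_subdirectory(subdirectory_str):
    # same checks as the record above, minus the site_config.set_subdirectory() call
    if not subdirectory_str.isascii():
        raise argparse.ArgumentTypeError("%s contains non ascii characters" % subdirectory_str)
    if subdirectory_str.startswith("/"):
        subdirectory_str = subdirectory_str[1:]
    if subdirectory_str.endswith("/"):
        subdirectory_str = subdirectory_str[:-1]
    return subdirectory_str

print(normalize_subdirectory("/attack-website/"))   # 'attack-website'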
def get_parsed_args(): 'Create argument parser and parse arguments' parser = argparse.ArgumentParser(description='Build the ATT&CK website.\nAll flags are optional. If you run the build without flags, the modules that pertain to the ATT&CK dataset will be ran. If you would like to run extra modules, opt-in these modules with the--extras flag.') parser.add_argument('--refresh', '-r', action='store_true', help='Pull down the current STIX data from the MITRE/CTI GitHub respository') parser.add_argument('--no-stix-link-replacement', action='store_true', help='If this flag is absent, links to attack.mitre.org/[page] in the STIX data will be replaced with /[page]. Add this flag to preserve links to attack.mitre.org.') parser.add_argument('--modules', '-m', nargs='+', type=str, choices=module_choices, help="Run specific modules by selecting from the list and leaving one space in between them. For example: '-m clean techniques tactics'.Will run all the modules if flag is not called, or selected without arguments.") parser.add_argument('--extras', '-e', nargs='*', type=str, choices=extras, help="Run extra modules that do not pertain to the ATT&CK dataset. Select from the list and leaving one space in between them. For example: '-m resources blog'.\nThese modules will only run if the user adds this flag. Calling this flag without arguments will select all the extra modules.") parser.add_argument('--test', '-t', nargs='+', choices=test_choices, dest='tests', help="Run specific tests by selecting from the list and leaving one space in between them. For example: '-t output links'. Tests: size (size of output directory against github pages limit); links (dead internal hyperlinks and relative hyperlinks); external_links (dead external hyperlinks); citations (unparsed citation text).") parser.add_argument('--attack-brand', action='store_true', help='Applies ATT&CK brand colors. See also the --extras flag.') parser.add_argument('--proxy', help='set proxy') parser.add_argument('--subdirectory', help='If you intend to host the site from a sub-directory, specify the directory using this flag.', type=validate_subdirectory_string) parser.add_argument('--print-tests', dest='print_tests', action='store_true', help='Force test output to print to stdout even if the results are very long.') parser.add_argument('--no-test-exitstatus', dest='override_exit_status', action='store_true', help='Forces application to exit with success status codes even if tests fail.') args = parser.parse_args() if (not args.modules): args.modules = module_choices if ((not args.extras) and isinstance(args.extras, list)): args.extras = extras site_config.args = args return args
3,227,462,813,491,606,500
Create argument parser and parse arguments
update-attack.py
get_parsed_args
Alexander-RB/attack-website
python
def get_parsed_args(): parser = argparse.ArgumentParser(description='Build the ATT&CK website.\nAll flags are optional. If you run the build without flags, the modules that pertain to the ATT&CK dataset will be ran. If you would like to run extra modules, opt-in these modules with the--extras flag.') parser.add_argument('--refresh', '-r', action='store_true', help='Pull down the current STIX data from the MITRE/CTI GitHub respository') parser.add_argument('--no-stix-link-replacement', action='store_true', help='If this flag is absent, links to attack.mitre.org/[page] in the STIX data will be replaced with /[page]. Add this flag to preserve links to attack.mitre.org.') parser.add_argument('--modules', '-m', nargs='+', type=str, choices=module_choices, help="Run specific modules by selecting from the list and leaving one space in between them. For example: '-m clean techniques tactics'.Will run all the modules if flag is not called, or selected without arguments.") parser.add_argument('--extras', '-e', nargs='*', type=str, choices=extras, help="Run extra modules that do not pertain to the ATT&CK dataset. Select from the list and leaving one space in between them. For example: '-m resources blog'.\nThese modules will only run if the user adds this flag. Calling this flag without arguments will select all the extra modules.") parser.add_argument('--test', '-t', nargs='+', choices=test_choices, dest='tests', help="Run specific tests by selecting from the list and leaving one space in between them. For example: '-t output links'. Tests: size (size of output directory against github pages limit); links (dead internal hyperlinks and relative hyperlinks); external_links (dead external hyperlinks); citations (unparsed citation text).") parser.add_argument('--attack-brand', action='store_true', help='Applies ATT&CK brand colors. See also the --extras flag.') parser.add_argument('--proxy', help='set proxy') parser.add_argument('--subdirectory', help='If you intend to host the site from a sub-directory, specify the directory using this flag.', type=validate_subdirectory_string) parser.add_argument('--print-tests', dest='print_tests', action='store_true', help='Force test output to print to stdout even if the results are very long.') parser.add_argument('--no-test-exitstatus', dest='override_exit_status', action='store_true', help='Forces application to exit with success status codes even if tests fail.') args = parser.parse_args() if (not args.modules): args.modules = module_choices if ((not args.extras) and isinstance(args.extras, list)): args.extras = extras site_config.args = args return args
def remove_from_build(arg_modules, arg_extras): ' Given a list of modules from command line, remove modules that appear in module\n directory that are not in list.\n ' def remove_from_running_pool(): ' Remove modules from running pool if they are not in modules list from argument ' copy_of_modules = [] for module in modules.run_ptr: if (module['name'].lower() in arg_modules): copy_of_modules.append(module) modules.run_ptr = copy_of_modules def remove_from_menu(): ' Remove modules from menu if they are not in modules list from argument ' copy_of_menu = [] for module in modules.menu_ptr: if (module['name'].lower() in arg_modules): copy_of_menu.append(module) modules.menu_ptr = copy_of_menu if arg_extras: arg_modules = (arg_modules + arg_extras) remove_from_running_pool() remove_from_menu()
-4,698,552,695,145,996,000
Given a list of modules from command line, remove modules that appear in module directory that are not in list.
update-attack.py
remove_from_build
Alexander-RB/attack-website
python
def remove_from_build(arg_modules, arg_extras): ' Given a list of modules from command line, remove modules that appear in module\n directory that are not in list.\n ' def remove_from_running_pool(): ' Remove modules from running pool if they are not in modules list from argument ' copy_of_modules = [] for module in modules.run_ptr: if (module['name'].lower() in arg_modules): copy_of_modules.append(module) modules.run_ptr = copy_of_modules def remove_from_menu(): ' Remove modules from menu if they are not in modules list from argument ' copy_of_menu = [] for module in modules.menu_ptr: if (module['name'].lower() in arg_modules): copy_of_menu.append(module) modules.menu_ptr = copy_of_menu if arg_extras: arg_modules = (arg_modules + arg_extras) remove_from_running_pool() remove_from_menu()
def remove_from_running_pool(): ' Remove modules from running pool if they are not in modules list from argument ' copy_of_modules = [] for module in modules.run_ptr: if (module['name'].lower() in arg_modules): copy_of_modules.append(module) modules.run_ptr = copy_of_modules
-6,515,811,293,937,717,000
Remove modules from running pool if they are not in modules list from argument
update-attack.py
remove_from_running_pool
Alexander-RB/attack-website
python
def remove_from_running_pool(): ' ' copy_of_modules = [] for module in modules.run_ptr: if (module['name'].lower() in arg_modules): copy_of_modules.append(module) modules.run_ptr = copy_of_modules
def remove_from_menu(): ' Remove modules from menu if they are not in modules list from argument ' copy_of_menu = [] for module in modules.menu_ptr: if (module['name'].lower() in arg_modules): copy_of_menu.append(module) modules.menu_ptr = copy_of_menu
-6,787,491,815,310,537,000
Remove modules from menu if they are not in modules list from argument
update-attack.py
remove_from_menu
Alexander-RB/attack-website
python
def remove_from_menu(): ' ' copy_of_menu = [] for module in modules.menu_ptr: if (module['name'].lower() in arg_modules): copy_of_menu.append(module) modules.menu_ptr = copy_of_menu
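remove_from_build and its two nested helpers keep only the modules whose names were requested on the command line, filtering the global modules.run_ptr and modules.menu_ptr lists in place. A standalone sketch of that filtering over plain lists; the module dicts are hypothetical stand-ins.

run_ptr = [{"name": "clean"}, {"name": "techniques"}, {"name": "tactics"}, {"name": "blog"}]
arg_modules = ["techniques", "tactics"]
arg_extras = ["blog"]

selected = arg_modules + (arg_extras or [])
run_ptr = [m for m in run_ptr if m["name"].lower() in selected]
print([m["name"] for m in run_ptr])   # ['techniques', 'tactics', 'blog']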
def run_command(command, display_cmd=False, ignore_error=False, print_to_console=True): 'Run bash command and print output to stdout\n ' if (display_cmd == True): click.echo((click.style('Running command: ', fg='cyan') + click.style(command, fg='green'))) proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) (out, err) = proc.communicate() if ((len(out) > 0) and print_to_console): click.echo(out) if ((proc.returncode != 0) and (not ignore_error)): sys.exit(proc.returncode) return (out, err)
3,621,573,615,239,375,400
Run bash command and print output to stdout
show/plugins/mlnx.py
run_command
AshokDaparthi/sonic-utilities
python
def run_command(command, display_cmd=False, ignore_error=False, print_to_console=True): '\n ' if (display_cmd == True): click.echo((click.style('Running command: ', fg='cyan') + click.style(command, fg='green'))) proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) (out, err) = proc.communicate() if ((len(out) > 0) and print_to_console): click.echo(out) if ((proc.returncode != 0) and (not ignore_error)): sys.exit(proc.returncode) return (out, err)
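run_command is a thin wrapper around subprocess.Popen that optionally echoes the command and its output through click. A hedged usage sketch with harmless POSIX commands, assuming the helper is imported from show/plugins/mlnx.py.

out, err = run_command("echo hello", display_cmd=True)
print(repr(out))                         # 'hello\n'

run_command("false", ignore_error=True)  # non-zero exit tolerated instead of sys.exit()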
@click.group() def mlnx(): ' Show Mellanox platform information ' pass
-8,046,836,575,544,220,000
Show Mellanox platform information
show/plugins/mlnx.py
mlnx
AshokDaparthi/sonic-utilities
python
@click.group() def mlnx(): ' ' pass
def is_issu_status_enabled(): ' This function parses the SAI XML profile used for mlnx to\n get whether ISSU is enabled or disabled\n @return: True/False\n ' issu_enabled = False sai_profile_path = '/{}/sai.profile'.format(HWSKU_PATH) DOCKER_CAT_COMMAND = 'docker exec {container_name} cat {path}' command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_profile_path) (sai_profile_content, _) = run_command(command, print_to_console=False) sai_profile_kvs = {} for line in sai_profile_content.split('\n'): if (not (SAI_PROFILE_DELIMITER in line)): continue (key, value) = line.split(SAI_PROFILE_DELIMITER) sai_profile_kvs[key] = value.strip() try: sai_xml_path = sai_profile_kvs['SAI_INIT_CONFIG_FILE'] except KeyError: click.echo('Failed to get SAI XML from sai profile', err=True) sys.exit(1) command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_xml_path) (sai_xml_content, _) = run_command(command, print_to_console=False) try: root = ET.fromstring(sai_xml_content) except ET.ParseError: click.echo('Failed to parse SAI xml', err=True) sys.exit(1) el = root.find('platform_info').find('issu-enabled') if (el is not None): issu_enabled = (int(el.text) == 1) return issu_enabled
8,463,833,370,658,066,000
This function parses the SAI XML profile used for mlnx to get whether ISSU is enabled or disabled @return: True/False
show/plugins/mlnx.py
is_issu_status_enabled
AshokDaparthi/sonic-utilities
python
def is_issu_status_enabled(): ' This function parses the SAI XML profile used for mlnx to\n get whether ISSU is enabled or disabled\n @return: True/False\n ' issu_enabled = False sai_profile_path = '/{}/sai.profile'.format(HWSKU_PATH) DOCKER_CAT_COMMAND = 'docker exec {container_name} cat {path}' command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_profile_path) (sai_profile_content, _) = run_command(command, print_to_console=False) sai_profile_kvs = {} for line in sai_profile_content.split('\n'): if (not (SAI_PROFILE_DELIMITER in line)): continue (key, value) = line.split(SAI_PROFILE_DELIMITER) sai_profile_kvs[key] = value.strip() try: sai_xml_path = sai_profile_kvs['SAI_INIT_CONFIG_FILE'] except KeyError: click.echo('Failed to get SAI XML from sai profile', err=True) sys.exit(1) command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_xml_path) (sai_xml_content, _) = run_command(command, print_to_console=False) try: root = ET.fromstring(sai_xml_content) except ET.ParseError: click.echo('Failed to parse SAI xml', err=True) sys.exit(1) el = root.find('platform_info').find('issu-enabled') if (el is not None): issu_enabled = (int(el.text) == 1) return issu_enabled
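After resolving the SAI profile, is_issu_status_enabled simply looks for the platform_info/issu-enabled element in the SAI XML. A standalone sketch of just that lookup on an inline document; the XML fragment is hypothetical but mirrors the element names the record reads.

import xml.etree.ElementTree as ET

sai_xml_content = """
<root>
  <platform_info>
    <issu-enabled>1</issu-enabled>
  </platform_info>
</root>
"""

root = ET.fromstring(sai_xml_content)
el = root.find("platform_info").find("issu-enabled")
issu_enabled = el is not None and int(el.text) == 1
print(issu_enabled)   # True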
@mlnx.command('sniffer') def sniffer_status(): ' Show sniffer status ' components = ['sdk'] env_variable_strings = [ENV_VARIABLE_SX_SNIFFER] for index in range(len(components)): enabled = sniffer_status_get(env_variable_strings[index]) if (enabled is True): click.echo((components[index] + ' sniffer is enabled')) else: click.echo((components[index] + ' sniffer is disabled'))
131,115,497,342,275,740
Show sniffer status
show/plugins/mlnx.py
sniffer_status
AshokDaparthi/sonic-utilities
python
@mlnx.command('sniffer') def sniffer_status(): ' ' components = ['sdk'] env_variable_strings = [ENV_VARIABLE_SX_SNIFFER] for index in range(len(components)): enabled = sniffer_status_get(env_variable_strings[index]) if (enabled is True): click.echo((components[index] + ' sniffer is enabled')) else: click.echo((components[index] + ' sniffer is disabled'))
@mlnx.command('issu') def issu_status(): ' Show ISSU status ' res = is_issu_status_enabled() click.echo(('ISSU is enabled' if res else 'ISSU is disabled'))
-331,031,383,470,014,140
Show ISSU status
show/plugins/mlnx.py
issu_status
AshokDaparthi/sonic-utilities
python
@mlnx.command('issu') def issu_status(): ' ' res = is_issu_status_enabled() click.echo(('ISSU is enabled' if res else 'ISSU is disabled'))
def make_meshgrid(x, y, h=0.02): 'Create a mesh of points to plot in\n\n Parameters\n ----------\n x: data to base x-axis meshgrid on\n y: data to base y-axis meshgrid on\n h: stepsize for meshgrid, optional\n\n Returns\n -------\n xx, yy : ndarray\n ' (x_min, x_max) = ((x.min() - 1), (x.max() + 1)) (y_min, y_max) = ((y.min() - 1), (y.max() + 1)) (xx, yy) = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) return (xx, yy)
-4,317,779,463,543,041,500
Create a mesh of points to plot in Parameters ---------- x: data to base x-axis meshgrid on y: data to base y-axis meshgrid on h: stepsize for meshgrid, optional Returns ------- xx, yy : ndarray
main.py
make_meshgrid
MartimChaves/ret_detect
python
def make_meshgrid(x, y, h=0.02): 'Create a mesh of points to plot in\n\n Parameters\n ----------\n x: data to base x-axis meshgrid on\n y: data to base y-axis meshgrid on\n h: stepsize for meshgrid, optional\n\n Returns\n -------\n xx, yy : ndarray\n ' (x_min, x_max) = ((x.min() - 1), (x.max() + 1)) (y_min, y_max) = ((y.min() - 1), (y.max() + 1)) (xx, yy) = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) return (xx, yy)
def plot_contours(ax, clf, xx, yy, proba=False, **params): 'Plot the decision boundaries for a classifier.\n\n Parameters\n ----------\n ax: matplotlib axes object\n clf: a classifier\n xx: meshgrid ndarray\n yy: meshgrid ndarray\n params: dictionary of params to pass to contourf, optional\n ' if proba: Z = clf.predict_proba(np.c_[(xx.ravel(), yy.ravel())])[:, (- 1)] else: Z = clf.predict(np.c_[(xx.ravel(), yy.ravel())]) Z = Z.reshape(xx.shape) out = ax.contourf(xx, yy, Z, 20, **params) return out
-8,037,834,054,018,846,000
Plot the decision boundaries for a classifier. Parameters ---------- ax: matplotlib axes object clf: a classifier xx: meshgrid ndarray yy: meshgrid ndarray params: dictionary of params to pass to contourf, optional
main.py
plot_contours
MartimChaves/ret_detect
python
def plot_contours(ax, clf, xx, yy, proba=False, **params): 'Plot the decision boundaries for a classifier.\n\n Parameters\n ----------\n ax: matplotlib axes object\n clf: a classifier\n xx: meshgrid ndarray\n yy: meshgrid ndarray\n params: dictionary of params to pass to contourf, optional\n ' if proba: Z = clf.predict_proba(np.c_[(xx.ravel(), yy.ravel())])[:, (- 1)] else: Z = clf.predict(np.c_[(xx.ravel(), yy.ravel())]) Z = Z.reshape(xx.shape) out = ax.contourf(xx, yy, Z, 20, **params) return out
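make_meshgrid and plot_contours are typically used together to draw a classifier's decision surface. The sketch below assumes a scikit-learn style estimator and matplotlib; the toy data and the choice of SVC are purely illustrative.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC

# Toy two-class data (illustrative)
rng = np.random.RandomState(0)
X = np.r_[rng.randn(20, 2) - 1, rng.randn(20, 2) + 1]
y = np.r_[np.zeros(20), np.ones(20)]

clf = SVC(probability=True).fit(X, y)  # probability=True enables predict_proba for proba=True below

fig, ax = plt.subplots()
xx, yy = make_meshgrid(X[:, 0], X[:, 1])
plot_contours(ax, clf, xx, yy, proba=True, cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k')
plt.show()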
def apply(self, context, clear, split, check_for_existing=True, **kwargs): 'Extract Candidates from a Context' if (not isinstance(context, Sentence)): raise NotImplementedError(('%s is currently only implemented for Sentence contexts.' % self.__name__)) entity_idxs = dict(((et, defaultdict(list)) for et in set(self.entity_types))) L = len(context.words) for i in range(L): if (context.entity_types[i] is not None): ets = context.entity_types[i].split(self.entity_sep) cids = context.entity_cids[i].split(self.entity_sep) for (et, cid) in zip(ets, cids): if (et in entity_idxs): entity_idxs[et][cid].append(i) entity_spans = defaultdict(list) entity_cids = {} for (et, cid_idxs) in iteritems(entity_idxs): for (cid, idxs) in iteritems(entity_idxs[et]): while (len(idxs) > 0): i = idxs.pop(0) char_start = context.char_offsets[i] char_end = ((char_start + len(context.words[i])) - 1) while ((len(idxs) > 0) and (idxs[0] == (i + 1))): i = idxs.pop(0) char_end = ((context.char_offsets[i] + len(context.words[i])) - 1) tc = TemporarySpan(char_start=char_start, char_end=char_end, sentence=context) tc.load_id_or_insert(self.session) entity_cids[tc.id] = cid entity_spans[et].append(tc) candidate_args = {'split': split} for args in product(*[enumerate(entity_spans[et]) for et in self.entity_types]): if (self.arity == 2): (ai, a) = args[0] (bi, b) = args[1] if ((not self.self_relations) and (a == b)): continue elif ((not self.nested_relations) and ((a in b) or (b in a))): continue elif ((not self.symmetric_relations) and (ai > bi)): continue for (i, arg_name) in enumerate(self.candidate_class.__argnames__): candidate_args[(arg_name + '_id')] = args[i][1].id candidate_args[(arg_name + '_cid')] = entity_cids[args[i][1].id] if check_for_existing: q = select([self.candidate_class.id]) for (key, value) in iteritems(candidate_args): q = q.where((getattr(self.candidate_class, key) == value)) candidate_id = self.session.execute(q).first() if (candidate_id is not None): continue (yield self.candidate_class(**candidate_args))
7,479,512,621,843,678,000
Extract Candidates from a Context
snorkel/candidates.py
apply
ailabx/snorkel
python
def apply(self, context, clear, split, check_for_existing=True, **kwargs): if (not isinstance(context, Sentence)): raise NotImplementedError(('%s is currently only implemented for Sentence contexts.' % self.__name__)) entity_idxs = dict(((et, defaultdict(list)) for et in set(self.entity_types))) L = len(context.words) for i in range(L): if (context.entity_types[i] is not None): ets = context.entity_types[i].split(self.entity_sep) cids = context.entity_cids[i].split(self.entity_sep) for (et, cid) in zip(ets, cids): if (et in entity_idxs): entity_idxs[et][cid].append(i) entity_spans = defaultdict(list) entity_cids = {} for (et, cid_idxs) in iteritems(entity_idxs): for (cid, idxs) in iteritems(entity_idxs[et]): while (len(idxs) > 0): i = idxs.pop(0) char_start = context.char_offsets[i] char_end = ((char_start + len(context.words[i])) - 1) while ((len(idxs) > 0) and (idxs[0] == (i + 1))): i = idxs.pop(0) char_end = ((context.char_offsets[i] + len(context.words[i])) - 1) tc = TemporarySpan(char_start=char_start, char_end=char_end, sentence=context) tc.load_id_or_insert(self.session) entity_cids[tc.id] = cid entity_spans[et].append(tc) candidate_args = {'split': split} for args in product(*[enumerate(entity_spans[et]) for et in self.entity_types]): if (self.arity == 2): (ai, a) = args[0] (bi, b) = args[1] if ((not self.self_relations) and (a == b)): continue elif ((not self.nested_relations) and ((a in b) or (b in a))): continue elif ((not self.symmetric_relations) and (ai > bi)): continue for (i, arg_name) in enumerate(self.candidate_class.__argnames__): candidate_args[(arg_name + '_id')] = args[i][1].id candidate_args[(arg_name + '_cid')] = entity_cids[args[i][1].id] if check_for_existing: q = select([self.candidate_class.id]) for (key, value) in iteritems(candidate_args): q = q.where((getattr(self.candidate_class, key) == value)) candidate_id = self.session.execute(q).first() if (candidate_id is not None): continue (yield self.candidate_class(**candidate_args))
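The heart of apply above is the inner loop that merges runs of consecutive token indices (sharing one entity cid) into character spans. A simplified standalone sketch of just that merging step, on invented token data, is shown below.

# Simplified sketch of the contiguous-index merging in apply (illustrative data)
words = ['Barack', 'Obama', 'visited', 'Paris']
char_offsets = [0, 7, 13, 21]
idxs = [0, 1, 3]  # token indices tagged with the same entity cid

spans = []
while len(idxs) > 0:
    i = idxs.pop(0)
    char_start = char_offsets[i]
    char_end = char_start + len(words[i]) - 1
    # keep extending the span while the next tagged index is contiguous
    while len(idxs) > 0 and idxs[0] == i + 1:
        i = idxs.pop(0)
        char_end = char_offsets[i] + len(words[i]) - 1
    spans.append((char_start, char_end))

print(spans)  # [(0, 11), (21, 25)] -> 'Barack Obama' and 'Paris'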
def read_inputs(self, name: str): '\n read circuit graphs\n ' top_ports = [] ports_weight = {} for (node, attr) in self.G.nodes(data=True): if ('source' in attr['inst_type']): for source_nets in self.G.neighbors(node): top_ports.append(source_nets) elif ('net_type' in attr): if (attr['net_type'] == 'external'): top_ports.append(node) ports_weight[node] = [] for nbr in list(self.G.neighbors(node)): ports_weight[node].append(self.G.get_edge_data(node, nbr)['weight']) logger.debug('Merging nested graph hierarchies to dictionary: ') const = self.const_parse.read_user_const(name) self.hier_graph_dict[name] = {'graph': self.G, 'ports': top_ports, 'ports_weight': ports_weight, 'const': const} self._traverse_hier_in_graph(self.G) logger.debug(f'read graph {self.hier_graph_dict}') return self.hier_graph_dict
-3,506,295,908,788,865,500
read circuit graphs
align/compiler/create_database.py
read_inputs
mabrains/ALIGN-public
python
def read_inputs(self, name: str): '\n \n ' top_ports = [] ports_weight = {} for (node, attr) in self.G.nodes(data=True): if ('source' in attr['inst_type']): for source_nets in self.G.neighbors(node): top_ports.append(source_nets) elif ('net_type' in attr): if (attr['net_type'] == 'external'): top_ports.append(node) ports_weight[node] = [] for nbr in list(self.G.neighbors(node)): ports_weight[node].append(self.G.get_edge_data(node, nbr)['weight']) logger.debug('Merging nested graph hierarchies to dictionary: ') const = self.const_parse.read_user_const(name) self.hier_graph_dict[name] = {'graph': self.G, 'ports': top_ports, 'ports_weight': ports_weight, 'const': const} self._traverse_hier_in_graph(self.G) logger.debug(f'read graph {self.hier_graph_dict}') return self.hier_graph_dict
def _traverse_hier_in_graph(self, G): '\n Recusively reads all hierachies in the graph and convert them to dictionary\n ' for (node, attr) in G.nodes(data=True): if (('sub_graph' in attr) and attr['sub_graph']): logger.debug(f"Traversing sub graph: {node} {attr['inst_type']} {attr['ports']}") sub_ports = [] ports_weight = {} for (sub_node, sub_attr) in attr['sub_graph'].nodes(data=True): if ('net_type' in sub_attr): if (sub_attr['net_type'] == 'external'): sub_ports.append(sub_node) ports_weight[sub_node] = [] for nbr in list(attr['sub_graph'].neighbors(sub_node)): ports_weight[sub_node].append(attr['sub_graph'].get_edge_data(sub_node, nbr)['weight']) logger.debug(f"external ports: {sub_ports}, {attr['connection']}, {ports_weight}") const = self.const_parse.read_user_const(attr['inst_type']) self.hier_graph_dict[attr['inst_type']] = {'graph': attr['sub_graph'], 'ports': sub_ports, 'const': const, 'ports_weight': ports_weight} self._traverse_hier_in_graph(attr['sub_graph'])
66,405,325,679,430,640
Recursively reads all hierarchies in the graph and converts them to a dictionary
align/compiler/create_database.py
_traverse_hier_in_graph
mabrains/ALIGN-public
python
def _traverse_hier_in_graph(self, G): '\n \n ' for (node, attr) in G.nodes(data=True): if (('sub_graph' in attr) and attr['sub_graph']): logger.debug(f"Traversing sub graph: {node} {attr['inst_type']} {attr['ports']}") sub_ports = [] ports_weight = {} for (sub_node, sub_attr) in attr['sub_graph'].nodes(data=True): if ('net_type' in sub_attr): if (sub_attr['net_type'] == 'external'): sub_ports.append(sub_node) ports_weight[sub_node] = [] for nbr in list(attr['sub_graph'].neighbors(sub_node)): ports_weight[sub_node].append(attr['sub_graph'].get_edge_data(sub_node, nbr)['weight']) logger.debug(f"external ports: {sub_ports}, {attr['connection']}, {ports_weight}") const = self.const_parse.read_user_const(attr['inst_type']) self.hier_graph_dict[attr['inst_type']] = {'graph': attr['sub_graph'], 'ports': sub_ports, 'const': const, 'ports_weight': ports_weight} self._traverse_hier_in_graph(attr['sub_graph'])
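_traverse_hier_in_graph recurses into every node carrying a sub_graph attribute and records that sub-circuit's external ports. The stripped-down sketch below reproduces the same pattern on a tiny networkx graph; the node names and attributes are invented for illustration.

import networkx as nx

# Leaf-level sub-circuit with one external net (illustrative)
sub = nx.Graph()
sub.add_node('vin', net_type='external')
sub.add_node('m1', inst_type='nmos')
sub.add_edge('vin', 'm1', weight=2)

# Top-level graph instantiating the sub-circuit
top = nx.Graph()
top.add_node('x1', inst_type='amp_stage', sub_graph=sub)
top.add_node('net1', net_type='external')
top.add_edge('x1', 'net1', weight=1)

hier = {}

def traverse(G):
    for node, attr in G.nodes(data=True):
        if attr.get('sub_graph'):
            sg = attr['sub_graph']
            ports = [n for n, a in sg.nodes(data=True) if a.get('net_type') == 'external']
            hier[attr['inst_type']] = {'graph': sg, 'ports': ports}
            traverse(sg)  # recurse into deeper hierarchies

traverse(top)
print(hier['amp_stage']['ports'])  # ['vin']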
def run(self, loaderFunc): 'Called when execution of a feeder element is desired.' if (loaderFunc == Type.kIntake): if ((self.xboxMap.getDriveLeftTrig() > 0) and (self.xboxMap.getDriveRightTrig() == 0)): self.intakeMotor.runIntake(self.intakeMotorSpeed, Direction.kForwards) log.debug('right trig intake', self.xboxMap.getMechRightTrig()) elif ((self.xboxMap.getDriveRightTrig() > 0) and (self.xboxMap.getDriveLeftTrig() == 0)): self.intakeMotor.runIntake(self.intakeMotorSpeed, Direction.kBackwards) log.debug('left trig intake', self.xboxMap.getMechLeftTrig()) else: self.intakeMotor.runIntake(0, Direction.kForwards) if (loaderFunc == Type.kHopper): if ((self.xboxMap.getDriveLeftTrig() > 0) and (self.xboxMap.getDriveRightTrig() == 0)): self.hopperMotor.runHopperMotorForeside(self.loaderMotorSpeed, Direction.kForwards) self.hopperMotor.runHopperMotorBackside(self.loaderMotorSpeed, Direction.kForwards) log.debug('right trig manual', self.xboxMap.getMechRightTrig()) elif ((self.xboxMap.getDriveRightTrig() > 0) and (self.xboxMap.getDriveLeftTrig() == 0)): self.hopperMotor.runHopperMotorForeside(self.loaderMotorSpeed, Direction.kBackwards) self.hopperMotor.runHopperMotorBackside(self.loaderMotorSpeed, Direction.kBackwards) log.debug('left trig manual', self.xboxMap.getMechLeftTrig()) else: self.hopperMotor.stopHopperMotorBackside() self.hopperMotor.stopHopperMotorForeside()
4,227,365,644,385,983,500
Called when execution of a feeder element is desired.
components/Actuators/HighLevel/feederMap.py
run
Raptacon/Robot-2022
python
def run(self, loaderFunc): if (loaderFunc == Type.kIntake): if ((self.xboxMap.getDriveLeftTrig() > 0) and (self.xboxMap.getDriveRightTrig() == 0)): self.intakeMotor.runIntake(self.intakeMotorSpeed, Direction.kForwards) log.debug('right trig intake', self.xboxMap.getMechRightTrig()) elif ((self.xboxMap.getDriveRightTrig() > 0) and (self.xboxMap.getDriveLeftTrig() == 0)): self.intakeMotor.runIntake(self.intakeMotorSpeed, Direction.kBackwards) log.debug('left trig intake', self.xboxMap.getMechLeftTrig()) else: self.intakeMotor.runIntake(0, Direction.kForwards) if (loaderFunc == Type.kHopper): if ((self.xboxMap.getDriveLeftTrig() > 0) and (self.xboxMap.getDriveRightTrig() == 0)): self.hopperMotor.runHopperMotorForeside(self.loaderMotorSpeed, Direction.kForwards) self.hopperMotor.runHopperMotorBackside(self.loaderMotorSpeed, Direction.kForwards) log.debug('right trig manual', self.xboxMap.getMechRightTrig()) elif ((self.xboxMap.getDriveRightTrig() > 0) and (self.xboxMap.getDriveLeftTrig() == 0)): self.hopperMotor.runHopperMotorForeside(self.loaderMotorSpeed, Direction.kBackwards) self.hopperMotor.runHopperMotorBackside(self.loaderMotorSpeed, Direction.kBackwards) log.debug('left trig manual', self.xboxMap.getMechLeftTrig()) else: self.hopperMotor.stopHopperMotorBackside() self.hopperMotor.stopHopperMotorForeside()
def __init__(self, graph=None): '\n Initializes the CASE document.\n Args:\n graph: The graph to populate (instance of rdflib.Graph)\n If not provided, a graph in memory will be used.\n ' if (not graph): graph = rdflib.Graph() graph.namespace_manager.bind('case', CASE) self.graph = graph
-570,383,764,642,344,300
Initializes the CASE document. Args: graph: The graph to populate (instance of rdflib.Graph) If not provided, a graph in memory will be used.
example/case_example.py
__init__
casework/CASE-API-Python
python
def __init__(self, graph=None): '\n Initializes the CASE document.\n Args:\n graph: The graph to populate (instance of rdflib.Graph)\n If not provided, a graph in memory will be used.\n ' if (not graph): graph = rdflib.Graph() graph.namespace_manager.bind('case', CASE) self.graph = graph
def _sanitize_triple(self, triple): 'Santizes the triple to contains pure rdflib terms.' (s, p, o) = triple if isinstance(s, Node): s = s._node if isinstance(o, Node): o = o._node elif ((o is not None) and (not isinstance(o, rdflib.term.Node))): o = rdflib.Literal(o) if ((p is not None) and (not isinstance(p, rdflib.term.Node))): p = CASE[p] return (s, p, o)
-2,340,009,076,956,215,300
Sanitizes the triple to contain pure rdflib terms.
example/case_example.py
_sanitize_triple
casework/CASE-API-Python
python
def _sanitize_triple(self, triple): (s, p, o) = triple if isinstance(s, Node): s = s._node if isinstance(o, Node): o = o._node elif ((o is not None) and (not isinstance(o, rdflib.term.Node))): o = rdflib.Literal(o) if ((p is not None) and (not isinstance(p, rdflib.term.Node))): p = CASE[p] return (s, p, o)
def __iter__(self): 'Wrapper for iterating over all triples in the graph' return iter(self.graph)
3,017,913,809,145,363,000
Wrapper for iterating over all triples in the graph
example/case_example.py
__iter__
casework/CASE-API-Python
python
def __iter__(self): return iter(self.graph)
def __contains__(self, triple): 'Wrapper for checking if triple is contained in the graph.' return (self._sanitize_triple(triple) in self.graph)
-3,107,036,654,600,936,000
Wrapper for checking if triple is contained in the graph.
example/case_example.py
__contains__
casework/CASE-API-Python
python
def __contains__(self, triple): return (self._sanitize_triple(triple) in self.graph)
def triples(self, triple): 'Generator over the triple store in graph.' return self.graph.triples(self._sanitize_triple(triple))
1,302,646,430,989,216,500
Generator over the triple store in graph.
example/case_example.py
triples
casework/CASE-API-Python
python
def triples(self, triple): return self.graph.triples(self._sanitize_triple(triple))
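Because _sanitize_triple converts plain Python values and Node wrappers into rdflib terms, triples() lets callers pattern-match over the store with partially specified triples. A small hedged example follows; doc is assumed to be an instance of the document class whose methods are shown above (its class name is not visible in these records).

from rdflib.namespace import RDF

# List every subject together with its rdf:type (None acts as a wildcard)
for s, p, o in doc.triples((None, RDF.type, None)):
    print(s, o)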
def serialize(self, format='json-ld', **kwargs): "Serializes the document's graph to a destination.\n (Follows same arguments as rdflib.Graph().serialize())" if (format == 'json-ld'): if ('context' not in kwargs): kwargs['context'] = self._json_ld_context() if ('auto_compact' not in kwargs): kwargs['auto_compact'] = True return self.graph.serialize(format=format, **kwargs)
-4,027,732,136,207,101,400
Serializes the document's graph to a destination. (Follows same arguments as rdflib.Graph().serialize())
example/case_example.py
serialize
casework/CASE-API-Python
python
def serialize(self, format='json-ld', **kwargs): "Serializes the document's graph to a destination.\n (Follows same arguments as rdflib.Graph().serialize())" if (format == 'json-ld'): if ('context' not in kwargs): kwargs['context'] = self._json_ld_context() if ('auto_compact' not in kwargs): kwargs['auto_compact'] = True return self.graph.serialize(format=format, **kwargs)
def create_CoreObject(self, _type=None, **kwargs): '\n Creates and returns a CoreObject.\n ' return CoreObject(self.graph, rdf_type=_type, **kwargs)
-7,912,393,568,977,087,000
Creates and returns a CoreObject.
example/case_example.py
create_CoreObject
casework/CASE-API-Python
python
def create_CoreObject(self, _type=None, **kwargs): '\n \n ' return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs): '\n Creates and returns a Context.\n This class may not have PropertyBundles.\n ' return ContextObject(self.graph, rdf_type=_type, **kwargs)
-7,159,098,150,300,702,000
Creates and returns a Context. This class may not have PropertyBundles.
example/case_example.py
create_ContextObject
casework/CASE-API-Python
python
def create_ContextObject(self, _type=None, **kwargs): '\n Creates and returns a Context.\n This class may not have PropertyBundles.\n ' return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs): '\n Creates and returns a Sub.\n This class is for children of one of the above CASE classes.\n This class may not have PropertyBundles.\n ' return SubObject(self.graph, rdf_type=_type, **kwargs)
6,910,321,356,426,898,000
Creates and returns a Sub. This class is for children of one of the above CASE classes. This class may not have PropertyBundles.
example/case_example.py
create_SubObject
casework/CASE-API-Python
python
def create_SubObject(self, _type=None, **kwargs): '\n Creates and returns a Sub.\n This class is for children of one of the above CASE classes.\n This class may not have PropertyBundles.\n ' return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs): '\n Creates and returns a Duck.\n These lonely Ducks have no parents and are fully duck-typed.\n This class may not have PropertyBundles.\n ' return DuckObject(self.graph, rdf_type=_type, **kwargs)
4,973,963,120,371,535,000
Creates and returns a Duck. These lonely Ducks have no parents and are fully duck-typed. This class may not have PropertyBundles.
example/case_example.py
create_DuckObject
casework/CASE-API-Python
python
def create_DuckObject(self, _type=None, **kwargs): '\n Creates and returns a Duck.\n These lonely Ducks have no parents and are fully duck-typed.\n This class may not have PropertyBundles.\n ' return DuckObject(self.graph, rdf_type=_type, **kwargs)
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs): 'Initializes and adds a node to the graph.\n\n NOTE: At least the type or a property must be supplied for the Node\n to exist in the graph.\n\n Args:\n graph: The graph to add this node to. (instance of rdflib.Graph)\n uri: Optional string to set th URI to. (If not provided a UUID will be generated.)\n bnode: Whether to create a blank node or a uri reference.\n rdf_type: The RDF type to set this node to.\n properties: Extra properties to add to this node.\n (More properties can be set after initialization by using the add() function.)\n ' super(Node, self).__init__() if uri: self.uri = uri else: self.uri = str(uuid.uuid4()) if bnode: self._node = rdflib.BNode(self.uri) else: self._node = rdflib.URIRef(self.uri) self._graph = graph if (not rdf_type): rdf_type = self.RDF_TYPE if (not isinstance(rdf_type, rdflib.term.Node)): rdf_type = self.NAMESPACE[rdf_type] self.add(RDF.type, rdf_type) for (key, value) in iter(kwargs.items()): self.add(key, value)
8,967,509,059,899,799,000
Initializes and adds a node to the graph. NOTE: At least the type or a property must be supplied for the Node to exist in the graph. Args: graph: The graph to add this node to. (instance of rdflib.Graph) uri: Optional string to set the URI to. (If not provided a UUID will be generated.) bnode: Whether to create a blank node or a uri reference. rdf_type: The RDF type to set this node to. properties: Extra properties to add to this node. (More properties can be set after initialization by using the add() function.)
example/case_example.py
__init__
casework/CASE-API-Python
python
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs): 'Initializes and adds a node to the graph.\n\n NOTE: At least the type or a property must be supplied for the Node\n to exist in the graph.\n\n Args:\n graph: The graph to add this node to. (instance of rdflib.Graph)\n uri: Optional string to set th URI to. (If not provided a UUID will be generated.)\n bnode: Whether to create a blank node or a uri reference.\n rdf_type: The RDF type to set this node to.\n properties: Extra properties to add to this node.\n (More properties can be set after initialization by using the add() function.)\n ' super(Node, self).__init__() if uri: self.uri = uri else: self.uri = str(uuid.uuid4()) if bnode: self._node = rdflib.BNode(self.uri) else: self._node = rdflib.URIRef(self.uri) self._graph = graph if (not rdf_type): rdf_type = self.RDF_TYPE if (not isinstance(rdf_type, rdflib.term.Node)): rdf_type = self.NAMESPACE[rdf_type] self.add(RDF.type, rdf_type) for (key, value) in iter(kwargs.items()): self.add(key, value)
def add(self, property, value): 'Adds a property and its value to the node.' if (value is None): return if isinstance(value, (list, tuple, set)): for item in value: self.add(property, item) return if isinstance(value, Node): value = value._node elif (not isinstance(value, rdflib.term.Node)): value = rdflib.Literal(value) if (not isinstance(property, rdflib.term.Node)): property = self.NAMESPACE[property] self._graph.add((self._node, property, value))
4,964,973,318,960,611,000
Adds a property and its value to the node.
example/case_example.py
add
casework/CASE-API-Python
python
def add(self, property, value): if (value is None): return if isinstance(value, (list, tuple, set)): for item in value: self.add(property, item) return if isinstance(value, Node): value = value._node elif (not isinstance(value, rdflib.term.Node)): value = rdflib.Literal(value) if (not isinstance(property, rdflib.term.Node)): property = self.NAMESPACE[property] self._graph.add((self._node, property, value))
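Node.add accepts plain Python values, other Node instances, and lists; a list expands into one triple per item and None values are skipped. The sketch below illustrates that behaviour with a minimal subclass defined next to the Node class from this file; the namespace URI and the subclass name are assumptions made for the example.

import rdflib

EX = rdflib.Namespace('http://example.org/case#')  # assumed namespace, for illustration only

class Thing(Node):
    NAMESPACE = EX
    RDF_TYPE = EX['Thing']

g = rdflib.Graph()
t = Thing(g)
t.add('label', ['first', 'second'])  # a list expands into two separate triples
t.add('label', None)                 # None values are silently skipped
print(len(g))                        # rdf:type plus the two label triples -> 3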
def __init__(self, graph, rdf_type=None, **kwargs): 'Initializes and adds a node to the graph.\n NOTE: At least the type or a property must be supplied for the Node\n to exist in the graph.\n\n Args:\n graph: The graph to add this node to. (instance of rdflib.Graph)\n rdf_type: The RDF type to set this node to.\n properties: Extra properties to add to this node.\n (More properties can be set after initialization by using the add() function.)\n ' self.type = rdf_type super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs) self.add('CoreObjectCreationTime', datetime.datetime.utcnow()) self.pb = ''
7,103,185,099,151,627,000
Initializes and adds a node to the graph. NOTE: At least the type or a property must be supplied for the Node to exist in the graph. Args: graph: The graph to add this node to. (instance of rdflib.Graph) rdf_type: The RDF type to set this node to. properties: Extra properties to add to this node. (More properties can be set after initialization by using the add() function.)
example/case_example.py
__init__
casework/CASE-API-Python
python
def __init__(self, graph, rdf_type=None, **kwargs): 'Initializes and adds a node to the graph.\n NOTE: At least the type or a property must be supplied for the Node\n to exist in the graph.\n\n Args:\n graph: The graph to add this node to. (instance of rdflib.Graph)\n rdf_type: The RDF type to set this node to.\n properties: Extra properties to add to this node.\n (More properties can be set after initialization by using the add() function.)\n ' self.type = rdf_type super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs) self.add('CoreObjectCreationTime', datetime.datetime.utcnow()) self.pb =
def create_PropertyBundle(self, prop_type=None, **kwargs): 'Convenience function for adding property bundles to this Trace.\n\n Args:\n type: The @type of property bundle (can be of type rdflib.URIRef or string).\n properties: Properties to add to the created property bundle.\n\n Returns:\n The property bundle created (instance of PropertyBundle).\n ' self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs) self.add(CASE.propertyBundle, self.pb) return self.pb
8,454,463,926,833,009,000
Convenience function for adding property bundles to this Trace. Args: type: The @type of property bundle (can be of type rdflib.URIRef or string). properties: Properties to add to the created property bundle. Returns: The property bundle created (instance of PropertyBundle).
example/case_example.py
create_PropertyBundle
casework/CASE-API-Python
python
def create_PropertyBundle(self, prop_type=None, **kwargs): 'Convenience function for adding property bundles to this Trace.\n\n Args:\n type: The @type of property bundle (can be of type rdflib.URIRef or string).\n properties: Properties to add to the created property bundle.\n\n Returns:\n The property bundle created (instance of PropertyBundle).\n ' self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs) self.add(CASE.propertyBundle, self.pb) return self.pb
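Taken together, the factory methods above support a short end-to-end flow: build a document, attach a core object, hang a property bundle off it, and serialize to JSON-LD. In the hedged sketch below the name Document for the graph-wrapping class is an assumption (its actual name is not visible in these records), and the type strings and property values are illustrative.

# Assumed: the graph-wrapping class whose methods are listed above is importable as Document
doc = Document()

trace = doc.create_CoreObject('Trace')  # 'Trace' is an illustrative rdf_type
pb = trace.create_PropertyBundle('File', fileName='example.txt', sizeInBytes=1024)

investigation = doc.create_ContextObject('Investigation', name='example-case')

print(doc.serialize(format='json-ld'))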
def __init__(self, graph, rdf_type=None, **kwargs): 'Initializes and adds a node to the graph.\n NOTE: At least the type or a property must be supplied for the Node\n to exist in the graph.\n\n Args:\n graph: The graph to add this node to. (instance of rdflib.Graph)\n rdf_type: The RDF type to set this node to.\n properties: Extra properties to add to this node.\n (More properties can be set after initialization by using the add() function.)\n ' self.type = rdf_type self.propObj = kwargs super(PropertyBundle, self).__init__(graph, bnode=True, rdf_type=rdf_type, **kwargs)
26,501,929,538,819,850
Initializes and adds a node to the graph. NOTE: At least the type or a property must be supplied for the Node to exist in the graph. Args: graph: The graph to add this node to. (instance of rdflib.Graph) rdf_type: The RDF type to set this node to. properties: Extra properties to add to this node. (More properties can be set after initialization by using the add() function.)
example/case_example.py
__init__
casework/CASE-API-Python
python
def __init__(self, graph, rdf_type=None, **kwargs): 'Initializes and adds a node to the graph.\n NOTE: At least the type or a property must be supplied for the Node\n to exist in the graph.\n\n Args:\n graph: The graph to add this node to. (instance of rdflib.Graph)\n rdf_type: The RDF type to set this node to.\n properties: Extra properties to add to this node.\n (More properties can be set after initialization by using the add() function.)\n ' self.type = rdf_type self.propObj = kwargs super(PropertyBundle, self).__init__(graph, bnode=True, rdf_type=rdf_type, **kwargs)
def __init__(self, graph, rdf_type=None, **kwargs): 'Initializes and adds a node to the graph.\n NOTE: At least the type must be supplied for the Node\n to exist in the graph.\n\n Args:\n graph: The graph to add this node to. (instance of rdflib.Graph)\n rdf_type: The RDF type to set this node to.\n properties: Extra properties to add to this node.\n (More properties can be set after initialization by using the add() function.)\n ' self.type = rdf_type super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs) self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
-5,985,738,002,460,461,000
Initializes and adds a node to the graph. NOTE: At least the type must be supplied for the Node to exist in the graph. Args: graph: The graph to add this node to. (instance of rdflib.Graph) rdf_type: The RDF type to set this node to. properties: Extra properties to add to this node. (More properties can be set after initialization by using the add() function.)
example/case_example.py
__init__
casework/CASE-API-Python
python
def __init__(self, graph, rdf_type=None, **kwargs): 'Initializes and adds a node to the graph.\n NOTE: At least the type must be supplied for the Node\n to exist in the graph.\n\n Args:\n graph: The graph to add this node to. (instance of rdflib.Graph)\n rdf_type: The RDF type to set this node to.\n properties: Extra properties to add to this node.\n (More properties can be set after initialization by using the add() function.)\n ' self.type = rdf_type super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs) self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
def __init__(self, graph, rdf_type=None, **kwargs): 'Initializes and adds a node to the graph.\n NOTE: At least the type must be supplied for the Node\n to exist in the graph.\n\n Args:\n graph: The graph to add this node to. (instance of rdflib.Graph)\n rdf_type: The RDF type to set this node to.\n properties: Extra properties to add to this node.\n (More properties can be set after initialization by using the add() function.)\n ' self.type = rdf_type super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs) self.add('SubObjectCreationTime', datetime.datetime.utcnow())
3,458,609,786,989,587,000
Initializes and adds a node to the graph. NOTE: At least the type must be supplied for the Node to exist in the graph. Args: graph: The graph to add this node to. (instance of rdflib.Graph) rdf_type: The RDF type to set this node to. properties: Extra properties to add to this node. (More properties can be set after initialization by using the add() function.)
example/case_example.py
__init__
casework/CASE-API-Python
python
def __init__(self, graph, rdf_type=None, **kwargs): 'Initializes and adds a node to the graph.\n NOTE: At least the type must be supplied for the Node\n to exist in the graph.\n\n Args:\n graph: The graph to add this node to. (instance of rdflib.Graph)\n rdf_type: The RDF type to set this node to.\n properties: Extra properties to add to this node.\n (More properties can be set after initialization by using the add() function.)\n ' self.type = rdf_type super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs) self.add('SubObjectCreationTime', datetime.datetime.utcnow())
def __init__(self, graph, rdf_type=None, **kwargs): 'Initializes and adds a node to the graph.\n NOTE: At least the type must be supplied for the Node\n to exist in the graph.\n\n Args:\n graph: The graph to add this node to. (instance of rdflib.Graph)\n rdf_type: The RDF type to set this node to.\n properties: Extra properties to add to this node.\n (More properties can be set after initialization by using the add() function.)\n ' self.type = rdf_type super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs) self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
458,313,680,131,040,830
Initializes and adds a node to the graph. NOTE: At least the type must be supplied for the Node to exist in the graph. Args: graph: The graph to add this node to. (instance of rdflib.Graph) rdf_type: The RDF type to set this node to. properties: Extra properties to add to this node. (More properties can be set after initialization by using the add() function.)
example/case_example.py
__init__
casework/CASE-API-Python
python
def __init__(self, graph, rdf_type=None, **kwargs): 'Initializes and adds a node to the graph.\n NOTE: At least the type must be supplied for the Node\n to exist in the graph.\n\n Args:\n graph: The graph to add this node to. (instance of rdflib.Graph)\n rdf_type: The RDF type to set this node to.\n properties: Extra properties to add to this node.\n (More properties can be set after initialization by using the add() function.)\n ' self.type = rdf_type super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs) self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
def create_map_job(config, internal_storage, executor_id, job_id, map_function, iterdata, runtime_meta, runtime_memory, extra_env, include_modules, exclude_modules, execution_timeout, extra_args=None, obj_chunk_size=None, obj_chunk_number=None, invoke_pool_threads=128): '\n Wrapper to create a map job. It integrates COS logic to process objects.\n ' host_job_meta = {'host_job_create_tstamp': time.time()} map_iterdata = utils.verify_args(map_function, iterdata, extra_args) if config['lithops'].get('rabbitmq_monitor', False): rabbit_amqp_url = config['rabbitmq'].get('amqp_url') utils.create_rabbitmq_resources(rabbit_amqp_url, executor_id, job_id) parts_per_object = None if is_object_processing_function(map_function): create_partitions_start = time.time() logger.debug('ExecutorID {} | JobID {} - Calling map on partitions from object storage flow'.format(executor_id, job_id)) (map_iterdata, parts_per_object) = create_partitions(config, internal_storage, map_iterdata, obj_chunk_size, obj_chunk_number) host_job_meta['host_job_create_partitions_time'] = round((time.time() - create_partitions_start), 6) job = _create_job(config=config, internal_storage=internal_storage, executor_id=executor_id, job_id=job_id, func=map_function, iterdata=map_iterdata, runtime_meta=runtime_meta, runtime_memory=runtime_memory, extra_env=extra_env, include_modules=include_modules, exclude_modules=exclude_modules, execution_timeout=execution_timeout, host_job_meta=host_job_meta, invoke_pool_threads=invoke_pool_threads) if parts_per_object: job.parts_per_object = parts_per_object return job
-4,410,323,720,239,119,000
Wrapper to create a map job. It integrates COS logic to process objects.
lithops/job/job.py
create_map_job
pablogs98/lithops
python
def create_map_job(config, internal_storage, executor_id, job_id, map_function, iterdata, runtime_meta, runtime_memory, extra_env, include_modules, exclude_modules, execution_timeout, extra_args=None, obj_chunk_size=None, obj_chunk_number=None, invoke_pool_threads=128): '\n \n ' host_job_meta = {'host_job_create_tstamp': time.time()} map_iterdata = utils.verify_args(map_function, iterdata, extra_args) if config['lithops'].get('rabbitmq_monitor', False): rabbit_amqp_url = config['rabbitmq'].get('amqp_url') utils.create_rabbitmq_resources(rabbit_amqp_url, executor_id, job_id) parts_per_object = None if is_object_processing_function(map_function): create_partitions_start = time.time() logger.debug('ExecutorID {} | JobID {} - Calling map on partitions from object storage flow'.format(executor_id, job_id)) (map_iterdata, parts_per_object) = create_partitions(config, internal_storage, map_iterdata, obj_chunk_size, obj_chunk_number) host_job_meta['host_job_create_partitions_time'] = round((time.time() - create_partitions_start), 6) job = _create_job(config=config, internal_storage=internal_storage, executor_id=executor_id, job_id=job_id, func=map_function, iterdata=map_iterdata, runtime_meta=runtime_meta, runtime_memory=runtime_memory, extra_env=extra_env, include_modules=include_modules, exclude_modules=exclude_modules, execution_timeout=execution_timeout, host_job_meta=host_job_meta, invoke_pool_threads=invoke_pool_threads) if parts_per_object: job.parts_per_object = parts_per_object return job
def create_reduce_job(config, internal_storage, executor_id, reduce_job_id, reduce_function, map_job, map_futures, runtime_meta, runtime_memory, reducer_one_per_object, extra_env, include_modules, exclude_modules, execution_timeout=None): '\n Wrapper to create a reduce job. Apply a function across all map futures.\n ' host_job_meta = {'host_job_create_tstamp': time.time()} iterdata = [(map_futures,)] if (hasattr(map_job, 'parts_per_object') and reducer_one_per_object): prev_total_partitons = 0 iterdata = [] for total_partitions in map_job.parts_per_object: iterdata.append((map_futures[prev_total_partitons:(prev_total_partitons + total_partitions)],)) prev_total_partitons += total_partitions reduce_job_env = {'__LITHOPS_REDUCE_JOB': True} if (extra_env is None): ext_env = reduce_job_env else: ext_env = extra_env.copy() ext_env.update(reduce_job_env) iterdata = utils.verify_args(reduce_function, iterdata, None) return _create_job(config=config, internal_storage=internal_storage, executor_id=executor_id, job_id=reduce_job_id, func=reduce_function, iterdata=iterdata, runtime_meta=runtime_meta, runtime_memory=runtime_memory, extra_env=ext_env, include_modules=include_modules, exclude_modules=exclude_modules, execution_timeout=execution_timeout, host_job_meta=host_job_meta)
266,928,933,511,763,460
Wrapper to create a reduce job. Apply a function across all map futures.
lithops/job/job.py
create_reduce_job
pablogs98/lithops
python
def create_reduce_job(config, internal_storage, executor_id, reduce_job_id, reduce_function, map_job, map_futures, runtime_meta, runtime_memory, reducer_one_per_object, extra_env, include_modules, exclude_modules, execution_timeout=None): '\n \n ' host_job_meta = {'host_job_create_tstamp': time.time()} iterdata = [(map_futures,)] if (hasattr(map_job, 'parts_per_object') and reducer_one_per_object): prev_total_partitons = 0 iterdata = [] for total_partitions in map_job.parts_per_object: iterdata.append((map_futures[prev_total_partitons:(prev_total_partitons + total_partitions)],)) prev_total_partitons += total_partitions reduce_job_env = {'__LITHOPS_REDUCE_JOB': True} if (extra_env is None): ext_env = reduce_job_env else: ext_env = extra_env.copy() ext_env.update(reduce_job_env) iterdata = utils.verify_args(reduce_function, iterdata, None) return _create_job(config=config, internal_storage=internal_storage, executor_id=executor_id, job_id=reduce_job_id, func=reduce_function, iterdata=iterdata, runtime_meta=runtime_meta, runtime_memory=runtime_memory, extra_env=ext_env, include_modules=include_modules, exclude_modules=exclude_modules, execution_timeout=execution_timeout, host_job_meta=host_job_meta)
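create_map_job and create_reduce_job are internal builders; users normally reach them through Lithops' FunctionExecutor. The sketch below shows that entry point, assuming the standard FunctionExecutor API (map_reduce and get_result) is available in this version; the map and reduce functions are illustrative.

import lithops

def double(x):           # illustrative map function
    return 2 * x

def add_up(results):     # illustrative reduce function; receives the list of map outputs
    return sum(results)

fexec = lithops.FunctionExecutor()
fexec.map_reduce(double, [1, 2, 3, 4], add_up)
print(fexec.get_result())  # expected: 20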
def _create_job(config, internal_storage, executor_id, job_id, func, iterdata, runtime_meta, runtime_memory, extra_env, include_modules, exclude_modules, execution_timeout, host_job_meta, invoke_pool_threads=128): '\n :param func: the function to map over the data\n :param iterdata: An iterable of input data\n :param extra_env: Additional environment variables for CF environment. Default None.\n :param extra_meta: Additional metadata to pass to CF. Default None.\n :param remote_invocation: Enable remote invocation. Default False.\n :param invoke_pool_threads: Number of threads to use to invoke.\n :param data_all_as_one: upload the data as a single object. Default True\n :param overwrite_invoke_args: Overwrite other args. Mainly used for testing.\n :param exclude_modules: Explicitly keep these modules from pickled dependencies.\n :return: A list with size `len(iterdata)` of futures for each job\n :rtype: list of futures.\n ' ext_env = ({} if (extra_env is None) else extra_env.copy()) if ext_env: ext_env = utils.convert_bools_to_string(ext_env) logger.debug('Extra environment vars {}'.format(ext_env)) job = SimpleNamespace() job.executor_id = executor_id job.job_id = job_id job.extra_env = ext_env job.execution_timeout = (execution_timeout or config['lithops']['execution_timeout']) job.function_name = func.__name__ job.total_calls = len(iterdata) mode = config['lithops']['mode'] if (mode == SERVERLESS): job.invoke_pool_threads = invoke_pool_threads job.runtime_memory = (runtime_memory or config['serverless']['runtime_memory']) job.runtime_timeout = config['serverless']['runtime_timeout'] if (job.execution_timeout >= job.runtime_timeout): job.execution_timeout = (job.runtime_timeout - 5) elif (mode == STANDALONE): job.runtime_memory = None runtime_timeout = config['standalone']['hard_dismantle_timeout'] if (job.execution_timeout >= runtime_timeout): job.execution_timeout = (runtime_timeout - 10) elif (mode == LOCALHOST): job.runtime_memory = None job.runtime_timeout = execution_timeout exclude_modules_cfg = config['lithops'].get('exclude_modules', []) include_modules_cfg = config['lithops'].get('include_modules', []) exc_modules = set() inc_modules = set() if exclude_modules_cfg: exc_modules.update(exclude_modules_cfg) if exclude_modules: exc_modules.update(exclude_modules) if (include_modules_cfg is not None): inc_modules.update(include_modules_cfg) if ((include_modules_cfg is None) and (not include_modules)): inc_modules = None if ((include_modules is not None) and include_modules): inc_modules.update(include_modules) if (include_modules is None): inc_modules = None logger.debug('ExecutorID {} | JobID {} - Serializing function and data'.format(executor_id, job_id)) job_serialize_start = time.time() serializer = SerializeIndependent(runtime_meta['preinstalls']) (func_and_data_ser, mod_paths) = serializer(([func] + iterdata), inc_modules, exc_modules) data_strs = func_and_data_ser[1:] data_size_bytes = sum((len(x) for x in data_strs)) module_data = create_module_data(mod_paths) func_str = func_and_data_ser[0] func_module_str = pickle.dumps({'func': func_str, 'module_data': module_data}, (- 1)) func_module_size_bytes = len(func_module_str) total_size = utils.sizeof_fmt((data_size_bytes + func_module_size_bytes)) host_job_meta['host_job_serialize_time'] = round((time.time() - job_serialize_start), 6) host_job_meta['data_size_bytes'] = data_size_bytes host_job_meta['func_module_size_bytes'] = func_module_size_bytes if ('data_limit' in config['lithops']): data_limit = config['lithops']['data_limit'] else: data_limit = MAX_AGG_DATA_SIZE if (data_limit and (data_size_bytes > (data_limit * (1024 ** 2)))): log_msg = 'ExecutorID {} | JobID {} - Total data exceeded maximum size of {}'.format(executor_id, job_id, sizeof_fmt((data_limit * (1024 ** 2)))) raise Exception(log_msg) logger.info('ExecutorID {} | JobID {} - Uploading function and data - Total: {}'.format(executor_id, job_id, total_size)) data_key = create_agg_data_key(JOBS_PREFIX, executor_id, job_id) job.data_key = data_key (data_bytes, data_ranges) = utils.agg_data(data_strs) job.data_ranges = data_ranges data_upload_start = time.time() internal_storage.put_data(data_key, data_bytes) data_upload_end = time.time() host_job_meta['host_data_upload_time'] = round((data_upload_end - data_upload_start), 6) func_upload_start = time.time() if config[mode].get('customized_runtime'): function_file = func.__code__.co_filename function_hash = hashlib.md5(open(function_file, 'rb').read()).hexdigest()[:16] mod_hash = hashlib.md5(repr(sorted(mod_paths)).encode('utf-8')).hexdigest()[:16] uuid = f'{function_hash}{mod_hash}' func_key = create_func_key(JOBS_PREFIX, uuid, '') _store_func_and_modules(func_key, func_str, module_data) job.ext_runtime_uuid = uuid else: func_key = create_func_key(JOBS_PREFIX, executor_id, job_id) internal_storage.put_func(func_key, func_module_str) job.func_key = func_key func_upload_end = time.time() host_job_meta['host_func_upload_time'] = round((func_upload_end - func_upload_start), 6) host_job_meta['host_job_created_time'] = round((time.time() - host_job_meta['host_job_create_tstamp']), 6) job.metadata = host_job_meta return job
5,071,671,900,706,446,000
:param func: the function to map over the data :param iterdata: An iterable of input data :param extra_env: Additional environment variables for CF environment. Default None. :param extra_meta: Additional metadata to pass to CF. Default None. :param remote_invocation: Enable remote invocation. Default False. :param invoke_pool_threads: Number of threads to use to invoke. :param data_all_as_one: upload the data as a single object. Default True :param overwrite_invoke_args: Overwrite other args. Mainly used for testing. :param exclude_modules: Explicitly keep these modules from pickled dependencies. :return: A list with size `len(iterdata)` of futures for each job :rtype: list of futures.
lithops/job/job.py
_create_job
pablogs98/lithops
python
def _create_job(config, internal_storage, executor_id, job_id, func, iterdata, runtime_meta, runtime_memory, extra_env, include_modules, exclude_modules, execution_timeout, host_job_meta, invoke_pool_threads=128): '\n :param func: the function to map over the data\n :param iterdata: An iterable of input data\n :param extra_env: Additional environment variables for CF environment. Default None.\n :param extra_meta: Additional metadata to pass to CF. Default None.\n :param remote_invocation: Enable remote invocation. Default False.\n :param invoke_pool_threads: Number of threads to use to invoke.\n :param data_all_as_one: upload the data as a single object. Default True\n :param overwrite_invoke_args: Overwrite other args. Mainly used for testing.\n :param exclude_modules: Explicitly keep these modules from pickled dependencies.\n :return: A list with size `len(iterdata)` of futures for each job\n :rtype: list of futures.\n ' ext_env = ({} if (extra_env is None) else extra_env.copy()) if ext_env: ext_env = utils.convert_bools_to_string(ext_env) logger.debug('Extra environment vars {}'.format(ext_env)) job = SimpleNamespace() job.executor_id = executor_id job.job_id = job_id job.extra_env = ext_env job.execution_timeout = (execution_timeout or config['lithops']['execution_timeout']) job.function_name = func.__name__ job.total_calls = len(iterdata) mode = config['lithops']['mode'] if (mode == SERVERLESS): job.invoke_pool_threads = invoke_pool_threads job.runtime_memory = (runtime_memory or config['serverless']['runtime_memory']) job.runtime_timeout = config['serverless']['runtime_timeout'] if (job.execution_timeout >= job.runtime_timeout): job.execution_timeout = (job.runtime_timeout - 5) elif (mode == STANDALONE): job.runtime_memory = None runtime_timeout = config['standalone']['hard_dismantle_timeout'] if (job.execution_timeout >= runtime_timeout): job.execution_timeout = (runtime_timeout - 10) elif (mode == LOCALHOST): job.runtime_memory = None job.runtime_timeout = execution_timeout exclude_modules_cfg = config['lithops'].get('exclude_modules', []) include_modules_cfg = config['lithops'].get('include_modules', []) exc_modules = set() inc_modules = set() if exclude_modules_cfg: exc_modules.update(exclude_modules_cfg) if exclude_modules: exc_modules.update(exclude_modules) if (include_modules_cfg is not None): inc_modules.update(include_modules_cfg) if ((include_modules_cfg is None) and (not include_modules)): inc_modules = None if ((include_modules is not None) and include_modules): inc_modules.update(include_modules) if (include_modules is None): inc_modules = None logger.debug('ExecutorID {} | JobID {} - Serializing function and data'.format(executor_id, job_id)) job_serialize_start = time.time() serializer = SerializeIndependent(runtime_meta['preinstalls']) (func_and_data_ser, mod_paths) = serializer(([func] + iterdata), inc_modules, exc_modules) data_strs = func_and_data_ser[1:] data_size_bytes = sum((len(x) for x in data_strs)) module_data = create_module_data(mod_paths) func_str = func_and_data_ser[0] func_module_str = pickle.dumps({'func': func_str, 'module_data': module_data}, (- 1)) func_module_size_bytes = len(func_module_str) total_size = utils.sizeof_fmt((data_size_bytes + func_module_size_bytes)) host_job_meta['host_job_serialize_time'] = round((time.time() - job_serialize_start), 6) host_job_meta['data_size_bytes'] = data_size_bytes host_job_meta['func_module_size_bytes'] = func_module_size_bytes if ('data_limit' in config['lithops']): data_limit = config['lithops']['data_limit'] else: data_limit = MAX_AGG_DATA_SIZE if (data_limit and (data_size_bytes > (data_limit * (1024 ** 2)))): log_msg = 'ExecutorID {} | JobID {} - Total data exceeded maximum size of {}'.format(executor_id, job_id, sizeof_fmt((data_limit * (1024 ** 2)))) raise Exception(log_msg) logger.info('ExecutorID {} | JobID {} - Uploading function and data - Total: {}'.format(executor_id, job_id, total_size)) data_key = create_agg_data_key(JOBS_PREFIX, executor_id, job_id) job.data_key = data_key (data_bytes, data_ranges) = utils.agg_data(data_strs) job.data_ranges = data_ranges data_upload_start = time.time() internal_storage.put_data(data_key, data_bytes) data_upload_end = time.time() host_job_meta['host_data_upload_time'] = round((data_upload_end - data_upload_start), 6) func_upload_start = time.time() if config[mode].get('customized_runtime'): function_file = func.__code__.co_filename function_hash = hashlib.md5(open(function_file, 'rb').read()).hexdigest()[:16] mod_hash = hashlib.md5(repr(sorted(mod_paths)).encode('utf-8')).hexdigest()[:16] uuid = f'{function_hash}{mod_hash}' func_key = create_func_key(JOBS_PREFIX, uuid, ) _store_func_and_modules(func_key, func_str, module_data) job.ext_runtime_uuid = uuid else: func_key = create_func_key(JOBS_PREFIX, executor_id, job_id) internal_storage.put_func(func_key, func_module_str) job.func_key = func_key func_upload_end = time.time() host_job_meta['host_func_upload_time'] = round((func_upload_end - func_upload_start), 6) host_job_meta['host_job_created_time'] = round((time.time() - host_job_meta['host_job_create_tstamp']), 6) job.metadata = host_job_meta return job
def untar(path, fname, deleteTar=True): '\n Unpacks the given archive file to the same directory, then (by default)\n deletes the archive file.\n ' print(('unpacking ' + fname)) fullpath = os.path.join(path, fname) shutil.unpack_archive(fullpath, path) if deleteTar: os.remove(fullpath)
-8,229,018,165,018,037,000
Unpacks the given archive file to the same directory, then (by default) deletes the archive file.
cogdl/datasets/gtn_data.py
untar
AlvinWen428/cogdl
python
def untar(path, fname, deleteTar=True): '\n Unpacks the given archive file to the same directory, then (by default)\n deletes the archive file.\n ' print(('unpacking ' + fname)) fullpath = os.path.join(path, fname) shutil.unpack_archive(fullpath, path) if deleteTar: os.remove(fullpath)
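A brief usage note for untar: because it delegates to shutil.unpack_archive, it handles zip archives as well as tarballs. The directory and file name below are illustrative.

# Unpack /tmp/gtn_data/ACM.zip into /tmp/gtn_data and delete the archive afterwards (illustrative paths)
untar('/tmp/gtn_data', 'ACM.zip')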
@property @abc.abstractmethod def uuid(self) -> Optional[str]: 'Return the unique identifier of the repository.'
4,545,748,468,575,769,000
Return the unique identifier of the repository.
aiida/repository/backend/abstract.py
uuid
azadoks/aiida-core
python
@property @abc.abstractmethod def uuid(self) -> Optional[str]:
@property @abc.abstractmethod def key_format(self) -> Optional[str]: 'Return the format for the keys of the repository.\n\n Important for when migrating between backends (e.g. archive -> main), as if they are not equal then it is\n necessary to re-compute all the `Node.repository_metadata` before importing (otherwise they will not match\n with the repository).\n '
-7,728,492,979,033,497,000
Return the format for the keys of the repository. Important for when migrating between backends (e.g. archive -> main), as if they are not equal then it is necessary to re-compute all the `Node.repository_metadata` before importing (otherwise they will not match with the repository).
aiida/repository/backend/abstract.py
key_format
azadoks/aiida-core
python
@property @abc.abstractmethod def key_format(self) -> Optional[str]: 'Return the format for the keys of the repository.\n\n Important for when migrating between backends (e.g. archive -> main), as if they are not equal then it is\n necessary to re-compute all the `Node.repository_metadata` before importing (otherwise they will not match\n with the repository).\n '
@abc.abstractmethod def initialise(self, **kwargs) -> None: "Initialise the repository if it hasn't already been initialised.\n\n :param kwargs: parameters for the initialisation.\n "
-6,842,518,518,233,794,000
Initialise the repository if it hasn't already been initialised. :param kwargs: parameters for the initialisation.
aiida/repository/backend/abstract.py
initialise
azadoks/aiida-core
python
@abc.abstractmethod def initialise(self, **kwargs) -> None: "Initialise the repository if it hasn't already been initialised.\n\n :param kwargs: parameters for the initialisation.\n "
@property @abc.abstractmethod def is_initialised(self) -> bool: 'Return whether the repository has been initialised.'
-8,991,978,196,976,154,000
Return whether the repository has been initialised.
aiida/repository/backend/abstract.py
is_initialised
azadoks/aiida-core
python
@property @abc.abstractmethod def is_initialised(self) -> bool:
@abc.abstractmethod def erase(self) -> None: 'Delete the repository itself and all its contents.\n\n .. note:: This should not merely delete the contents of the repository but any resources it created. For\n example, if the repository is essentially a folder on disk, the folder itself should also be deleted, not\n just its contents.\n '
8,501,606,104,825,531,000
Delete the repository itself and all its contents. .. note:: This should not merely delete the contents of the repository but any resources it created. For example, if the repository is essentially a folder on disk, the folder itself should also be deleted, not just its contents.
aiida/repository/backend/abstract.py
erase
azadoks/aiida-core
python
@abc.abstractmethod def erase(self) -> None: 'Delete the repository itself and all its contents.\n\n .. note:: This should not merely delete the contents of the repository but any resources it created. For\n example, if the repository is essentially a folder on disk, the folder itself should also be deleted, not\n just its contents.\n '
def put_object_from_filelike(self, handle: BinaryIO) -> str: 'Store the byte contents of a file in the repository.\n\n :param handle: filelike object with the byte content to be stored.\n :return: the generated fully qualified identifier for the object within the repository.\n :raises TypeError: if the handle is not a byte stream.\n ' if ((not isinstance(handle, io.BufferedIOBase)) and (not self.is_readable_byte_stream(handle))): raise TypeError(f'handle does not seem to be a byte stream: {type(handle)}.') return self._put_object_from_filelike(handle)
9,115,440,169,624,832,000
Store the byte contents of a file in the repository. :param handle: filelike object with the byte content to be stored. :return: the generated fully qualified identifier for the object within the repository. :raises TypeError: if the handle is not a byte stream.
aiida/repository/backend/abstract.py
put_object_from_filelike
azadoks/aiida-core
python
def put_object_from_filelike(self, handle: BinaryIO) -> str: 'Store the byte contents of a file in the repository.\n\n :param handle: filelike object with the byte content to be stored.\n :return: the generated fully qualified identifier for the object within the repository.\n :raises TypeError: if the handle is not a byte stream.\n ' if ((not isinstance(handle, io.BufferedIOBase)) and (not self.is_readable_byte_stream(handle))): raise TypeError(f'handle does not seem to be a byte stream: {type(handle)}.') return self._put_object_from_filelike(handle)
def put_object_from_file(self, filepath: Union[(str, pathlib.Path)]) -> str: 'Store a new object with contents of the file located at `filepath` on this file system.\n\n :param filepath: absolute path of file whose contents to copy to the repository.\n :return: the generated fully qualified identifier for the object within the repository.\n :raises TypeError: if the handle is not a byte stream.\n ' with open(filepath, mode='rb') as handle: return self.put_object_from_filelike(handle)
-7,005,207,975,189,346,000
Store a new object with contents of the file located at `filepath` on this file system. :param filepath: absolute path of file whose contents to copy to the repository. :return: the generated fully qualified identifier for the object within the repository. :raises TypeError: if the handle is not a byte stream.
aiida/repository/backend/abstract.py
put_object_from_file
azadoks/aiida-core
python
def put_object_from_file(self, filepath: Union[(str, pathlib.Path)]) -> str: 'Store a new object with contents of the file located at `filepath` on this file system.\n\n :param filepath: absolute path of file whose contents to copy to the repository.\n :return: the generated fully qualified identifier for the object within the repository.\n :raises TypeError: if the handle is not a byte stream.\n ' with open(filepath, mode='rb') as handle: return self.put_object_from_filelike(handle)
@abc.abstractmethod def has_objects(self, keys: List[str]) -> List[bool]: 'Return whether the repository has an object with the given key.\n\n :param keys:\n list of fully qualified identifiers for objects within the repository.\n :return:\n list of logicals, in the same order as the keys provided, with value True if the respective\n object exists and False otherwise.\n '
-506,592,450,390,231,200
Return whether the repository has an object with the given key. :param keys: list of fully qualified identifiers for objects within the repository. :return: list of logicals, in the same order as the keys provided, with value True if the respective object exists and False otherwise.
aiida/repository/backend/abstract.py
has_objects
azadoks/aiida-core
python
@abc.abstractmethod def has_objects(self, keys: List[str]) -> List[bool]: 'Return whether the repository has an object with the given key.\n\n :param keys:\n list of fully qualified identifiers for objects within the repository.\n :return:\n list of logicals, in the same order as the keys provided, with value True if the respective\n object exists and False otherwise.\n '
def has_object(self, key: str) -> bool: 'Return whether the repository has an object with the given key.\n\n :param key: fully qualified identifier for the object within the repository.\n :return: True if the object exists, False otherwise.\n ' return self.has_objects([key])[0]
8,852,391,107,573,442,000
Return whether the repository has an object with the given key. :param key: fully qualified identifier for the object within the repository. :return: True if the object exists, False otherwise.
aiida/repository/backend/abstract.py
has_object
azadoks/aiida-core
python
def has_object(self, key: str) -> bool:
    'Return whether the repository has an object with the given key.\n\n :param key: fully qualified identifier for the object within the repository.\n :return: True if the object exists, False otherwise.\n '
    return self.has_objects([key])[0]
@abc.abstractmethod
def list_objects(self) -> Iterable[str]:
    'Return iterable that yields all available objects by key.\n\n :return: An iterable for all the available object keys.\n '
-939,318,421,040,364,300
Return iterable that yields all available objects by key. :return: An iterable for all the available object keys.
aiida/repository/backend/abstract.py
list_objects
azadoks/aiida-core
python
@abc.abstractmethod
def list_objects(self) -> Iterable[str]:
    'Return iterable that yields all available objects by key.\n\n :return: An iterable for all the available object keys.\n '
@contextlib.contextmanager
def open(self, key: str) -> Iterator[BinaryIO]:
    'Open a file handle to an object stored under the given key.\n\n .. note:: this should only be used to open a handle to read an existing file. To write a new file use the method\n ``put_object_from_filelike`` instead.\n\n :param key: fully qualified identifier for the object within the repository.\n :return: yield a byte stream object.\n :raise FileNotFoundError: if the file does not exist.\n :raise OSError: if the file could not be opened.\n '
    if (not self.has_object(key)):
        raise FileNotFoundError(f'object with key `{key}` does not exist.')
3,297,650,485,873,668,000
Open a file handle to an object stored under the given key. .. note:: this should only be used to open a handle to read an existing file. To write a new file use the method ``put_object_from_filelike`` instead. :param key: fully qualified identifier for the object within the repository. :return: yield a byte stream object. :raise FileNotFoundError: if the file does not exist. :raise OSError: if the file could not be opened.
aiida/repository/backend/abstract.py
open
azadoks/aiida-core
python
@contextlib.contextmanager
def open(self, key: str) -> Iterator[BinaryIO]:
    'Open a file handle to an object stored under the given key.\n\n .. note:: this should only be used to open a handle to read an existing file. To write a new file use the method\n ``put_object_from_filelike`` instead.\n\n :param key: fully qualified identifier for the object within the repository.\n :return: yield a byte stream object.\n :raise FileNotFoundError: if the file does not exist.\n :raise OSError: if the file could not be opened.\n '
    if (not self.has_object(key)):
        raise FileNotFoundError(f'object with key `{key}` does not exist.')
def get_object_content(self, key: str) -> bytes:
    'Return the content of a object identified by key.\n\n :param key: fully qualified identifier for the object within the repository.\n :raise FileNotFoundError: if the file does not exist.\n :raise OSError: if the file could not be opened.\n '
    with self.open(key) as handle:
        return handle.read()
8,643,959,129,101,297,000
Return the content of a object identified by key. :param key: fully qualified identifier for the object within the repository. :raise FileNotFoundError: if the file does not exist. :raise OSError: if the file could not be opened.
aiida/repository/backend/abstract.py
get_object_content
azadoks/aiida-core
python
def get_object_content(self, key: str) -> bytes:
    'Return the content of a object identified by key.\n\n :param key: fully qualified identifier for the object within the repository.\n :raise FileNotFoundError: if the file does not exist.\n :raise OSError: if the file could not be opened.\n '
    with self.open(key) as handle:
        return handle.read()
@abc.abstractmethod
def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[(str, BinaryIO)]]:
    'Return an iterator over the (read-only) byte streams of objects identified by key.\n\n .. note:: handles should only be read within the context of this iterator.\n\n :param keys: fully qualified identifiers for the objects within the repository.\n :return: an iterator over the object byte streams.\n :raise FileNotFoundError: if the file does not exist.\n :raise OSError: if a file could not be opened.\n '
-8,532,632,070,989,044,000
Return an iterator over the (read-only) byte streams of objects identified by key. .. note:: handles should only be read within the context of this iterator. :param keys: fully qualified identifiers for the objects within the repository. :return: an iterator over the object byte streams. :raise FileNotFoundError: if the file does not exist. :raise OSError: if a file could not be opened.
aiida/repository/backend/abstract.py
iter_object_streams
azadoks/aiida-core
python
@abc.abstractmethod
def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[(str, BinaryIO)]]:
    'Return an iterator over the (read-only) byte streams of objects identified by key.\n\n .. note:: handles should only be read within the context of this iterator.\n\n :param keys: fully qualified identifiers for the objects within the repository.\n :return: an iterator over the object byte streams.\n :raise FileNotFoundError: if the file does not exist.\n :raise OSError: if a file could not be opened.\n '
def get_object_hash(self, key: str) -> str:
    'Return the SHA-256 hash of an object stored under the given key.\n\n .. important::\n A SHA-256 hash should always be returned,\n to ensure consistency across different repository implementations.\n\n :param key: fully qualified identifier for the object within the repository.\n :raise FileNotFoundError: if the file does not exist.\n :raise OSError: if the file could not be opened.\n '
    with self.open(key) as handle:
        return chunked_file_hash(handle, hashlib.sha256)
-5,363,301,719,493,803,000
Return the SHA-256 hash of an object stored under the given key. .. important:: A SHA-256 hash should always be returned, to ensure consistency across different repository implementations. :param key: fully qualified identifier for the object within the repository. :raise FileNotFoundError: if the file does not exist. :raise OSError: if the file could not be opened.
aiida/repository/backend/abstract.py
get_object_hash
azadoks/aiida-core
python
def get_object_hash(self, key: str) -> str:
    'Return the SHA-256 hash of an object stored under the given key.\n\n .. important::\n A SHA-256 hash should always be returned,\n to ensure consistency across different repository implementations.\n\n :param key: fully qualified identifier for the object within the repository.\n :raise FileNotFoundError: if the file does not exist.\n :raise OSError: if the file could not be opened.\n '
    with self.open(key) as handle:
        return chunked_file_hash(handle, hashlib.sha256)
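get_object_hash streams the object through chunked_file_hash rather than reading it into memory at once. The record does not include that helper, so the function below is an illustrative stand-in for the pattern, not the actual aiida implementation:

import hashlib
from typing import BinaryIO, Callable

def chunked_hash(handle: BinaryIO, hash_cls: Callable = hashlib.sha256, chunk_size: int = 1024 * 1024) -> str:
    """Return the hex digest of a byte stream, reading it in fixed-size chunks."""
    hasher = hash_cls()
    while True:
        chunk = handle.read(chunk_size)
        if not chunk:
            break
        hasher.update(chunk)
    return hasher.hexdigest()

# Usage against the interface documented above (assuming chunked_file_hash also returns a hex digest):
# with backend.open(key) as handle:
#     assert chunked_hash(handle) == backend.get_object_hash(key)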
@abc.abstractmethod
def delete_objects(self, keys: List[str]) -> None:
    'Delete the objects from the repository.\n\n :param keys: list of fully qualified identifiers for the objects within the repository.\n :raise FileNotFoundError: if any of the files does not exist.\n :raise OSError: if any of the files could not be deleted.\n '
    keys_exist = self.has_objects(keys)
    if (not all(keys_exist)):
        error_message = 'some of the keys provided do not correspond to any object in the repository:\n'
        for (indx, key_exists) in enumerate(keys_exist):
            if (not key_exists):
                error_message += f''' > object with key `{keys[indx]}` does not exist.
'''
        raise FileNotFoundError(error_message)
6,488,864,614,444,447,000
Delete the objects from the repository. :param keys: list of fully qualified identifiers for the objects within the repository. :raise FileNotFoundError: if any of the files does not exist. :raise OSError: if any of the files could not be deleted.
aiida/repository/backend/abstract.py
delete_objects
azadoks/aiida-core
python
@abc.abstractmethod
def delete_objects(self, keys: List[str]) -> None:
    'Delete the objects from the repository.\n\n :param keys: list of fully qualified identifiers for the objects within the repository.\n :raise FileNotFoundError: if any of the files does not exist.\n :raise OSError: if any of the files could not be deleted.\n '
    keys_exist = self.has_objects(keys)
    if (not all(keys_exist)):
        error_message = 'some of the keys provided do not correspond to any object in the repository:\n'
        for (indx, key_exists) in enumerate(keys_exist):
            if (not key_exists):
                error_message += f' > object with key `{keys[indx]}` does not exist. '
        raise FileNotFoundError(error_message)
def delete_object(self, key: str) -> None:
    'Delete the object from the repository.\n\n :param key: fully qualified identifier for the object within the repository.\n :raise FileNotFoundError: if the file does not exist.\n :raise OSError: if the file could not be deleted.\n '
    return self.delete_objects([key])
2,549,091,568,805,183,000
Delete the object from the repository. :param key: fully qualified identifier for the object within the repository. :raise FileNotFoundError: if the file does not exist. :raise OSError: if the file could not be deleted.
aiida/repository/backend/abstract.py
delete_object
azadoks/aiida-core
python
def delete_object(self, key: str) -> None:
    'Delete the object from the repository.\n\n :param key: fully qualified identifier for the object within the repository.\n :raise FileNotFoundError: if the file does not exist.\n :raise OSError: if the file could not be deleted.\n '
    return self.delete_objects([key])
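Taken together, the abstract.py records above define a small storage contract: concrete backends only have to provide the abstract pieces, and the convenience methods (has_object, get_object_content, get_object_hash, delete_object) come for free. A purely illustrative in-memory sketch of that surface follows; the base-class name is inferred from the record paths, and the real abstract class may demand further members (e.g. initialise or a uuid property) that these records do not show, so this is a sketch of the documented interface only:

import contextlib
import io
import uuid
from typing import BinaryIO, Iterable, Iterator, List, Tuple

from aiida.repository.backend.abstract import AbstractRepositoryBackend  # class name assumed from the path above

class InMemoryRepositoryBackend(AbstractRepositoryBackend):
    """Toy backend that keeps every object in a dict, keyed by a random UUID."""

    def __init__(self):
        self._objects = {}

    def _put_object_from_filelike(self, handle: BinaryIO) -> str:
        key = uuid.uuid4().hex
        self._objects[key] = handle.read()
        return key

    def has_objects(self, keys: List[str]) -> List[bool]:
        return [key in self._objects for key in keys]

    def list_objects(self) -> Iterable[str]:
        return iter(self._objects)

    @contextlib.contextmanager
    def open(self, key: str) -> Iterator[BinaryIO]:
        if not self.has_object(key):
            raise FileNotFoundError(f'object with key `{key}` does not exist.')
        yield io.BytesIO(self._objects[key])

    def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[str, BinaryIO]]:
        for key in keys:
            with self.open(key) as handle:
                yield key, handle

    def delete_objects(self, keys: List[str]) -> None:
        super().delete_objects(keys)  # reuse the existence check from the abstract method above
        for key in keys:
            del self._objects[key]

# Example round trip (valid only if the real base class requires no further abstract members):
# backend = InMemoryRepositoryBackend()
# key = backend.put_object_from_filelike(io.BytesIO(b'payload'))
# assert backend.get_object_content(key) == b'payload'
# backend.delete_object(key)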
@classmethod
def setUpClass(cls):
    'Launch the webdriver of choice with selected options(see browserconfig.py).\n Then login using pickled cookies(see tests/pickledlogin.py).'
    if (browserconfig.current_browser in ['chrome', 'firefox']):
        cls.driver = browserconfig.driver_runner(executable_path=browserconfig.driver_path, desired_capabilities=browserconfig.capabilities)
    elif (browserconfig.current_browser == 'edge'):
        cls.driver = browserconfig.driver_runner(executable_path=browserconfig.driver_path, capabilities=browserconfig.capabilities)
    tests.pickledlogin.pickled_login(cls.driver)
547,812,806,824,385,660
Launch the webdriver of choice with selected options(see browserconfig.py). Then login using pickled cookies(see tests/pickledlogin.py).
tests/test_headerpage.py
setUpClass
BradleyPelton/NetflixSelenium
python
@classmethod
def setUpClass(cls):
    'Launch the webdriver of choice with selected options(see browserconfig.py).\n Then login using pickled cookies(see tests/pickledlogin.py).'
    if (browserconfig.current_browser in ['chrome', 'firefox']):
        cls.driver = browserconfig.driver_runner(executable_path=browserconfig.driver_path, desired_capabilities=browserconfig.capabilities)
    elif (browserconfig.current_browser == 'edge'):
        cls.driver = browserconfig.driver_runner(executable_path=browserconfig.driver_path, capabilities=browserconfig.capabilities)
    tests.pickledlogin.pickled_login(cls.driver)
@classmethod
def tearDownClass(cls):
    'Closes the browser and shuts down the driver executable.'
    cls.driver.quit()
1,645,581,262,967,605,800
Closes the browser and shuts down the driver executable.
tests/test_headerpage.py
tearDownClass
BradleyPelton/NetflixSelenium
python
@classmethod
def tearDownClass(cls):
    cls.driver.quit()
def setUp(self):
    'Return to the home page, netflix.com/browse, the staging place for header tests.'
    self.driver.get('https://netflix.com/browse')
3,288,124,154,217,542,700
Return to the home page, netflix.com/browse, the staging place for header tests.
tests/test_headerpage.py
setUp
BradleyPelton/NetflixSelenium
python
def setUp(self):
    self.driver.get('https://netflix.com/browse')
def test_logout_from_header(self):
    'Logout from the header.'
    header_page = pagemodels.headerpage.HeaderPage(self.driver)
    header_page.logout()
    self.assertIn('logout', self.driver.current_url)
    tests.pickledlogin.pickled_login(self.driver)
2,353,134,763,560,939,000
Logout from the header.
tests/test_headerpage.py
test_logout_from_header
BradleyPelton/NetflixSelenium
python
def test_logout_from_header(self):
    header_page = pagemodels.headerpage.HeaderPage(self.driver)
    header_page.logout()
    self.assertIn('logout', self.driver.current_url)
    tests.pickledlogin.pickled_login(self.driver)
def test_navigate_home_from_my_list(self):
    'Using the giant Netflix logo in the top left, navigate to the home page /browse/\n from the my-list page.'
    self.driver.get('https://www.netflix.com/browse/my-list')
    header_page = pagemodels.headerpage.HeaderPage(self.driver)
    header_page.navigate_to_home()
    self.assertEqual('https://www.netflix.com/browse', self.driver.current_url)
-5,558,719,885,448,803,000
Using the giant Netflix logo in the top left, navigate to the home page /browse/ from the my-list page.
tests/test_headerpage.py
test_navigate_home_from_my_list
BradleyPelton/NetflixSelenium
python
def test_navigate_home_from_my_list(self):
    'Using the giant Netflix logo in the top left, navigate to the home page /browse/\n from the my-list page.'
    self.driver.get('https://www.netflix.com/browse/my-list')
    header_page = pagemodels.headerpage.HeaderPage(self.driver)
    header_page.navigate_to_home()
    self.assertEqual('https://www.netflix.com/browse', self.driver.current_url)
def test_navigate_to_manage_profile(self):
    'Using the header account dropdown, navigate to the manage profile page.'
    header_page = pagemodels.headerpage.HeaderPage(self.driver)
    header_page.navigate_to_manage_profile()
    self.assertIn('profiles/manage', self.driver.current_url)
-6,285,243,719,116,505,000
Using the header account dropdown, navigate to the manage profile page.
tests/test_headerpage.py
test_navigate_to_manage_profile
BradleyPelton/NetflixSelenium
python
def test_navigate_to_manage_profile(self):
    header_page = pagemodels.headerpage.HeaderPage(self.driver)
    header_page.navigate_to_manage_profile()
    self.assertIn('profiles/manage', self.driver.current_url)
def test_search_for_shawshank(self):
    "Using the search field, search for 'shawshank' and assert that shawshank was found."
    header_page = pagemodels.headerpage.HeaderPage(self.driver)
    header_page.search('shawshank')
    self.assertIn('The Shawshank Redemption', self.driver.page_source)
428,388,069,566,930,800
Using the search field, search for 'shawshank' and assert that shawshank was found.
tests/test_headerpage.py
test_search_for_shawshank
BradleyPelton/NetflixSelenium
python
def test_search_for_shawshank(self):
    header_page = pagemodels.headerpage.HeaderPage(self.driver)
    header_page.search('shawshank')
    self.assertIn('The Shawshank Redemption', self.driver.page_source)
def test_click_top_notification(self):
    'Click the top notification and assert that the page has changed.'
    header_page = pagemodels.headerpage.HeaderPage(self.driver)
    header_page.click_top_notification()
    self.assertTrue((('title' in self.driver.current_url) or ('notification' in self.driver.current_url)))
-6,941,320,859,743,506,000
Click the top notification and assert that the page has changed.
tests/test_headerpage.py
test_click_top_notification
BradleyPelton/NetflixSelenium
python
def test_click_top_notification(self):
    header_page = pagemodels.headerpage.HeaderPage(self.driver)
    header_page.click_top_notification()
    self.assertTrue((('title' in self.driver.current_url) or ('notification' in self.driver.current_url)))
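Several of the test records above rely on tests.pickledlogin.pickled_login to bypass the login form by replaying saved cookies. That helper is not included in these records; the sketch below shows the usual Selenium pattern it refers to, with the cookie file name and the exact URLs being assumptions:

import pickle

def pickled_login(driver, cookie_path='netflix_cookies.pkl'):
    """Re-use a previously saved session by loading pickled cookies into the driver."""
    driver.get('https://www.netflix.com')          # a page on the target domain must be open before add_cookie
    with open(cookie_path, 'rb') as handle:
        for cookie in pickle.load(handle):
            cookie.pop('expiry', None)             # some drivers reject stale or mistyped expiry fields
            driver.add_cookie(cookie)
    driver.get('https://www.netflix.com/browse')   # reload so the session cookies take effect

# The cookie file itself would have been captured once, after a manual login, with
# pickle.dump(driver.get_cookies(), handle).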
def dnn(tensor_in, hidden_units, activation=nn.relu, dropout=None):
    'Creates fully connected deep neural network subgraph.\n\n Args:\n tensor_in: tensor or placeholder for input features.\n hidden_units: list of counts of hidden units in each layer.\n activation: activation function between layers. Can be None.\n dropout: if not None, will add a dropout layer with given probability.\n\n Returns:\n A tensor which would be a deep neural network.\n '
    with vs.variable_scope('dnn'):
        for (i, n_units) in enumerate(hidden_units):
            with vs.variable_scope(('layer%d' % i)):
                tensor_in = rnn_cell.linear(tensor_in, n_units, True)
                if (activation is not None):
                    tensor_in = activation(tensor_in)
                if (dropout is not None):
                    tensor_in = dropout_ops.dropout(tensor_in, prob=(1.0 - dropout))
        return tensor_in
-8,516,419,455,471,346,000
Creates fully connected deep neural network subgraph. Args: tensor_in: tensor or placeholder for input features. hidden_units: list of counts of hidden units in each layer. activation: activation function between layers. Can be None. dropout: if not None, will add a dropout layer with given probability. Returns: A tensor which would be a deep neural network.
tensorflow/contrib/learn/python/learn/ops/dnn_ops.py
dnn
InfoPrice/tensorflow
python
def dnn(tensor_in, hidden_units, activation=nn.relu, dropout=None):
    'Creates fully connected deep neural network subgraph.\n\n Args:\n tensor_in: tensor or placeholder for input features.\n hidden_units: list of counts of hidden units in each layer.\n activation: activation function between layers. Can be None.\n dropout: if not None, will add a dropout layer with given probability.\n\n Returns:\n A tensor which would be a deep neural network.\n '
    with vs.variable_scope('dnn'):
        for (i, n_units) in enumerate(hidden_units):
            with vs.variable_scope(('layer%d' % i)):
                tensor_in = rnn_cell.linear(tensor_in, n_units, True)
                if (activation is not None):
                    tensor_in = activation(tensor_in)
                if (dropout is not None):
                    tensor_in = dropout_ops.dropout(tensor_in, prob=(1.0 - dropout))
        return tensor_in
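The dnn record above builds its layers with graph-mode TF contrib ops, so a caller would hand it a placeholder (or any 2-D tensor) and receive the output of the last hidden layer. A minimal sketch of such a call, assuming the TF 0.x-era API and the import path implied by the record's file path:

import tensorflow as tf
from tensorflow.contrib.learn.python.learn.ops import dnn_ops  # module path assumed from the record above

# Batch of 10-dimensional input features (graph-mode placeholder).
features = tf.placeholder(tf.float32, shape=[None, 10])

# Two fully connected hidden layers (64 then 32 ReLU units); the dropout argument is
# forwarded as prob=(1.0 - dropout) to the contrib dropout op, as shown in the record.
hidden = dnn_ops.dnn(features, hidden_units=[64, 32], dropout=0.5)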
def __init__(self, parent=None, orientation='bottom', *args, **kargs):
    "\n The *orientation* argument may be 'bottom', 'top', 'left', or 'right' \n indicating whether the gradient is displayed horizontally (top, bottom)\n or vertically (left, right) and on what side of the gradient the editable \n ticks will appear.\n \n All other arguments are passed to \n :func:`GradientEditorItem.__init__ <pyqtgraph.GradientEditorItem.__init__>`.\n \n Note: For convenience, this class wraps methods from \n :class:`GradientEditorItem <pyqtgraph.GradientEditorItem>`.\n "
    GraphicsView.__init__(self, parent, useOpenGL=False, background=None)
    self.maxDim = 31
    kargs['tickPen'] = 'k'
    self.item = GradientEditorItem(*args, **kargs)
    self.item.sigGradientChanged.connect(self.sigGradientChanged)
    self.item.sigGradientChangeFinished.connect(self.sigGradientChangeFinished)
    self.setCentralItem(self.item)
    self.setOrientation(orientation)
    self.setCacheMode(self.CacheNone)
    self.setRenderHints((QtGui.QPainter.Antialiasing | QtGui.QPainter.TextAntialiasing))
    self.setFrameStyle((QtGui.QFrame.NoFrame | QtGui.QFrame.Plain))
3,786,569,015,888,706,600
The *orientation* argument may be 'bottom', 'top', 'left', or 'right' indicating whether the gradient is displayed horizontally (top, bottom) or vertically (left, right) and on what side of the gradient the editable ticks will appear. All other arguments are passed to :func:`GradientEditorItem.__init__ <pyqtgraph.GradientEditorItem.__init__>`. Note: For convenience, this class wraps methods from :class:`GradientEditorItem <pyqtgraph.GradientEditorItem>`.
scripts/pyqtgraph-develop/pyqtgraph/widgets/GradientWidget.py
__init__
kuldeepaman/tf-pose
python
def __init__(self, parent=None, orientation='bottom', *args, **kargs):
    "\n The *orientation* argument may be 'bottom', 'top', 'left', or 'right' \n indicating whether the gradient is displayed horizontally (top, bottom)\n or vertically (left, right) and on what side of the gradient the editable \n ticks will appear.\n \n All other arguments are passed to \n :func:`GradientEditorItem.__init__ <pyqtgraph.GradientEditorItem.__init__>`.\n \n Note: For convenience, this class wraps methods from \n :class:`GradientEditorItem <pyqtgraph.GradientEditorItem>`.\n "
    GraphicsView.__init__(self, parent, useOpenGL=False, background=None)
    self.maxDim = 31
    kargs['tickPen'] = 'k'
    self.item = GradientEditorItem(*args, **kargs)
    self.item.sigGradientChanged.connect(self.sigGradientChanged)
    self.item.sigGradientChangeFinished.connect(self.sigGradientChangeFinished)
    self.setCentralItem(self.item)
    self.setOrientation(orientation)
    self.setCacheMode(self.CacheNone)
    self.setRenderHints((QtGui.QPainter.Antialiasing | QtGui.QPainter.TextAntialiasing))
    self.setFrameStyle((QtGui.QFrame.NoFrame | QtGui.QFrame.Plain))
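The GradientWidget record wraps a GradientEditorItem inside a thin GraphicsView and, per its own docstring, forwards the editor item's methods. A small usage sketch, assuming the public pyqtgraph GradientWidget and that the preset name and forwarded methods ('flame', getLookupTable) exist in the installed pyqtgraph version:

import pyqtgraph as pg

app = pg.mkQApp()

widget = pg.GradientWidget(orientation='top')   # ticks drawn above a horizontal gradient
widget.loadPreset('flame')                      # forwarded to the wrapped GradientEditorItem

def on_gradient_changed(gradient):
    # RGBA lookup table sampled from the current tick positions and colors.
    lut = gradient.getLookupTable(256)
    print(lut.shape)

widget.sigGradientChanged.connect(on_gradient_changed)
widget.show()
# app.exec_()  # uncomment to start the Qt event loop when running this as a standalone script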