Dataset columns:

| Column | Type | Range of values |
| --- | --- | --- |
| body | string | lengths 26 to 98.2k |
| body_hash | int64 | -9,222,864,604,528,158,000 to 9,221,803,474B |
| docstring | string | lengths 1 to 16.8k |
| path | string | lengths 5 to 230 |
| name | string | lengths 1 to 96 |
| repository_name | string | lengths 7 to 89 |
| lang | string | 1 distinct value (python) |
| body_without_docstring | string | lengths 20 to 98.2k |
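Each row below pairs a Python function body with its extracted docstring, a hash, its repository path and name, and a copy of the body with the docstring removed. As a minimal sketch of how the `docstring` and `body_without_docstring` columns could be derived from `body` (an assumption for illustration only, not necessarily how this dataset was actually built), the standard-library `ast` module is sufficient:

```python
import ast

def split_docstring(body: str) -> tuple[str, str]:
    """Return (docstring, body_without_docstring) for a single function definition.

    Hypothetical helper for illustration; whitespace handling and edge cases
    may differ from whatever pipeline produced this dataset.
    """
    tree = ast.parse(body)
    func = tree.body[0]  # the (possibly decorated) top-level function
    doc = ast.get_docstring(func) or ""
    first = func.body[0] if func.body else None
    # Drop the leading string-literal statement (the docstring), if present.
    if (isinstance(first, ast.Expr)
            and isinstance(first.value, ast.Constant)
            and isinstance(first.value.value, str)):
        func.body = func.body[1:] or [ast.Pass()]
    return doc, ast.unparse(tree)  # ast.unparse requires Python 3.9+
```

Note that some rows below keep an empty docstring placeholder in `body_without_docstring` rather than removing the statement entirely, so any reconstruction along these lines is approximate.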
def get_all(self): '\n Retrieve all Products\n\n :rtype: iter[datacube.model.DatasetType]\n ' return (self._make(record) for record in self._db.get_all_dataset_types())
-7,092,304,018,140,173,000
Retrieve all Products :rtype: iter[datacube.model.DatasetType]
datacube/index/_datasets.py
get_all
cronosnull/agdc-v2
python
def get_all(self): '\n Retrieve all Products\n\n :rtype: iter[datacube.model.DatasetType]\n ' return (self._make(record) for record in self._db.get_all_dataset_types())
def _make(self, query_row): '\n :rtype datacube.model.DatasetType\n ' return DatasetType(definition=query_row['definition'], metadata_type=self.metadata_type_resource.get(query_row['metadata_type_ref']), id_=query_row['id'])
-5,606,596,896,980,163,000
:rtype datacube.model.DatasetType
datacube/index/_datasets.py
_make
cronosnull/agdc-v2
python
def _make(self, query_row): '\n \n ' return DatasetType(definition=query_row['definition'], metadata_type=self.metadata_type_resource.get(query_row['metadata_type_ref']), id_=query_row['id'])
def __init__(self, db, dataset_type_resource): '\n :type db: datacube.index.postgres._api.PostgresDb\n :type dataset_type_resource: datacube.index._datasets.DatasetTypeResource\n ' self._db = db self.types = dataset_type_resource
5,608,404,397,755,472,000
:type db: datacube.index.postgres._api.PostgresDb :type dataset_type_resource: datacube.index._datasets.DatasetTypeResource
datacube/index/_datasets.py
__init__
cronosnull/agdc-v2
python
def __init__(self, db, dataset_type_resource): '\n :type db: datacube.index.postgres._api.PostgresDb\n :type dataset_type_resource: datacube.index._datasets.DatasetTypeResource\n ' self._db = db self.types = dataset_type_resource
def get(self, id_, include_sources=False): '\n Get dataset by id\n\n :param uuid id_: id of the dataset to retrieve\n :param bool include_sources: get the full provenance graph?\n :rtype: datacube.model.Dataset\n ' if (not include_sources): return self._make(self._db.get_dataset(id_), full_info=True) datasets = {result['id']: (self._make(result, full_info=True), result) for result in self._db.get_dataset_sources(id_)} for (dataset, result) in datasets.values(): dataset.metadata_doc['lineage']['source_datasets'] = {classifier: datasets[str(source)][0].metadata_doc for (source, classifier) in zip(result['sources'], result['classes']) if source} dataset.sources = {classifier: datasets[str(source)][0] for (source, classifier) in zip(result['sources'], result['classes']) if source} return datasets[id_][0]
6,263,144,244,050,827,000
Get dataset by id :param uuid id_: id of the dataset to retrieve :param bool include_sources: get the full provenance graph? :rtype: datacube.model.Dataset
datacube/index/_datasets.py
get
cronosnull/agdc-v2
python
def get(self, id_, include_sources=False): '\n Get dataset by id\n\n :param uuid id_: id of the dataset to retrieve\n :param bool include_sources: get the full provenance graph?\n :rtype: datacube.model.Dataset\n ' if (not include_sources): return self._make(self._db.get_dataset(id_), full_info=True) datasets = {result['id']: (self._make(result, full_info=True), result) for result in self._db.get_dataset_sources(id_)} for (dataset, result) in datasets.values(): dataset.metadata_doc['lineage']['source_datasets'] = {classifier: datasets[str(source)][0].metadata_doc for (source, classifier) in zip(result['sources'], result['classes']) if source} dataset.sources = {classifier: datasets[str(source)][0] for (source, classifier) in zip(result['sources'], result['classes']) if source} return datasets[id_][0]
def get_derived(self, id_): '\n Get derived datasets\n\n :param uuid id_: dataset id\n :rtype: list[datacube.model.Dataset]\n ' return [self._make(result) for result in self._db.get_derived_datasets(id_)]
-7,787,538,984,594,398,000
Get derived datasets :param uuid id_: dataset id :rtype: list[datacube.model.Dataset]
datacube/index/_datasets.py
get_derived
cronosnull/agdc-v2
python
def get_derived(self, id_): '\n Get derived datasets\n\n :param uuid id_: dataset id\n :rtype: list[datacube.model.Dataset]\n ' return [self._make(result) for result in self._db.get_derived_datasets(id_)]
def has(self, dataset): '\n Have we already indexed this dataset?\n\n :param datacube.model.Dataset dataset: dataset to check\n :rtype: bool\n ' return self._db.contains_dataset(dataset.id)
7,644,583,408,200,586,000
Have we already indexed this dataset? :param datacube.model.Dataset dataset: dataset to check :rtype: bool
datacube/index/_datasets.py
has
cronosnull/agdc-v2
python
def has(self, dataset): '\n Have we already indexed this dataset?\n\n :param datacube.model.Dataset dataset: dataset to check\n :rtype: bool\n ' return self._db.contains_dataset(dataset.id)
def add(self, dataset, skip_sources=False): "\n Ensure a dataset is in the index. Add it if not present.\n\n :param datacube.model.Dataset dataset: dataset to add\n :param bool skip_sources: don't attempt to index source (use when sources are already indexed)\n :rtype: datacube.model.Dataset\n " if (not skip_sources): for source in dataset.sources.values(): self.add(source) was_inserted = False sources_tmp = dataset.type.dataset_reader(dataset.metadata_doc).sources dataset.type.dataset_reader(dataset.metadata_doc).sources = {} try: _LOG.info('Indexing %s', dataset.id) with self._db.begin() as transaction: try: was_inserted = transaction.insert_dataset(dataset.metadata_doc, dataset.id, dataset.type.id) for (classifier, source_dataset) in dataset.sources.items(): transaction.insert_dataset_source(classifier, dataset.id, source_dataset.id) if dataset.local_uri: transaction.ensure_dataset_location(dataset.id, dataset.local_uri) except DuplicateRecordError as e: _LOG.warning(str(e)) if (not was_inserted): existing = self.get(dataset.id) if existing: check_doc_unchanged(existing.metadata_doc, jsonify_document(dataset.metadata_doc), 'Dataset {}'.format(dataset.id)) if dataset.local_uri: try: self._db.ensure_dataset_location(dataset.id, dataset.local_uri) except DuplicateRecordError as e: _LOG.warning(str(e)) finally: dataset.type.dataset_reader(dataset.metadata_doc).sources = sources_tmp return dataset
5,261,391,490,470,212,000
Ensure a dataset is in the index. Add it if not present. :param datacube.model.Dataset dataset: dataset to add :param bool skip_sources: don't attempt to index source (use when sources are already indexed) :rtype: datacube.model.Dataset
datacube/index/_datasets.py
add
cronosnull/agdc-v2
python
def add(self, dataset, skip_sources=False): "\n Ensure a dataset is in the index. Add it if not present.\n\n :param datacube.model.Dataset dataset: dataset to add\n :param bool skip_sources: don't attempt to index source (use when sources are already indexed)\n :rtype: datacube.model.Dataset\n " if (not skip_sources): for source in dataset.sources.values(): self.add(source) was_inserted = False sources_tmp = dataset.type.dataset_reader(dataset.metadata_doc).sources dataset.type.dataset_reader(dataset.metadata_doc).sources = {} try: _LOG.info('Indexing %s', dataset.id) with self._db.begin() as transaction: try: was_inserted = transaction.insert_dataset(dataset.metadata_doc, dataset.id, dataset.type.id) for (classifier, source_dataset) in dataset.sources.items(): transaction.insert_dataset_source(classifier, dataset.id, source_dataset.id) if dataset.local_uri: transaction.ensure_dataset_location(dataset.id, dataset.local_uri) except DuplicateRecordError as e: _LOG.warning(str(e)) if (not was_inserted): existing = self.get(dataset.id) if existing: check_doc_unchanged(existing.metadata_doc, jsonify_document(dataset.metadata_doc), 'Dataset {}'.format(dataset.id)) if dataset.local_uri: try: self._db.ensure_dataset_location(dataset.id, dataset.local_uri) except DuplicateRecordError as e: _LOG.warning(str(e)) finally: dataset.type.dataset_reader(dataset.metadata_doc).sources = sources_tmp return dataset
def archive(self, ids): '\n Mark datasets as archived\n\n :param list[uuid] ids: list of dataset ids to archive\n ' with self._db.begin() as transaction: for id_ in ids: transaction.archive_dataset(id_)
3,311,493,167,582,657,500
Mark datasets as archived :param list[uuid] ids: list of dataset ids to archive
datacube/index/_datasets.py
archive
cronosnull/agdc-v2
python
def archive(self, ids): '\n Mark datasets as archived\n\n :param list[uuid] ids: list of dataset ids to archive\n ' with self._db.begin() as transaction: for id_ in ids: transaction.archive_dataset(id_)
def restore(self, ids): '\n Mark datasets as not archived\n\n :param list[uuid] ids: list of dataset ids to restore\n ' with self._db.begin() as transaction: for id_ in ids: transaction.restore_dataset(id_)
7,723,542,168,741,103,000
Mark datasets as not archived :param list[uuid] ids: list of dataset ids to restore
datacube/index/_datasets.py
restore
cronosnull/agdc-v2
python
def restore(self, ids): '\n Mark datasets as not archived\n\n :param list[uuid] ids: list of dataset ids to restore\n ' with self._db.begin() as transaction: for id_ in ids: transaction.restore_dataset(id_)
def get_field_names(self, type_name=None): '\n :param str type_name:\n :rtype: __generator[str]\n ' if (type_name is None): types = self.types.get_all() else: types = [self.types.get_by_name(type_name)] for type_ in types: for name in type_.metadata_type.dataset_fields: (yield name)
-9,014,035,561,054,024,000
:param str type_name: :rtype: __generator[str]
datacube/index/_datasets.py
get_field_names
cronosnull/agdc-v2
python
def get_field_names(self, type_name=None): '\n :param str type_name:\n :rtype: __generator[str]\n ' if (type_name is None): types = self.types.get_all() else: types = [self.types.get_by_name(type_name)] for type_ in types: for name in type_.metadata_type.dataset_fields: (yield name)
def get_locations(self, dataset): '\n :param datacube.model.Dataset dataset: dataset\n :rtype: list[str]\n ' return self._db.get_locations(dataset.id)
-8,915,735,042,510,128,000
:param datacube.model.Dataset dataset: dataset :rtype: list[str]
datacube/index/_datasets.py
get_locations
cronosnull/agdc-v2
python
def get_locations(self, dataset): '\n :param datacube.model.Dataset dataset: dataset\n :rtype: list[str]\n ' return self._db.get_locations(dataset.id)
def _make(self, dataset_res, full_info=False): '\n :rtype datacube.model.Dataset\n\n :param bool full_info: Include all available fields\n ' return Dataset(self.types.get(dataset_res.dataset_type_ref), dataset_res.metadata, dataset_res.local_uri, indexed_by=(dataset_res.added_by if full_info else None), indexed_time=(dataset_res.added if full_info else None))
-5,285,444,197,718,200,000
:rtype datacube.model.Dataset :param bool full_info: Include all available fields
datacube/index/_datasets.py
_make
cronosnull/agdc-v2
python
def _make(self, dataset_res, full_info=False): '\n :rtype datacube.model.Dataset\n\n :param bool full_info: Include all available fields\n ' return Dataset(self.types.get(dataset_res.dataset_type_ref), dataset_res.metadata, dataset_res.local_uri, indexed_by=(dataset_res.added_by if full_info else None), indexed_time=(dataset_res.added if full_info else None))
def _make_many(self, query_result): '\n :rtype list[datacube.model.Dataset]\n ' return (self._make(dataset) for dataset in query_result)
-1,847,021,940,425,939,000
:rtype list[datacube.model.Dataset]
datacube/index/_datasets.py
_make_many
cronosnull/agdc-v2
python
def _make_many(self, query_result): '\n \n ' return (self._make(dataset) for dataset in query_result)
def search_by_metadata(self, metadata): '\n Perform a search using arbitrary metadata, returning results as Dataset objects.\n\n Caution – slow! This will usually not use indexes.\n\n :param dict metadata:\n :rtype: list[datacube.model.Dataset]\n ' return self._make_many(self._db.search_datasets_by_metadata(metadata))
-5,142,680,656,802,113,000
Perform a search using arbitrary metadata, returning results as Dataset objects. Caution – slow! This will usually not use indexes. :param dict metadata: :rtype: list[datacube.model.Dataset]
datacube/index/_datasets.py
search_by_metadata
cronosnull/agdc-v2
python
def search_by_metadata(self, metadata): '\n Perform a search using arbitrary metadata, returning results as Dataset objects.\n\n Caution – slow! This will usually not use indexes.\n\n :param dict metadata:\n :rtype: list[datacube.model.Dataset]\n ' return self._make_many(self._db.search_datasets_by_metadata(metadata))
def search(self, **query): '\n Perform a search, returning results as Dataset objects.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :rtype: __generator[datacube.model.Dataset]\n ' for (dataset_type, datasets) in self._do_search_by_product(query): for dataset in self._make_many(datasets): (yield dataset)
-5,254,226,837,874,405,000
Perform a search, returning results as Dataset objects. :param dict[str,str|float|datacube.model.Range] query: :rtype: __generator[datacube.model.Dataset]
datacube/index/_datasets.py
search
cronosnull/agdc-v2
python
def search(self, **query): '\n Perform a search, returning results as Dataset objects.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :rtype: __generator[datacube.model.Dataset]\n ' for (dataset_type, datasets) in self._do_search_by_product(query): for dataset in self._make_many(datasets): (yield dataset)
def search_by_product(self, **query): '\n Perform a search, returning datasets grouped by product type.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :rtype: __generator[(datacube.model.DatasetType, __generator[datacube.model.Dataset])]]\n ' for (dataset_type, datasets) in self._do_search_by_product(query): (yield (dataset_type, self._make_many(datasets)))
-4,136,558,941,424,093,000
Perform a search, returning datasets grouped by product type. :param dict[str,str|float|datacube.model.Range] query: :rtype: __generator[(datacube.model.DatasetType, __generator[datacube.model.Dataset])]]
datacube/index/_datasets.py
search_by_product
cronosnull/agdc-v2
python
def search_by_product(self, **query): '\n Perform a search, returning datasets grouped by product type.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :rtype: __generator[(datacube.model.DatasetType, __generator[datacube.model.Dataset])]]\n ' for (dataset_type, datasets) in self._do_search_by_product(query): (yield (dataset_type, self._make_many(datasets)))
def count(self, **query): '\n Perform a search, returning count of results.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :rtype: int\n ' result = 0 for (product_type, count) in self._do_count_by_product(query): result += count return result
2,938,830,149,098,601,500
Perform a search, returning count of results. :param dict[str,str|float|datacube.model.Range] query: :rtype: int
datacube/index/_datasets.py
count
cronosnull/agdc-v2
python
def count(self, **query): '\n Perform a search, returning count of results.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :rtype: int\n ' result = 0 for (product_type, count) in self._do_count_by_product(query): result += count return result
def count_by_product(self, **query): '\n Perform a search, returning a count for each matching product type.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :returns: Sequence of (product, count)\n :rtype: __generator[(datacube.model.DatasetType, int)]]\n ' return self._do_count_by_product(query)
-1,485,780,272,995,107,000
Perform a search, returning a count for each matching product type. :param dict[str,str|float|datacube.model.Range] query: :returns: Sequence of (product, count) :rtype: __generator[(datacube.model.DatasetType, int)]]
datacube/index/_datasets.py
count_by_product
cronosnull/agdc-v2
python
def count_by_product(self, **query): '\n Perform a search, returning a count for each matching product type.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :returns: Sequence of (product, count)\n :rtype: __generator[(datacube.model.DatasetType, int)]]\n ' return self._do_count_by_product(query)
def count_by_product_through_time(self, period, **query): "\n Perform a search, returning counts for each product grouped in time slices\n of the given period.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :param str period: Time range for each slice: '1 month', '1 day' etc.\n :returns: For each matching product type, a list of time ranges and their count.\n :rtype: __generator[(datacube.model.DatasetType, list[(datetime.datetime, datetime.datetime), int)]]\n " return self._do_time_count(period, query)
-1,946,640,937,008,705,300
Perform a search, returning counts for each product grouped in time slices of the given period. :param dict[str,str|float|datacube.model.Range] query: :param str period: Time range for each slice: '1 month', '1 day' etc. :returns: For each matching product type, a list of time ranges and their count. :rtype: __generator[(datacube.model.DatasetType, list[(datetime.datetime, datetime.datetime), int)]]
datacube/index/_datasets.py
count_by_product_through_time
cronosnull/agdc-v2
python
def count_by_product_through_time(self, period, **query): "\n Perform a search, returning counts for each product grouped in time slices\n of the given period.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :param str period: Time range for each slice: '1 month', '1 day' etc.\n :returns: For each matching product type, a list of time ranges and their count.\n :rtype: __generator[(datacube.model.DatasetType, list[(datetime.datetime, datetime.datetime), int)]]\n " return self._do_time_count(period, query)
def count_product_through_time(self, period, **query): "\n Perform a search, returning counts for a single product grouped in time slices\n of the given period.\n\n Will raise an error if the search terms match more than one product.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :param str period: Time range for each slice: '1 month', '1 day' etc.\n :returns: For each matching product type, a list of time ranges and their count.\n :rtype: list[(str, list[(datetime.datetime, datetime.datetime), int)]]\n " return next(self._do_time_count(period, query, ensure_single=True))[1]
681,976,642,990,149,500
Perform a search, returning counts for a single product grouped in time slices of the given period. Will raise an error if the search terms match more than one product. :param dict[str,str|float|datacube.model.Range] query: :param str period: Time range for each slice: '1 month', '1 day' etc. :returns: For each matching product type, a list of time ranges and their count. :rtype: list[(str, list[(datetime.datetime, datetime.datetime), int)]]
datacube/index/_datasets.py
count_product_through_time
cronosnull/agdc-v2
python
def count_product_through_time(self, period, **query): "\n Perform a search, returning counts for a single product grouped in time slices\n of the given period.\n\n Will raise an error if the search terms match more than one product.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :param str period: Time range for each slice: '1 month', '1 day' etc.\n :returns: For each matching product type, a list of time ranges and their count.\n :rtype: list[(str, list[(datetime.datetime, datetime.datetime), int)]]\n " return next(self._do_time_count(period, query, ensure_single=True))[1]
def search_summaries(self, **query): '\n Perform a search, returning just the search fields of each dataset.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :rtype: dict\n ' for (dataset_type, results) in self._do_search_by_product(query, return_fields=True): for columns in results: (yield dict(columns))
-2,494,574,499,400,412,700
Perform a search, returning just the search fields of each dataset. :param dict[str,str|float|datacube.model.Range] query: :rtype: dict
datacube/index/_datasets.py
search_summaries
cronosnull/agdc-v2
python
def search_summaries(self, **query): '\n Perform a search, returning just the search fields of each dataset.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :rtype: dict\n ' for (dataset_type, results) in self._do_search_by_product(query, return_fields=True): for columns in results: (yield dict(columns))
def search_eager(self, **query): '\n Perform a search, returning results as Dataset objects.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :rtype: list[datacube.model.Dataset]\n ' return list(self.search(**query))
2,361,861,580,404,206,000
Perform a search, returning results as Dataset objects. :param dict[str,str|float|datacube.model.Range] query: :rtype: list[datacube.model.Dataset]
datacube/index/_datasets.py
search_eager
cronosnull/agdc-v2
python
def search_eager(self, **query): '\n Perform a search, returning results as Dataset objects.\n\n :param dict[str,str|float|datacube.model.Range] query:\n :rtype: list[datacube.model.Dataset]\n ' return list(self.search(**query))
def ultimate_replace(app, docname, source): 'Replaces variables in docs, including code blocks.\n\n From: https://github.com/sphinx-doc/sphinx/issues/4054#issuecomment-329097229\n ' result = source[0] for key in app.config.ultimate_replacements: result = result.replace(key, app.config.ultimate_replacements[key]) source[0] = result
4,424,882,896,295,911,400
Replaces variables in docs, including code blocks. From: https://github.com/sphinx-doc/sphinx/issues/4054#issuecomment-329097229
docs/source/conf.py
ultimate_replace
Aeolun/sqlfluff
python
def ultimate_replace(app, docname, source): 'Replaces variables in docs, including code blocks.\n\n From: https://github.com/sphinx-doc/sphinx/issues/4054#issuecomment-329097229\n ' result = source[0] for key in app.config.ultimate_replacements: result = result.replace(key, app.config.ultimate_replacements[key]) source[0] = result
def setup(app): 'Configures the documentation app.' app.add_config_value('ultimate_replacements', {}, True) app.connect('source-read', ultimate_replace)
8,226,218,855,073,592,000
Configures the documentation app.
docs/source/conf.py
setup
Aeolun/sqlfluff
python
def setup(app): app.add_config_value('ultimate_replacements', {}, True) app.connect('source-read', ultimate_replace)
@infer_dtype(np.hypot) def hypot(x1, x2, out=None, where=None, **kwargs): '\n Given the "legs" of a right triangle, return its hypotenuse.\n\n Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or\n `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),\n it is broadcast for use with each element of the other argument.\n (See Examples)\n\n Parameters\n ----------\n x1, x2 : array_like\n Leg of the triangle(s).\n out : Tensor, None, or tuple of Tensor and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n where : array_like, optional\n Values of True indicate to calculate the ufunc at that position, values\n of False indicate to leave the value in the output alone.\n **kwargs\n\n Returns\n -------\n z : Tensor\n The hypotenuse of the triangle(s).\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> mt.hypot(3*mt.ones((3, 3)), 4*mt.ones((3, 3))).execute()\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n\n Example showing broadcast of scalar_like argument:\n\n >>> mt.hypot(3*mt.ones((3, 3)), [4]).execute()\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n ' op = TensorHypot(**kwargs) return op(x1, x2, out=out, where=where)
3,499,966,831,407,212,000
Given the "legs" of a right triangle, return its hypotenuse. Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type), it is broadcast for use with each element of the other argument. (See Examples) Parameters ---------- x1, x2 : array_like Leg of the triangle(s). out : Tensor, None, or tuple of Tensor and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. where : array_like, optional Values of True indicate to calculate the ufunc at that position, values of False indicate to leave the value in the output alone. **kwargs Returns ------- z : Tensor The hypotenuse of the triangle(s). Examples -------- >>> import mars.tensor as mt >>> mt.hypot(3*mt.ones((3, 3)), 4*mt.ones((3, 3))).execute() array([[ 5., 5., 5.], [ 5., 5., 5.], [ 5., 5., 5.]]) Example showing broadcast of scalar_like argument: >>> mt.hypot(3*mt.ones((3, 3)), [4]).execute() array([[ 5., 5., 5.], [ 5., 5., 5.], [ 5., 5., 5.]])
mars/tensor/arithmetic/hypot.py
hypot
Alfa-Shashank/mars
python
@infer_dtype(np.hypot) def hypot(x1, x2, out=None, where=None, **kwargs): '\n Given the "legs" of a right triangle, return its hypotenuse.\n\n Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or\n `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),\n it is broadcast for use with each element of the other argument.\n (See Examples)\n\n Parameters\n ----------\n x1, x2 : array_like\n Leg of the triangle(s).\n out : Tensor, None, or tuple of Tensor and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n where : array_like, optional\n Values of True indicate to calculate the ufunc at that position, values\n of False indicate to leave the value in the output alone.\n **kwargs\n\n Returns\n -------\n z : Tensor\n The hypotenuse of the triangle(s).\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> mt.hypot(3*mt.ones((3, 3)), 4*mt.ones((3, 3))).execute()\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n\n Example showing broadcast of scalar_like argument:\n\n >>> mt.hypot(3*mt.ones((3, 3)), [4]).execute()\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n ' op = TensorHypot(**kwargs) return op(x1, x2, out=out, where=where)
def sample_analyze_entities(gcs_content_uri): '\n Analyzing Entities in text file stored in Cloud Storage\n\n Args:\n gcs_content_uri Google Cloud Storage URI where the file content is located.\n e.g. gs://[Your Bucket]/[Path to File]\n ' client = language_v1.LanguageServiceClient() type_ = enums.Document.Type.PLAIN_TEXT language = 'en' document = {'gcs_content_uri': gcs_content_uri, 'type': type_, 'language': language} encoding_type = enums.EncodingType.UTF8 response = client.analyze_entities(document, encoding_type=encoding_type) for entity in response.entities: print(u'Representative name for the entity: {}'.format(entity.name)) print(u'Entity type: {}'.format(enums.Entity.Type(entity.type).name)) print(u'Salience score: {}'.format(entity.salience)) for (metadata_name, metadata_value) in entity.metadata.items(): print(u'{}: {}'.format(metadata_name, metadata_value)) for mention in entity.mentions: print(u'Mention text: {}'.format(mention.text.content)) print(u'Mention type: {}'.format(enums.EntityMention.Type(mention.type).name)) print(u'Language of the text: {}'.format(response.language))
-7,760,454,302,406,207,000
Analyzing Entities in text file stored in Cloud Storage Args: gcs_content_uri Google Cloud Storage URI where the file content is located. e.g. gs://[Your Bucket]/[Path to File]
samples/v1/language_entities_gcs.py
sample_analyze_entities
MShaffar19/python-language
python
def sample_analyze_entities(gcs_content_uri): '\n Analyzing Entities in text file stored in Cloud Storage\n\n Args:\n gcs_content_uri Google Cloud Storage URI where the file content is located.\n e.g. gs://[Your Bucket]/[Path to File]\n ' client = language_v1.LanguageServiceClient() type_ = enums.Document.Type.PLAIN_TEXT language = 'en' document = {'gcs_content_uri': gcs_content_uri, 'type': type_, 'language': language} encoding_type = enums.EncodingType.UTF8 response = client.analyze_entities(document, encoding_type=encoding_type) for entity in response.entities: print(u'Representative name for the entity: {}'.format(entity.name)) print(u'Entity type: {}'.format(enums.Entity.Type(entity.type).name)) print(u'Salience score: {}'.format(entity.salience)) for (metadata_name, metadata_value) in entity.metadata.items(): print(u'{}: {}'.format(metadata_name, metadata_value)) for mention in entity.mentions: print(u'Mention text: {}'.format(mention.text.content)) print(u'Mention type: {}'.format(enums.EntityMention.Type(mention.type).name)) print(u'Language of the text: {}'.format(response.language))
def np2th(weights, conv=False): 'Possibly convert HWIO to OIHW.' if conv: weights = weights.transpose([3, 2, 0, 1]) return torch.from_numpy(weights)
2,480,537,502,387,815,000
Possibly convert HWIO to OIHW.
ViT-V-Net/models.py
np2th
junyuchen245/ViT-V-Net_for_3D_Image_Registration
python
def np2th(weights, conv=False): if conv: weights = weights.transpose([3, 2, 0, 1]) return torch.from_numpy(weights)
@cached_property def additional_properties_type(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n ' return (bool, date, datetime, dict, float, int, list, str, none_type)
7,810,842,306,960,415,000
This must be a method because a model may have properties that are of type self, this must run after the class is loaded
sdks/python/client/openapi_client/model/fc_volume_source.py
additional_properties_type
2kindsofcs/argo-workflows
python
@cached_property def additional_properties_type(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n ' return (bool, date, datetime, dict, float, int, list, str, none_type)
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' return {'fs_type': (str,), 'lun': (int,), 'read_only': (bool,), 'target_wwns': ([str],), 'wwids': ([str],)}
-7,114,574,193,233,615,000
This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type.
sdks/python/client/openapi_client/model/fc_volume_source.py
openapi_types
2kindsofcs/argo-workflows
python
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' return {'fs_type': (str,), 'lun': (int,), 'read_only': (bool,), 'target_wwns': ([str],), 'wwids': ([str],)}
@classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): 'FCVolumeSource - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.. [optional] # noqa: E501\n lun (int): Optional: FC target lun number. [optional] # noqa: E501\n read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501\n target_wwns ([str]): Optional: FC target worldwide names (WWNs). [optional] # noqa: E501\n wwids ([str]): Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) return self
-4,200,118,095,630,329,000
FCVolumeSource - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.. [optional] # noqa: E501 lun (int): Optional: FC target lun number. [optional] # noqa: E501 read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 target_wwns ([str]): Optional: FC target worldwide names (WWNs). [optional] # noqa: E501 wwids ([str]): Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501
sdks/python/client/openapi_client/model/fc_volume_source.py
_from_openapi_data
2kindsofcs/argo-workflows
python
@classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): 'FCVolumeSource - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.. [optional] # noqa: E501\n lun (int): Optional: FC target lun number. [optional] # noqa: E501\n read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501\n target_wwns ([str]): Optional: FC target worldwide names (WWNs). [optional] # noqa: E501\n wwids ([str]): Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) return self
@convert_js_args_to_python_args def __init__(self, *args, **kwargs): 'FCVolumeSource - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.. [optional] # noqa: E501\n lun (int): Optional: FC target lun number. [optional] # noqa: E501\n read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501\n target_wwns ([str]): Optional: FC target worldwide names (WWNs). [optional] # noqa: E501\n wwids ([str]): Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) if (var_name in self.read_only_vars): raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
6,153,289,572,221,368,000
FCVolumeSource - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.. [optional] # noqa: E501 lun (int): Optional: FC target lun number. [optional] # noqa: E501 read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501 target_wwns ([str]): Optional: FC target worldwide names (WWNs). [optional] # noqa: E501 wwids ([str]): Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501
sdks/python/client/openapi_client/model/fc_volume_source.py
__init__
2kindsofcs/argo-workflows
python
@convert_js_args_to_python_args def __init__(self, *args, **kwargs): 'FCVolumeSource - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.. [optional] # noqa: E501\n lun (int): Optional: FC target lun number. [optional] # noqa: E501\n read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501\n target_wwns ([str]): Optional: FC target worldwide names (WWNs). [optional] # noqa: E501\n wwids ([str]): Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) if (var_name in self.read_only_vars): raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
def IsLabBlocked(lab_name): 'Check if the lab is blocked.\n\n Args:\n lab_name: lab name\n Returns:\n true if the lab is blocked, otherwise false.\n ' device_blocklists = datastore_entities.DeviceBlocklist.query().filter((datastore_entities.DeviceBlocklist.lab_name == lab_name)).fetch(1) return bool(device_blocklists)
-7,152,990,764,287,082,000
Check if the lab is blocked. Args: lab_name: lab name Returns: true if the lab is blocked, otherwise false.
tradefed_cluster/device_blocker.py
IsLabBlocked
maksonlee/tradefed_cluster
python
def IsLabBlocked(lab_name): 'Check if the lab is blocked.\n\n Args:\n lab_name: lab name\n Returns:\n true if the lab is blocked, otherwise false.\n ' device_blocklists = datastore_entities.DeviceBlocklist.query().filter((datastore_entities.DeviceBlocklist.lab_name == lab_name)).fetch(1) return bool(device_blocklists)
def __init__(self, oracle: QuantumCircuit, state_preparation: Optional[QuantumCircuit]=None, zero_reflection: Optional[Union[(QuantumCircuit, Operator)]]=None, reflection_qubits: Optional[List[int]]=None, insert_barriers: bool=False, mcx_mode: str='noancilla', name: str='Q') -> None: "\n Args:\n oracle: The phase oracle implementing a reflection about the bad state. Note that this\n is not a bitflip oracle, see the docstring for more information.\n state_preparation: The operator preparing the good and bad state.\n For Grover's algorithm, this is a n-qubit Hadamard gate and for amplitude\n amplification or estimation the operator :math:`\\mathcal{A}`.\n zero_reflection: The reflection about the zero state, :math:`\\mathcal{S}_0`.\n reflection_qubits: Qubits on which the zero reflection acts on.\n insert_barriers: Whether barriers should be inserted between the reflections and A.\n mcx_mode: The mode to use for building the default zero reflection.\n name: The name of the circuit.\n " super().__init__(name=name) self._oracle = oracle self._zero_reflection = zero_reflection self._reflection_qubits = reflection_qubits self._state_preparation = state_preparation self._insert_barriers = insert_barriers self._mcx_mode = mcx_mode self._build()
-7,204,777,035,918,854,000
Args: oracle: The phase oracle implementing a reflection about the bad state. Note that this is not a bitflip oracle, see the docstring for more information. state_preparation: The operator preparing the good and bad state. For Grover's algorithm, this is a n-qubit Hadamard gate and for amplitude amplification or estimation the operator :math:`\mathcal{A}`. zero_reflection: The reflection about the zero state, :math:`\mathcal{S}_0`. reflection_qubits: Qubits on which the zero reflection acts on. insert_barriers: Whether barriers should be inserted between the reflections and A. mcx_mode: The mode to use for building the default zero reflection. name: The name of the circuit.
qiskit/circuit/library/grover_operator.py
__init__
SpinQTech/SpinQKit
python
def __init__(self, oracle: QuantumCircuit, state_preparation: Optional[QuantumCircuit]=None, zero_reflection: Optional[Union[(QuantumCircuit, Operator)]]=None, reflection_qubits: Optional[List[int]]=None, insert_barriers: bool=False, mcx_mode: str='noancilla', name: str='Q') -> None: "\n Args:\n oracle: The phase oracle implementing a reflection about the bad state. Note that this\n is not a bitflip oracle, see the docstring for more information.\n state_preparation: The operator preparing the good and bad state.\n For Grover's algorithm, this is a n-qubit Hadamard gate and for amplitude\n amplification or estimation the operator :math:`\\mathcal{A}`.\n zero_reflection: The reflection about the zero state, :math:`\\mathcal{S}_0`.\n reflection_qubits: Qubits on which the zero reflection acts on.\n insert_barriers: Whether barriers should be inserted between the reflections and A.\n mcx_mode: The mode to use for building the default zero reflection.\n name: The name of the circuit.\n " super().__init__(name=name) self._oracle = oracle self._zero_reflection = zero_reflection self._reflection_qubits = reflection_qubits self._state_preparation = state_preparation self._insert_barriers = insert_barriers self._mcx_mode = mcx_mode self._build()
@property def reflection_qubits(self): 'Reflection qubits, on which S0 is applied (if S0 is not user-specified).' if (self._reflection_qubits is not None): return self._reflection_qubits num_state_qubits = (self.oracle.num_qubits - self.oracle.num_ancillas) return list(range(num_state_qubits))
7,827,234,141,069,266,000
Reflection qubits, on which S0 is applied (if S0 is not user-specified).
qiskit/circuit/library/grover_operator.py
reflection_qubits
SpinQTech/SpinQKit
python
@property def reflection_qubits(self): if (self._reflection_qubits is not None): return self._reflection_qubits num_state_qubits = (self.oracle.num_qubits - self.oracle.num_ancillas) return list(range(num_state_qubits))
@property def zero_reflection(self) -> QuantumCircuit: 'The subcircuit implementing the reflection about 0.' if (self._zero_reflection is not None): return self._zero_reflection num_state_qubits = (self.oracle.num_qubits - self.oracle.num_ancillas) return _zero_reflection(num_state_qubits, self.reflection_qubits, self._mcx_mode)
5,482,765,425,153,560,000
The subcircuit implementing the reflection about 0.
qiskit/circuit/library/grover_operator.py
zero_reflection
SpinQTech/SpinQKit
python
@property def zero_reflection(self) -> QuantumCircuit: if (self._zero_reflection is not None): return self._zero_reflection num_state_qubits = (self.oracle.num_qubits - self.oracle.num_ancillas) return _zero_reflection(num_state_qubits, self.reflection_qubits, self._mcx_mode)
@property def state_preparation(self) -> QuantumCircuit: 'The subcircuit implementing the A operator or Hadamards.' if (self._state_preparation is not None): return self._state_preparation num_state_qubits = (self.oracle.num_qubits - self.oracle.num_ancillas) hadamards = QuantumCircuit(num_state_qubits, name='H') hadamards.h(self.reflection_qubits) return hadamards
2,549,075,357,350,532,000
The subcircuit implementing the A operator or Hadamards.
qiskit/circuit/library/grover_operator.py
state_preparation
SpinQTech/SpinQKit
python
@property def state_preparation(self) -> QuantumCircuit: if (self._state_preparation is not None): return self._state_preparation num_state_qubits = (self.oracle.num_qubits - self.oracle.num_ancillas) hadamards = QuantumCircuit(num_state_qubits, name='H') hadamards.h(self.reflection_qubits) return hadamards
@property def oracle(self): 'The oracle implementing a reflection about the bad state.' return self._oracle
-1,036,016,382,031,906,400
The oracle implementing a reflection about the bad state.
qiskit/circuit/library/grover_operator.py
oracle
SpinQTech/SpinQKit
python
@property def oracle(self): return self._oracle
def __init__(self, path, name): '\n Initialize.\n :param path: path to the storage file;\n empty means the current directory.\n :param name: file name, json file; may include a path.\n ' if path: os.makedirs(path, exist_ok=True) self.file = os.path.normpath(os.path.join(path, name)) try: with open(self.file) as data_file: self.data = json.load(data_file) except FileNotFoundError: self.data = dict() self.dump()
-9,043,549,866,801,133,000
Initialize. :param path: path to the storage file; empty means the current directory. :param name: file name, json file; may include a path.
netdata/workers/json_storage.py
__init__
mincode/netdata
python
def __init__(self, path, name): '\n Initialize.\n :param path: path to the storage file;\n empty means the current directory.\n :param name: file name, json file; may include a path.\n ' if path: os.makedirs(path, exist_ok=True) self.file = os.path.normpath(os.path.join(path, name)) try: with open(self.file) as data_file: self.data = json.load(data_file) except FileNotFoundError: self.data = dict() self.dump()
def dump(self): '\n Dump data into storage file.\n ' with open(self.file, 'w') as out_file: json.dump(self.data, out_file, indent=self.indent)
-7,103,314,947,930,550,000
Dump data into storage file.
netdata/workers/json_storage.py
dump
mincode/netdata
python
def dump(self): '\n \n ' with open(self.file, 'w') as out_file: json.dump(self.data, out_file, indent=self.indent)
def get(self, item): '\n Get stored item.\n :param item: name, string, of item to get.\n :return: stored item; raises a KeyError if item does not exist.\n ' return self.data[item]
-6,757,231,109,967,430,000
Get stored item. :param item: name, string, of item to get. :return: stored item; raises a KeyError if item does not exist.
netdata/workers/json_storage.py
get
mincode/netdata
python
def get(self, item): '\n Get stored item.\n :param item: name, string, of item to get.\n :return: stored item; raises a KeyError if item does not exist.\n ' return self.data[item]
def set(self, item, value): "\n Set item's value; causes the data to be dumped into the storage file.\n :param item: name, string of item to set.\n :param value: value to set.\n " self.data[item] = value self.dump()
4,817,724,891,476,354,000
Set item's value; causes the data to be dumped into the storage file. :param item: name, string of item to set. :param value: value to set.
netdata/workers/json_storage.py
set
mincode/netdata
python
def set(self, item, value): "\n Set item's value; causes the data to be dumped into the storage file.\n :param item: name, string of item to set.\n :param value: value to set.\n " self.data[item] = value self.dump()
def __getattr__(self, item): '\n Get stored item with .-notation if not defined as a class member.\n :param item: name, string of item compatible\n with Python class member name.\n :return value of item.\n ' if (item in self.data): return self.data[item] else: raise AttributeError
8,440,963,905,404,084,000
Get stored item with .-notation if not defined as a class member. :param item: name, string of item compatible with Python class member name. :return value of item.
netdata/workers/json_storage.py
__getattr__
mincode/netdata
python
def __getattr__(self, item): '\n Get stored item with .-notation if not defined as a class member.\n :param item: name, string of item compatible\n with Python class member name.\n :return value of item.\n ' if (item in self.data): return self.data[item] else: raise AttributeError
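A usage sketch for the storage class these json_storage.py methods belong to; the class name JsonStorage and the indent attribute used by dump() do not appear in these records and are assumptions:

store = JsonStorage(path='data', name='state.json')   # creates data/state.json on first run
store.set('last_run', '2021-01-01')                    # set() persists immediately via dump()
print(store.get('last_run'))                           # '2021-01-01'
print(store.last_run)                                  # same value via the __getattr__ fallback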
def _request_locks(self, locks: list[str], id: Hashable, num_locks: int) -> bool: 'Request locks\n\n Parameters\n ----------\n locks: List[str]\n Names of the locks to request.\n id: Hashable\n Identifier of the `MultiLock` instance requesting the locks.\n num_locks: int\n Number of locks in `locks` requesting\n\n Return\n ------\n result: bool\n Whether `num_locks` requested locks are free immediately or not.\n ' assert (id not in self.requests) self.requests[id] = set(locks) assert ((len(locks) >= num_locks) and (num_locks > 0)) self.requests_left[id] = num_locks locks = sorted(locks, key=(lambda x: len(self.locks[x]))) for (i, lock) in enumerate(locks): self.locks[lock].append(id) if (len(self.locks[lock]) == 1): self.requests_left[id] -= 1 if (self.requests_left[id] == 0): self.requests[id] -= set(locks[(i + 1):]) return True return False
-8,840,431,474,768,864,000
Request locks Parameters ---------- locks: List[str] Names of the locks to request. id: Hashable Identifier of the `MultiLock` instance requesting the locks. num_locks: int Number of locks in `locks` requesting Return ------ result: bool Whether `num_locks` requested locks are free immediately or not.
distributed/multi_lock.py
_request_locks
bryanwweber/distributed
python
def _request_locks(self, locks: list[str], id: Hashable, num_locks: int) -> bool: 'Request locks\n\n Parameters\n ----------\n locks: List[str]\n Names of the locks to request.\n id: Hashable\n Identifier of the `MultiLock` instance requesting the locks.\n num_locks: int\n Number of locks in `locks` requesting\n\n Return\n ------\n result: bool\n Whether `num_locks` requested locks are free immediately or not.\n ' assert (id not in self.requests) self.requests[id] = set(locks) assert ((len(locks) >= num_locks) and (num_locks > 0)) self.requests_left[id] = num_locks locks = sorted(locks, key=(lambda x: len(self.locks[x]))) for (i, lock) in enumerate(locks): self.locks[lock].append(id) if (len(self.locks[lock]) == 1): self.requests_left[id] -= 1 if (self.requests_left[id] == 0): self.requests[id] -= set(locks[(i + 1):]) return True return False
def _refain_locks(self, locks, id): 'Cancel/release previously requested/acquired locks\n\n Parameters\n ----------\n locks: List[str]\n Names of the locks to refain.\n id: Hashable\n Identifier of the `MultiLock` instance refraining the locks.\n ' waiters_ready = set() for lock in locks: if (self.locks[lock][0] == id): self.locks[lock].pop(0) if self.locks[lock]: new_first = self.locks[lock][0] self.requests_left[new_first] -= 1 if (self.requests_left[new_first] <= 0): self.requests_left[new_first] = 0 waiters_ready.add(new_first) else: self.locks[lock].remove(id) assert (id not in self.locks[lock]) del self.requests[id] del self.requests_left[id] for waiter in waiters_ready: self.scheduler.loop.add_callback(self.events[waiter].set)
4,364,352,498,279,683,600
Cancel/release previously requested/acquired locks Parameters ---------- locks: List[str] Names of the locks to refain. id: Hashable Identifier of the `MultiLock` instance refraining the locks.
distributed/multi_lock.py
_refain_locks
bryanwweber/distributed
python
def _refain_locks(self, locks, id): 'Cancel/release previously requested/acquired locks\n\n Parameters\n ----------\n locks: List[str]\n Names of the locks to refain.\n id: Hashable\n Identifier of the `MultiLock` instance refraining the locks.\n ' waiters_ready = set() for lock in locks: if (self.locks[lock][0] == id): self.locks[lock].pop(0) if self.locks[lock]: new_first = self.locks[lock][0] self.requests_left[new_first] -= 1 if (self.requests_left[new_first] <= 0): self.requests_left[new_first] = 0 waiters_ready.add(new_first) else: self.locks[lock].remove(id) assert (id not in self.locks[lock]) del self.requests[id] del self.requests_left[id] for waiter in waiters_ready: self.scheduler.loop.add_callback(self.events[waiter].set)
def acquire(self, blocking=True, timeout=None, num_locks=None): 'Acquire the lock\n\n Parameters\n ----------\n blocking : bool, optional\n If false, don\'t wait on the lock in the scheduler at all.\n timeout : string or number or timedelta, optional\n Seconds to wait on the lock in the scheduler. This does not\n include local coroutine time, network transfer time, etc..\n It is forbidden to specify a timeout when blocking is false.\n Instead of number of seconds, it is also possible to specify\n a timedelta in string format, e.g. "200ms".\n num_locks : int, optional\n Number of locks needed. If None, all locks are needed\n\n Examples\n --------\n >>> lock = MultiLock([\'x\', \'y\']) # doctest: +SKIP\n >>> lock.acquire(timeout="1s") # doctest: +SKIP\n\n Returns\n -------\n True or False whether or not it successfully acquired the lock\n ' timeout = parse_timedelta(timeout) if (not blocking): if (timeout is not None): raise ValueError("can't specify a timeout for a non-blocking call") timeout = 0 result = self.client.sync(self.client.scheduler.multi_lock_acquire, locks=self.names, id=self.id, timeout=timeout, num_locks=(num_locks or len(self.names))) self._locked = True return result
-4,150,933,186,845,028,400
Acquire the lock Parameters ---------- blocking : bool, optional If false, don't wait on the lock in the scheduler at all. timeout : string or number or timedelta, optional Seconds to wait on the lock in the scheduler. This does not include local coroutine time, network transfer time, etc.. It is forbidden to specify a timeout when blocking is false. Instead of number of seconds, it is also possible to specify a timedelta in string format, e.g. "200ms". num_locks : int, optional Number of locks needed. If None, all locks are needed Examples -------- >>> lock = MultiLock(['x', 'y']) # doctest: +SKIP >>> lock.acquire(timeout="1s") # doctest: +SKIP Returns ------- True or False whether or not it successfully acquired the lock
distributed/multi_lock.py
acquire
bryanwweber/distributed
python
def acquire(self, blocking=True, timeout=None, num_locks=None): 'Acquire the lock\n\n Parameters\n ----------\n blocking : bool, optional\n If false, don\'t wait on the lock in the scheduler at all.\n timeout : string or number or timedelta, optional\n Seconds to wait on the lock in the scheduler. This does not\n include local coroutine time, network transfer time, etc..\n It is forbidden to specify a timeout when blocking is false.\n Instead of number of seconds, it is also possible to specify\n a timedelta in string format, e.g. "200ms".\n num_locks : int, optional\n Number of locks needed. If None, all locks are needed\n\n Examples\n --------\n >>> lock = MultiLock([\'x\', \'y\']) # doctest: +SKIP\n >>> lock.acquire(timeout="1s") # doctest: +SKIP\n\n Returns\n -------\n True or False whether or not it successfully acquired the lock\n ' timeout = parse_timedelta(timeout) if (not blocking): if (timeout is not None): raise ValueError("can't specify a timeout for a non-blocking call") timeout = 0 result = self.client.sync(self.client.scheduler.multi_lock_acquire, locks=self.names, id=self.id, timeout=timeout, num_locks=(num_locks or len(self.names))) self._locked = True return result
def release(self): 'Release the lock if already acquired' if (not self.locked()): raise ValueError('Lock is not yet acquired') ret = self.client.sync(self.client.scheduler.multi_lock_release, id=self.id) self._locked = False return ret
3,468,605,964,698,990,600
Release the lock if already acquired
distributed/multi_lock.py
release
bryanwweber/distributed
python
def release(self): if (not self.locked()): raise ValueError('Lock is not yet acquired') ret = self.client.sync(self.client.scheduler.multi_lock_release, id=self.id) self._locked = False return ret
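A hedged usage sketch combining the MultiLock acquire/release records above; it assumes MultiLock is importable from the distributed package and that a scheduler is reachable through Client:

from distributed import Client, MultiLock

client = Client()                              # connect to (or start) a scheduler
lock = MultiLock(['resource-a', 'resource-b'])
if lock.acquire(timeout='5s', num_locks=1):    # wait up to 5s for at least one of the two locks
    try:
        pass                                   # critical section guarded by the acquired lock(s)
    finally:
        lock.release()                         # raises ValueError if the lock was never acquired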
def display_and_save_batch(title, batch, data, save=True, display=True): 'Display and save batch of image using plt' im = torchvision.utils.make_grid(batch, nrow=int((batch.shape[0] ** 0.5))) plt.title(title) plt.imshow(np.transpose(im.cpu().numpy(), (1, 2, 0)), cmap='gray') if save: plt.savefig(((('results/' + title) + data) + '.png'), transparent=True, bbox_inches='tight') if display: plt.show()
-7,719,224,254,690,918,000
Display and save batch of image using plt
Implementations/Conditional-Variational-Autoencoder/plot_utils.py
display_and_save_batch
jaywonchung/Learning-ML
python
def display_and_save_batch(title, batch, data, save=True, display=True): im = torchvision.utils.make_grid(batch, nrow=int((batch.shape[0] ** 0.5))) plt.title(title) plt.imshow(np.transpose(im.cpu().numpy(), (1, 2, 0)), cmap='gray') if save: plt.savefig(((('results/' + title) + data) + '.png'), transparent=True, bbox_inches='tight') if display: plt.show()
def display_and_save_latent(batch, label, data, save=True, display=True): 'Display and save batch of 2-D latent variable using plt' colors = ['black', 'red', 'green', 'blue', 'yellow', 'cyan', 'magenta', 'pink', 'violet', 'grey'] z = batch.cpu().detach().numpy() l = label.cpu().numpy() plt.title('Latent variables') plt.scatter(z[:, 0], z[:, 1], c=l, cmap=matplotlib.colors.ListedColormap(colors)) plt.xlim((- 3), 3) plt.ylim((- 3), 3) if save: plt.savefig((('results/latent-variable' + data) + '.png'), transparent=True, bbox_inches='tight') if display: plt.show()
7,770,787,316,878,940,000
Display and save batch of 2-D latent variable using plt
Implementations/Conditional-Variational-Autoencoder/plot_utils.py
display_and_save_latent
jaywonchung/Learning-ML
python
def display_and_save_latent(batch, label, data, save=True, display=True): colors = ['black', 'red', 'green', 'blue', 'yellow', 'cyan', 'magenta', 'pink', 'violet', 'grey'] z = batch.cpu().detach().numpy() l = label.cpu().numpy() plt.title('Latent variables') plt.scatter(z[:, 0], z[:, 1], c=l, cmap=matplotlib.colors.ListedColormap(colors)) plt.xlim((- 3), 3) plt.ylim((- 3), 3) if save: plt.savefig((('results/latent-variable' + data) + '.png'), transparent=True, bbox_inches='tight') if display: plt.show()
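A minimal sketch of calling the two plotting helpers above; the random MNIST-shaped batch, latent codes, and labels are placeholders, and a results/ directory must already exist when save=True:

import torch

batch = torch.rand(16, 1, 28, 28)                  # (N, C, H, W) grayscale images
z = torch.randn(16, 2)                             # 2-D latent codes
labels = torch.randint(0, 10, (16,))
display_and_save_batch('Reconstruction', batch, '-epoch1', save=False, display=True)
display_and_save_latent(z, labels, '-epoch1', save=False, display=True)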
@staticmethod def Args(parser): 'Args is called by calliope to gather arguments for this command.\n\n Args:\n parser: An argparse parser that you can use to add arguments that go\n on the command line after this command. Positional arguments are\n allowed.\n ' common_flags.operation_flag(suffix='to describe').AddToParser(parser) parser.display_info.AddFormat(':(metadata.startTime.date(format="%Y-%m-%d %H:%M:%S %Z", tz=LOCAL)) [transforms] default') parser.add_argument('--full', action='store_true', default=False, help='Print the entire operation resource, which could be large. By default, a summary will be printed instead.')
-2,756,768,806,353,174,500
Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.
lib/surface/service_management/operations/describe.py
Args
bshaffer/google-cloud-sdk
python
@staticmethod def Args(parser): 'Args is called by calliope to gather arguments for this command.\n\n Args:\n parser: An argparse parser that you can use to add arguments that go\n on the command line after this command. Positional arguments are\n allowed.\n ' common_flags.operation_flag(suffix='to describe').AddToParser(parser) parser.display_info.AddFormat(':(metadata.startTime.date(format="%Y-%m-%d %H:%M:%S %Z", tz=LOCAL)) [transforms] default') parser.add_argument('--full', action='store_true', default=False, help='Print the entire operation resource, which could be large. By default, a summary will be printed instead.')
def Run(self, args): "Stubs 'service-management operations describe'.\n\n Args:\n args: argparse.Namespace, The arguments that this command was invoked\n with.\n " pass
-7,147,610,344,865,069,000
Stubs 'service-management operations describe'. Args: args: argparse.Namespace, The arguments that this command was invoked with.
lib/surface/service_management/operations/describe.py
Run
bshaffer/google-cloud-sdk
python
def Run(self, args): "Stubs 'service-management operations describe'.\n\n Args:\n args: argparse.Namespace, The arguments that this command was invoked\n with.\n " pass
def initialize_instances(infile): 'Read the m_trg.csv CSV data into a list of instances.' instances = [] dat = open(infile, 'r') reader = csv.reader(dat) for row in reader: instance = Instance([float(value) for value in row[:(- 1)]]) if (float(row[(- 1)]) < 0): instance.setLabel(Instance(0)) else: instance.setLabel(Instance(1)) instances.append(instance) dat.close() return instances
563,886,251,217,483,300
Read the m_trg.csv CSV data into a list of instances.
ABAGAIL_execution/flipflop.py
initialize_instances
tirthajyoti/Randomized_Optimization
python
def initialize_instances(infile): instances = [] dat = open(infile, 'r') reader = csv.reader(dat) for row in reader: instance = Instance([float(value) for value in row[:(- 1)]]) if (float(row[(- 1)]) < 0): instance.setLabel(Instance(0)) else: instance.setLabel(Instance(1)) instances.append(instance) dat.close() return instances
def train(oa, network, oaName, training_ints, validation_ints, testing_ints, measure): 'Train a given network on a set of instances.\n ' print('\nError results for {}\n---------------------------'.format(oaName)) times = [0] for iteration in xrange(TRAINING_ITERATIONS): start = time.clock() oa.train() elapsed = (time.clock() - start) times.append((times[(- 1)] + elapsed)) if ((iteration % 10) == 0): (MSE_trg, acc_trg) = errorOnDataSet(network, training_ints, measure) (MSE_val, acc_val) = errorOnDataSet(network, validation_ints, measure) (MSE_tst, acc_tst) = errorOnDataSet(network, testing_ints, measure) txt = '{},{},{},{},{},{},{},{}\n'.format(iteration, MSE_trg, MSE_val, MSE_tst, acc_trg, acc_val, acc_tst, times[(- 1)]) print(txt) f = open(OUTFILE, 'a+') f.write(txt) f.close()
6,266,635,343,969,500,000
Train a given network on a set of instances.
ABAGAIL_execution/flipflop.py
train
tirthajyoti/Randomized_Optimization
python
def train(oa, network, oaName, training_ints, validation_ints, testing_ints, measure): '\n ' print('\nError results for {}\n---------------------------'.format(oaName)) times = [0] for iteration in xrange(TRAINING_ITERATIONS): start = time.clock() oa.train() elapsed = (time.clock() - start) times.append((times[(- 1)] + elapsed)) if ((iteration % 10) == 0): (MSE_trg, acc_trg) = errorOnDataSet(network, training_ints, measure) (MSE_val, acc_val) = errorOnDataSet(network, validation_ints, measure) (MSE_tst, acc_tst) = errorOnDataSet(network, testing_ints, measure) txt = '{},{},{},{},{},{},{},{}\n'.format(iteration, MSE_trg, MSE_val, MSE_tst, acc_trg, acc_val, acc_tst, times[(- 1)]) print(txt) f = open(OUTFILE, 'a+') f.write(txt) f.close()
def main(): 'Run this experiment' training_ints = initialize_instances('m_trg.csv') testing_ints = initialize_instances('m_test.csv') validation_ints = initialize_instances('m_val.csv') factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) relu = RELU() rule = RPROPUpdateRule() oa_names = ['Backprop'] classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, OUTPUT_LAYER], relu) train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule), classification_network, 'Backprop', training_ints, validation_ints, testing_ints, measure)
-8,651,872,616,011,747,000
Run this experiment
ABAGAIL_execution/flipflop.py
main
tirthajyoti/Randomized_Optimization
python
def main(): training_ints = initialize_instances('m_trg.csv') testing_ints = initialize_instances('m_test.csv') validation_ints = initialize_instances('m_val.csv') factory = BackPropagationNetworkFactory() measure = SumOfSquaresError() data_set = DataSet(training_ints) relu = RELU() rule = RPROPUpdateRule() oa_names = ['Backprop'] classification_network = factory.createClassificationNetwork([INPUT_LAYER, HIDDEN_LAYER1, HIDDEN_LAYER2, HIDDEN_LAYER3, OUTPUT_LAYER], relu) train(BatchBackPropagationTrainer(data_set, classification_network, measure, rule), classification_network, 'Backprop', training_ints, validation_ints, testing_ints, measure)
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, compute_name: Optional[pulumi.Input[str]]=None, identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]]=None, location: Optional[pulumi.Input[str]]=None, properties: Optional[pulumi.Input[Union[(pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs'])]]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, workspace_name: Optional[pulumi.Input[str]]=None, __props__=None, __name__=None, __opts__=None): "\n Machine Learning compute object wrapped into ARM resource envelope.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] compute_name: Name of the Azure Machine Learning compute.\n :param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The identity of the resource.\n :param pulumi.Input[str] location: Specifies the location of the resource.\n :param pulumi.Input[Union[pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs']]] properties: Compute properties\n :param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located.\n :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sku of the workspace.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Contains resource tags defined as key/value pairs.\n :param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.\n " if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = _utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['compute_name'] = compute_name __props__['identity'] = identity __props__['location'] = location __props__['properties'] = properties if ((resource_group_name is None) and (not opts.urn)): raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['sku'] = sku __props__['tags'] = tags if ((workspace_name is None) and (not opts.urn)): raise TypeError("Missing required property 'workspace_name'") __props__['workspace_name'] = workspace_name __props__['name'] = None __props__['system_data'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20210101:MachineLearningCompute'), 
pulumi.Alias(type_='azure-native:machinelearningservices:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/latest:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/latest:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20180301preview:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20180301preview:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20181119:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20181119:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20190501:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20190501:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20190601:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20190601:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20191101:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20191101:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200101:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200101:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200218preview:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200218preview:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200301:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200301:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200401:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200401:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200501preview:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200501preview:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200515preview:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200515preview:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200601:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200601:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200801:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200801:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200901preview:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200901preview:MachineLearningCompute')]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(MachineLearningCompute, __self__).__init__('azure-native:machinelearningservices/v20210101:MachineLearningCompute', resource_name, __props__, opts)
8,050,948,739,499,512,000
Machine Learning compute object wrapped into ARM resource envelope. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] compute_name: Name of the Azure Machine Learning compute. :param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The identity of the resource. :param pulumi.Input[str] location: Specifies the location of the resource. :param pulumi.Input[Union[pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs']]] properties: Compute properties :param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located. :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sku of the workspace. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Contains resource tags defined as key/value pairs. :param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
__init__
pulumi-bot/pulumi-azure-native
python
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, compute_name: Optional[pulumi.Input[str]]=None, identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]]=None, location: Optional[pulumi.Input[str]]=None, properties: Optional[pulumi.Input[Union[(pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs'])]]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, workspace_name: Optional[pulumi.Input[str]]=None, __props__=None, __name__=None, __opts__=None): "\n Machine Learning compute object wrapped into ARM resource envelope.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[str] compute_name: Name of the Azure Machine Learning compute.\n :param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The identity of the resource.\n :param pulumi.Input[str] location: Specifies the location of the resource.\n :param pulumi.Input[Union[pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs']]] properties: Compute properties\n :param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located.\n :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sku of the workspace.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Contains resource tags defined as key/value pairs.\n :param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.\n " if (__name__ is not None): warnings.warn('explicit use of __name__ is deprecated', DeprecationWarning) resource_name = __name__ if (__opts__ is not None): warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if (opts is None): opts = pulumi.ResourceOptions() if (not isinstance(opts, pulumi.ResourceOptions)): raise TypeError('Expected resource options to be a ResourceOptions instance') if (opts.version is None): opts.version = _utilities.get_version() if (opts.id is None): if (__props__ is not None): raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['compute_name'] = compute_name __props__['identity'] = identity __props__['location'] = location __props__['properties'] = properties if ((resource_group_name is None) and (not opts.urn)): raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['sku'] = sku __props__['tags'] = tags if ((workspace_name is None) and (not opts.urn)): raise TypeError("Missing required property 'workspace_name'") __props__['workspace_name'] = workspace_name __props__['name'] = None __props__['system_data'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20210101:MachineLearningCompute'), 
pulumi.Alias(type_='azure-native:machinelearningservices:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/latest:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/latest:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20180301preview:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20180301preview:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20181119:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20181119:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20190501:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20190501:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20190601:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20190601:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20191101:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20191101:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200101:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200101:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200218preview:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200218preview:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200301:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200301:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200401:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200401:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200501preview:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200501preview:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200515preview:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200515preview:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200601:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200601:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200801:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200801:MachineLearningCompute'), pulumi.Alias(type_='azure-native:machinelearningservices/v20200901preview:MachineLearningCompute'), pulumi.Alias(type_='azure-nextgen:machinelearningservices/v20200901preview:MachineLearningCompute')]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(MachineLearningCompute, __self__).__init__('azure-native:machinelearningservices/v20210101:MachineLearningCompute', resource_name, __props__, opts)
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'MachineLearningCompute': "\n Get an existing MachineLearningCompute resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__['identity'] = None __props__['location'] = None __props__['name'] = None __props__['properties'] = None __props__['sku'] = None __props__['system_data'] = None __props__['tags'] = None __props__['type'] = None return MachineLearningCompute(resource_name, opts=opts, __props__=__props__)
-3,952,396,233,049,537,500
Get an existing MachineLearningCompute resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
get
pulumi-bot/pulumi-azure-native
python
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'MachineLearningCompute': "\n Get an existing MachineLearningCompute resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__['identity'] = None __props__['location'] = None __props__['name'] = None __props__['properties'] = None __props__['sku'] = None __props__['system_data'] = None __props__['tags'] = None __props__['type'] = None return MachineLearningCompute(resource_name, opts=opts, __props__=__props__)
@property @pulumi.getter def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]: '\n The identity of the resource.\n ' return pulumi.get(self, 'identity')
-2,580,811,553,100,511,000
The identity of the resource.
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
identity
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]: '\n \n ' return pulumi.get(self, 'identity')
@property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: '\n Specifies the location of the resource.\n ' return pulumi.get(self, 'location')
6,302,777,286,934,958,000
Specifies the location of the resource.
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
location
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: '\n \n ' return pulumi.get(self, 'location')
@property @pulumi.getter def name(self) -> pulumi.Output[str]: '\n Specifies the name of the resource.\n ' return pulumi.get(self, 'name')
-5,472,184,884,634,436,000
Specifies the name of the resource.
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
name
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter def name(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'name')
@property @pulumi.getter def properties(self) -> pulumi.Output[Any]: '\n Compute properties\n ' return pulumi.get(self, 'properties')
-7,218,582,079,494,190,000
Compute properties
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
properties
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter def properties(self) -> pulumi.Output[Any]: '\n \n ' return pulumi.get(self, 'properties')
@property @pulumi.getter def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]: '\n The sku of the workspace.\n ' return pulumi.get(self, 'sku')
-3,322,611,284,534,289,000
The sku of the workspace.
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
sku
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]: '\n \n ' return pulumi.get(self, 'sku')
@property @pulumi.getter(name='systemData') def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']: '\n Read only system data\n ' return pulumi.get(self, 'system_data')
723,081,282,536,590,700
Read only system data
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
system_data
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter(name='systemData') def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']: '\n \n ' return pulumi.get(self, 'system_data')
@property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]: '\n Contains resource tags defined as key/value pairs.\n ' return pulumi.get(self, 'tags')
-4,864,786,089,036,755,000
Contains resource tags defined as key/value pairs.
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
tags
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]: '\n \n ' return pulumi.get(self, 'tags')
@property @pulumi.getter def type(self) -> pulumi.Output[str]: '\n Specifies the type of the resource.\n ' return pulumi.get(self, 'type')
5,546,388,334,793,997,000
Specifies the type of the resource.
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
type
pulumi-bot/pulumi-azure-native
python
@property @pulumi.getter def type(self) -> pulumi.Output[str]: '\n \n ' return pulumi.get(self, 'type')
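A hedged provisioning sketch for the MachineLearningCompute resource defined above; the resource and workspace names are illustrative, and the dict-shaped properties payload assumes Pulumi's usual acceptance of plain dicts for input types and the ARM AmlCompute schema:

import pulumi
from pulumi_azure_native.machinelearningservices import v20210101 as ml

compute = ml.MachineLearningCompute(
    'example-compute',
    resource_group_name='example-rg',          # required
    workspace_name='example-workspace',        # required
    compute_name='cpu-cluster',
    location='eastus',
    properties={
        'computeType': 'AmlCompute',
        'properties': {'vmSize': 'STANDARD_DS3_V2', 'scaleSettings': {'maxNodeCount': 4}},
    },
)
pulumi.export('compute_name', compute.name)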
async def async_setup_entry(hass, config_entry, async_add_entities): 'Set up OpenWeatherMap weather entity based on a config entry.' domain_data = hass.data[DOMAIN][config_entry.entry_id] name = domain_data[ENTRY_NAME] weather_coordinator = domain_data[ENTRY_WEATHER_COORDINATOR] unique_id = f'{config_entry.unique_id}' owm_weather = OpenWeatherMapWeather(name, unique_id, weather_coordinator) async_add_entities([owm_weather], False)
-5,971,107,687,580,836,000
Set up OpenWeatherMap weather entity based on a config entry.
homeassistant/components/openweathermap/weather.py
async_setup_entry
123dev/core
python
async def async_setup_entry(hass, config_entry, async_add_entities): domain_data = hass.data[DOMAIN][config_entry.entry_id] name = domain_data[ENTRY_NAME] weather_coordinator = domain_data[ENTRY_WEATHER_COORDINATOR] unique_id = f'{config_entry.unique_id}' owm_weather = OpenWeatherMapWeather(name, unique_id, weather_coordinator) async_add_entities([owm_weather], False)
def __init__(self, name, unique_id, weather_coordinator: WeatherUpdateCoordinator): 'Initialize the sensor.' self._name = name self._unique_id = unique_id self._weather_coordinator = weather_coordinator
4,389,320,978,801,046,000
Initialize the sensor.
homeassistant/components/openweathermap/weather.py
__init__
123dev/core
python
def __init__(self, name, unique_id, weather_coordinator: WeatherUpdateCoordinator): self._name = name self._unique_id = unique_id self._weather_coordinator = weather_coordinator
@property def name(self): 'Return the name of the sensor.' return self._name
8,691,954,631,286,512,000
Return the name of the sensor.
homeassistant/components/openweathermap/weather.py
name
123dev/core
python
@property def name(self): return self._name
@property def unique_id(self): 'Return a unique_id for this entity.' return self._unique_id
2,237,810,817,326,574,000
Return a unique_id for this entity.
homeassistant/components/openweathermap/weather.py
unique_id
123dev/core
python
@property def unique_id(self): return self._unique_id
@property def should_poll(self): 'Return the polling requirement of the entity.' return False
9,164,027,142,541,335,000
Return the polling requirement of the entity.
homeassistant/components/openweathermap/weather.py
should_poll
123dev/core
python
@property def should_poll(self): return False
@property def attribution(self): 'Return the attribution.' return ATTRIBUTION
-4,777,674,644,330,317,000
Return the attribution.
homeassistant/components/openweathermap/weather.py
attribution
123dev/core
python
@property def attribution(self): return ATTRIBUTION
@property def condition(self): 'Return the current condition.' return self._weather_coordinator.data[ATTR_API_CONDITION]
8,621,755,588,087,073,000
Return the current condition.
homeassistant/components/openweathermap/weather.py
condition
123dev/core
python
@property def condition(self): return self._weather_coordinator.data[ATTR_API_CONDITION]
@property def temperature(self): 'Return the temperature.' return self._weather_coordinator.data[ATTR_API_TEMPERATURE]
7,313,664,048,823,649,000
Return the temperature.
homeassistant/components/openweathermap/weather.py
temperature
123dev/core
python
@property def temperature(self): return self._weather_coordinator.data[ATTR_API_TEMPERATURE]
@property def temperature_unit(self): 'Return the unit of measurement.' return TEMP_CELSIUS
4,571,780,805,438,814,700
Return the unit of measurement.
homeassistant/components/openweathermap/weather.py
temperature_unit
123dev/core
python
@property def temperature_unit(self): return TEMP_CELSIUS
@property def pressure(self): 'Return the pressure.' return self._weather_coordinator.data[ATTR_API_PRESSURE]
2,965,828,343,547,977,700
Return the pressure.
homeassistant/components/openweathermap/weather.py
pressure
123dev/core
python
@property def pressure(self): return self._weather_coordinator.data[ATTR_API_PRESSURE]
@property def humidity(self): 'Return the humidity.' return self._weather_coordinator.data[ATTR_API_HUMIDITY]
7,101,145,499,980,695,000
Return the humidity.
homeassistant/components/openweathermap/weather.py
humidity
123dev/core
python
@property def humidity(self): return self._weather_coordinator.data[ATTR_API_HUMIDITY]
@property def wind_speed(self): 'Return the wind speed.' wind_speed = self._weather_coordinator.data[ATTR_API_WIND_SPEED] if (self.hass.config.units.name == 'imperial'): return round((wind_speed * 2.24), 2) return round((wind_speed * 3.6), 2)
2,837,666,101,896,959,000
Return the wind speed.
homeassistant/components/openweathermap/weather.py
wind_speed
123dev/core
python
@property def wind_speed(self): wind_speed = self._weather_coordinator.data[ATTR_API_WIND_SPEED] if (self.hass.config.units.name == 'imperial'): return round((wind_speed * 2.24), 2) return round((wind_speed * 3.6), 2)
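A worked instance of the unit conversion performed in wind_speed above, with a purely illustrative API value:

wind_speed_ms = 5.0                        # value delivered by OpenWeatherMap, in m/s
print(round(wind_speed_ms * 3.6, 2))       # 18.0 km/h for metric unit systems
print(round(wind_speed_ms * 2.24, 2))      # 11.2 mph when the unit system is imperial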
@property def wind_bearing(self): 'Return the wind bearing.' return self._weather_coordinator.data[ATTR_API_WIND_BEARING]
5,297,157,121,137,046,000
Return the wind bearing.
homeassistant/components/openweathermap/weather.py
wind_bearing
123dev/core
python
@property def wind_bearing(self): return self._weather_coordinator.data[ATTR_API_WIND_BEARING]
@property def forecast(self): 'Return the forecast array.' return self._weather_coordinator.data[ATTR_API_FORECAST]
-6,175,109,922,992,382,000
Return the forecast array.
homeassistant/components/openweathermap/weather.py
forecast
123dev/core
python
@property def forecast(self): return self._weather_coordinator.data[ATTR_API_FORECAST]
@property def available(self): 'Return True if entity is available.' return self._weather_coordinator.last_update_success
-3,304,158,879,303,020,000
Return True if entity is available.
homeassistant/components/openweathermap/weather.py
available
123dev/core
python
@property def available(self): return self._weather_coordinator.last_update_success
async def async_added_to_hass(self): 'Connect to dispatcher listening for entity data notifications.' self.async_on_remove(self._weather_coordinator.async_add_listener(self.async_write_ha_state))
7,899,978,953,877,624,000
Connect to dispatcher listening for entity data notifications.
homeassistant/components/openweathermap/weather.py
async_added_to_hass
123dev/core
python
async def async_added_to_hass(self): self.async_on_remove(self._weather_coordinator.async_add_listener(self.async_write_ha_state))
async def async_update(self): 'Get the latest data from OWM and updates the states.' (await self._weather_coordinator.async_request_refresh())
-2,303,072,366,161,045,800
Get the latest data from OWM and updates the states.
homeassistant/components/openweathermap/weather.py
async_update
123dev/core
python
async def async_update(self): (await self._weather_coordinator.async_request_refresh())
def get_widgets(self): "\n Returns a list of widgets sorted by their 'order'.\n If two or more widgets have the same 'order', sort by label.\n " return map((lambda x: x['widget']), filter((lambda x: (x['widget'] not in self.removed_widgets)), sorted(self.widgets.values(), key=(lambda x: (x['order'], x['widget'].label)))))
4,196,100,985,637,145,000
Returns a list of widgets sorted by their 'order'. If two or more widgets have the same 'order', sort by label.
mayan/apps/common/classes.py
get_widgets
marumadang/mayan-edms
python
def get_widgets(self): "\n Returns a list of widgets sorted by their 'order'.\n If two or more widgets have the same 'order', sort by label.\n " return map((lambda x: x['widget']), filter((lambda x: (x['widget'] not in self.removed_widgets)), sorted(self.widgets.values(), key=(lambda x: (x['order'], x['widget'].label)))))
def get_result(self, name): '\n The method that produces the actual result. Must be implemented\n by each subclass.\n ' raise NotImplementedError
2,257,598,814,406,162,000
The method that produces the actual result. Must be implemented by each subclass.
mayan/apps/common/classes.py
get_result
marumadang/mayan-edms
python
def get_result(self, name): '\n The method that produces the actual result. Must be implemented\n by each subclass.\n ' raise NotImplementedError
def sample_recognize(local_file_path): '\n Transcribe a short audio file with multiple channels\n\n Args:\n local_file_path Path to local audio file, e.g. /path/audio.wav\n ' client = speech_v1.SpeechClient() audio_channel_count = 2 enable_separate_recognition_per_channel = True language_code = 'en-US' config = {'audio_channel_count': audio_channel_count, 'enable_separate_recognition_per_channel': enable_separate_recognition_per_channel, 'language_code': language_code} with io.open(local_file_path, 'rb') as f: content = f.read() audio = {'content': content} response = client.recognize(config, audio) for result in response.results: print(u'Channel tag: {}'.format(result.channel_tag)) alternative = result.alternatives[0] print(u'Transcript: {}'.format(alternative.transcript))
6,229,858,521,637,275,000
Transcribe a short audio file with multiple channels Args: local_file_path Path to local audio file, e.g. /path/audio.wav
speech/samples/v1/speech_transcribe_multichannel.py
sample_recognize
AzemaBaptiste/google-cloud-python
python
def sample_recognize(local_file_path): '\n Transcribe a short audio file with multiple channels\n\n Args:\n local_file_path Path to local audio file, e.g. /path/audio.wav\n ' client = speech_v1.SpeechClient() audio_channel_count = 2 enable_separate_recognition_per_channel = True language_code = 'en-US' config = {'audio_channel_count': audio_channel_count, 'enable_separate_recognition_per_channel': enable_separate_recognition_per_channel, 'language_code': language_code} with io.open(local_file_path, 'rb') as f: content = f.read() audio = {'content': content} response = client.recognize(config, audio) for result in response.results: print(u'Channel tag: {}'.format(result.channel_tag)) alternative = result.alternatives[0] print(u'Transcript: {}'.format(alternative.transcript))
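A usage sketch for the sample above; it assumes the module-level imports the record does not show, and the WAV path is a placeholder for any two-channel LINEAR16 file:

import io
from google.cloud import speech_v1

sample_recognize('resources/commercial_stereo.wav')   # hypothetical local file path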
def __init__(self, security_group_id=None): 'ShowSecurityGroupRequest - a model defined in huaweicloud sdk' self._security_group_id = None self.discriminator = None self.security_group_id = security_group_id
4,764,584,815,604,628,000
ShowSecurityGroupRequest - a model defined in huaweicloud sdk
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/show_security_group_request.py
__init__
huaweicloud/huaweicloud-sdk-python-v3
python
def __init__(self, security_group_id=None): self._security_group_id = None self.discriminator = None self.security_group_id = security_group_id
@property def security_group_id(self): 'Gets the security_group_id of this ShowSecurityGroupRequest.\n\n Security group resource ID\n\n :return: The security_group_id of this ShowSecurityGroupRequest.\n :rtype: str\n ' return self._security_group_id
6,141,350,925,777,083,000
Gets the security_group_id of this ShowSecurityGroupRequest. Security group resource ID :return: The security_group_id of this ShowSecurityGroupRequest. :rtype: str
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/show_security_group_request.py
security_group_id
huaweicloud/huaweicloud-sdk-python-v3
python
@property def security_group_id(self): 'Gets the security_group_id of this ShowSecurityGroupRequest.\n\n Security group resource ID\n\n :return: The security_group_id of this ShowSecurityGroupRequest.\n :rtype: str\n ' return self._security_group_id
@security_group_id.setter def security_group_id(self, security_group_id): 'Sets the security_group_id of this ShowSecurityGroupRequest.\n\n Security group resource ID\n\n :param security_group_id: The security_group_id of this ShowSecurityGroupRequest.\n :type: str\n ' self._security_group_id = security_group_id
-7,290,699,017,112,613,000
Sets the security_group_id of this ShowSecurityGroupRequest. Security group resource ID :param security_group_id: The security_group_id of this ShowSecurityGroupRequest. :type: str
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/show_security_group_request.py
security_group_id
huaweicloud/huaweicloud-sdk-python-v3
python
@security_group_id.setter def security_group_id(self, security_group_id): 'Sets the security_group_id of this ShowSecurityGroupRequest.\n\n Security group resource ID\n\n :param security_group_id: The security_group_id of this ShowSecurityGroupRequest.\n :type: str\n ' self._security_group_id = security_group_id
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) elif (attr in self.sensitive_list): result[attr] = '****' else: result[attr] = value return result
2,594,216,033,120,720,000
Returns the model properties as a dict
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/show_security_group_request.py
to_dict
huaweicloud/huaweicloud-sdk-python-v3
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) elif (attr in self.sensitive_list): result[attr] = '****' else: result[attr] = value return result
def to_str(self): 'Returns the string representation of the model' import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding('utf-8') return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
-6,095,553,759,700,562,000
Returns the string representation of the model
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/show_security_group_request.py
to_str
huaweicloud/huaweicloud-sdk-python-v3
python
def to_str(self): import simplejson as json if six.PY2: import sys reload(sys) sys.setdefaultencoding('utf-8') return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self): 'For `print`' return self.to_str()
-1,581,176,371,750,213,000
For `print`
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/show_security_group_request.py
__repr__
huaweicloud/huaweicloud-sdk-python-v3
python
def __repr__(self): return self.to_str()
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, ShowSecurityGroupRequest)): return False return (self.__dict__ == other.__dict__)
-2,403,763,859,980,322,300
Returns true if both objects are equal
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/show_security_group_request.py
__eq__
huaweicloud/huaweicloud-sdk-python-v3
python
def __eq__(self, other): if (not isinstance(other, ShowSecurityGroupRequest)): return False return (self.__dict__ == other.__dict__)
def __ne__(self, other): 'Returns true if both objects are not equal' return (not (self == other))
7,764,124,047,908,058,000
Returns true if both objects are not equal
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/show_security_group_request.py
__ne__
huaweicloud/huaweicloud-sdk-python-v3
python
def __ne__(self, other): return (not (self == other))
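A hedged sketch of building and inspecting the request model above; the UUID is illustrative, and the VPC client call that would consume the request is not part of these records:

req = ShowSecurityGroupRequest(security_group_id='0552091e-b83a-49dd-88a7-4a5c86fd9ec3')
print(req.to_dict())    # {'security_group_id': '0552091e-b83a-49dd-88a7-4a5c86fd9ec3'}
print(req)              # __repr__ delegates to to_str(), i.e. a JSON string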
def __init__(self, error_date_time=None, request_id=None): 'ErrorDetails - a model defined in Swagger' self._error_date_time = None self._request_id = None self.discriminator = None if (error_date_time is not None): self.error_date_time = error_date_time if (request_id is not None): self.request_id = request_id
180,176,011,986,121,900
ErrorDetails - a model defined in Swagger
asposewordscloud/models/error_details.py
__init__
rizwanniazigroupdocs/aspose-words-cloud-python
python
def __init__(self, error_date_time=None, request_id=None): self._error_date_time = None self._request_id = None self.discriminator = None if (error_date_time is not None): self.error_date_time = error_date_time if (request_id is not None): self.request_id = request_id
@property def error_date_time(self): 'Gets the error_date_time of this ErrorDetails. # noqa: E501\n\n Error datetime. # noqa: E501\n\n :return: The error_date_time of this ErrorDetails. # noqa: E501\n :rtype: datetime\n ' return self._error_date_time
-9,134,720,129,385,786,000
Gets the error_date_time of this ErrorDetails. # noqa: E501 Error datetime. # noqa: E501 :return: The error_date_time of this ErrorDetails. # noqa: E501 :rtype: datetime
asposewordscloud/models/error_details.py
error_date_time
rizwanniazigroupdocs/aspose-words-cloud-python
python
@property def error_date_time(self): 'Gets the error_date_time of this ErrorDetails. # noqa: E501\n\n Error datetime. # noqa: E501\n\n :return: The error_date_time of this ErrorDetails. # noqa: E501\n :rtype: datetime\n ' return self._error_date_time
@error_date_time.setter def error_date_time(self, error_date_time): 'Sets the error_date_time of this ErrorDetails.\n\n Error datetime. # noqa: E501\n\n :param error_date_time: The error_date_time of this ErrorDetails. # noqa: E501\n :type: datetime\n ' self._error_date_time = error_date_time
2,731,700,064,338,604,000
Sets the error_date_time of this ErrorDetails. Error datetime. # noqa: E501 :param error_date_time: The error_date_time of this ErrorDetails. # noqa: E501 :type: datetime
asposewordscloud/models/error_details.py
error_date_time
rizwanniazigroupdocs/aspose-words-cloud-python
python
@error_date_time.setter def error_date_time(self, error_date_time): 'Sets the error_date_time of this ErrorDetails.\n\n Error datetime. # noqa: E501\n\n :param error_date_time: The error_date_time of this ErrorDetails. # noqa: E501\n :type: datetime\n ' self._error_date_time = error_date_time
@property def request_id(self): 'Gets the request_id of this ErrorDetails. # noqa: E501\n\n The request id. # noqa: E501\n\n :return: The request_id of this ErrorDetails. # noqa: E501\n :rtype: str\n ' return self._request_id
-2,747,279,147,444,605,400
Gets the request_id of this ErrorDetails. # noqa: E501 The request id. # noqa: E501 :return: The request_id of this ErrorDetails. # noqa: E501 :rtype: str
asposewordscloud/models/error_details.py
request_id
rizwanniazigroupdocs/aspose-words-cloud-python
python
@property def request_id(self): 'Gets the request_id of this ErrorDetails. # noqa: E501\n\n The request id. # noqa: E501\n\n :return: The request_id of this ErrorDetails. # noqa: E501\n :rtype: str\n ' return self._request_id
@request_id.setter def request_id(self, request_id): 'Sets the request_id of this ErrorDetails.\n\n The request id. # noqa: E501\n\n :param request_id: The request_id of this ErrorDetails. # noqa: E501\n :type: str\n ' self._request_id = request_id
4,101,524,972,968,898,600
Sets the request_id of this ErrorDetails. The request id. # noqa: E501 :param request_id: The request_id of this ErrorDetails. # noqa: E501 :type: str
asposewordscloud/models/error_details.py
request_id
rizwanniazigroupdocs/aspose-words-cloud-python
python
@request_id.setter def request_id(self, request_id): 'Sets the request_id of this ErrorDetails.\n\n The request id. # noqa: E501\n\n :param request_id: The request_id of this ErrorDetails. # noqa: E501\n :type: str\n ' self._request_id = request_id
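The ErrorDetails records above illustrate the constructor-plus-property pattern used throughout these generated models: private backing fields are always initialised to None, the constructor assigns through the public properties only for arguments that were actually supplied, and the getters and setters are plain pass-throughs. A small stand-alone sketch of the same shape follows; ToyErrorDetails is illustrative, not the generated class.

    # Minimal sketch (not the generated file) of the backing-field + property pattern.
    class ToyErrorDetails:
        def __init__(self, error_date_time=None, request_id=None):
            # Backing fields always exist, even when no value was supplied.
            self._error_date_time = None
            self._request_id = None
            # Assign through the properties only for values that were actually given.
            if error_date_time is not None:
                self.error_date_time = error_date_time
            if request_id is not None:
                self.request_id = request_id

        @property
        def error_date_time(self):
            return self._error_date_time

        @error_date_time.setter
        def error_date_time(self, value):
            # The generated setter is a plain pass-through; validation could be added here.
            self._error_date_time = value

        @property
        def request_id(self):
            return self._request_id

        @request_id.setter
        def request_id(self, value):
            self._request_id = value

    if __name__ == "__main__":
        details = ToyErrorDetails(request_id="req-0001")
        print(details.request_id)       # req-0001
        print(details.error_date_time)  # None: the backing field is still initialised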
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
-2,772,352,302,133,010,000
Returns the model properties as a dict
asposewordscloud/models/error_details.py
to_dict
rizwanniazigroupdocs/aspose-words-cloud-python
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
def to_json(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[self.attribute_map[attr]] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[self.attribute_map[attr]] = value.to_dict() elif isinstance(value, dict): result[self.attribute_map[attr]] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[self.attribute_map[attr]] = value return json.dumps(result)
-5,130,988,191,037,985,000
Returns the model properties as a dict
asposewordscloud/models/error_details.py
to_json
rizwanniazigroupdocs/aspose-words-cloud-python
python
def to_json(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[self.attribute_map[attr]] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[self.attribute_map[attr]] = value.to_dict() elif isinstance(value, dict): result[self.attribute_map[attr]] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[self.attribute_map[attr]] = value return json.dumps(result)
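Taken together, to_dict and to_json give two serialisation views of the same model: to_dict walks swagger_types and keeps Python attribute names, recursing into nested models, lists, and dicts, while to_json keys the top-level result by attribute_map (the wire names) and returns a JSON string. The sketch below mirrors that recursive walk on a toy class; it drops the six dependency, so it targets Python 3 only, and the ToyError mappings are invented for the example.

    # Simplified, Python 3-only sketch of the to_dict / to_json walk on a toy model.
    import json

    class ToyError:
        # Generated models drive serialisation from these two class-level mappings.
        swagger_types = {"request_id": "str", "inner": "ToyError"}
        attribute_map = {"request_id": "RequestId", "inner": "Inner"}

        def __init__(self, request_id=None, inner=None):
            self.request_id = request_id
            self.inner = inner

        def to_dict(self):
            result = {}
            for attr in self.swagger_types:
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Nested models inside lists are expanded via their own to_dict.
                    result[attr] = [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                    for k, v in value.items()}
                else:
                    result[attr] = value
            return result

        def to_json(self):
            # Same walk, but the top-level keys come from attribute_map and the result is JSON text.
            return json.dumps({self.attribute_map[k]: v for k, v in self.to_dict().items()})

    if __name__ == "__main__":
        outer = ToyError("req-1", inner=ToyError("req-2"))
        print(outer.to_dict())  # {'request_id': 'req-1', 'inner': {'request_id': 'req-2', 'inner': None}}
        print(outer.to_json())  # {"RequestId": "req-1", "Inner": {"request_id": "req-2", "inner": null}}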
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
5,849,158,643,760,736,000
Returns the string representation of the model
asposewordscloud/models/error_details.py
to_str
rizwanniazigroupdocs/aspose-words-cloud-python
python
def to_str(self): return pprint.pformat(self.to_dict())
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
-8,960,031,694,814,905,000
For `print` and `pprint`
asposewordscloud/models/error_details.py
__repr__
rizwanniazigroupdocs/aspose-words-cloud-python
python
def __repr__(self): return self.to_str()
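Finally, to_str and __repr__ wire the model into print and pprint: repr(model) delegates to to_str, which pretty-prints the to_dict output. A tiny sketch of that pairing, again on a toy class rather than the generated one:

    # Toy illustration of the pprint-backed to_str / __repr__ pair.
    import pprint

    class ToyError:
        def __init__(self, request_id=None):
            self.request_id = request_id

        def to_dict(self):
            return {"request_id": self.request_id}

        def to_str(self):
            # pformat gives a readable, line-wrapped dump of the dict representation.
            return pprint.pformat(self.to_dict())

        def __repr__(self):
            # print(model) and interactive echo both go through to_str.
            return self.to_str()

    if __name__ == "__main__":
        print(ToyError("req-0001"))  # {'request_id': 'req-0001'}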