Dataset columns (value ranges as reported by the dataset viewer):

  column                  type     range
  body                    string   length 26 to 98.2k
  body_hash               int64    -9,222,864,604,528,158,000 to 9,221,803,474B
  docstring               string   length 1 to 16.8k
  path                    string   length 5 to 230
  name                    string   length 1 to 96
  repository_name         string   length 7 to 89
  lang                    string   1 class (python)
  body_without_docstring  string   length 20 to 98.2k
def __eq__(self, other):
    'Returns true if both objects are equal'
    if (not isinstance(other, ErrorDetails)):
        return False
    return (self.__dict__ == other.__dict__)
7,013,632,968,773,976,000
Returns true if both objects are equal
asposewordscloud/models/error_details.py
__eq__
rizwanniazigroupdocs/aspose-words-cloud-python
python
def __eq__(self, other):
    if (not isinstance(other, ErrorDetails)):
        return False
    return (self.__dict__ == other.__dict__)
def __ne__(self, other):
    'Returns true if both objects are not equal'
    return (not (self == other))
7,764,124,047,908,058,000
Returns true if both objects are not equal
asposewordscloud/models/error_details.py
__ne__
rizwanniazigroupdocs/aspose-words-cloud-python
python
def __ne__(self, other):
    return (not (self == other))
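The pair above implements value equality over __dict__, with __ne__ simply negating __eq__. A self-contained sketch of the same pattern, using a stand-in class rather than the real ErrorDetails model (field names are illustrative):

class Details:
    'Stand-in with the same __eq__/__ne__ pattern as ErrorDetails.'
    def __init__(self, code, message):
        self.code = code
        self.message = message

    def __eq__(self, other):
        if not isinstance(other, Details):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)

assert Details('E1', 'bad input') == Details('E1', 'bad input')   # all attributes match
assert Details('E1', 'bad input') != Details('E2', 'bad input')   # any mismatch breaks equality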
@profile @login_required def post(self): '\n Called when saving data from the annotator client\n ' data = request.get_json(force=True) image = data.get('image') dataset = data.get('dataset') image_id = image.get('id') image_model = ImageModel.objects(id=image_id).first() if (image_model is None): return ({'success': False, 'message': 'Image does not exist'}, 400) db_dataset = current_user.datasets.filter(id=image_model.dataset_id).first() if (dataset is None): return {'success': False, 'message': 'Could not find associated dataset'} db_dataset.update(annotate_url=dataset.get('annotate_url', '')) categories = CategoryModel.objects.all() annotations = AnnotationModel.objects(image_id=image_id) current_user.update(preferences=data.get('user', {})) annotated = False for category in data.get('categories', []): category_id = category.get('id') db_category = categories.filter(id=category_id).first() if (db_category is None): continue category_update = {'color': category.get('color')} if current_user.can_edit(db_category): category_update['keypoint_edges'] = category.get('keypoint_edges', []) category_update['keypoint_labels'] = category.get('keypoint_labels', []) db_category.update(**category_update) for annotation in category.get('annotations', []): annotation_id = annotation.get('id') db_annotation = annotations.filter(id=annotation_id).first() if (db_annotation is None): continue sessions = [] total_time = 0 for session in annotation.get('sessions', []): date = datetime.datetime.fromtimestamp((int(session.get('start')) / 1000.0)) model = SessionEvent(user=current_user.username, created_at=date, milliseconds=session.get('milliseconds'), tools_used=session.get('tools')) total_time += session.get('milliseconds') sessions.append(model) db_annotation.update(add_to_set__events=sessions, inc__milliseconds=total_time, set__isbbox=annotation.get('isbbox', False), set__keypoints=annotation.get('keypoints', []), set__metadata=annotation.get('metadata'), set__color=annotation.get('color')) paperjs_object = annotation.get('compoundPath', []) if (len(paperjs_object) == 2): width = db_annotation.width height = db_annotation.height (segmentation, area, bbox) = coco_util.paperjs_to_coco(width, height, paperjs_object) db_annotation.update(set__segmentation=segmentation, set__area=area, set__isbbox=annotation.get('isbbox', False), set__bbox=bbox, set__paper_object=paperjs_object) if (area > 0): annotated = True image_model.update(set__metadata=image.get('metadata', {}), set__annotated=annotated, set__category_ids=image.get('category_ids', []), set__regenerate_thumbnail=True, set__num_annotations=annotations.filter(deleted=False, area__gt=0).count()) return {'success': True}
223,986,864,701,691,170
Called when saving data from the annotator client
coco-annotator/backend/webserver/api/annotator.py
post
Cheol-H-Jeong/Deep-POC-2019
python
@profile @login_required def post(self): '\n \n ' data = request.get_json(force=True) image = data.get('image') dataset = data.get('dataset') image_id = image.get('id') image_model = ImageModel.objects(id=image_id).first() if (image_model is None): return ({'success': False, 'message': 'Image does not exist'}, 400) db_dataset = current_user.datasets.filter(id=image_model.dataset_id).first() if (dataset is None): return {'success': False, 'message': 'Could not find associated dataset'} db_dataset.update(annotate_url=dataset.get('annotate_url', )) categories = CategoryModel.objects.all() annotations = AnnotationModel.objects(image_id=image_id) current_user.update(preferences=data.get('user', {})) annotated = False for category in data.get('categories', []): category_id = category.get('id') db_category = categories.filter(id=category_id).first() if (db_category is None): continue category_update = {'color': category.get('color')} if current_user.can_edit(db_category): category_update['keypoint_edges'] = category.get('keypoint_edges', []) category_update['keypoint_labels'] = category.get('keypoint_labels', []) db_category.update(**category_update) for annotation in category.get('annotations', []): annotation_id = annotation.get('id') db_annotation = annotations.filter(id=annotation_id).first() if (db_annotation is None): continue sessions = [] total_time = 0 for session in annotation.get('sessions', []): date = datetime.datetime.fromtimestamp((int(session.get('start')) / 1000.0)) model = SessionEvent(user=current_user.username, created_at=date, milliseconds=session.get('milliseconds'), tools_used=session.get('tools')) total_time += session.get('milliseconds') sessions.append(model) db_annotation.update(add_to_set__events=sessions, inc__milliseconds=total_time, set__isbbox=annotation.get('isbbox', False), set__keypoints=annotation.get('keypoints', []), set__metadata=annotation.get('metadata'), set__color=annotation.get('color')) paperjs_object = annotation.get('compoundPath', []) if (len(paperjs_object) == 2): width = db_annotation.width height = db_annotation.height (segmentation, area, bbox) = coco_util.paperjs_to_coco(width, height, paperjs_object) db_annotation.update(set__segmentation=segmentation, set__area=area, set__isbbox=annotation.get('isbbox', False), set__bbox=bbox, set__paper_object=paperjs_object) if (area > 0): annotated = True image_model.update(set__metadata=image.get('metadata', {}), set__annotated=annotated, set__category_ids=image.get('category_ids', []), set__regenerate_thumbnail=True, set__num_annotations=annotations.filter(deleted=False, area__gt=0).count()) return {'success': True}
@profile @login_required def get(self, image_id): ' Called when loading from the annotator client ' image = ImageModel.objects(id=image_id).exclude('events').first() if (image is None): return ({'success': False, 'message': 'Could not load image'}, 400) dataset = current_user.datasets.filter(id=image.dataset_id).first() if (dataset is None): return ({'success': False, 'message': 'Could not find associated dataset'}, 400) categories = CategoryModel.objects(deleted=False).in_bulk(dataset.categories).items() images = ImageModel.objects(dataset_id=dataset.id, deleted=False) pre = images.filter(file_name__lt=image.file_name).order_by('-file_name').first() nex = images.filter(file_name__gt=image.file_name).order_by('file_name').first() preferences = {} if (not Config.LOGIN_DISABLED): preferences = current_user.preferences data = {'image': query_util.fix_ids(image), 'categories': [], 'dataset': query_util.fix_ids(dataset), 'preferences': preferences, 'permissions': {'dataset': dataset.permissions(current_user), 'image': image.permissions(current_user)}} data['image']['previous'] = (pre.id if pre else None) data['image']['next'] = (nex.id if nex else None) for category in categories: category = query_util.fix_ids(category[1]) category_id = category.get('id') annotations = AnnotationModel.objects(image_id=image_id, category_id=category_id, deleted=False).exclude('events').all() category['show'] = True category['visualize'] = False category['annotations'] = ([] if (annotations is None) else query_util.fix_ids(annotations)) data.get('categories').append(category) return data
4,471,360,595,673,237,500
Called when loading from the annotator client
coco-annotator/backend/webserver/api/annotator.py
get
Cheol-H-Jeong/Deep-POC-2019
python
@profile @login_required def get(self, image_id): ' ' image = ImageModel.objects(id=image_id).exclude('events').first() if (image is None): return ({'success': False, 'message': 'Could not load image'}, 400) dataset = current_user.datasets.filter(id=image.dataset_id).first() if (dataset is None): return ({'success': False, 'message': 'Could not find associated dataset'}, 400) categories = CategoryModel.objects(deleted=False).in_bulk(dataset.categories).items() images = ImageModel.objects(dataset_id=dataset.id, deleted=False) pre = images.filter(file_name__lt=image.file_name).order_by('-file_name').first() nex = images.filter(file_name__gt=image.file_name).order_by('file_name').first() preferences = {} if (not Config.LOGIN_DISABLED): preferences = current_user.preferences data = {'image': query_util.fix_ids(image), 'categories': [], 'dataset': query_util.fix_ids(dataset), 'preferences': preferences, 'permissions': {'dataset': dataset.permissions(current_user), 'image': image.permissions(current_user)}} data['image']['previous'] = (pre.id if pre else None) data['image']['next'] = (nex.id if nex else None) for category in categories: category = query_util.fix_ids(category[1]) category_id = category.get('id') annotations = AnnotationModel.objects(image_id=image_id, category_id=category_id, deleted=False).exclude('events').all() category['show'] = True category['visualize'] = False category['annotations'] = ([] if (annotations is None) else query_util.fix_ids(annotations)) data.get('categories').append(category) return data
def __init__(self, **kwargs): '\n Initializes a new UpdateConnectionFromAmazonS3 object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.UpdateConnectionFromAmazonS3.model_type` attribute\n of this class is ``AMAZON_S3_CONNECTION`` and it should not be changed.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param model_type:\n The value to assign to the model_type property of this UpdateConnectionFromAmazonS3.\n Allowed values for this property are: "ORACLE_ADWC_CONNECTION", "ORACLE_ATP_CONNECTION", "ORACLE_OBJECT_STORAGE_CONNECTION", "ORACLEDB_CONNECTION", "MYSQL_CONNECTION", "GENERIC_JDBC_CONNECTION", "BICC_CONNECTION", "AMAZON_S3_CONNECTION", "BIP_CONNECTION"\n :type model_type: str\n\n :param key:\n The value to assign to the key property of this UpdateConnectionFromAmazonS3.\n :type key: str\n\n :param model_version:\n The value to assign to the model_version property of this UpdateConnectionFromAmazonS3.\n :type model_version: str\n\n :param parent_ref:\n The value to assign to the parent_ref property of this UpdateConnectionFromAmazonS3.\n :type parent_ref: oci.data_integration.models.ParentReference\n\n :param name:\n The value to assign to the name property of this UpdateConnectionFromAmazonS3.\n :type name: str\n\n :param description:\n The value to assign to the description property of this UpdateConnectionFromAmazonS3.\n :type description: str\n\n :param object_status:\n The value to assign to the object_status property of this UpdateConnectionFromAmazonS3.\n :type object_status: int\n\n :param object_version:\n The value to assign to the object_version property of this UpdateConnectionFromAmazonS3.\n :type object_version: int\n\n :param identifier:\n The value to assign to the identifier property of this UpdateConnectionFromAmazonS3.\n :type identifier: str\n\n :param connection_properties:\n The value to assign to the connection_properties property of this UpdateConnectionFromAmazonS3.\n :type connection_properties: list[oci.data_integration.models.ConnectionProperty]\n\n :param registry_metadata:\n The value to assign to the registry_metadata property of this UpdateConnectionFromAmazonS3.\n :type registry_metadata: oci.data_integration.models.RegistryMetadata\n\n :param access_key:\n The value to assign to the access_key property of this UpdateConnectionFromAmazonS3.\n :type access_key: oci.data_integration.models.SensitiveAttribute\n\n :param secret_key:\n The value to assign to the secret_key property of this UpdateConnectionFromAmazonS3.\n :type secret_key: oci.data_integration.models.SensitiveAttribute\n\n ' self.swagger_types = {'model_type': 'str', 'key': 'str', 'model_version': 'str', 'parent_ref': 'ParentReference', 'name': 'str', 'description': 'str', 'object_status': 'int', 'object_version': 'int', 'identifier': 'str', 'connection_properties': 'list[ConnectionProperty]', 'registry_metadata': 'RegistryMetadata', 'access_key': 'SensitiveAttribute', 'secret_key': 'SensitiveAttribute'} self.attribute_map = {'model_type': 'modelType', 'key': 'key', 'model_version': 'modelVersion', 'parent_ref': 'parentRef', 'name': 'name', 'description': 'description', 'object_status': 'objectStatus', 'object_version': 'objectVersion', 'identifier': 'identifier', 'connection_properties': 'connectionProperties', 'registry_metadata': 'registryMetadata', 'access_key': 'accessKey', 'secret_key': 'secretKey'} self._model_type = None self._key = None self._model_version = None 
self._parent_ref = None self._name = None self._description = None self._object_status = None self._object_version = None self._identifier = None self._connection_properties = None self._registry_metadata = None self._access_key = None self._secret_key = None self._model_type = 'AMAZON_S3_CONNECTION'
2,299,845,921,030,368,500
Initializes a new UpdateConnectionFromAmazonS3 object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.UpdateConnectionFromAmazonS3.model_type` attribute of this class is ``AMAZON_S3_CONNECTION`` and it should not be changed. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param model_type: The value to assign to the model_type property of this UpdateConnectionFromAmazonS3. Allowed values for this property are: "ORACLE_ADWC_CONNECTION", "ORACLE_ATP_CONNECTION", "ORACLE_OBJECT_STORAGE_CONNECTION", "ORACLEDB_CONNECTION", "MYSQL_CONNECTION", "GENERIC_JDBC_CONNECTION", "BICC_CONNECTION", "AMAZON_S3_CONNECTION", "BIP_CONNECTION" :type model_type: str :param key: The value to assign to the key property of this UpdateConnectionFromAmazonS3. :type key: str :param model_version: The value to assign to the model_version property of this UpdateConnectionFromAmazonS3. :type model_version: str :param parent_ref: The value to assign to the parent_ref property of this UpdateConnectionFromAmazonS3. :type parent_ref: oci.data_integration.models.ParentReference :param name: The value to assign to the name property of this UpdateConnectionFromAmazonS3. :type name: str :param description: The value to assign to the description property of this UpdateConnectionFromAmazonS3. :type description: str :param object_status: The value to assign to the object_status property of this UpdateConnectionFromAmazonS3. :type object_status: int :param object_version: The value to assign to the object_version property of this UpdateConnectionFromAmazonS3. :type object_version: int :param identifier: The value to assign to the identifier property of this UpdateConnectionFromAmazonS3. :type identifier: str :param connection_properties: The value to assign to the connection_properties property of this UpdateConnectionFromAmazonS3. :type connection_properties: list[oci.data_integration.models.ConnectionProperty] :param registry_metadata: The value to assign to the registry_metadata property of this UpdateConnectionFromAmazonS3. :type registry_metadata: oci.data_integration.models.RegistryMetadata :param access_key: The value to assign to the access_key property of this UpdateConnectionFromAmazonS3. :type access_key: oci.data_integration.models.SensitiveAttribute :param secret_key: The value to assign to the secret_key property of this UpdateConnectionFromAmazonS3. :type secret_key: oci.data_integration.models.SensitiveAttribute
src/oci/data_integration/models/update_connection_from_amazon_s3.py
__init__
pabs3/oci-python-sdk
python
def __init__(self, **kwargs): '\n Initializes a new UpdateConnectionFromAmazonS3 object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.UpdateConnectionFromAmazonS3.model_type` attribute\n of this class is ``AMAZON_S3_CONNECTION`` and it should not be changed.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param model_type:\n The value to assign to the model_type property of this UpdateConnectionFromAmazonS3.\n Allowed values for this property are: "ORACLE_ADWC_CONNECTION", "ORACLE_ATP_CONNECTION", "ORACLE_OBJECT_STORAGE_CONNECTION", "ORACLEDB_CONNECTION", "MYSQL_CONNECTION", "GENERIC_JDBC_CONNECTION", "BICC_CONNECTION", "AMAZON_S3_CONNECTION", "BIP_CONNECTION"\n :type model_type: str\n\n :param key:\n The value to assign to the key property of this UpdateConnectionFromAmazonS3.\n :type key: str\n\n :param model_version:\n The value to assign to the model_version property of this UpdateConnectionFromAmazonS3.\n :type model_version: str\n\n :param parent_ref:\n The value to assign to the parent_ref property of this UpdateConnectionFromAmazonS3.\n :type parent_ref: oci.data_integration.models.ParentReference\n\n :param name:\n The value to assign to the name property of this UpdateConnectionFromAmazonS3.\n :type name: str\n\n :param description:\n The value to assign to the description property of this UpdateConnectionFromAmazonS3.\n :type description: str\n\n :param object_status:\n The value to assign to the object_status property of this UpdateConnectionFromAmazonS3.\n :type object_status: int\n\n :param object_version:\n The value to assign to the object_version property of this UpdateConnectionFromAmazonS3.\n :type object_version: int\n\n :param identifier:\n The value to assign to the identifier property of this UpdateConnectionFromAmazonS3.\n :type identifier: str\n\n :param connection_properties:\n The value to assign to the connection_properties property of this UpdateConnectionFromAmazonS3.\n :type connection_properties: list[oci.data_integration.models.ConnectionProperty]\n\n :param registry_metadata:\n The value to assign to the registry_metadata property of this UpdateConnectionFromAmazonS3.\n :type registry_metadata: oci.data_integration.models.RegistryMetadata\n\n :param access_key:\n The value to assign to the access_key property of this UpdateConnectionFromAmazonS3.\n :type access_key: oci.data_integration.models.SensitiveAttribute\n\n :param secret_key:\n The value to assign to the secret_key property of this UpdateConnectionFromAmazonS3.\n :type secret_key: oci.data_integration.models.SensitiveAttribute\n\n ' self.swagger_types = {'model_type': 'str', 'key': 'str', 'model_version': 'str', 'parent_ref': 'ParentReference', 'name': 'str', 'description': 'str', 'object_status': 'int', 'object_version': 'int', 'identifier': 'str', 'connection_properties': 'list[ConnectionProperty]', 'registry_metadata': 'RegistryMetadata', 'access_key': 'SensitiveAttribute', 'secret_key': 'SensitiveAttribute'} self.attribute_map = {'model_type': 'modelType', 'key': 'key', 'model_version': 'modelVersion', 'parent_ref': 'parentRef', 'name': 'name', 'description': 'description', 'object_status': 'objectStatus', 'object_version': 'objectVersion', 'identifier': 'identifier', 'connection_properties': 'connectionProperties', 'registry_metadata': 'registryMetadata', 'access_key': 'accessKey', 'secret_key': 'secretKey'} self._model_type = None self._key = None self._model_version = None 
self._parent_ref = None self._name = None self._description = None self._object_status = None self._object_version = None self._identifier = None self._connection_properties = None self._registry_metadata = None self._access_key = None self._secret_key = None self._model_type = 'AMAZON_S3_CONNECTION'
@property
def access_key(self):
    """
    Gets the access_key of this UpdateConnectionFromAmazonS3.

    :return: The access_key of this UpdateConnectionFromAmazonS3.
    :rtype: oci.data_integration.models.SensitiveAttribute
    """
    return self._access_key
-34,331,669,729,738,812
Gets the access_key of this UpdateConnectionFromAmazonS3. :return: The access_key of this UpdateConnectionFromAmazonS3. :rtype: oci.data_integration.models.SensitiveAttribute
src/oci/data_integration/models/update_connection_from_amazon_s3.py
access_key
pabs3/oci-python-sdk
python
@property
def access_key(self):
    """
    Gets the access_key of this UpdateConnectionFromAmazonS3.

    :return: The access_key of this UpdateConnectionFromAmazonS3.
    :rtype: oci.data_integration.models.SensitiveAttribute
    """
    return self._access_key
@access_key.setter
def access_key(self, access_key):
    """
    Sets the access_key of this UpdateConnectionFromAmazonS3.

    :param access_key: The access_key of this UpdateConnectionFromAmazonS3.
    :type: oci.data_integration.models.SensitiveAttribute
    """
    self._access_key = access_key
-474,915,086,494,389,300
Sets the access_key of this UpdateConnectionFromAmazonS3. :param access_key: The access_key of this UpdateConnectionFromAmazonS3. :type: oci.data_integration.models.SensitiveAttribute
src/oci/data_integration/models/update_connection_from_amazon_s3.py
access_key
pabs3/oci-python-sdk
python
@access_key.setter
def access_key(self, access_key):
    """
    Sets the access_key of this UpdateConnectionFromAmazonS3.

    :param access_key: The access_key of this UpdateConnectionFromAmazonS3.
    :type: oci.data_integration.models.SensitiveAttribute
    """
    self._access_key = access_key
@property
def secret_key(self):
    """
    Gets the secret_key of this UpdateConnectionFromAmazonS3.

    :return: The secret_key of this UpdateConnectionFromAmazonS3.
    :rtype: oci.data_integration.models.SensitiveAttribute
    """
    return self._secret_key
7,734,419,076,322,159,000
Gets the secret_key of this UpdateConnectionFromAmazonS3. :return: The secret_key of this UpdateConnectionFromAmazonS3. :rtype: oci.data_integration.models.SensitiveAttribute
src/oci/data_integration/models/update_connection_from_amazon_s3.py
secret_key
pabs3/oci-python-sdk
python
@property
def secret_key(self):
    """
    Gets the secret_key of this UpdateConnectionFromAmazonS3.

    :return: The secret_key of this UpdateConnectionFromAmazonS3.
    :rtype: oci.data_integration.models.SensitiveAttribute
    """
    return self._secret_key
@secret_key.setter
def secret_key(self, secret_key):
    """
    Sets the secret_key of this UpdateConnectionFromAmazonS3.

    :param secret_key: The secret_key of this UpdateConnectionFromAmazonS3.
    :type: oci.data_integration.models.SensitiveAttribute
    """
    self._secret_key = secret_key
-7,769,865,444,699,896,000
Sets the secret_key of this UpdateConnectionFromAmazonS3. :param secret_key: The secret_key of this UpdateConnectionFromAmazonS3. :type: oci.data_integration.models.SensitiveAttribute
src/oci/data_integration/models/update_connection_from_amazon_s3.py
secret_key
pabs3/oci-python-sdk
python
@secret_key.setter
def secret_key(self, secret_key):
    """
    Sets the secret_key of this UpdateConnectionFromAmazonS3.

    :param secret_key: The secret_key of this UpdateConnectionFromAmazonS3.
    :type: oci.data_integration.models.SensitiveAttribute
    """
    self._secret_key = secret_key
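A minimal usage sketch of the accessors above. Both classes are imported from oci.data_integration.models, as the docstrings indicate; constructing SensitiveAttribute with no arguments is an assumption, consult that model for its actual fields:

from oci.data_integration.models import UpdateConnectionFromAmazonS3, SensitiveAttribute

conn = UpdateConnectionFromAmazonS3()       # model_type is preset to 'AMAZON_S3_CONNECTION'
conn.access_key = SensitiveAttribute()      # goes through the @access_key.setter shown above
conn.secret_key = SensitiveAttribute()      # goes through the @secret_key.setter shown above
print(conn.access_key, conn.secret_key)     # read back through the property getters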
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id=None): 'Train the model.' if (args.local_rank in [(- 1), 0]): tb_writer = SummaryWriter() args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu)) print(f'Local Rank = {args.local_rank}') print(len(train_dataset)) train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset)) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if (args.max_steps > 0): t_total = args.max_steps args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1) else: t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) logging.info([n for (n, p) in model.named_parameters() if p.requires_grad]) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.') (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) if (args.n_gpu > 1): model = torch.nn.DataParallel(model) if (args.local_rank != (- 1)): model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) logger.info('***** Running training *****') logger.info(' Num examples = %d', len(train_dataset)) logger.info(' Num Epochs = %d', args.num_train_epochs) logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size) logger.info(' Total train batch size (w. 
parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1))) logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps) logger.info(' Total optimization steps = %d', t_total) best_score = 0.0 best_checkpoint = None patience = 0 global_step = 0 (tr_loss, logging_loss) = (0.0, 0.0) model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0])) set_seed(args) cur_epoch = 0 for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0])) cur_epoch += 1 for (step, batch) in enumerate(epoch_iterator): batch = tuple((t.to(args.device) for t in batch if (t is not None))) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if (args.model_type != 'distilbert'): inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet']) else None) if (args.model_type == 'xlm'): inputs['langs'] = batch[4] outputs = model(**inputs) loss = outputs[0] if (args.n_gpu > 1): loss = loss.mean() if (args.gradient_accumulation_steps > 1): loss = (loss / args.gradient_accumulation_steps) if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (((step + 1) % args.gradient_accumulation_steps) == 0): if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scheduler.step() optimizer.step() model.zero_grad() global_step += 1 if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)): if ((args.local_rank == (- 1)) and args.evaluate_during_training): (results, _) = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode='dev', lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name) for (key, value) in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step) logging_loss = tr_loss if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)): if args.save_only_best_checkpoint: (result, _) = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode='dev', prefix=global_step, lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name) if (result['f1'] > best_score): logger.info("result['f1']={} > best_score={}".format(result['f1'], best_score)) best_score = result['f1'] output_dir = os.path.join(args.output_dir, 'checkpoint-best') best_checkpoint = output_dir if (not os.path.exists(output_dir)): os.makedirs(output_dir) model_to_save = (model.module if hasattr(model, 'module') else model) if args.do_save_adapters: model_to_save.save_all_adapters(output_dir) if args.do_save_adapter_fusions: model_to_save.save_all_adapter_fusions(output_dir) if args.do_save_full_model: model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info('Saving the best model checkpoint to %s', output_dir) logger.info('Reset patience to 0') patience = 0 else: patience += 1 logger.info('Hit patience={}'.format(patience)) if 
((args.eval_patience > 0) and (patience > args.eval_patience)): logger.info('early stop! patience={}'.format(patience)) epoch_iterator.close() train_iterator.close() if (args.local_rank in [(- 1), 0]): tb_writer.close() return (global_step, (tr_loss / global_step)) else: output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if (not os.path.exists(output_dir)): os.makedirs(output_dir) model_to_save = (model.module if hasattr(model, 'module') else model) if args.do_save_adapters: model_to_save.save_all_adapters(output_dir) if args.do_save_adapter_fusions: model_to_save.save_all_adapter_fusions(output_dir) if args.do_save_full_model: model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info('Saving model checkpoint to %s', output_dir) if ((args.max_steps > 0) and (global_step > args.max_steps)): epoch_iterator.close() break if ((args.max_steps > 0) and (global_step > args.max_steps)): train_iterator.close() break if (args.local_rank in [(- 1), 0]): tb_writer.close() return (global_step, (tr_loss / global_step))
8,833,718,236,150,706,000
Train the model.
third_party/ridayesh_run_tag.py
train
rohanshah13/cloud-emea-copy
python
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id=None): if (args.local_rank in [(- 1), 0]): tb_writer = SummaryWriter() args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu)) print(f'Local Rank = {args.local_rank}') print(len(train_dataset)) train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset)) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if (args.max_steps > 0): t_total = args.max_steps args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1) else: t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) logging.info([n for (n, p) in model.named_parameters() if p.requires_grad]) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.') (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) if (args.n_gpu > 1): model = torch.nn.DataParallel(model) if (args.local_rank != (- 1)): model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) logger.info('***** Running training *****') logger.info(' Num examples = %d', len(train_dataset)) logger.info(' Num Epochs = %d', args.num_train_epochs) logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size) logger.info(' Total train batch size (w. 
parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1))) logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps) logger.info(' Total optimization steps = %d', t_total) best_score = 0.0 best_checkpoint = None patience = 0 global_step = 0 (tr_loss, logging_loss) = (0.0, 0.0) model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0])) set_seed(args) cur_epoch = 0 for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0])) cur_epoch += 1 for (step, batch) in enumerate(epoch_iterator): batch = tuple((t.to(args.device) for t in batch if (t is not None))) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]} if (args.model_type != 'distilbert'): inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet']) else None) if (args.model_type == 'xlm'): inputs['langs'] = batch[4] outputs = model(**inputs) loss = outputs[0] if (args.n_gpu > 1): loss = loss.mean() if (args.gradient_accumulation_steps > 1): loss = (loss / args.gradient_accumulation_steps) if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (((step + 1) % args.gradient_accumulation_steps) == 0): if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scheduler.step() optimizer.step() model.zero_grad() global_step += 1 if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)): if ((args.local_rank == (- 1)) and args.evaluate_during_training): (results, _) = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode='dev', lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name) for (key, value) in results.items(): tb_writer.add_scalar('eval_{}'.format(key), value, global_step) tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step) logging_loss = tr_loss if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)): if args.save_only_best_checkpoint: (result, _) = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode='dev', prefix=global_step, lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name) if (result['f1'] > best_score): logger.info("result['f1']={} > best_score={}".format(result['f1'], best_score)) best_score = result['f1'] output_dir = os.path.join(args.output_dir, 'checkpoint-best') best_checkpoint = output_dir if (not os.path.exists(output_dir)): os.makedirs(output_dir) model_to_save = (model.module if hasattr(model, 'module') else model) if args.do_save_adapters: model_to_save.save_all_adapters(output_dir) if args.do_save_adapter_fusions: model_to_save.save_all_adapter_fusions(output_dir) if args.do_save_full_model: model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info('Saving the best model checkpoint to %s', output_dir) logger.info('Reset patience to 0') patience = 0 else: patience += 1 logger.info('Hit patience={}'.format(patience)) if 
((args.eval_patience > 0) and (patience > args.eval_patience)): logger.info('early stop! patience={}'.format(patience)) epoch_iterator.close() train_iterator.close() if (args.local_rank in [(- 1), 0]): tb_writer.close() return (global_step, (tr_loss / global_step)) else: output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if (not os.path.exists(output_dir)): os.makedirs(output_dir) model_to_save = (model.module if hasattr(model, 'module') else model) if args.do_save_adapters: model_to_save.save_all_adapters(output_dir) if args.do_save_adapter_fusions: model_to_save.save_all_adapter_fusions(output_dir) if args.do_save_full_model: model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info('Saving model checkpoint to %s', output_dir) if ((args.max_steps > 0) and (global_step > args.max_steps)): epoch_iterator.close() break if ((args.max_steps > 0) and (global_step > args.max_steps)): train_iterator.close() break if (args.local_rank in [(- 1), 0]): tb_writer.close() return (global_step, (tr_loss / global_step))
def _find_all_hints_in_graph_def(session):
    """Look at the current default graph and return a list of LiteFuncCall objs.

    Args:
      session: A TensorFlow session that contains the graph to convert.
    Returns:
      a list of `LifeFuncCall` objects in the form
    """
    func_calls = _collections.defaultdict(_LiteFuncCall)
    seen_ops = set()
    for op in session.graph.get_operations():
        for operand in _itertools.chain(op.inputs, op.outputs):
            if (operand in seen_ops):
                continue
            seen_ops.add(operand)
            attr = operand.op.node_def.attr
            uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
            if (OpHint.FUNCTION_UUID_ATTR not in attr):
                continue
            call_def = func_calls[uuid]
            call_def.uuid = uuid
            if (OpHint.FUNCTION_UUID_ATTR in attr):
                call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
            if (OpHint.FUNCTION_INPUT_INDEX_ATTR in attr):
                call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
            if (OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr):
                call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
            for a in attr:
                if a.startswith('_tflite_attr_'):
                    call_def.params[a.replace('_tflite_attr_,', '')] = attr[a].tensor
    return func_calls
7,412,164,229,717,128,000
Look at the current default graph and return a list of LiteFuncCall objs. Args: session: A TensorFlow session that contains the graph to convert. Returns: a list of `LifeFuncCall` objects in the form
tensorflow/contrib/lite/python/op_hint.py
_find_all_hints_in_graph_def
188080501/tensorflow
python
def _find_all_hints_in_graph_def(session):
    """Look at the current default graph and return a list of LiteFuncCall objs.

    Args:
      session: A TensorFlow session that contains the graph to convert.
    Returns:
      a list of `LifeFuncCall` objects in the form
    """
    func_calls = _collections.defaultdict(_LiteFuncCall)
    seen_ops = set()
    for op in session.graph.get_operations():
        for operand in _itertools.chain(op.inputs, op.outputs):
            if (operand in seen_ops):
                continue
            seen_ops.add(operand)
            attr = operand.op.node_def.attr
            uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
            if (OpHint.FUNCTION_UUID_ATTR not in attr):
                continue
            call_def = func_calls[uuid]
            call_def.uuid = uuid
            if (OpHint.FUNCTION_UUID_ATTR in attr):
                call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
            if (OpHint.FUNCTION_INPUT_INDEX_ATTR in attr):
                call_def.inputs[attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i] = operand
            if (OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr):
                call_def.outputs[attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i] = operand
            for a in attr:
                if a.startswith('_tflite_attr_'):
                    call_def.params[a.replace('_tflite_attr_,', '')] = attr[a].tensor
    return func_calls
def _tensor_name_base(full_tensor_name):
    """Removes the device assignment code from a tensor.

    e.g. _tensor_name_base("foo:3") => "foo"

    Args:
      full_tensor_name: A tensor name that is annotated with a device placement
        (this is what tensor flow introspection gives).
    Returns:
      A name without any device assignment.
    """
    return full_tensor_name.name.split(':')[0]
-9,004,534,146,274,701,000
Removes the device assignment code from a tensor. e.g. _tensor_name_base("foo:3") => "foo" Args: full_tensor_name: A tensor name that is annotated with a device placement (this is what tensor flow introspection gives). Returns: A name without any device assignment.
tensorflow/contrib/lite/python/op_hint.py
_tensor_name_base
188080501/tensorflow
python
def _tensor_name_base(full_tensor_name):
    """Removes the device assignment code from a tensor.

    e.g. _tensor_name_base("foo:3") => "foo"

    Args:
      full_tensor_name: A tensor name that is annotated with a device placement
        (this is what tensor flow introspection gives).
    Returns:
      A name without any device assignment.
    """
    return full_tensor_name.name.split(':')[0]
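Note that, despite the string example in the docstring, the implementation reads .name from its argument, so it expects a tf.Tensor-like object rather than a plain string. A tiny self-contained sketch of that behavior:

class _FakeTensor:
    'Stand-in for a tf.Tensor; only the .name attribute is used here.'
    name = 'foo:3'

assert _tensor_name_base(_FakeTensor()) == 'foo'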
def convert_op_hints_to_stubs(session):
    """Converts a graphdef with LiteOp hints into stub operations.

    This is used to prepare for toco conversion of complex intrinsic usages.

    Args:
      session: A TensorFlow session that contains the graph to convert.
    Returns:
      A new graphdef with all ops contained in OpHints being replaced by
      a single op call with the right parameters.
    """
    hints = _find_all_hints_in_graph_def(session)
    current_graph_def = session.graph_def
    for call in hints.values():
        input_names = ([None] * len(call.inputs))
        output_names = ([None] * len(call.outputs))
        output_dtypes = ([None] * len(call.outputs))
        output_quantized = False
        for (input_index, tensor) in call.inputs.items():
            input_names[input_index] = _tensor_name_base(tensor)
        for (output_index, tensor) in call.outputs.items():
            output_names[output_index] = _tensor_name_base(tensor)
            output_dtypes[output_index] = tensor.dtype.as_datatype_enum
        current_graph_def = _framework.fuse_op(current_graph_def, input_names,
                                               output_names, output_dtypes,
                                               output_quantized, call.uuid,
                                               call.function_name)
        for node in current_graph_def.node:
            if (node.name == call.uuid):
                for (param, tensor) in call.params.items():
                    node.attr[param].tensor.CopyFrom(tensor)
    return current_graph_def
545,267,334,812,460,350
Converts a graphdef with LiteOp hints into stub operations. This is used to prepare for toco conversion of complex intrinsic usages. Args: session: A TensorFlow session that contains the graph to convert. Returns: A new graphdef with all ops contained in OpHints being replaced by a single op call with the right parameters.
tensorflow/contrib/lite/python/op_hint.py
convert_op_hints_to_stubs
188080501/tensorflow
python
def convert_op_hints_to_stubs(session):
    """Converts a graphdef with LiteOp hints into stub operations.

    This is used to prepare for toco conversion of complex intrinsic usages.

    Args:
      session: A TensorFlow session that contains the graph to convert.
    Returns:
      A new graphdef with all ops contained in OpHints being replaced by
      a single op call with the right parameters.
    """
    hints = _find_all_hints_in_graph_def(session)
    current_graph_def = session.graph_def
    for call in hints.values():
        input_names = ([None] * len(call.inputs))
        output_names = ([None] * len(call.outputs))
        output_dtypes = ([None] * len(call.outputs))
        output_quantized = False
        for (input_index, tensor) in call.inputs.items():
            input_names[input_index] = _tensor_name_base(tensor)
        for (output_index, tensor) in call.outputs.items():
            output_names[output_index] = _tensor_name_base(tensor)
            output_dtypes[output_index] = tensor.dtype.as_datatype_enum
        current_graph_def = _framework.fuse_op(current_graph_def, input_names,
                                               output_names, output_dtypes,
                                               output_quantized, call.uuid,
                                               call.function_name)
        for node in current_graph_def.node:
            if (node.name == call.uuid):
                for (param, tensor) in call.params.items():
                    node.attr[param].tensor.CopyFrom(tensor)
    return current_graph_def
def __init__(self, function_name, **kwargs):
    """Create a OpHint.

    Args:
      function_name: Name of the function (the custom op name in tflite)
      **kwargs: Keyword arguments of any constant attributes for the function.
    """
    self._function_name = function_name
    self._unique_function_id = _uuid.uuid1().hex
    self._curr_input_index = 0
    self._curr_output_index = 0
    self._attrs_to_store_later = kwargs
    self._stored_attrs = False
2,070,700,012,877,376,300
Create a OpHint. Args: function_name: Name of the function (the custom op name in tflite) **kwargs: Keyword arguments of any constant attributes for the function.
tensorflow/contrib/lite/python/op_hint.py
__init__
188080501/tensorflow
python
def __init__(self, function_name, **kwargs):
    """Create a OpHint.

    Args:
      function_name: Name of the function (the custom op name in tflite)
      **kwargs: Keyword arguments of any constant attributes for the function.
    """
    self._function_name = function_name
    self._unique_function_id = _uuid.uuid1().hex
    self._curr_input_index = 0
    self._curr_output_index = 0
    self._attrs_to_store_later = kwargs
    self._stored_attrs = False
def add_inputs(self, *args):
    """Add a sequence of inputs to the function invocation.

    Args:
      *args: List of inputs to be converted (should be Tf.Tensor).
    Returns:
      Wrapped inputs (identity standins that have additional metadata). These
      are also are also tf.Tensor's.
    """
    def augmented_identity(arg):
        identity_op = _array_ops.identity(arg)
        identity_op.op._set_attr(
            OpHint.FUNCTION_NAME_ATTR,
            _attr_value_pb2.AttrValue(s=self._function_name))
        identity_op.op._set_attr(
            OpHint.FUNCTION_UUID_ATTR,
            _attr_value_pb2.AttrValue(s=self._unique_function_id))
        identity_op.op._set_attr(
            OpHint.FUNCTION_INPUT_INDEX_ATTR,
            _attr_value_pb2.AttrValue(i=self._curr_input_index))
        self._curr_input_index += 1
        return identity_op
    return [augmented_identity(arg) for arg in args]
-2,426,469,873,050,694,700
Add a sequence of inputs to the function invocation. Args: *args: List of inputs to be converted (should be Tf.Tensor). Returns: Wrapped inputs (identity standins that have additional metadata). These are also are also tf.Tensor's.
tensorflow/contrib/lite/python/op_hint.py
add_inputs
188080501/tensorflow
python
def add_inputs(self, *args):
    """Add a sequence of inputs to the function invocation.

    Args:
      *args: List of inputs to be converted (should be Tf.Tensor).
    Returns:
      Wrapped inputs (identity standins that have additional metadata). These
      are also are also tf.Tensor's.
    """
    def augmented_identity(arg):
        identity_op = _array_ops.identity(arg)
        identity_op.op._set_attr(
            OpHint.FUNCTION_NAME_ATTR,
            _attr_value_pb2.AttrValue(s=self._function_name))
        identity_op.op._set_attr(
            OpHint.FUNCTION_UUID_ATTR,
            _attr_value_pb2.AttrValue(s=self._unique_function_id))
        identity_op.op._set_attr(
            OpHint.FUNCTION_INPUT_INDEX_ATTR,
            _attr_value_pb2.AttrValue(i=self._curr_input_index))
        self._curr_input_index += 1
        return identity_op
    return [augmented_identity(arg) for arg in args]
def add_outputs(self, *args):
    """Add a sequence of outputs to the function invocation.

    Args:
      *args: List of outputs to be converted (should be tf.Tensor).
    Returns:
      Wrapped outputs (identity standins that have additional metadata). These
      are also tf.Tensor's.
    """
    def augmented_identity(arg):
        identity_op = _array_ops.identity(arg)
        identity_op.op._set_attr(
            OpHint.FUNCTION_NAME_ATTR,
            _attr_value_pb2.AttrValue(s=self._function_name))
        identity_op.op._set_attr(
            OpHint.FUNCTION_UUID_ATTR,
            _attr_value_pb2.AttrValue(s=self._unique_function_id))
        identity_op.op._set_attr(
            OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
            _attr_value_pb2.AttrValue(i=self._curr_output_index))
        self._curr_output_index += 1
        return identity_op
    wrapped_outputs = [augmented_identity(arg) for arg in args]
    if (not self._stored_attrs):
        for (key, value) in self._attrs_to_store_later.iteritems():
            self._setattr(wrapped_outputs[0], ('_tflite_attr_' + key), value)
        self._stored_attrs = True
    return wrapped_outputs
-7,205,941,043,342,234,000
Add a sequence of outputs to the function invocation. Args: *args: List of outputs to be converted (should be tf.Tensor). Returns: Wrapped outputs (identity standins that have additional metadata). These are also tf.Tensor's.
tensorflow/contrib/lite/python/op_hint.py
add_outputs
188080501/tensorflow
python
def add_outputs(self, *args):
    """Add a sequence of outputs to the function invocation.

    Args:
      *args: List of outputs to be converted (should be tf.Tensor).
    Returns:
      Wrapped outputs (identity standins that have additional metadata). These
      are also tf.Tensor's.
    """
    def augmented_identity(arg):
        identity_op = _array_ops.identity(arg)
        identity_op.op._set_attr(
            OpHint.FUNCTION_NAME_ATTR,
            _attr_value_pb2.AttrValue(s=self._function_name))
        identity_op.op._set_attr(
            OpHint.FUNCTION_UUID_ATTR,
            _attr_value_pb2.AttrValue(s=self._unique_function_id))
        identity_op.op._set_attr(
            OpHint.FUNCTION_OUTPUT_INDEX_ATTR,
            _attr_value_pb2.AttrValue(i=self._curr_output_index))
        self._curr_output_index += 1
        return identity_op
    wrapped_outputs = [augmented_identity(arg) for arg in args]
    if (not self._stored_attrs):
        for (key, value) in self._attrs_to_store_later.iteritems():
            self._setattr(wrapped_outputs[0], ('_tflite_attr_' + key), value)
        self._stored_attrs = True
    return wrapped_outputs
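Taken together, the OpHint methods above wrap a subgraph with tagged identity ops, and convert_op_hints_to_stubs() later collapses that hinted region into a single stub op. A rough TF 1.x sketch of the intended flow (the import path follows the file path shown above; the wrapped computation is illustrative, and this is tf.contrib-era, Python 2 oriented code, note the .iteritems() call above):

import tensorflow as tf
from tensorflow.contrib.lite.python import op_hint

hint = op_hint.OpHint('my_custom_op')          # names the future TFLite custom op
x = tf.placeholder(tf.float32, [None, 4])
(x,) = hint.add_inputs(x)                      # identity stand-ins tagged with input indices
y = tf.nn.relu(x)                              # the subgraph to be fused
(y,) = hint.add_outputs(y)                     # identity stand-ins tagged with output indices

with tf.Session() as sess:
    stubbed = op_hint.convert_op_hints_to_stubs(sess)   # GraphDef with the hinted region fused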
def extract(infile):
    """
    Merges bioindex.tsv with the infile (balanced data),
    finds the volsplit.zip location for each bio file and
    extracts the files into secure_volume/holding_folder.
    """
    bioindex = pd.read_csv('/media/secure_volume/index/bioindex.tsv', sep='\t')
    balanced_bioindex = pd.read_table(infile)
    for suffix in balanced_bioindex.filesuffix.unique():
        volsplit_file = (('volsplit' + str(suffix)) + '.zip')
        volsplit_df = balanced_bioindex.loc[(balanced_bioindex.filesuffix == suffix), :]
        try:
            with zipfile.ZipFile(('/media/secure_volume/' + volsplit_file), 'r') as myzip:
                for (idx, row) in volsplit_df.iterrows():
                    filename = (row['mainid'] + '.zip')
                    myzip.extract(filename, '/media/secure_volume/holding_folder')
        except Exception as e:
            print('ERROR:', filename, 'not found in', volsplit_file, '!', e)
-1,507,047,250,928,302,000
Merges bioindex.tsv with the infile (balanced data), finds the volsplit.zip location for each bio file and extracts the files into secure_volume/holding_folder.
code/extract_balanced.py
extract
afcarl/biographies
python
def extract(infile):
    """
    Merges bioindex.tsv with the infile (balanced data),
    finds the volsplit.zip location for each bio file and
    extracts the files into secure_volume/holding_folder.
    """
    bioindex = pd.read_csv('/media/secure_volume/index/bioindex.tsv', sep='\t')
    balanced_bioindex = pd.read_table(infile)
    for suffix in balanced_bioindex.filesuffix.unique():
        volsplit_file = (('volsplit' + str(suffix)) + '.zip')
        volsplit_df = balanced_bioindex.loc[(balanced_bioindex.filesuffix == suffix), :]
        try:
            with zipfile.ZipFile(('/media/secure_volume/' + volsplit_file), 'r') as myzip:
                for (idx, row) in volsplit_df.iterrows():
                    filename = (row['mainid'] + '.zip')
                    myzip.extract(filename, '/media/secure_volume/holding_folder')
        except Exception as e:
            print('ERROR:', filename, 'not found in', volsplit_file, '!', e)
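A hedged invocation sketch (the file name is a placeholder; per the code above, the TSV must provide 'filesuffix' and 'mainid' columns, and the hard-coded /media/secure_volume/ layout is assumed to exist):

# Pulls each <mainid>.zip out of its volsplit<suffix>.zip into the holding folder.
extract('balanced_bioindex.tsv')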
@task
@with_validation
def generate(directory=None):
    """
    Generate configuration files.
    """
    for conffiles in iter_conffiles(directory):
        status("Generating templates for '{environment}' and '{role}'",
               environment=conffiles.environment,
               role=conffiles.role)
        conffiles.generate()
-2,122,800,150,191,893,500
Generate configuration files.
confab/generate.py
generate
locationlabs/confab
python
@task
@with_validation
def generate(directory=None):
    '\n \n '
    for conffiles in iter_conffiles(directory):
        status("Generating templates for '{environment}' and '{role}'",
               environment=conffiles.environment,
               role=conffiles.role)
        conffiles.generate()
def test_overdue_habit(datasett):
    """
    please note the 'double tt' for datasett. This stands to differentiate
    the functional test data from the data used for unit tests.
    habit 1 is the overdue habit since its added first in the func/conftest
    module.
    :param datasett: from func/conftest
    :return:
    """
    session = datasett
    complete(1, session)
    result = session.query(HabitHistory.broken_count).filter((HabitHistory.habitid == 1)).all()
    assert (result == [(1,)])
1,522,588,135,354,832,000
please note the 'double tt' for datasett. This stands to differentiate the functional test data from the data used for unit tests. habit 1 is the overdue habit since its added first in the func/conftest module. :param datasett: from func/conftest :return:
tests/func/test_complete_habit.py
test_overdue_habit
takavarasha-desire/habittracker1_1
python
def test_overdue_habit(datasett):
    """
    please note the 'double tt' for datasett. This stands to differentiate
    the functional test data from the data used for unit tests.
    habit 1 is the overdue habit since its added first in the func/conftest
    module.
    :param datasett: from func/conftest
    :return:
    """
    session = datasett
    complete(1, session)
    result = session.query(HabitHistory.broken_count).filter((HabitHistory.habitid == 1)).all()
    assert (result == [(1,)])
def test_a_habit_due_for_completion(datasett):
    """
    habit 2 is the due habit since its added second in the func/conftest
    module.
    :param datasett: from func/conftest
    :return:
    """
    session = datasett
    complete(2, session)
    result = session.query(HabitHistory.streak).filter((HabitHistory.habitid == 2)).all()
    assert (result == [(1,)])
3,921,509,153,605,490,000
habit 2 is the due habit since its added second in the func/conftest module. :param datasett: from func/conftest :return:
tests/func/test_complete_habit.py
test_a_habit_due_for_completion
takavarasha-desire/habittracker1_1
python
def test_a_habit_due_for_completion(datasett):
    """
    habit 2 is the due habit since its added second in the func/conftest
    module.
    :param datasett: from func/conftest
    :return:
    """
    session = datasett
    complete(2, session)
    result = session.query(HabitHistory.streak).filter((HabitHistory.habitid == 2)).all()
    assert (result == [(1,)])
def __init__(self, *, hass, logger, domain, platform_name, platform, scan_interval, entity_namespace, async_entities_added_callback): 'Initialize the entity platform.\n\n hass: HomeAssistant\n logger: Logger\n domain: str\n platform_name: str\n scan_interval: timedelta\n entity_namespace: str\n async_entities_added_callback: @callback method\n ' self.hass = hass self.logger = logger self.domain = domain self.platform_name = platform_name self.platform = platform self.scan_interval = scan_interval self.entity_namespace = entity_namespace self.async_entities_added_callback = async_entities_added_callback self.config_entry = None self.entities = {} self._tasks = [] self._async_unsub_polling = None self._async_cancel_retry_setup = None self._process_updates = asyncio.Lock() if (platform is None): self.parallel_updates = None self.parallel_updates_semaphore = None return self.parallel_updates = getattr(platform, 'PARALLEL_UPDATES', None) self.parallel_updates_semaphore = None
-3,546,419,058,523,400,000
Initialize the entity platform. hass: HomeAssistant logger: Logger domain: str platform_name: str scan_interval: timedelta entity_namespace: str async_entities_added_callback: @callback method
homeassistant/helpers/entity_platform.py
__init__
crazyfish1111/home-assistant
python
def __init__(self, *, hass, logger, domain, platform_name, platform, scan_interval, entity_namespace, async_entities_added_callback): 'Initialize the entity platform.\n\n hass: HomeAssistant\n logger: Logger\n domain: str\n platform_name: str\n scan_interval: timedelta\n entity_namespace: str\n async_entities_added_callback: @callback method\n ' self.hass = hass self.logger = logger self.domain = domain self.platform_name = platform_name self.platform = platform self.scan_interval = scan_interval self.entity_namespace = entity_namespace self.async_entities_added_callback = async_entities_added_callback self.config_entry = None self.entities = {} self._tasks = [] self._async_unsub_polling = None self._async_cancel_retry_setup = None self._process_updates = asyncio.Lock() if (platform is None): self.parallel_updates = None self.parallel_updates_semaphore = None return self.parallel_updates = getattr(platform, 'PARALLEL_UPDATES', None) self.parallel_updates_semaphore = None
def _get_parallel_updates_semaphore(self):
    """Get or create a semaphore for parallel updates."""
    if (self.parallel_updates_semaphore is None):
        self.parallel_updates_semaphore = asyncio.Semaphore(
            (self.parallel_updates if self.parallel_updates else 1),
            loop=self.hass.loop)
    return self.parallel_updates_semaphore
2,508,172,302,676,324,400
Get or create a semaphore for parallel updates.
homeassistant/helpers/entity_platform.py
_get_parallel_updates_semaphore
crazyfish1111/home-assistant
python
def _get_parallel_updates_semaphore(self):
    if (self.parallel_updates_semaphore is None):
        self.parallel_updates_semaphore = asyncio.Semaphore(
            (self.parallel_updates if self.parallel_updates else 1),
            loop=self.hass.loop)
    return self.parallel_updates_semaphore
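For context, a sketch of the pattern such a semaphore enables; the platform and entity objects are illustrative, not the exact Home Assistant call sites:

async def _update_with_limit(platform, entity):
    # At most `parallel_updates` update coroutines run concurrently (default 1 permit).
    semaphore = platform._get_parallel_updates_semaphore()
    async with semaphore:
        await entity.async_update()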
async def async_setup(self, platform_config, discovery_info=None): 'Set up the platform from a config file.' platform = self.platform hass = self.hass @callback def async_create_setup_task(): 'Get task to set up platform.' if getattr(platform, 'async_setup_platform', None): return platform.async_setup_platform(hass, platform_config, self._async_schedule_add_entities, discovery_info) return hass.loop.run_in_executor(None, platform.setup_platform, hass, platform_config, self._schedule_add_entities, discovery_info) (await self._async_setup_platform(async_create_setup_task))
6,370,612,533,691,341,000
Set up the platform from a config file.
homeassistant/helpers/entity_platform.py
async_setup
crazyfish1111/home-assistant
python
async def async_setup(self, platform_config, discovery_info=None): platform = self.platform hass = self.hass @callback def async_create_setup_task(): 'Get task to set up platform.' if getattr(platform, 'async_setup_platform', None): return platform.async_setup_platform(hass, platform_config, self._async_schedule_add_entities, discovery_info) return hass.loop.run_in_executor(None, platform.setup_platform, hass, platform_config, self._schedule_add_entities, discovery_info) (await self._async_setup_platform(async_create_setup_task))
async def async_setup_entry(self, config_entry): 'Set up the platform from a config entry.' self.config_entry = config_entry platform = self.platform @callback def async_create_setup_task(): 'Get task to set up platform.' return platform.async_setup_entry(self.hass, config_entry, self._async_schedule_add_entities) return (await self._async_setup_platform(async_create_setup_task))
75,007,586,245,701,980
Set up the platform from a config entry.
homeassistant/helpers/entity_platform.py
async_setup_entry
crazyfish1111/home-assistant
python
async def async_setup_entry(self, config_entry): self.config_entry = config_entry platform = self.platform @callback def async_create_setup_task(): 'Get task to set up platform.' return platform.async_setup_entry(self.hass, config_entry, self._async_schedule_add_entities) return (await self._async_setup_platform(async_create_setup_task))
async def _async_setup_platform(self, async_create_setup_task, tries=0): 'Set up a platform via config file or config entry.\n\n async_create_setup_task creates a coroutine that sets up platform.\n ' logger = self.logger hass = self.hass full_name = '{}.{}'.format(self.domain, self.platform_name) logger.info('Setting up %s', full_name) warn_task = hass.loop.call_later(SLOW_SETUP_WARNING, logger.warning, 'Setup of platform %s is taking over %s seconds.', self.platform_name, SLOW_SETUP_WARNING) try: task = async_create_setup_task() (await asyncio.wait_for(asyncio.shield(task), SLOW_SETUP_MAX_WAIT)) if self._tasks: pending = [task for task in self._tasks if (not task.done())] self._tasks.clear() if pending: (await asyncio.wait(pending)) hass.config.components.add(full_name) return True except PlatformNotReady: tries += 1 wait_time = (min(tries, 6) * 30) logger.warning('Platform %s not ready yet. Retrying in %d seconds.', self.platform_name, wait_time) async def setup_again(now): 'Run setup again.' self._async_cancel_retry_setup = None (await self._async_setup_platform(async_create_setup_task, tries)) self._async_cancel_retry_setup = async_call_later(hass, wait_time, setup_again) return False except asyncio.TimeoutError: logger.error('Setup of platform %s is taking longer than %s seconds. Startup will proceed without waiting any longer.', self.platform_name, SLOW_SETUP_MAX_WAIT) return False except Exception: logger.exception('Error while setting up platform %s', self.platform_name) return False finally: warn_task.cancel()
-8,883,834,158,884,943,000
Set up a platform via config file or config entry. async_create_setup_task creates a coroutine that sets up platform.
homeassistant/helpers/entity_platform.py
_async_setup_platform
crazyfish1111/home-assistant
python
async def _async_setup_platform(self, async_create_setup_task, tries=0): 'Set up a platform via config file or config entry.\n\n async_create_setup_task creates a coroutine that sets up platform.\n ' logger = self.logger hass = self.hass full_name = '{}.{}'.format(self.domain, self.platform_name) logger.info('Setting up %s', full_name) warn_task = hass.loop.call_later(SLOW_SETUP_WARNING, logger.warning, 'Setup of platform %s is taking over %s seconds.', self.platform_name, SLOW_SETUP_WARNING) try: task = async_create_setup_task() (await asyncio.wait_for(asyncio.shield(task), SLOW_SETUP_MAX_WAIT)) if self._tasks: pending = [task for task in self._tasks if (not task.done())] self._tasks.clear() if pending: (await asyncio.wait(pending)) hass.config.components.add(full_name) return True except PlatformNotReady: tries += 1 wait_time = (min(tries, 6) * 30) logger.warning('Platform %s not ready yet. Retrying in %d seconds.', self.platform_name, wait_time) async def setup_again(now): 'Run setup again.' self._async_cancel_retry_setup = None (await self._async_setup_platform(async_create_setup_task, tries)) self._async_cancel_retry_setup = async_call_later(hass, wait_time, setup_again) return False except asyncio.TimeoutError: logger.error('Setup of platform %s is taking longer than %s seconds. Startup will proceed without waiting any longer.', self.platform_name, SLOW_SETUP_MAX_WAIT) return False except Exception: logger.exception('Error while setting up platform %s', self.platform_name) return False finally: warn_task.cancel()
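The PlatformNotReady branch above retries setup with a linear back-off capped at six intervals, i.e. wait_time = min(tries, 6) * 30 seconds. A tiny standalone illustration of the resulting schedule (the cap constant is named here only for the example):

# Retry delays produced by wait_time = min(tries, 6) * 30 in the code above.
RETRY_CAP = 6  # illustrative name for the min(tries, 6) cap
for tries in range(1, 9):
    wait_time = min(tries, RETRY_CAP) * 30
    print('retry attempt', tries, '-> wait', wait_time, 's')
# attempts 1..8 wait 30, 60, 90, 120, 150, 180, 180, 180 seconds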
def _schedule_add_entities(self, new_entities, update_before_add=False): 'Schedule adding entities for a single platform, synchronously.' run_callback_threadsafe(self.hass.loop, self._async_schedule_add_entities, list(new_entities), update_before_add).result()
7,908,124,374,192,280,000
Schedule adding entities for a single platform, synchronously.
homeassistant/helpers/entity_platform.py
_schedule_add_entities
crazyfish1111/home-assistant
python
def _schedule_add_entities(self, new_entities, update_before_add=False): run_callback_threadsafe(self.hass.loop, self._async_schedule_add_entities, list(new_entities), update_before_add).result()
@callback def _async_schedule_add_entities(self, new_entities, update_before_add=False): 'Schedule adding entities for a single platform async.' self._tasks.append(self.hass.async_add_job(self.async_add_entities(new_entities, update_before_add=update_before_add)))
6,827,352,441,585,063,000
Schedule adding entities for a single platform async.
homeassistant/helpers/entity_platform.py
_async_schedule_add_entities
crazyfish1111/home-assistant
python
@callback def _async_schedule_add_entities(self, new_entities, update_before_add=False): self._tasks.append(self.hass.async_add_job(self.async_add_entities(new_entities, update_before_add=update_before_add)))
def add_entities(self, new_entities, update_before_add=False): 'Add entities for a single platform.' if update_before_add: self.logger.warning("Call 'add_entities' with update_before_add=True only inside tests or you can run into a deadlock!") run_coroutine_threadsafe(self.async_add_entities(list(new_entities), update_before_add), self.hass.loop).result()
-443,141,501,391,420,860
Add entities for a single platform.
homeassistant/helpers/entity_platform.py
add_entities
crazyfish1111/home-assistant
python
def add_entities(self, new_entities, update_before_add=False): if update_before_add: self.logger.warning("Call 'add_entities' with update_before_add=True only inside tests or you can run into a deadlock!") run_coroutine_threadsafe(self.async_add_entities(list(new_entities), update_before_add), self.hass.loop).result()
async def async_add_entities(self, new_entities, update_before_add=False): 'Add entities for a single platform async.\n\n This method must be run in the event loop.\n ' if (not new_entities): return hass = self.hass device_registry = (await hass.helpers.device_registry.async_get_registry()) entity_registry = (await hass.helpers.entity_registry.async_get_registry()) tasks = [self._async_add_entity(entity, update_before_add, entity_registry, device_registry) for entity in new_entities] if (not tasks): return (await asyncio.wait(tasks)) self.async_entities_added_callback() if ((self._async_unsub_polling is not None) or (not any((entity.should_poll for entity in self.entities.values())))): return self._async_unsub_polling = async_track_time_interval(self.hass, self._update_entity_states, self.scan_interval)
-4,472,886,937,978,459,600
Add entities for a single platform async. This method must be run in the event loop.
homeassistant/helpers/entity_platform.py
async_add_entities
crazyfish1111/home-assistant
python
async def async_add_entities(self, new_entities, update_before_add=False): 'Add entities for a single platform async.\n\n This method must be run in the event loop.\n ' if (not new_entities): return hass = self.hass device_registry = (await hass.helpers.device_registry.async_get_registry()) entity_registry = (await hass.helpers.entity_registry.async_get_registry()) tasks = [self._async_add_entity(entity, update_before_add, entity_registry, device_registry) for entity in new_entities] if (not tasks): return (await asyncio.wait(tasks)) self.async_entities_added_callback() if ((self._async_unsub_polling is not None) or (not any((entity.should_poll for entity in self.entities.values())))): return self._async_unsub_polling = async_track_time_interval(self.hass, self._update_entity_states, self.scan_interval)
async def _async_add_entity(self, entity, update_before_add, entity_registry, device_registry): 'Add an entity to the platform.' if (entity is None): raise ValueError('Entity cannot be None') entity.hass = self.hass entity.platform = self if (hasattr(entity, 'async_update') and (not self.parallel_updates)): entity.parallel_updates = None elif ((not hasattr(entity, 'async_update')) and (self.parallel_updates == 0)): entity.parallel_updates = None else: entity.parallel_updates = self._get_parallel_updates_semaphore() if update_before_add: try: (await entity.async_device_update(warning=False)) except Exception: self.logger.exception('%s: Error on device update!', self.platform_name) return suggested_object_id = None if (entity.unique_id is not None): if (entity.entity_id is not None): suggested_object_id = split_entity_id(entity.entity_id)[1] else: suggested_object_id = entity.name if (self.entity_namespace is not None): suggested_object_id = '{} {}'.format(self.entity_namespace, suggested_object_id) if (self.config_entry is not None): config_entry_id = self.config_entry.entry_id else: config_entry_id = None device_info = entity.device_info device_id = None if ((config_entry_id is not None) and (device_info is not None)): processed_dev_info = {'config_entry_id': config_entry_id} for key in ('connections', 'identifiers', 'manufacturer', 'model', 'name', 'sw_version', 'via_hub'): if (key in device_info): processed_dev_info[key] = device_info[key] device = device_registry.async_get_or_create(**processed_dev_info) if device: device_id = device.id entry = entity_registry.async_get_or_create(self.domain, self.platform_name, entity.unique_id, suggested_object_id=suggested_object_id, config_entry_id=config_entry_id, device_id=device_id, known_object_ids=self.entities.keys()) if entry.disabled: self.logger.info("Not adding entity %s because it's disabled", (entry.name or entity.name or '"{} {}"'.format(self.platform_name, entity.unique_id))) return entity.entity_id = entry.entity_id entity.registry_name = entry.name entity.async_on_remove(entry.add_update_listener(entity)) elif ((entity.entity_id is not None) and entity_registry.async_is_registered(entity.entity_id)): suggested_object_id = split_entity_id(entity.entity_id)[1] entity.entity_id = None if (entity.entity_id is None): suggested_object_id = (suggested_object_id or entity.name or DEVICE_DEFAULT_NAME) if (self.entity_namespace is not None): suggested_object_id = '{} {}'.format(self.entity_namespace, suggested_object_id) entity.entity_id = entity_registry.async_generate_entity_id(self.domain, suggested_object_id, self.entities.keys()) if (not valid_entity_id(entity.entity_id)): raise HomeAssistantError('Invalid entity id: {}'.format(entity.entity_id)) if ((entity.entity_id in self.entities) or (entity.entity_id in self.hass.states.async_entity_ids(self.domain))): msg = 'Entity id already exists: {}'.format(entity.entity_id) if (entity.unique_id is not None): msg += '. Platform {} does not generate unique IDs'.format(self.platform_name) raise HomeAssistantError(msg) entity_id = entity.entity_id self.entities[entity_id] = entity entity.async_on_remove((lambda : self.entities.pop(entity_id))) (await entity.async_added_to_hass()) (await entity.async_update_ha_state())
530,176,300,249,078,340
Add an entity to the platform.
homeassistant/helpers/entity_platform.py
_async_add_entity
crazyfish1111/home-assistant
python
async def _async_add_entity(self, entity, update_before_add, entity_registry, device_registry): if (entity is None): raise ValueError('Entity cannot be None') entity.hass = self.hass entity.platform = self if (hasattr(entity, 'async_update') and (not self.parallel_updates)): entity.parallel_updates = None elif ((not hasattr(entity, 'async_update')) and (self.parallel_updates == 0)): entity.parallel_updates = None else: entity.parallel_updates = self._get_parallel_updates_semaphore() if update_before_add: try: (await entity.async_device_update(warning=False)) except Exception: self.logger.exception('%s: Error on device update!', self.platform_name) return suggested_object_id = None if (entity.unique_id is not None): if (entity.entity_id is not None): suggested_object_id = split_entity_id(entity.entity_id)[1] else: suggested_object_id = entity.name if (self.entity_namespace is not None): suggested_object_id = '{} {}'.format(self.entity_namespace, suggested_object_id) if (self.config_entry is not None): config_entry_id = self.config_entry.entry_id else: config_entry_id = None device_info = entity.device_info device_id = None if ((config_entry_id is not None) and (device_info is not None)): processed_dev_info = {'config_entry_id': config_entry_id} for key in ('connections', 'identifiers', 'manufacturer', 'model', 'name', 'sw_version', 'via_hub'): if (key in device_info): processed_dev_info[key] = device_info[key] device = device_registry.async_get_or_create(**processed_dev_info) if device: device_id = device.id entry = entity_registry.async_get_or_create(self.domain, self.platform_name, entity.unique_id, suggested_object_id=suggested_object_id, config_entry_id=config_entry_id, device_id=device_id, known_object_ids=self.entities.keys()) if entry.disabled: self.logger.info("Not adding entity %s because it's disabled", (entry.name or entity.name or '"{} {}"'.format(self.platform_name, entity.unique_id))) return entity.entity_id = entry.entity_id entity.registry_name = entry.name entity.async_on_remove(entry.add_update_listener(entity)) elif ((entity.entity_id is not None) and entity_registry.async_is_registered(entity.entity_id)): suggested_object_id = split_entity_id(entity.entity_id)[1] entity.entity_id = None if (entity.entity_id is None): suggested_object_id = (suggested_object_id or entity.name or DEVICE_DEFAULT_NAME) if (self.entity_namespace is not None): suggested_object_id = '{} {}'.format(self.entity_namespace, suggested_object_id) entity.entity_id = entity_registry.async_generate_entity_id(self.domain, suggested_object_id, self.entities.keys()) if (not valid_entity_id(entity.entity_id)): raise HomeAssistantError('Invalid entity id: {}'.format(entity.entity_id)) if ((entity.entity_id in self.entities) or (entity.entity_id in self.hass.states.async_entity_ids(self.domain))): msg = 'Entity id already exists: {}'.format(entity.entity_id) if (entity.unique_id is not None): msg += '. Platform {} does not generate unique IDs'.format(self.platform_name) raise HomeAssistantError(msg) entity_id = entity.entity_id self.entities[entity_id] = entity entity.async_on_remove((lambda : self.entities.pop(entity_id))) (await entity.async_added_to_hass()) (await entity.async_update_ha_state())
async def async_reset(self): 'Remove all entities and reset data.\n\n This method must be run in the event loop.\n ' if (self._async_cancel_retry_setup is not None): self._async_cancel_retry_setup() self._async_cancel_retry_setup = None if (not self.entities): return tasks = [self.async_remove_entity(entity_id) for entity_id in self.entities] (await asyncio.wait(tasks)) if (self._async_unsub_polling is not None): self._async_unsub_polling() self._async_unsub_polling = None
-510,075,945,936,083,100
Remove all entities and reset data. This method must be run in the event loop.
homeassistant/helpers/entity_platform.py
async_reset
crazyfish1111/home-assistant
python
async def async_reset(self): 'Remove all entities and reset data.\n\n This method must be run in the event loop.\n ' if (self._async_cancel_retry_setup is not None): self._async_cancel_retry_setup() self._async_cancel_retry_setup = None if (not self.entities): return tasks = [self.async_remove_entity(entity_id) for entity_id in self.entities] (await asyncio.wait(tasks)) if (self._async_unsub_polling is not None): self._async_unsub_polling() self._async_unsub_polling = None
async def async_remove_entity(self, entity_id): 'Remove entity id from platform.' (await self.entities[entity_id].async_remove()) if ((self._async_unsub_polling is not None) and (not any((entity.should_poll for entity in self.entities.values())))): self._async_unsub_polling() self._async_unsub_polling = None
-7,593,386,608,796,709,000
Remove entity id from platform.
homeassistant/helpers/entity_platform.py
async_remove_entity
crazyfish1111/home-assistant
python
async def async_remove_entity(self, entity_id): (await self.entities[entity_id].async_remove()) if ((self._async_unsub_polling is not None) and (not any((entity.should_poll for entity in self.entities.values())))): self._async_unsub_polling() self._async_unsub_polling = None
async def _update_entity_states(self, now): 'Update the states of all the polling entities.\n\n To protect from flooding the executor, we will update async entities\n in parallel and other entities sequential.\n\n This method must be run in the event loop.\n ' if self._process_updates.locked(): self.logger.warning('Updating %s %s took longer than the scheduled update interval %s', self.platform_name, self.domain, self.scan_interval) return async with self._process_updates: tasks = [] for entity in self.entities.values(): if (not entity.should_poll): continue tasks.append(entity.async_update_ha_state(True)) if tasks: (await asyncio.wait(tasks))
7,350,641,399,040,290,000
Update the states of all the polling entities. To protect from flooding the executor, we will update async entities in parallel and other entities sequentially. This method must be run in the event loop.
homeassistant/helpers/entity_platform.py
_update_entity_states
crazyfish1111/home-assistant
python
async def _update_entity_states(self, now): 'Update the states of all the polling entities.\n\n To protect from flooding the executor, we will update async entities\n in parallel and other entities sequential.\n\n This method must be run in the event loop.\n ' if self._process_updates.locked(): self.logger.warning('Updating %s %s took longer than the scheduled update interval %s', self.platform_name, self.domain, self.scan_interval) return async with self._process_updates: tasks = [] for entity in self.entities.values(): if (not entity.should_poll): continue tasks.append(entity.async_update_ha_state(True)) if tasks: (await asyncio.wait(tasks))
@callback def async_create_setup_task(): 'Get task to set up platform.' if getattr(platform, 'async_setup_platform', None): return platform.async_setup_platform(hass, platform_config, self._async_schedule_add_entities, discovery_info) return hass.loop.run_in_executor(None, platform.setup_platform, hass, platform_config, self._schedule_add_entities, discovery_info)
9,092,128,761,817,666,000
Get task to set up platform.
homeassistant/helpers/entity_platform.py
async_create_setup_task
crazyfish1111/home-assistant
python
@callback def async_create_setup_task(): if getattr(platform, 'async_setup_platform', None): return platform.async_setup_platform(hass, platform_config, self._async_schedule_add_entities, discovery_info) return hass.loop.run_in_executor(None, platform.setup_platform, hass, platform_config, self._schedule_add_entities, discovery_info)
@callback def async_create_setup_task(): 'Get task to set up platform.' return platform.async_setup_entry(self.hass, config_entry, self._async_schedule_add_entities)
-284,641,014,274,873,100
Get task to set up platform.
homeassistant/helpers/entity_platform.py
async_create_setup_task
crazyfish1111/home-assistant
python
@callback def async_create_setup_task(): return platform.async_setup_entry(self.hass, config_entry, self._async_schedule_add_entities)
async def setup_again(now): 'Run setup again.' self._async_cancel_retry_setup = None (await self._async_setup_platform(async_create_setup_task, tries))
-514,513,532,165,713,860
Run setup again.
homeassistant/helpers/entity_platform.py
setup_again
crazyfish1111/home-assistant
python
async def setup_again(now): self._async_cancel_retry_setup = None (await self._async_setup_platform(async_create_setup_task, tries))
@with_cupy_rmm def fit(self, X): '\n Fit a multi-node multi-GPU KMeans model\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Training data to cluster.\n\n ' data = DistributedDataHandler.create(X, client=self.client) self.datatype = data.datatype comms = CommsContext(comms_p2p=False) comms.init(workers=data.workers) kmeans_fit = [self.client.submit(KMeans._func_fit, comms.sessionId, wf[1], self.datatype, **self.kwargs, workers=[wf[0]], pure=False) for (idx, wf) in enumerate(data.worker_to_parts.items())] wait(kmeans_fit) raise_exception_from_futures(kmeans_fit) comms.destroy() self.local_model = kmeans_fit[0].result() self.cluster_centers_ = self.local_model.cluster_centers_ return self
7,721,958,996,140,420,000
Fit a multi-node multi-GPU KMeans model Parameters ---------- X : Dask cuDF DataFrame or CuPy backed Dask Array Training data to cluster.
python/cuml/dask/cluster/kmeans.py
fit
Chetank99/cuml
python
@with_cupy_rmm def fit(self, X): '\n Fit a multi-node multi-GPU KMeans model\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Training data to cluster.\n\n ' data = DistributedDataHandler.create(X, client=self.client) self.datatype = data.datatype comms = CommsContext(comms_p2p=False) comms.init(workers=data.workers) kmeans_fit = [self.client.submit(KMeans._func_fit, comms.sessionId, wf[1], self.datatype, **self.kwargs, workers=[wf[0]], pure=False) for (idx, wf) in enumerate(data.worker_to_parts.items())] wait(kmeans_fit) raise_exception_from_futures(kmeans_fit) comms.destroy() self.local_model = kmeans_fit[0].result() self.cluster_centers_ = self.local_model.cluster_centers_ return self
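A hedged usage sketch for the distributed fit/predict flow above. It assumes a machine with dask_cuda, CuPy, and cuml available; the constructor arguments (client, n_clusters) and the CuPy-backed array construction are assumptions based on the docstrings here, not a copy of the library's documentation.

# Hedged sketch: fit the multi-node multi-GPU KMeans on a CuPy-backed Dask array.
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
import dask.array as da
import cupy as cp
from cuml.dask.cluster import KMeans

if __name__ == '__main__':
    cluster = LocalCUDACluster()
    client = Client(cluster)

    # CuPy-backed Dask array, matching the input types named in the docstring
    X = da.random.random((10000, 8), chunks=(2500, 8)).map_blocks(cp.asarray)

    model = KMeans(client=client, n_clusters=4)  # n_clusters is assumed here
    model.fit(X)
    labels = model.predict(X)  # delayed=True by default, so this stays lazy
    print(labels.compute()[:10])

    client.close()
    cluster.close()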
def fit_predict(self, X, delayed=True): '\n Compute cluster centers and predict cluster index for each sample.\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n Returns\n -------\n result: Dask cuDF DataFrame or CuPy backed Dask Array\n Distributed object containing predictions\n\n ' return self.fit(X).predict(X, delayed=delayed)
6,022,462,453,244,419,000
Compute cluster centers and predict cluster index for each sample. Parameters ---------- X : Dask cuDF DataFrame or CuPy backed Dask Array Data to predict Returns ------- result: Dask cuDF DataFrame or CuPy backed Dask Array Distributed object containing predictions
python/cuml/dask/cluster/kmeans.py
fit_predict
Chetank99/cuml
python
def fit_predict(self, X, delayed=True): '\n Compute cluster centers and predict cluster index for each sample.\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n Returns\n -------\n result: Dask cuDF DataFrame or CuPy backed Dask Array\n Distributed object containing predictions\n\n ' return self.fit(X).predict(X, delayed=delayed)
def predict(self, X, delayed=True): '\n Predict labels for the input\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n delayed : bool (default = True)\n Whether to do a lazy prediction (and return Delayed objects) or an\n eagerly executed one.\n\n Returns\n -------\n result: Dask cuDF DataFrame or CuPy backed Dask Array\n Distributed object containing predictions\n ' return self._predict(X, delayed=delayed)
-6,130,491,462,909,309,000
Predict labels for the input Parameters ---------- X : Dask cuDF DataFrame or CuPy backed Dask Array Data to predict delayed : bool (default = True) Whether to do a lazy prediction (and return Delayed objects) or an eagerly executed one. Returns ------- result: Dask cuDF DataFrame or CuPy backed Dask Array Distributed object containing predictions
python/cuml/dask/cluster/kmeans.py
predict
Chetank99/cuml
python
def predict(self, X, delayed=True): '\n Predict labels for the input\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n delayed : bool (default = True)\n Whether to do a lazy prediction (and return Delayed objects) or an\n eagerly executed one.\n\n Returns\n -------\n result: Dask cuDF DataFrame or CuPy backed Dask Array\n Distributed object containing predictions\n ' return self._predict(X, delayed=delayed)
def fit_transform(self, X, delayed=True): '\n Calls fit followed by transform using a distributed KMeans model\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n delayed : bool (default = True)\n Whether to execute as a delayed task or eager.\n\n Returns\n -------\n result: Dask cuDF DataFrame or CuPy backed Dask Array\n Distributed object containing the transformed data\n ' return self.fit(X).transform(X, delayed=delayed)
2,970,504,870,052,390,000
Calls fit followed by transform using a distributed KMeans model Parameters ---------- X : Dask cuDF DataFrame or CuPy backed Dask Array Data to predict delayed : bool (default = True) Whether to execute as a delayed task or eager. Returns ------- result: Dask cuDF DataFrame or CuPy backed Dask Array Distributed object containing the transformed data
python/cuml/dask/cluster/kmeans.py
fit_transform
Chetank99/cuml
python
def fit_transform(self, X, delayed=True): '\n Calls fit followed by transform using a distributed KMeans model\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n delayed : bool (default = True)\n Whether to execute as a delayed task or eager.\n\n Returns\n -------\n result: Dask cuDF DataFrame or CuPy backed Dask Array\n Distributed object containing the transformed data\n ' return self.fit(X).transform(X, delayed=delayed)
def transform(self, X, delayed=True): '\n Transforms the input into the learned centroid space\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n delayed : bool (default = True)\n Whether to execute as a delayed task or eager.\n\n Returns\n -------\n result: Dask cuDF DataFrame or CuPy backed Dask Array\n Distributed object containing the transformed data\n ' return self._transform(X, n_dims=2, delayed=delayed)
-7,165,475,942,176,801,000
Transforms the input into the learned centroid space Parameters ---------- X : Dask cuDF DataFrame or CuPy backed Dask Array Data to predict delayed : bool (default = True) Whether to execute as a delayed task or eager. Returns ------- result: Dask cuDF DataFrame or CuPy backed Dask Array Distributed object containing the transformed data
python/cuml/dask/cluster/kmeans.py
transform
Chetank99/cuml
python
def transform(self, X, delayed=True): '\n Transforms the input into the learned centroid space\n\n Parameters\n ----------\n X : Dask cuDF DataFrame or CuPy backed Dask Array\n Data to predict\n\n delayed : bool (default = True)\n Whether to execute as a delayed task or eager.\n\n Returns\n -------\n result: Dask cuDF DataFrame or CuPy backed Dask Array\n Distributed object containing the transformed data\n ' return self._transform(X, n_dims=2, delayed=delayed)
@with_cupy_rmm def score(self, X): '\n Computes the inertia score for the trained KMeans centroids.\n\n Parameters\n ----------\n X : dask_cudf.Dataframe\n Dataframe to compute score\n\n Returns\n -------\n\n Inertial score\n ' scores = self._run_parallel_func(KMeans._score, X, n_dims=1, delayed=False, output_futures=True) return ((- 1) * cp.sum((cp.asarray(self.client.compute(scores, sync=True)) * (- 1.0))))
5,906,948,693,175,010,000
Computes the inertia score for the trained KMeans centroids. Parameters ---------- X : dask_cudf.Dataframe Dataframe to compute score Returns ------- Inertial score
python/cuml/dask/cluster/kmeans.py
score
Chetank99/cuml
python
@with_cupy_rmm def score(self, X): '\n Computes the inertia score for the trained KMeans centroids.\n\n Parameters\n ----------\n X : dask_cudf.Dataframe\n Dataframe to compute score\n\n Returns\n -------\n\n Inertial score\n ' scores = self._run_parallel_func(KMeans._score, X, n_dims=1, delayed=False, output_futures=True) return ((- 1) * cp.sum((cp.asarray(self.client.compute(scores, sync=True)) * (- 1.0))))
def parse_rec(filename): ' Parse a PASCAL VOC xml file ' tree = ET.parse(filename) objects = [] for obj in tree.findall('object'): obj_struct = {} obj_struct['name'] = obj.find('name').text obj_struct['pose'] = obj.find('pose').text obj_struct['truncated'] = int(obj.find('truncated').text) obj_struct['difficult'] = int(obj.find('difficult').text) bbox = obj.find('bndbox') obj_struct['bbox'] = [(int(bbox.find('xmin').text) - 1), (int(bbox.find('ymin').text) - 1), (int(bbox.find('xmax').text) - 1), (int(bbox.find('ymax').text) - 1)] objects.append(obj_struct) return objects
-1,181,628,649,275,111,700
Parse a PASCAL VOC xml file
eval.py
parse_rec
FLyingLSJ/ssd.pytorch
python
def parse_rec(filename): ' ' tree = ET.parse(filename) objects = [] for obj in tree.findall('object'): obj_struct = {} obj_struct['name'] = obj.find('name').text obj_struct['pose'] = obj.find('pose').text obj_struct['truncated'] = int(obj.find('truncated').text) obj_struct['difficult'] = int(obj.find('difficult').text) bbox = obj.find('bndbox') obj_struct['bbox'] = [(int(bbox.find('xmin').text) - 1), (int(bbox.find('ymin').text) - 1), (int(bbox.find('xmax').text) - 1), (int(bbox.find('ymax').text) - 1)] objects.append(obj_struct) return objects
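A small self-contained sketch of the annotation layout parse_rec walks: one made-up <object> element with the name/pose/truncated/difficult/bndbox fields read above (the sample values are invented).

# Made-up PASCAL VOC snippet with the fields parse_rec() reads.
import xml.etree.ElementTree as ET

SAMPLE = '''
<annotation>
  <object>
    <name>dog</name>
    <pose>Left</pose>
    <truncated>0</truncated>
    <difficult>0</difficult>
    <bndbox><xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax></bndbox>
  </object>
</annotation>
'''

root = ET.fromstring(SAMPLE)
for obj in root.findall('object'):
    bbox = obj.find('bndbox')
    print({
        'name': obj.find('name').text,
        'difficult': int(obj.find('difficult').text),
        # like parse_rec, shift the 1-based VOC pixel coordinates to 0-based
        'bbox': [int(bbox.find(k).text) - 1 for k in ('xmin', 'ymin', 'xmax', 'ymax')],
    })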
def get_output_dir(name, phase): 'Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n A canonical path is built using the name from an imdb and a network\n (if not None).\n ' filedir = os.path.join(name, phase) if (not os.path.exists(filedir)): os.makedirs(filedir) return filedir
-4,561,549,611,072,020,500
Return the directory where experimental artifacts are placed. If the directory does not exist, it is created. A canonical path is built using the name from an imdb and a network (if not None).
eval.py
get_output_dir
FLyingLSJ/ssd.pytorch
python
def get_output_dir(name, phase): 'Return the directory where experimental artifacts are placed.\n If the directory does not exist, it is created.\n A canonical path is built using the name from an imdb and a network\n (if not None).\n ' filedir = os.path.join(name, phase) if (not os.path.exists(filedir)): os.makedirs(filedir) return filedir
def voc_ap(rec, prec, use_07_metric=True): ' ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:True).\n ' if use_07_metric: ap = 0.0 for t in np.arange(0.0, 1.1, 0.1): if (np.sum((rec >= t)) == 0): p = 0 else: p = np.max(prec[(rec >= t)]) ap = (ap + (p / 11.0)) else: mrec = np.concatenate(([0.0], rec, [1.0])) mpre = np.concatenate(([0.0], prec, [0.0])) for i in range((mpre.size - 1), 0, (- 1)): mpre[(i - 1)] = np.maximum(mpre[(i - 1)], mpre[i]) i = np.where((mrec[1:] != mrec[:(- 1)]))[0] ap = np.sum(((mrec[(i + 1)] - mrec[i]) * mpre[(i + 1)])) return ap
-5,061,982,948,125,241,000
ap = voc_ap(rec, prec, [use_07_metric]) Compute VOC AP given precision and recall. If use_07_metric is true, uses the VOC 07 11 point method (default:True).
eval.py
voc_ap
FLyingLSJ/ssd.pytorch
python
def voc_ap(rec, prec, use_07_metric=True): ' ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:True).\n ' if use_07_metric: ap = 0.0 for t in np.arange(0.0, 1.1, 0.1): if (np.sum((rec >= t)) == 0): p = 0 else: p = np.max(prec[(rec >= t)]) ap = (ap + (p / 11.0)) else: mrec = np.concatenate(([0.0], rec, [1.0])) mpre = np.concatenate(([0.0], prec, [0.0])) for i in range((mpre.size - 1), 0, (- 1)): mpre[(i - 1)] = np.maximum(mpre[(i - 1)], mpre[i]) i = np.where((mrec[1:] != mrec[:(- 1)]))[0] ap = np.sum(((mrec[(i + 1)] - mrec[i]) * mpre[(i + 1)])) return ap
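A worked example of the 11-point interpolation voc_ap uses when use_07_metric is True, on made-up recall/precision values; the loop below mirrors that branch and runs standalone.

# 11-point AP on made-up values: for each t in {0.0, 0.1, ..., 1.0} take the
# maximum precision among points with rec >= t, then average the 11 maxima.
import numpy as np

rec = np.array([0.2, 0.4, 0.4, 0.8])
prec = np.array([1.0, 0.66, 0.5, 0.57])

ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
    p = np.max(prec[rec >= t]) if np.sum(rec >= t) > 0 else 0.0
    ap += p / 11.0
print(round(ap, 3))  # 0.6 for these made-up values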
def voc_eval(detpath, annopath, imagesetfile, classname, cachedir, ovthresh=0.5, use_07_metric=True): "rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n [ovthresh],\n [use_07_metric])\nTop level function that does the PASCAL VOC evaluation.\ndetpath: Path to detections\n detpath.format(classname) should produce the detection results file.\nannopath: Path to annotations\n annopath.format(imagename) should be the xml annotations file.\nimagesetfile: Text file containing the list of images, one image per line.\nclassname: Category name (duh)\ncachedir: Directory for caching the annotations\n[ovthresh]: Overlap threshold (default = 0.5)\n[use_07_metric]: Whether to use VOC07's 11 point AP computation\n (default True)\n" if (not os.path.isdir(cachedir)): os.mkdir(cachedir) cachefile = os.path.join(cachedir, 'annots.pkl') with open(imagesetfile, 'r') as f: lines = f.readlines() imagenames = [x.strip() for x in lines] if (not os.path.isfile(cachefile)): recs = {} for (i, imagename) in enumerate(imagenames): recs[imagename] = parse_rec((annopath % imagename)) if ((i % 100) == 0): print('Reading annotation for {:d}/{:d}'.format((i + 1), len(imagenames))) print('Saving cached annotations to {:s}'.format(cachefile)) with open(cachefile, 'wb') as f: pickle.dump(recs, f) else: with open(cachefile, 'rb') as f: recs = pickle.load(f) class_recs = {} npos = 0 for imagename in imagenames: R = [obj for obj in recs[imagename] if (obj['name'] == classname)] bbox = np.array([x['bbox'] for x in R]) difficult = np.array([x['difficult'] for x in R]).astype(np.bool) det = ([False] * len(R)) npos = (npos + sum((~ difficult))) class_recs[imagename] = {'bbox': bbox, 'difficult': difficult, 'det': det} detfile = detpath.format(classname) with open(detfile, 'r') as f: lines = f.readlines() if (any(lines) == 1): splitlines = [x.strip().split(' ') for x in lines] image_ids = [x[0] for x in splitlines] confidence = np.array([float(x[1]) for x in splitlines]) BB = np.array([[float(z) for z in x[2:]] for x in splitlines]) sorted_ind = np.argsort((- confidence)) sorted_scores = np.sort((- confidence)) BB = BB[sorted_ind, :] image_ids = [image_ids[x] for x in sorted_ind] nd = len(image_ids) tp = np.zeros(nd) fp = np.zeros(nd) for d in range(nd): R = class_recs[image_ids[d]] bb = BB[d, :].astype(float) ovmax = (- np.inf) BBGT = R['bbox'].astype(float) if (BBGT.size > 0): ixmin = np.maximum(BBGT[:, 0], bb[0]) iymin = np.maximum(BBGT[:, 1], bb[1]) ixmax = np.minimum(BBGT[:, 2], bb[2]) iymax = np.minimum(BBGT[:, 3], bb[3]) iw = np.maximum((ixmax - ixmin), 0.0) ih = np.maximum((iymax - iymin), 0.0) inters = (iw * ih) uni = ((((bb[2] - bb[0]) * (bb[3] - bb[1])) + ((BBGT[:, 2] - BBGT[:, 0]) * (BBGT[:, 3] - BBGT[:, 1]))) - inters) overlaps = (inters / uni) ovmax = np.max(overlaps) jmax = np.argmax(overlaps) if (ovmax > ovthresh): if (not R['difficult'][jmax]): if (not R['det'][jmax]): tp[d] = 1.0 R['det'][jmax] = 1 else: fp[d] = 1.0 else: fp[d] = 1.0 fp = np.cumsum(fp) tp = np.cumsum(tp) rec = (tp / float(npos)) prec = (tp / np.maximum((tp + fp), np.finfo(np.float64).eps)) ap = voc_ap(rec, prec, use_07_metric) else: rec = (- 1.0) prec = (- 1.0) ap = (- 1.0) return (rec, prec, ap)
562,733,316,720,542,660
rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname, [ovthresh], [use_07_metric]) Top level function that does the PASCAL VOC evaluation. detpath: Path to detections detpath.format(classname) should produce the detection results file. annopath: Path to annotations annopath.format(imagename) should be the xml annotations file. imagesetfile: Text file containing the list of images, one image per line. classname: Category name (duh) cachedir: Directory for caching the annotations [ovthresh]: Overlap threshold (default = 0.5) [use_07_metric]: Whether to use VOC07's 11 point AP computation (default True)
eval.py
voc_eval
FLyingLSJ/ssd.pytorch
python
def voc_eval(detpath, annopath, imagesetfile, classname, cachedir, ovthresh=0.5, use_07_metric=True): "rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n [ovthresh],\n [use_07_metric])\nTop level function that does the PASCAL VOC evaluation.\ndetpath: Path to detections\n detpath.format(classname) should produce the detection results file.\nannopath: Path to annotations\n annopath.format(imagename) should be the xml annotations file.\nimagesetfile: Text file containing the list of images, one image per line.\nclassname: Category name (duh)\ncachedir: Directory for caching the annotations\n[ovthresh]: Overlap threshold (default = 0.5)\n[use_07_metric]: Whether to use VOC07's 11 point AP computation\n (default True)\n" if (not os.path.isdir(cachedir)): os.mkdir(cachedir) cachefile = os.path.join(cachedir, 'annots.pkl') with open(imagesetfile, 'r') as f: lines = f.readlines() imagenames = [x.strip() for x in lines] if (not os.path.isfile(cachefile)): recs = {} for (i, imagename) in enumerate(imagenames): recs[imagename] = parse_rec((annopath % imagename)) if ((i % 100) == 0): print('Reading annotation for {:d}/{:d}'.format((i + 1), len(imagenames))) print('Saving cached annotations to {:s}'.format(cachefile)) with open(cachefile, 'wb') as f: pickle.dump(recs, f) else: with open(cachefile, 'rb') as f: recs = pickle.load(f) class_recs = {} npos = 0 for imagename in imagenames: R = [obj for obj in recs[imagename] if (obj['name'] == classname)] bbox = np.array([x['bbox'] for x in R]) difficult = np.array([x['difficult'] for x in R]).astype(np.bool) det = ([False] * len(R)) npos = (npos + sum((~ difficult))) class_recs[imagename] = {'bbox': bbox, 'difficult': difficult, 'det': det} detfile = detpath.format(classname) with open(detfile, 'r') as f: lines = f.readlines() if (any(lines) == 1): splitlines = [x.strip().split(' ') for x in lines] image_ids = [x[0] for x in splitlines] confidence = np.array([float(x[1]) for x in splitlines]) BB = np.array([[float(z) for z in x[2:]] for x in splitlines]) sorted_ind = np.argsort((- confidence)) sorted_scores = np.sort((- confidence)) BB = BB[sorted_ind, :] image_ids = [image_ids[x] for x in sorted_ind] nd = len(image_ids) tp = np.zeros(nd) fp = np.zeros(nd) for d in range(nd): R = class_recs[image_ids[d]] bb = BB[d, :].astype(float) ovmax = (- np.inf) BBGT = R['bbox'].astype(float) if (BBGT.size > 0): ixmin = np.maximum(BBGT[:, 0], bb[0]) iymin = np.maximum(BBGT[:, 1], bb[1]) ixmax = np.minimum(BBGT[:, 2], bb[2]) iymax = np.minimum(BBGT[:, 3], bb[3]) iw = np.maximum((ixmax - ixmin), 0.0) ih = np.maximum((iymax - iymin), 0.0) inters = (iw * ih) uni = ((((bb[2] - bb[0]) * (bb[3] - bb[1])) + ((BBGT[:, 2] - BBGT[:, 0]) * (BBGT[:, 3] - BBGT[:, 1]))) - inters) overlaps = (inters / uni) ovmax = np.max(overlaps) jmax = np.argmax(overlaps) if (ovmax > ovthresh): if (not R['difficult'][jmax]): if (not R['det'][jmax]): tp[d] = 1.0 R['det'][jmax] = 1 else: fp[d] = 1.0 else: fp[d] = 1.0 fp = np.cumsum(fp) tp = np.cumsum(tp) rec = (tp / float(npos)) prec = (tp / np.maximum((tp + fp), np.finfo(np.float64).eps)) ap = voc_ap(rec, prec, use_07_metric) else: rec = (- 1.0) prec = (- 1.0) ap = (- 1.0) return (rec, prec, ap)
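For reference, the per-class detection file that voc_eval reads is split above as space-separated "image_id confidence xmin ymin xmax ymax" lines; a short illustration with an invented detection:

# Parse one invented detection line the way voc_eval() splits it.
line = '000012 0.941 48.0 240.0 195.0 371.0'
parts = line.strip().split(' ')
image_id, confidence = parts[0], float(parts[1])
bbox = [float(z) for z in parts[2:]]
print(image_id, confidence, bbox)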
def __init__(self, minconn, maxconn, *args, **kwargs): "Initialize the connection pool.\n\n New 'minconn' connections are created immediately calling 'connfunc'\n with given parameters. The connection pool will support a maximum of\n about 'maxconn' connections. \n " self.minconn = int(minconn) self.maxconn = int(maxconn) self.closed = False self._args = args self._kwargs = kwargs self._pool = [] self._used = {} self._rused = {} self._keys = 0 for i in range(self.minconn): self._connect()
1,293,587,767,893,814,000
Initialize the connection pool. New 'minconn' connections are created immediately calling 'connfunc' with given parameters. The connection pool will support a maximum of about 'maxconn' connections.
lexis/Lib/site-packages/psycopg2/pool.py
__init__
ALEXIS2ES/sherom-Serve
python
def __init__(self, minconn, maxconn, *args, **kwargs): "Initialize the connection pool.\n\n New 'minconn' connections are created immediately calling 'connfunc'\n with given parameters. The connection pool will support a maximum of\n about 'maxconn' connections. \n " self.minconn = int(minconn) self.maxconn = int(maxconn) self.closed = False self._args = args self._kwargs = kwargs self._pool = [] self._used = {} self._rused = {} self._keys = 0 for i in range(self.minconn): self._connect()
def _connect(self, key=None): "Create a new connection and assign it to 'key' if not None." conn = psycopg2.connect(*self._args, **self._kwargs) if (key is not None): self._used[key] = conn self._rused[id(conn)] = key else: self._pool.append(conn) return conn
5,585,987,887,297,364,000
Create a new connection and assign it to 'key' if not None.
lexis/Lib/site-packages/psycopg2/pool.py
_connect
ALEXIS2ES/sherom-Serve
python
def _connect(self, key=None): conn = psycopg2.connect(*self._args, **self._kwargs) if (key is not None): self._used[key] = conn self._rused[id(conn)] = key else: self._pool.append(conn) return conn
def _getkey(self): 'Return a new unique key.' self._keys += 1 return self._keys
-2,913,718,119,693,489,700
Return a new unique key.
lexis/Lib/site-packages/psycopg2/pool.py
_getkey
ALEXIS2ES/sherom-Serve
python
def _getkey(self): self._keys += 1 return self._keys
def _getconn(self, key=None): "Get a free connection and assign it to 'key' if not None." if self.closed: raise PoolError('connection pool is closed') if (key is None): key = self._getkey() if (key in self._used): return self._used[key] if self._pool: self._used[key] = conn = self._pool.pop() self._rused[id(conn)] = key return conn else: if (len(self._used) == self.maxconn): raise PoolError('connection pool exhausted') return self._connect(key)
-1,052,344,869,246,796,800
Get a free connection and assign it to 'key' if not None.
lexis/Lib/site-packages/psycopg2/pool.py
_getconn
ALEXIS2ES/sherom-Serve
python
def _getconn(self, key=None): if self.closed: raise PoolError('connection pool is closed') if (key is None): key = self._getkey() if (key in self._used): return self._used[key] if self._pool: self._used[key] = conn = self._pool.pop() self._rused[id(conn)] = key return conn else: if (len(self._used) == self.maxconn): raise PoolError('connection pool exhausted') return self._connect(key)
def _putconn(self, conn, key=None, close=False): 'Put away a connection.' if self.closed: raise PoolError('connection pool is closed') if (key is None): key = self._rused.get(id(conn)) if (not key): raise PoolError('trying to put unkeyed connection') if ((len(self._pool) < self.minconn) and (not close)): if (not conn.closed): status = conn.get_transaction_status() if (status == _ext.TRANSACTION_STATUS_UNKNOWN): conn.close() elif (status != _ext.TRANSACTION_STATUS_IDLE): conn.rollback() self._pool.append(conn) else: self._pool.append(conn) else: conn.close() if ((not self.closed) or (key in self._used)): del self._used[key] del self._rused[id(conn)]
1,155,863,612,707,922,400
Put away a connection.
lexis/Lib/site-packages/psycopg2/pool.py
_putconn
ALEXIS2ES/sherom-Serve
python
def _putconn(self, conn, key=None, close=False): if self.closed: raise PoolError('connection pool is closed') if (key is None): key = self._rused.get(id(conn)) if (not key): raise PoolError('trying to put unkeyed connection') if ((len(self._pool) < self.minconn) and (not close)): if (not conn.closed): status = conn.get_transaction_status() if (status == _ext.TRANSACTION_STATUS_UNKNOWN): conn.close() elif (status != _ext.TRANSACTION_STATUS_IDLE): conn.rollback() self._pool.append(conn) else: self._pool.append(conn) else: conn.close() if ((not self.closed) or (key in self._used)): del self._used[key] del self._rused[id(conn)]
def _closeall(self): 'Close all connections.\n\n Note that this can lead to some code fail badly when trying to use\n an already closed connection. If you call .closeall() make sure\n your code can deal with it.\n ' if self.closed: raise PoolError('connection pool is closed') for conn in (self._pool + list(self._used.values())): try: conn.close() except: pass self.closed = True
433,966,829,568,226,200
Close all connections. Note that this can cause some code to fail badly when trying to use an already closed connection. If you call .closeall() make sure your code can deal with it.
lexis/Lib/site-packages/psycopg2/pool.py
_closeall
ALEXIS2ES/sherom-Serve
python
def _closeall(self): 'Close all connections.\n\n Note that this can lead to some code fail badly when trying to use\n an already closed connection. If you call .closeall() make sure\n your code can deal with it.\n ' if self.closed: raise PoolError('connection pool is closed') for conn in (self._pool + list(self._used.values())): try: conn.close() except: pass self.closed = True
def __init__(self, minconn, maxconn, *args, **kwargs): 'Initialize the threading lock.' import threading AbstractConnectionPool.__init__(self, minconn, maxconn, *args, **kwargs) self._lock = threading.Lock()
8,024,484,810,999,034,000
Initialize the threading lock.
lexis/Lib/site-packages/psycopg2/pool.py
__init__
ALEXIS2ES/sherom-Serve
python
def __init__(self, minconn, maxconn, *args, **kwargs): import threading AbstractConnectionPool.__init__(self, minconn, maxconn, *args, **kwargs) self._lock = threading.Lock()
def getconn(self, key=None): "Get a free connection and assign it to 'key' if not None." self._lock.acquire() try: return self._getconn(key) finally: self._lock.release()
6,270,094,374,509,713,000
Get a free connection and assign it to 'key' if not None.
lexis/Lib/site-packages/psycopg2/pool.py
getconn
ALEXIS2ES/sherom-Serve
python
def getconn(self, key=None): self._lock.acquire() try: return self._getconn(key) finally: self._lock.release()
def putconn(self, conn=None, key=None, close=False): 'Put away an unused connection.' self._lock.acquire() try: self._putconn(conn, key, close) finally: self._lock.release()
-2,805,035,333,017,517,600
Put away an unused connection.
lexis/Lib/site-packages/psycopg2/pool.py
putconn
ALEXIS2ES/sherom-Serve
python
def putconn(self, conn=None, key=None, close=False): self._lock.acquire() try: self._putconn(conn, key, close) finally: self._lock.release()
def closeall(self): 'Close all connections (even the one currently in use.)' self._lock.acquire() try: self._closeall() finally: self._lock.release()
8,940,636,885,304,963,000
Close all connections (even the one currently in use.)
lexis/Lib/site-packages/psycopg2/pool.py
closeall
ALEXIS2ES/sherom-Serve
python
def closeall(self): self._lock.acquire() try: self._closeall() finally: self._lock.release()
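A hedged usage sketch for the threaded pool above with psycopg2; the DSN is a placeholder and the query is only there to exercise the borrowed connection.

# Hedged sketch: borrow, use, and return a pooled connection (placeholder DSN).
from psycopg2.pool import ThreadedConnectionPool

pool = ThreadedConnectionPool(1, 5, dsn='dbname=test user=postgres')

conn = pool.getconn()  # take a free (or new) connection from the pool
try:
    with conn.cursor() as cur:
        cur.execute('SELECT 1')
        print(cur.fetchone())
finally:
    pool.putconn(conn)  # hand it back so it can be reused

pool.closeall()  # close every connection, including keyed ones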
def __init__(self, minconn, maxconn, *args, **kwargs): 'Initialize the threading lock.' import warnings warnings.warn('deprecated: use ZPsycopgDA.pool implementation', DeprecationWarning) import threading AbstractConnectionPool.__init__(self, minconn, maxconn, *args, **kwargs) self._lock = threading.Lock() import _thread as _thread self.__thread = _thread
-4,742,599,862,310,846,000
Initialize the threading lock.
lexis/Lib/site-packages/psycopg2/pool.py
__init__
ALEXIS2ES/sherom-Serve
python
def __init__(self, minconn, maxconn, *args, **kwargs): import warnings warnings.warn('deprecated: use ZPsycopgDA.pool implementation', DeprecationWarning) import threading AbstractConnectionPool.__init__(self, minconn, maxconn, *args, **kwargs) self._lock = threading.Lock() import _thread as _thread self.__thread = _thread
def getconn(self): 'Generate thread id and return a connection.' key = self.__thread.get_ident() self._lock.acquire() try: return self._getconn(key) finally: self._lock.release()
7,005,839,141,883,069,000
Generate thread id and return a connection.
lexis/Lib/site-packages/psycopg2/pool.py
getconn
ALEXIS2ES/sherom-Serve
python
def getconn(self): key = self.__thread.get_ident() self._lock.acquire() try: return self._getconn(key) finally: self._lock.release()
def putconn(self, conn=None, close=False): 'Put away an unused connection.' key = self.__thread.get_ident() self._lock.acquire() try: if (not conn): conn = self._used[key] self._putconn(conn, key, close) finally: self._lock.release()
2,892,461,049,250,483,700
Put away an unused connection.
lexis/Lib/site-packages/psycopg2/pool.py
putconn
ALEXIS2ES/sherom-Serve
python
def putconn(self, conn=None, close=False): key = self.__thread.get_ident() self._lock.acquire() try: if (not conn): conn = self._used[key] self._putconn(conn, key, close) finally: self._lock.release()
def closeall(self): 'Close all connections (even the one currently in use.)' self._lock.acquire() try: self._closeall() finally: self._lock.release()
8,940,636,885,304,963,000
Close all connections (even the one currently in use.)
lexis/Lib/site-packages/psycopg2/pool.py
closeall
ALEXIS2ES/sherom-Serve
python
def closeall(self): self._lock.acquire() try: self._closeall() finally: self._lock.release()
def from_ppc(ppc, f_hz=50, validate_conversion=False, **kwargs): '\n This function converts pypower case files to pandapower net structure.\n\n INPUT:\n\n **ppc** : The pypower case file.\n\n OPTIONAL:\n\n **f_hz** (float, 50) - The frequency of the network.\n\n **validate_conversion** (bool, False) - If True, validate_from_ppc is run after conversion.\n For running the validation, the ppc must already contain the pypower\n powerflow results or pypower must be importable.\n\n ****kwargs** keyword arguments for validate_from_ppc if validate_conversion is True\n\n OUTPUT:\n\n **net** : pandapower net.\n\n EXAMPLE:\n\n import pandapower.converter as pc\n\n from pypower import case4gs\n\n ppc_net = case4gs.case4gs()\n\n net = pc.from_ppc(ppc_net, f_hz=60)\n\n ' if Series((ppc['bus'][:, BASE_KV] <= 0)).any(): logger.info('There are false baseKV given in the pypower case file.') baseMVA = ppc['baseMVA'] omega = (pi * f_hz) MAX_VAL = 99999.0 net = pp.create_empty_network(f_hz=f_hz, sn_mva=baseMVA) for i in range(len(ppc['bus'])): pp.create_bus(net, name=int(ppc['bus'][(i, 0)]), vn_kv=ppc['bus'][(i, 9)], type='b', zone=ppc['bus'][(i, 10)], in_service=bool((ppc['bus'][(i, 1)] != 4)), max_vm_pu=ppc['bus'][(i, 11)], min_vm_pu=ppc['bus'][(i, 12)]) if (ppc['bus'][(i, 2)] > 0): pp.create_load(net, i, p_mw=ppc['bus'][(i, 2)], q_mvar=ppc['bus'][(i, 3)], controllable=False) elif (ppc['bus'][(i, 2)] < 0): pp.create_sgen(net, i, p_mw=(- ppc['bus'][(i, 2)]), q_mvar=(- ppc['bus'][(i, 3)]), type='', controllable=False) elif (ppc['bus'][(i, 3)] != 0): pp.create_load(net, i, p_mw=ppc['bus'][(i, 2)], q_mvar=ppc['bus'][(i, 3)], controllable=False) if ((ppc['bus'][(i, 4)] != 0) or (ppc['bus'][(i, 5)] != 0)): pp.create_shunt(net, i, p_mw=ppc['bus'][(i, 4)], q_mvar=(- ppc['bus'][(i, 5)])) gen_lookup = DataFrame(nan, columns=['element', 'element_type'], index=range(len(ppc['gen'][:, 0]))) if (len(ppc['gen'].shape) == 1): ppc['gen'] = array(ppc['gen'], ndmin=2) for i in range(len(ppc['gen'][:, 0])): (current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, last_same_bus_in_service_gen_idx) = _gen_bus_info(ppc, i) if (current_bus_type == 3): if (i == first_same_bus_in_service_gen_idx): gen_lookup.element.loc[i] = pp.create_ext_grid(net, bus=current_bus_idx, vm_pu=ppc['gen'][(last_same_bus_in_service_gen_idx, 5)], va_degree=ppc['bus'][(current_bus_idx, 8)], in_service=bool((ppc['gen'][(i, 7)] > 0)), max_p_mw=ppc['gen'][(i, PMAX)], min_p_mw=ppc['gen'][(i, PMIN)], max_q_mvar=ppc['gen'][(i, QMAX)], min_q_mvar=ppc['gen'][(i, QMIN)]) gen_lookup.element_type.loc[i] = 'ext_grid' if (ppc['gen'][(i, 4)] > ppc['gen'][(i, 3)]): logger.info(('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)) if ((- ppc['gen'][(i, 9)]) < (- ppc['gen'][(i, 8)])): logger.info(('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)) else: current_bus_type = 1 elif (current_bus_type == 2): if (i == first_same_bus_in_service_gen_idx): gen_lookup.element.loc[i] = pp.create_gen(net, bus=current_bus_idx, vm_pu=ppc['gen'][(last_same_bus_in_service_gen_idx, 5)], p_mw=ppc['gen'][(i, 1)], in_service=bool((ppc['gen'][(i, 7)] > 0)), controllable=True, max_p_mw=ppc['gen'][(i, PMAX)], min_p_mw=ppc['gen'][(i, PMIN)], max_q_mvar=ppc['gen'][(i, QMAX)], min_q_mvar=ppc['gen'][(i, QMIN)]) gen_lookup.element_type.loc[i] = 'gen' if (ppc['gen'][(i, 1)] < 0): logger.info(('p_mw of gen %d must be less than zero but is not.' % i)) if (ppc['gen'][(i, 4)] > ppc['gen'][(i, 3)]): logger.info(('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)) if ((- ppc['gen'][(i, 9)]) < (- ppc['gen'][(i, 8)])): logger.info(('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)) else: current_bus_type = 1 if (current_bus_type == 1): gen_lookup.element.loc[i] = pp.create_sgen(net, bus=current_bus_idx, p_mw=ppc['gen'][(i, 1)], q_mvar=ppc['gen'][(i, 2)], type='', in_service=bool((ppc['gen'][(i, 7)] > 0)), max_p_mw=ppc['gen'][(i, PMAX)], min_p_mw=ppc['gen'][(i, PMIN)], max_q_mvar=ppc['gen'][(i, QMAX)], min_q_mvar=ppc['gen'][(i, QMIN)], controllable=True) gen_lookup.element_type.loc[i] = 'sgen' if (ppc['gen'][(i, 1)] < 0): logger.info(('p_mw of sgen %d must be less than zero but is not.' % i)) if (ppc['gen'][(i, 4)] > ppc['gen'][(i, 3)]): logger.info(('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)) if ((- ppc['gen'][(i, 9)]) < (- ppc['gen'][(i, 8)])): logger.info(('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)) for i in range(len(ppc['branch'])): from_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][(i, 0)])) to_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][(i, 1)])) from_vn_kv = ppc['bus'][(from_bus, 9)] to_vn_kv = ppc['bus'][(to_bus, 9)] if (((from_vn_kv == to_vn_kv) & ((ppc['branch'][(i, 8)] == 0) | (ppc['branch'][(i, 8)] == 1))) & (ppc['branch'][(i, 9)] == 0)): Zni = ((ppc['bus'][(to_bus, 9)] ** 2) / baseMVA) max_i_ka = ((ppc['branch'][(i, 5)] / ppc['bus'][(to_bus, 9)]) / sqrt(3)) if (max_i_ka == 0.0): max_i_ka = MAX_VAL logger.debug(('ppc branch rateA is zero -> Using MAX_VAL instead to calculate ' + 'maximum branch flow')) pp.create_line_from_parameters(net, from_bus=from_bus, to_bus=to_bus, length_km=1, r_ohm_per_km=(ppc['branch'][(i, 2)] * Zni), x_ohm_per_km=(ppc['branch'][(i, 3)] * Zni), c_nf_per_km=((((ppc['branch'][(i, 4)] / Zni) / omega) * 1000000000.0) / 2), max_i_ka=max_i_ka, type='ol', max_loading_percent=100, in_service=bool(ppc['branch'][(i, 10)])) else: if (from_vn_kv >= to_vn_kv): hv_bus = from_bus vn_hv_kv = from_vn_kv lv_bus = to_bus vn_lv_kv = to_vn_kv tap_side = 'hv' else: hv_bus = to_bus vn_hv_kv = to_vn_kv lv_bus = from_bus vn_lv_kv = from_vn_kv tap_side = 'lv' if (from_vn_kv == to_vn_kv): logger.warning('The pypower branch %d (from_bus, to_bus)=(%d, %d) is considered as a transformer because of a ratio != 0 | 1 but it connects the same voltage level', i, ppc['branch'][(i, 0)], ppc['branch'][(i, 1)]) rk = ppc['branch'][(i, 2)] xk = ppc['branch'][(i, 3)] zk = (((rk ** 2) + (xk ** 2)) ** 0.5) sn = ppc['branch'][(i, 5)] if (sn == 0.0): sn = MAX_VAL logger.debug(('ppc branch rateA is zero -> Using MAX_VAL instead to calculate ' + 'apparent power')) ratio_1 = (0 if (ppc['branch'][(i, 8)] == 0) else ((ppc['branch'][(i, 8)] - 1) * 100)) i0_percent = ((((- ppc['branch'][(i, 4)]) * 100) * baseMVA) / sn) if (i0_percent < 0): logger.info('A transformer always behaves inductive consumpting but the susceptance of pypower branch %d (from_bus, to_bus)=(%d, %d) is positive.', i, ppc['branch'][(i, 0)], ppc['branch'][(i, 1)]) pp.create_transformer_from_parameters(net, hv_bus=hv_bus, lv_bus=lv_bus, sn_mva=sn, vn_hv_kv=vn_hv_kv, vn_lv_kv=vn_lv_kv, vk_percent=((((sign(xk) * zk) * sn) * 100) / baseMVA), vkr_percent=(((rk * sn) * 100) / baseMVA), max_loading_percent=100, pfe_kw=0, i0_percent=i0_percent, shift_degree=ppc['branch'][(i, 9)], tap_step_percent=abs(ratio_1), tap_pos=sign(ratio_1), tap_side=tap_side, tap_neutral=0) if ('gencost' in ppc): if (len(ppc['gencost'].shape) == 1): ppc['gencost'] = ppc['gencost'].reshape((1, (- 1))) if (ppc['gencost'].shape[0] <= gen_lookup.shape[0]): idx_p = range(ppc['gencost'].shape[0]) idx_q = [] elif (ppc['gencost'].shape[0] > gen_lookup.shape[0]): idx_p = range(gen_lookup.shape[0]) idx_q = range(gen_lookup.shape[0], ppc['gencost'].shape[0]) if (ppc['gencost'].shape[0] >= (2 * gen_lookup.shape[0])): idx_p = range(gen_lookup.shape[0]) idx_q = range(gen_lookup.shape[0], (2 * gen_lookup.shape[0])) for idx in idx_p: _create_costs(net, ppc, gen_lookup, 'p', idx) for idx in idx_q: _create_costs(net, ppc, gen_lookup, 'q', idx) if validate_conversion: logger.setLevel(logging.DEBUG) if (not validate_from_ppc(ppc, net, **kwargs)): logger.error('Validation failed.') net._options = {} net._options['gen_lookup'] = gen_lookup return net
-607,897,207,075,075,600
This function converts pypower case files to pandapower net structure. INPUT: **ppc** : The pypower case file. OPTIONAL: **f_hz** (float, 50) - The frequency of the network. **validate_conversion** (bool, False) - If True, validate_from_ppc is run after conversion. For running the validation, the ppc must already contain the pypower powerflow results or pypower must be importable. ****kwargs** keyword arguments for validate_from_ppc if validate_conversion is True OUTPUT: **net** : pandapower net. EXAMPLE: import pandapower.converter as pc from pypower import case4gs ppc_net = case4gs.case4gs() net = pc.from_ppc(ppc_net, f_hz=60)
pandapower/converter/pypower/from_ppc.py
from_ppc
BaraaUniKassel/pandapower
python
def from_ppc(ppc, f_hz=50, validate_conversion=False, **kwargs): '\n This function converts pypower case files to pandapower net structure.\n\n INPUT:\n\n **ppc** : The pypower case file.\n\n OPTIONAL:\n\n **f_hz** (float, 50) - The frequency of the network.\n\n **validate_conversion** (bool, False) - If True, validate_from_ppc is run after conversion.\n For running the validation, the ppc must already contain the pypower\n powerflow results or pypower must be importable.\n\n ****kwargs** keyword arguments for validate_from_ppc if validate_conversion is True\n\n OUTPUT:\n\n **net** : pandapower net.\n\n EXAMPLE:\n\n import pandapower.converter as pc\n\n from pypower import case4gs\n\n ppc_net = case4gs.case4gs()\n\n net = pc.from_ppc(ppc_net, f_hz=60)\n\n ' if Series((ppc['bus'][:, BASE_KV] <= 0)).any(): logger.info('There are false baseKV given in the pypower case file.') baseMVA = ppc['baseMVA'] omega = (pi * f_hz) MAX_VAL = 99999.0 net = pp.create_empty_network(f_hz=f_hz, sn_mva=baseMVA) for i in range(len(ppc['bus'])): pp.create_bus(net, name=int(ppc['bus'][(i, 0)]), vn_kv=ppc['bus'][(i, 9)], type='b', zone=ppc['bus'][(i, 10)], in_service=bool((ppc['bus'][(i, 1)] != 4)), max_vm_pu=ppc['bus'][(i, 11)], min_vm_pu=ppc['bus'][(i, 12)]) if (ppc['bus'][(i, 2)] > 0): pp.create_load(net, i, p_mw=ppc['bus'][(i, 2)], q_mvar=ppc['bus'][(i, 3)], controllable=False) elif (ppc['bus'][(i, 2)] < 0): pp.create_sgen(net, i, p_mw=(- ppc['bus'][(i, 2)]), q_mvar=(- ppc['bus'][(i, 3)]), type=, controllable=False) elif (ppc['bus'][(i, 3)] != 0): pp.create_load(net, i, p_mw=ppc['bus'][(i, 2)], q_mvar=ppc['bus'][(i, 3)], controllable=False) if ((ppc['bus'][(i, 4)] != 0) or (ppc['bus'][(i, 5)] != 0)): pp.create_shunt(net, i, p_mw=ppc['bus'][(i, 4)], q_mvar=(- ppc['bus'][(i, 5)])) gen_lookup = DataFrame(nan, columns=['element', 'element_type'], index=range(len(ppc['gen'][:, 0]))) if (len(ppc['gen'].shape) == 1): ppc['gen'] = array(ppc['gen'], ndmin=2) for i in range(len(ppc['gen'][:, 0])): (current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, last_same_bus_in_service_gen_idx) = _gen_bus_info(ppc, i) if (current_bus_type == 3): if (i == first_same_bus_in_service_gen_idx): gen_lookup.element.loc[i] = pp.create_ext_grid(net, bus=current_bus_idx, vm_pu=ppc['gen'][(last_same_bus_in_service_gen_idx, 5)], va_degree=ppc['bus'][(current_bus_idx, 8)], in_service=bool((ppc['gen'][(i, 7)] > 0)), max_p_mw=ppc['gen'][(i, PMAX)], min_p_mw=ppc['gen'][(i, PMIN)], max_q_mvar=ppc['gen'][(i, QMAX)], min_q_mvar=ppc['gen'][(i, QMIN)]) gen_lookup.element_type.loc[i] = 'ext_grid' if (ppc['gen'][(i, 4)] > ppc['gen'][(i, 3)]): logger.info(('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)) if ((- ppc['gen'][(i, 9)]) < (- ppc['gen'][(i, 8)])): logger.info(('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)) else: current_bus_type = 1 elif (current_bus_type == 2): if (i == first_same_bus_in_service_gen_idx): gen_lookup.element.loc[i] = pp.create_gen(net, bus=current_bus_idx, vm_pu=ppc['gen'][(last_same_bus_in_service_gen_idx, 5)], p_mw=ppc['gen'][(i, 1)], in_service=bool((ppc['gen'][(i, 7)] > 0)), controllable=True, max_p_mw=ppc['gen'][(i, PMAX)], min_p_mw=ppc['gen'][(i, PMIN)], max_q_mvar=ppc['gen'][(i, QMAX)], min_q_mvar=ppc['gen'][(i, QMIN)]) gen_lookup.element_type.loc[i] = 'gen' if (ppc['gen'][(i, 1)] < 0): logger.info(('p_mw of gen %d must be less than zero but is not.' 
% i)) if (ppc['gen'][(i, 4)] > ppc['gen'][(i, 3)]): logger.info(('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)) if ((- ppc['gen'][(i, 9)]) < (- ppc['gen'][(i, 8)])): logger.info(('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)) else: current_bus_type = 1 if (current_bus_type == 1): gen_lookup.element.loc[i] = pp.create_sgen(net, bus=current_bus_idx, p_mw=ppc['gen'][(i, 1)], q_mvar=ppc['gen'][(i, 2)], type=, in_service=bool((ppc['gen'][(i, 7)] > 0)), max_p_mw=ppc['gen'][(i, PMAX)], min_p_mw=ppc['gen'][(i, PMIN)], max_q_mvar=ppc['gen'][(i, QMAX)], min_q_mvar=ppc['gen'][(i, QMIN)], controllable=True) gen_lookup.element_type.loc[i] = 'sgen' if (ppc['gen'][(i, 1)] < 0): logger.info(('p_mw of sgen %d must be less than zero but is not.' % i)) if (ppc['gen'][(i, 4)] > ppc['gen'][(i, 3)]): logger.info(('min_q_mvar of gen %d must be less than max_q_mvar but is not.' % i)) if ((- ppc['gen'][(i, 9)]) < (- ppc['gen'][(i, 8)])): logger.info(('max_p_mw of gen %d must be less than min_p_mw but is not.' % i)) for i in range(len(ppc['branch'])): from_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][(i, 0)])) to_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][(i, 1)])) from_vn_kv = ppc['bus'][(from_bus, 9)] to_vn_kv = ppc['bus'][(to_bus, 9)] if (((from_vn_kv == to_vn_kv) & ((ppc['branch'][(i, 8)] == 0) | (ppc['branch'][(i, 8)] == 1))) & (ppc['branch'][(i, 9)] == 0)): Zni = ((ppc['bus'][(to_bus, 9)] ** 2) / baseMVA) max_i_ka = ((ppc['branch'][(i, 5)] / ppc['bus'][(to_bus, 9)]) / sqrt(3)) if (max_i_ka == 0.0): max_i_ka = MAX_VAL logger.debug(('ppc branch rateA is zero -> Using MAX_VAL instead to calculate ' + 'maximum branch flow')) pp.create_line_from_parameters(net, from_bus=from_bus, to_bus=to_bus, length_km=1, r_ohm_per_km=(ppc['branch'][(i, 2)] * Zni), x_ohm_per_km=(ppc['branch'][(i, 3)] * Zni), c_nf_per_km=((((ppc['branch'][(i, 4)] / Zni) / omega) * 1000000000.0) / 2), max_i_ka=max_i_ka, type='ol', max_loading_percent=100, in_service=bool(ppc['branch'][(i, 10)])) else: if (from_vn_kv >= to_vn_kv): hv_bus = from_bus vn_hv_kv = from_vn_kv lv_bus = to_bus vn_lv_kv = to_vn_kv tap_side = 'hv' else: hv_bus = to_bus vn_hv_kv = to_vn_kv lv_bus = from_bus vn_lv_kv = from_vn_kv tap_side = 'lv' if (from_vn_kv == to_vn_kv): logger.warning('The pypower branch %d (from_bus, to_bus)=(%d, %d) is considered as a transformer because of a ratio != 0 | 1 but it connects the same voltage level', i, ppc['branch'][(i, 0)], ppc['branch'][(i, 1)]) rk = ppc['branch'][(i, 2)] xk = ppc['branch'][(i, 3)] zk = (((rk ** 2) + (xk ** 2)) ** 0.5) sn = ppc['branch'][(i, 5)] if (sn == 0.0): sn = MAX_VAL logger.debug(('ppc branch rateA is zero -> Using MAX_VAL instead to calculate ' + 'apparent power')) ratio_1 = (0 if (ppc['branch'][(i, 8)] == 0) else ((ppc['branch'][(i, 8)] - 1) * 100)) i0_percent = ((((- ppc['branch'][(i, 4)]) * 100) * baseMVA) / sn) if (i0_percent < 0): logger.info('A transformer always behaves inductive consumpting but the susceptance of pypower branch %d (from_bus, to_bus)=(%d, %d) is positive.', i, ppc['branch'][(i, 0)], ppc['branch'][(i, 1)]) pp.create_transformer_from_parameters(net, hv_bus=hv_bus, lv_bus=lv_bus, sn_mva=sn, vn_hv_kv=vn_hv_kv, vn_lv_kv=vn_lv_kv, vk_percent=((((sign(xk) * zk) * sn) * 100) / baseMVA), vkr_percent=(((rk * sn) * 100) / baseMVA), max_loading_percent=100, pfe_kw=0, i0_percent=i0_percent, shift_degree=ppc['branch'][(i, 9)], tap_step_percent=abs(ratio_1), tap_pos=sign(ratio_1), tap_side=tap_side, tap_neutral=0) if ('gencost' 
in ppc): if (len(ppc['gencost'].shape) == 1): ppc['gencost'] = ppc['gencost'].reshape((1, (- 1))) if (ppc['gencost'].shape[0] <= gen_lookup.shape[0]): idx_p = range(ppc['gencost'].shape[0]) idx_q = [] elif (ppc['gencost'].shape[0] > gen_lookup.shape[0]): idx_p = range(gen_lookup.shape[0]) idx_q = range(gen_lookup.shape[0], ppc['gencost'].shape[0]) if (ppc['gencost'].shape[0] >= (2 * gen_lookup.shape[0])): idx_p = range(gen_lookup.shape[0]) idx_q = range(gen_lookup.shape[0], (2 * gen_lookup.shape[0])) for idx in idx_p: _create_costs(net, ppc, gen_lookup, 'p', idx) for idx in idx_q: _create_costs(net, ppc, gen_lookup, 'q', idx) if validate_conversion: logger.setLevel(logging.DEBUG) if (not validate_from_ppc(ppc, net, **kwargs)): logger.error('Validation failed.') net._options = {} net._options['gen_lookup'] = gen_lookup return net
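A minimal usage sketch for the converter record above, assuming pandapower and PYPOWER are both installed. It mirrors the case4gs example from the docstring and only adds an AC power flow on the converted net to show the result is usable; the column selection on res_bus is taken from the validation code further below.

```python
import pandapower as pp
import pandapower.converter as pc
from pypower import case4gs   # the same bundled PYPOWER case used in the docstring

ppc_net = case4gs.case4gs()            # raw PYPOWER case dict
net = pc.from_ppc(ppc_net, f_hz=60)    # convert to a pandapower net (US test case -> 60 Hz)

pp.runpp(net)                          # AC power flow on the converted net
print(net.res_bus[["vm_pu", "va_degree"]])
```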
def validate_from_ppc(ppc_net, net, pf_type='runpp', max_diff_values={'bus_vm_pu': 1e-06, 'bus_va_degree': 1e-05, 'branch_p_mw': 1e-06, 'branch_q_mvar': 1e-06, 'gen_p_mw': 1e-06, 'gen_q_mvar': 1e-06}, run=True): '\n This function validates the pypower case files to pandapower net structure conversion via a comparison of loadflow calculation results. (Hence the opf cost conversion is not validated.)\n\n INPUT:\n\n **ppc_net** - The pypower case file, which must already contain the pypower powerflow\n results or pypower must be importable.\n\n **net** - The pandapower network.\n\n OPTIONAL:\n\n **pf_type** ("runpp", string) - Type of validated power flow. Possible are ("runpp",\n "rundcpp", "runopp", "rundcopp")\n\n **max_diff_values** - Dict of maximal allowed difference values. The keys must be\n \'vm_pu\', \'va_degree\', \'p_branch_mw\', \'q_branch_mvar\', \'p_gen_mw\' and \'q_gen_mvar\' and\n the values floats.\n\n **run** (True, bool or list of two bools) - changing the value to False avoids trying to run\n (optimal) loadflows. Giving a list of two bools addresses first pypower and second\n pandapower.\n\n OUTPUT:\n\n **conversion_success** - conversion_success is returned as False if pypower or pandapower\n cannot calculate a powerflow or if the maximum difference values (max_diff_values )\n cannot be hold.\n\n EXAMPLE:\n\n import pandapower.converter as pc\n\n net = cv.from_ppc(ppc_net, f_hz=50)\n\n conversion_success = cv.validate_from_ppc(ppc_net, net)\n\n NOTE:\n\n The user has to take care that the loadflow results already are included in the provided ppc_net or pypower is importable.\n ' if ('opp' in pf_type): if (not (len(net.polynomial_cost) | len(net.piecewise_linear_cost))): if ('gencost' in ppc_net): if (not len(ppc_net['gencost'])): logger.debug('ppc and pandapower net do not include cost information.') return True else: logger.error('The pandapower net does not include cost information.') return False else: logger.debug('ppc and pandapower net do not include cost information.') return True run = ([run, run] if isinstance(run, bool) else run) if (pypower_import and run[0]): try: if (pf_type == 'runpp'): ppc_net = runpf.runpf(ppc_net, ppopt)[0] elif (pf_type == 'rundcpp'): ppc_net = rundcpf.rundcpf(ppc_net, ppopt)[0] elif (pf_type == 'runopp'): ppc_net = runopf.runopf(ppc_net, ppopt) elif (pf_type == 'rundcopp'): ppc_net = rundcopf.rundcopf(ppc_net, ppopt) else: raise ValueError(('The pf_type %s is unknown' % pf_type)) except: logger.debug('The pypower run did not work.') ppc_success = True if ('success' in ppc_net.keys()): if (ppc_net['success'] != 1): ppc_success = False logger.error(('The given ppc data indicates an unsuccessful pypower powerflow: ' + "'ppc_net['success'] != 1'")) if (ppc_net['branch'].shape[1] < 17): ppc_success = False logger.error('The shape of given ppc data indicates missing pypower powerflow results.') if run[1]: if (pf_type == 'runpp'): try: pp.runpp(net, init='dc', calculate_voltage_angles=True, trafo_model='pi') except pp.LoadflowNotConverged: try: pp.runpp(net, calculate_voltage_angles=True, init='flat', trafo_model='pi') except pp.LoadflowNotConverged: try: pp.runpp(net, trafo_model='pi', calculate_voltage_angles=False) if ('bus_va_degree' in max_diff_values.keys()): max_diff_values['bus_va_degree'] = (100.0 if (max_diff_values['bus_va_degree'] < 100.0) else max_diff_values['bus_va_degree']) logger.info('voltage_angles could be calculated.') except pp.LoadflowNotConverged: logger.error('The pandapower powerflow does not converge.') elif 
(pf_type == 'rundcpp'): try: pp.rundcpp(net, trafo_model='pi') except pp.LoadflowNotConverged: logger.error('The pandapower dc powerflow does not converge.') elif (pf_type == 'runopp'): try: pp.runopp(net, init='flat', calculate_voltage_angles=True) except pp.OPFNotConverged: try: pp.runopp(net, init='pf', calculate_voltage_angles=True) except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError): try: pp.runopp(net, init='flat', calculate_voltage_angles=False) logger.info('voltage_angles could be calculated.') if ('bus_va_degree' in max_diff_values.keys()): max_diff_values['bus_va_degree'] = (100.0 if (max_diff_values['bus_va_degree'] < 100.0) else max_diff_values['bus_va_degree']) except pp.OPFNotConverged: try: pp.runopp(net, init='pf', calculate_voltage_angles=False) if ('bus_va_degree' in max_diff_values.keys()): max_diff_values['bus_va_degree'] = (100.0 if (max_diff_values['bus_va_degree'] < 100.0) else max_diff_values['bus_va_degree']) logger.info('voltage_angles could be calculated.') except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError): logger.error('The pandapower optimal powerflow does not converge.') elif (pf_type == 'rundcopp'): try: pp.rundcopp(net) except pp.LoadflowNotConverged: logger.error('The pandapower dc optimal powerflow does not converge.') else: raise ValueError(('The pf_type %s is unknown' % pf_type)) if (not ppc_success): return False if ('opp' in pf_type): if (not net.OPF_converged): return elif (not net.converged): return False ppc_res = dict.fromkeys(ppc_elms) ppc_res['branch'] = ppc_net['branch'][:, 13:17] ppc_res['bus'] = ppc_net['bus'][:, 7:9] ppc_res['gen'] = ppc_net['gen'][:, 1:3] pp_res = dict.fromkeys(ppc_elms) pp_res['bus'] = array(net.res_bus.sort_index()[['vm_pu', 'va_degree']]) pp_res['gen'] = zeros([1, 2]) if (len(ppc_net['gen'].shape) == 1): ppc_net['gen'] = array(ppc_net['gen'], ndmin=2) GENS = DataFrame(ppc_net['gen'][:, [0]].astype(int)) GEN_uniq = GENS.drop_duplicates() already_used_gen = Series(zeros(GEN_uniq.shape[0]).astype(int), index=[int(v) for v in GEN_uniq.values]) change_q_compare = [] for (i, j) in GENS.iterrows(): (current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, last_same_bus_in_service_gen_idx) = _gen_bus_info(ppc_net, i) if ((current_bus_type == 3) and (i == first_same_bus_in_service_gen_idx)): pp_res['gen'] = append(pp_res['gen'], array(net.res_ext_grid[(net.ext_grid.bus == current_bus_idx)][['p_mw', 'q_mvar']]).reshape((1, 2)), 0) elif ((current_bus_type == 2) and (i == first_same_bus_in_service_gen_idx)): pp_res['gen'] = append(pp_res['gen'], array(net.res_gen[(net.gen.bus == current_bus_idx)][['p_mw', 'q_mvar']]).reshape((1, 2)), 0) else: pp_res['gen'] = append(pp_res['gen'], array(net.res_sgen[(net.sgen.bus == current_bus_idx)][['p_mw', 'q_mvar']])[already_used_gen.at[int(j)]].reshape((1, 2)), 0) already_used_gen.at[int(j)] += 1 change_q_compare += [int(j)] pp_res['gen'] = pp_res['gen'][1:, :] pp_res['branch'] = zeros([1, 4]) try: init1 = concat([net.line.from_bus, net.line.to_bus], axis=1, sort=True).drop_duplicates() init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1, sort=True).drop_duplicates() except TypeError: init1 = concat([net.line.from_bus, net.line.to_bus], axis=1).drop_duplicates() init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1).drop_duplicates() init1['hv_bus'] = nan init1['lv_bus'] = nan init2['from_bus'] = nan init2['to_bus'] = nan try: already_used_branches = concat([init1, init2], axis=0, sort=True) except TypeError: 
already_used_branches = concat([init1, init2], axis=0) already_used_branches['number'] = zeros([already_used_branches.shape[0], 1]).astype(int) BRANCHES = DataFrame(ppc_net['branch'][:, [0, 1, 8, 9]]) for i in BRANCHES.index: from_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][(i, 0)])) to_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][(i, 1)])) from_vn_kv = ppc_net['bus'][(from_bus, 9)] to_vn_kv = ppc_net['bus'][(to_bus, 9)] ratio = BRANCHES[2].at[i] angle = BRANCHES[3].at[i] if (((from_vn_kv == to_vn_kv) & ((ratio == 0) | (ratio == 1))) & (angle == 0)): pp_res['branch'] = append(pp_res['branch'], array(net.res_line[((net.line.from_bus == from_bus) & (net.line.to_bus == to_bus))][['p_from_mw', 'q_from_mvar', 'p_to_mw', 'q_to_mvar']])[int(already_used_branches.number.loc[((already_used_branches.from_bus == from_bus) & (already_used_branches.to_bus == to_bus))].values)].reshape(1, 4), 0) already_used_branches.number.loc[((already_used_branches.from_bus == from_bus) & (already_used_branches.to_bus == to_bus))] += 1 elif (from_vn_kv >= to_vn_kv): pp_res['branch'] = append(pp_res['branch'], array(net.res_trafo[((net.trafo.hv_bus == from_bus) & (net.trafo.lv_bus == to_bus))][['p_hv_mw', 'q_hv_mvar', 'p_lv_mw', 'q_lv_mvar']])[int(already_used_branches.number.loc[((already_used_branches.hv_bus == from_bus) & (already_used_branches.lv_bus == to_bus))].values)].reshape(1, 4), 0) already_used_branches.number.loc[((already_used_branches.hv_bus == from_bus) & (already_used_branches.lv_bus == to_bus))] += 1 else: pp_res['branch'] = append(pp_res['branch'], array(net.res_trafo[((net.trafo.hv_bus == to_bus) & (net.trafo.lv_bus == from_bus))][['p_lv_mw', 'q_lv_mvar', 'p_hv_mw', 'q_hv_mvar']])[int(already_used_branches.number.loc[((already_used_branches.hv_bus == to_bus) & (already_used_branches.lv_bus == from_bus))].values)].reshape(1, 4), 0) already_used_branches.number.loc[((already_used_branches.hv_bus == to_bus) & (already_used_branches.lv_bus == from_bus))] += 1 pp_res['branch'] = pp_res['branch'][1:, :] diff_res = dict.fromkeys(ppc_elms) diff_res['bus'] = (ppc_res['bus'] - pp_res['bus']) diff_res['bus'][:, 1] -= diff_res['bus'][(0, 1)] diff_res['branch'] = (ppc_res['branch'] - pp_res['branch']) diff_res['gen'] = (ppc_res['gen'] - pp_res['gen']) for i in GEN_uniq.loc[GEN_uniq[0].isin(change_q_compare)].index: next_is = GEN_uniq.index[(GEN_uniq.index > i)] if (len(next_is) > 0): next_i = next_is[0] else: next_i = (GENS.index[(- 1)] + 1) if ((next_i - i) > 1): diff_res['gen'][i:next_i, 1] = sum(diff_res['gen'][i:next_i, 1]) logger.debug(('Maximum voltage magnitude difference between pypower and pandapower: %.2e pu' % max_(abs(diff_res['bus'][:, 0])))) logger.debug(('Maximum voltage angle difference between pypower and pandapower: %.2e degree' % max_(abs(diff_res['bus'][:, 1])))) logger.debug(('Maximum branch flow active power difference between pypower and pandapower: %.2e MW' % max_(abs(diff_res['branch'][:, [0, 2]])))) logger.debug(('Maximum branch flow reactive power difference between pypower and pandapower: %.2e MVAr' % max_(abs(diff_res['branch'][:, [1, 3]])))) logger.debug(('Maximum active power generation difference between pypower and pandapower: %.2e MW' % max_(abs(diff_res['gen'][:, 0])))) logger.debug(('Maximum reactive power generation difference between pypower and pandapower: %.2e MVAr' % max_(abs(diff_res['gen'][:, 1])))) if (_validate_diff_res(diff_res, {'bus_vm_pu': 0.001, 'bus_va_degree': 0.001, 'branch_p_mw': 1e-06, 'branch_q_mvar': 1e-06}) and 
(max_(abs(diff_res['gen'])) > 0.1).any()): logger.debug('The active/reactive power generation difference possibly results because of a pypower error. Please validate the results via pypower loadflow.') if isinstance(max_diff_values, dict): return _validate_diff_res(diff_res, max_diff_values) else: logger.debug("'max_diff_values' must be a dict.")
-2,964,167,797,680,866,300
This function validates the conversion of pypower case files to the pandapower net structure by comparing loadflow calculation results. (Hence the opf cost conversion is not validated.) INPUT: **ppc_net** - The pypower case file, which must already contain the pypower powerflow results, or pypower must be importable. **net** - The pandapower network. OPTIONAL: **pf_type** ("runpp", string) - Type of validated power flow. Possible values are ("runpp", "rundcpp", "runopp", "rundcopp") **max_diff_values** - Dict of maximal allowed difference values. The keys must be 'bus_vm_pu', 'bus_va_degree', 'branch_p_mw', 'branch_q_mvar', 'gen_p_mw' and 'gen_q_mvar' and the values floats. **run** (True, bool or list of two bools) - Changing the value to False avoids trying to run (optimal) loadflows. Giving a list of two bools addresses first pypower and second pandapower. OUTPUT: **conversion_success** - conversion_success is returned as False if pypower or pandapower cannot calculate a powerflow or if the maximum difference values (max_diff_values) cannot be held. EXAMPLE: import pandapower.converter as pc net = pc.from_ppc(ppc_net, f_hz=50) conversion_success = pc.validate_from_ppc(ppc_net, net) NOTE: The user has to make sure that the loadflow results are already included in the provided ppc_net, or that pypower is importable.
pandapower/converter/pypower/from_ppc.py
validate_from_ppc
BaraaUniKassel/pandapower
python
def validate_from_ppc(ppc_net, net, pf_type='runpp', max_diff_values={'bus_vm_pu': 1e-06, 'bus_va_degree': 1e-05, 'branch_p_mw': 1e-06, 'branch_q_mvar': 1e-06, 'gen_p_mw': 1e-06, 'gen_q_mvar': 1e-06}, run=True): '\n This function validates the pypower case files to pandapower net structure conversion via a comparison of loadflow calculation results. (Hence the opf cost conversion is not validated.)\n\n INPUT:\n\n **ppc_net** - The pypower case file, which must already contain the pypower powerflow\n results or pypower must be importable.\n\n **net** - The pandapower network.\n\n OPTIONAL:\n\n **pf_type** ("runpp", string) - Type of validated power flow. Possible are ("runpp",\n "rundcpp", "runopp", "rundcopp")\n\n **max_diff_values** - Dict of maximal allowed difference values. The keys must be\n \'vm_pu\', \'va_degree\', \'p_branch_mw\', \'q_branch_mvar\', \'p_gen_mw\' and \'q_gen_mvar\' and\n the values floats.\n\n **run** (True, bool or list of two bools) - changing the value to False avoids trying to run\n (optimal) loadflows. Giving a list of two bools addresses first pypower and second\n pandapower.\n\n OUTPUT:\n\n **conversion_success** - conversion_success is returned as False if pypower or pandapower\n cannot calculate a powerflow or if the maximum difference values (max_diff_values )\n cannot be hold.\n\n EXAMPLE:\n\n import pandapower.converter as pc\n\n net = cv.from_ppc(ppc_net, f_hz=50)\n\n conversion_success = cv.validate_from_ppc(ppc_net, net)\n\n NOTE:\n\n The user has to take care that the loadflow results already are included in the provided ppc_net or pypower is importable.\n ' if ('opp' in pf_type): if (not (len(net.polynomial_cost) | len(net.piecewise_linear_cost))): if ('gencost' in ppc_net): if (not len(ppc_net['gencost'])): logger.debug('ppc and pandapower net do not include cost information.') return True else: logger.error('The pandapower net does not include cost information.') return False else: logger.debug('ppc and pandapower net do not include cost information.') return True run = ([run, run] if isinstance(run, bool) else run) if (pypower_import and run[0]): try: if (pf_type == 'runpp'): ppc_net = runpf.runpf(ppc_net, ppopt)[0] elif (pf_type == 'rundcpp'): ppc_net = rundcpf.rundcpf(ppc_net, ppopt)[0] elif (pf_type == 'runopp'): ppc_net = runopf.runopf(ppc_net, ppopt) elif (pf_type == 'rundcopp'): ppc_net = rundcopf.rundcopf(ppc_net, ppopt) else: raise ValueError(('The pf_type %s is unknown' % pf_type)) except: logger.debug('The pypower run did not work.') ppc_success = True if ('success' in ppc_net.keys()): if (ppc_net['success'] != 1): ppc_success = False logger.error(('The given ppc data indicates an unsuccessful pypower powerflow: ' + "'ppc_net['success'] != 1'")) if (ppc_net['branch'].shape[1] < 17): ppc_success = False logger.error('The shape of given ppc data indicates missing pypower powerflow results.') if run[1]: if (pf_type == 'runpp'): try: pp.runpp(net, init='dc', calculate_voltage_angles=True, trafo_model='pi') except pp.LoadflowNotConverged: try: pp.runpp(net, calculate_voltage_angles=True, init='flat', trafo_model='pi') except pp.LoadflowNotConverged: try: pp.runpp(net, trafo_model='pi', calculate_voltage_angles=False) if ('bus_va_degree' in max_diff_values.keys()): max_diff_values['bus_va_degree'] = (100.0 if (max_diff_values['bus_va_degree'] < 100.0) else max_diff_values['bus_va_degree']) logger.info('voltage_angles could be calculated.') except pp.LoadflowNotConverged: logger.error('The pandapower powerflow does not converge.') elif 
(pf_type == 'rundcpp'): try: pp.rundcpp(net, trafo_model='pi') except pp.LoadflowNotConverged: logger.error('The pandapower dc powerflow does not converge.') elif (pf_type == 'runopp'): try: pp.runopp(net, init='flat', calculate_voltage_angles=True) except pp.OPFNotConverged: try: pp.runopp(net, init='pf', calculate_voltage_angles=True) except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError): try: pp.runopp(net, init='flat', calculate_voltage_angles=False) logger.info('voltage_angles could be calculated.') if ('bus_va_degree' in max_diff_values.keys()): max_diff_values['bus_va_degree'] = (100.0 if (max_diff_values['bus_va_degree'] < 100.0) else max_diff_values['bus_va_degree']) except pp.OPFNotConverged: try: pp.runopp(net, init='pf', calculate_voltage_angles=False) if ('bus_va_degree' in max_diff_values.keys()): max_diff_values['bus_va_degree'] = (100.0 if (max_diff_values['bus_va_degree'] < 100.0) else max_diff_values['bus_va_degree']) logger.info('voltage_angles could be calculated.') except (pp.OPFNotConverged, pp.LoadflowNotConverged, KeyError): logger.error('The pandapower optimal powerflow does not converge.') elif (pf_type == 'rundcopp'): try: pp.rundcopp(net) except pp.LoadflowNotConverged: logger.error('The pandapower dc optimal powerflow does not converge.') else: raise ValueError(('The pf_type %s is unknown' % pf_type)) if (not ppc_success): return False if ('opp' in pf_type): if (not net.OPF_converged): return elif (not net.converged): return False ppc_res = dict.fromkeys(ppc_elms) ppc_res['branch'] = ppc_net['branch'][:, 13:17] ppc_res['bus'] = ppc_net['bus'][:, 7:9] ppc_res['gen'] = ppc_net['gen'][:, 1:3] pp_res = dict.fromkeys(ppc_elms) pp_res['bus'] = array(net.res_bus.sort_index()[['vm_pu', 'va_degree']]) pp_res['gen'] = zeros([1, 2]) if (len(ppc_net['gen'].shape) == 1): ppc_net['gen'] = array(ppc_net['gen'], ndmin=2) GENS = DataFrame(ppc_net['gen'][:, [0]].astype(int)) GEN_uniq = GENS.drop_duplicates() already_used_gen = Series(zeros(GEN_uniq.shape[0]).astype(int), index=[int(v) for v in GEN_uniq.values]) change_q_compare = [] for (i, j) in GENS.iterrows(): (current_bus_type, current_bus_idx, same_bus_gen_idx, first_same_bus_in_service_gen_idx, last_same_bus_in_service_gen_idx) = _gen_bus_info(ppc_net, i) if ((current_bus_type == 3) and (i == first_same_bus_in_service_gen_idx)): pp_res['gen'] = append(pp_res['gen'], array(net.res_ext_grid[(net.ext_grid.bus == current_bus_idx)][['p_mw', 'q_mvar']]).reshape((1, 2)), 0) elif ((current_bus_type == 2) and (i == first_same_bus_in_service_gen_idx)): pp_res['gen'] = append(pp_res['gen'], array(net.res_gen[(net.gen.bus == current_bus_idx)][['p_mw', 'q_mvar']]).reshape((1, 2)), 0) else: pp_res['gen'] = append(pp_res['gen'], array(net.res_sgen[(net.sgen.bus == current_bus_idx)][['p_mw', 'q_mvar']])[already_used_gen.at[int(j)]].reshape((1, 2)), 0) already_used_gen.at[int(j)] += 1 change_q_compare += [int(j)] pp_res['gen'] = pp_res['gen'][1:, :] pp_res['branch'] = zeros([1, 4]) try: init1 = concat([net.line.from_bus, net.line.to_bus], axis=1, sort=True).drop_duplicates() init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1, sort=True).drop_duplicates() except TypeError: init1 = concat([net.line.from_bus, net.line.to_bus], axis=1).drop_duplicates() init2 = concat([net.trafo.hv_bus, net.trafo.lv_bus], axis=1).drop_duplicates() init1['hv_bus'] = nan init1['lv_bus'] = nan init2['from_bus'] = nan init2['to_bus'] = nan try: already_used_branches = concat([init1, init2], axis=0, sort=True) except TypeError: 
already_used_branches = concat([init1, init2], axis=0) already_used_branches['number'] = zeros([already_used_branches.shape[0], 1]).astype(int) BRANCHES = DataFrame(ppc_net['branch'][:, [0, 1, 8, 9]]) for i in BRANCHES.index: from_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][(i, 0)])) to_bus = pp.get_element_index(net, 'bus', name=int(ppc_net['branch'][(i, 1)])) from_vn_kv = ppc_net['bus'][(from_bus, 9)] to_vn_kv = ppc_net['bus'][(to_bus, 9)] ratio = BRANCHES[2].at[i] angle = BRANCHES[3].at[i] if (((from_vn_kv == to_vn_kv) & ((ratio == 0) | (ratio == 1))) & (angle == 0)): pp_res['branch'] = append(pp_res['branch'], array(net.res_line[((net.line.from_bus == from_bus) & (net.line.to_bus == to_bus))][['p_from_mw', 'q_from_mvar', 'p_to_mw', 'q_to_mvar']])[int(already_used_branches.number.loc[((already_used_branches.from_bus == from_bus) & (already_used_branches.to_bus == to_bus))].values)].reshape(1, 4), 0) already_used_branches.number.loc[((already_used_branches.from_bus == from_bus) & (already_used_branches.to_bus == to_bus))] += 1 elif (from_vn_kv >= to_vn_kv): pp_res['branch'] = append(pp_res['branch'], array(net.res_trafo[((net.trafo.hv_bus == from_bus) & (net.trafo.lv_bus == to_bus))][['p_hv_mw', 'q_hv_mvar', 'p_lv_mw', 'q_lv_mvar']])[int(already_used_branches.number.loc[((already_used_branches.hv_bus == from_bus) & (already_used_branches.lv_bus == to_bus))].values)].reshape(1, 4), 0) already_used_branches.number.loc[((already_used_branches.hv_bus == from_bus) & (already_used_branches.lv_bus == to_bus))] += 1 else: pp_res['branch'] = append(pp_res['branch'], array(net.res_trafo[((net.trafo.hv_bus == to_bus) & (net.trafo.lv_bus == from_bus))][['p_lv_mw', 'q_lv_mvar', 'p_hv_mw', 'q_hv_mvar']])[int(already_used_branches.number.loc[((already_used_branches.hv_bus == to_bus) & (already_used_branches.lv_bus == from_bus))].values)].reshape(1, 4), 0) already_used_branches.number.loc[((already_used_branches.hv_bus == to_bus) & (already_used_branches.lv_bus == from_bus))] += 1 pp_res['branch'] = pp_res['branch'][1:, :] diff_res = dict.fromkeys(ppc_elms) diff_res['bus'] = (ppc_res['bus'] - pp_res['bus']) diff_res['bus'][:, 1] -= diff_res['bus'][(0, 1)] diff_res['branch'] = (ppc_res['branch'] - pp_res['branch']) diff_res['gen'] = (ppc_res['gen'] - pp_res['gen']) for i in GEN_uniq.loc[GEN_uniq[0].isin(change_q_compare)].index: next_is = GEN_uniq.index[(GEN_uniq.index > i)] if (len(next_is) > 0): next_i = next_is[0] else: next_i = (GENS.index[(- 1)] + 1) if ((next_i - i) > 1): diff_res['gen'][i:next_i, 1] = sum(diff_res['gen'][i:next_i, 1]) logger.debug(('Maximum voltage magnitude difference between pypower and pandapower: %.2e pu' % max_(abs(diff_res['bus'][:, 0])))) logger.debug(('Maximum voltage angle difference between pypower and pandapower: %.2e degree' % max_(abs(diff_res['bus'][:, 1])))) logger.debug(('Maximum branch flow active power difference between pypower and pandapower: %.2e MW' % max_(abs(diff_res['branch'][:, [0, 2]])))) logger.debug(('Maximum branch flow reactive power difference between pypower and pandapower: %.2e MVAr' % max_(abs(diff_res['branch'][:, [1, 3]])))) logger.debug(('Maximum active power generation difference between pypower and pandapower: %.2e MW' % max_(abs(diff_res['gen'][:, 0])))) logger.debug(('Maximum reactive power generation difference between pypower and pandapower: %.2e MVAr' % max_(abs(diff_res['gen'][:, 1])))) if (_validate_diff_res(diff_res, {'bus_vm_pu': 0.001, 'bus_va_degree': 0.001, 'branch_p_mw': 1e-06, 'branch_q_mvar': 1e-06}) and 
(max_(abs(diff_res['gen'])) > 0.1).any()): logger.debug('The active/reactive power generation difference possibly results because of a pypower error. Please validate the results via pypower loadflow.') if isinstance(max_diff_values, dict): return _validate_diff_res(diff_res, max_diff_values) else: logger.debug("'max_diff_values' must be a dict.")
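Building on the previous sketch, this is how the validation record above might be exercised. It assumes PYPOWER is importable so validate_from_ppc can run the reference power flow itself, and it simply spells out the default tolerances from the signature.

```python
import pandapower.converter as pc
from pypower import case4gs

ppc_net = case4gs.case4gs()
net = pc.from_ppc(ppc_net, f_hz=60)

# Compares pandapower's runpp results against PYPOWER's runpf results and
# returns False if any difference exceeds the given tolerances.
ok = pc.validate_from_ppc(
    ppc_net, net, pf_type="runpp",
    max_diff_values={"bus_vm_pu": 1e-6, "bus_va_degree": 1e-5,
                     "branch_p_mw": 1e-6, "branch_q_mvar": 1e-6,
                     "gen_p_mw": 1e-6, "gen_q_mvar": 1e-6})
print("conversion validated:", ok)
```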
def _get_zh_a_page_count() -> int: '\n 所有股票的总页数\n http://vip.stock.finance.sina.com.cn/mkt/#hs_a\n :return: 需要抓取的股票总页数\n :rtype: int\n ' res = requests.get(zh_sina_a_stock_count_url) page_count = (int(re.findall(re.compile('\\d+'), res.text)[0]) / 80) if isinstance(page_count, int): return page_count else: return (int(page_count) + 1)
5,514,657,700,420,927,000
Total number of pages listing all A-share stocks http://vip.stock.finance.sina.com.cn/mkt/#hs_a :return: the number of stock pages that need to be fetched :rtype: int
akshare/stock/zh_stock_a_sina.py
_get_zh_a_page_count
fellowfun/akshare
python
def _get_zh_a_page_count() -> int: '\n 所有股票的总页数\n http://vip.stock.finance.sina.com.cn/mkt/#hs_a\n :return: 需要抓取的股票总页数\n :rtype: int\n ' res = requests.get(zh_sina_a_stock_count_url) page_count = (int(re.findall(re.compile('\\d+'), res.text)[0]) / 80) if isinstance(page_count, int): return page_count else: return (int(page_count) + 1)
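The record above derives the page count from Sina's stock total at 80 entries per page. As a side note, under Python 3 the division by 80 always yields a float, so the isinstance(page_count, int) branch never fires; the sketch below expresses the same ceiling-division intent directly (it is an illustration, not the library code).

```python
import math

def page_count_for(total_stocks: int, per_page: int = 80) -> int:
    # ceil(total / per_page): the last, possibly partial, page still counts
    return math.ceil(total_stocks / per_page)

assert page_count_for(3760) == 47   # exactly divisible
assert page_count_for(3761) == 48   # one extra stock needs one extra page
```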
def stock_zh_a_spot() -> pd.DataFrame: '\n 从新浪财经-A股获取所有A股的实时行情数据, 重复运行本函数会被新浪暂时封 IP\n http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk\n :return: pandas.DataFrame\n symbol code name trade pricechange changepercent buy 0 sh600000 600000 浦发银行 12.920 -0.030 -0.232 12.920\n 1 sh600004 600004 白云机场 18.110 -0.370 -2.002 18.110\n 2 sh600006 600006 东风汽车 4.410 -0.030 -0.676 4.410\n 3 sh600007 600007 中国国贸 17.240 -0.360 -2.045 17.240\n 4 sh600008 600008 首创股份 3.320 -0.030 -0.896 3.310\n ... ... ... ... ... ... ...\n 3755 sh600096 600096 云天化 5.270 -0.220 -4.007 5.270\n 3756 sh600097 600097 开创国际 10.180 -0.120 -1.165 10.180\n 3757 sh600098 600098 广州发展 6.550 -0.040 -0.607 6.540\n 3758 sh600099 600099 林海股份 6.540 -0.150 -2.242 6.540\n 3759 sh600100 600100 同方股份 8.200 -0.100 -1.205 8.200\n sell settlement open high low volume amount 0 12.930 12.950 12.950 13.100 12.860 46023920 597016896\n 1 18.120 18.480 18.510 18.510 17.880 24175071 437419344\n 2 4.420 4.440 4.490 4.490 4.410 4304900 19130233\n 3 17.280 17.600 17.670 17.670 17.220 684801 11879731\n 4 3.320 3.350 3.360 3.360 3.300 8284294 27579688\n ... ... ... ... ... ... ...\n 3755 5.280 5.490 5.490 5.500 5.220 16964636 90595172\n 3756 10.190 10.300 10.220 10.340 10.090 1001676 10231669\n 3757 6.550 6.590 6.560 6.620 6.500 1996449 13098901\n 3758 6.580 6.690 6.650 6.680 6.530 1866180 12314997\n 3759 8.210 8.300 8.300 8.310 8.120 12087236 99281447\n ticktime per pb mktcap nmc turnoverratio\n 0 15:00:00 6.984 0.790 3.792289e+07 3.631006e+07 0.16376\n 1 15:00:07 32.927 2.365 3.747539e+06 3.747539e+06 1.16826\n 2 15:00:02 15.926 1.207 8.820000e+05 8.820000e+05 0.21525\n 3 15:00:02 22.390 2.367 1.736555e+06 1.736555e+06 0.06798\n 4 15:00:07 22.912 1.730 1.887569e+06 1.600444e+06 0.17185\n ... ... ... ... ... ...\n 3755 15:00:00 56.728 1.566 7.523847e+05 6.963668e+05 1.28386\n 3756 15:00:00 17.552 1.434 2.452734e+05 2.303459e+05 0.44268\n 3757 15:00:00 25.476 1.059 1.785659e+06 1.785659e+06 0.07323\n 3758 15:00:00 540.496 3.023 1.433045e+05 1.433045e+05 0.85167\n 3759 15:00:07 -6.264 1.465 2.430397e+06 2.430397e+06 0.40782\n ' big_df = pd.DataFrame() page_count = _get_zh_a_page_count() zh_sina_stock_payload_copy = zh_sina_a_stock_payload.copy() for page in tqdm(range(1, (page_count + 1)), desc='Please wait for a moment'): zh_sina_stock_payload_copy.update({'page': page}) r = requests.get(zh_sina_a_stock_url, params=zh_sina_stock_payload_copy) data_json = demjson.decode(r.text) big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True) return big_df
-3,537,146,474,981,795,300
Fetch real-time quotes for all A-share stocks from Sina Finance; running this function repeatedly will get the caller's IP temporarily blocked by Sina http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk :return: pandas.DataFrame with one row per stock and columns symbol, code, name, trade, pricechange, changepercent, buy, sell, settlement, open, high, low, volume, amount, ticktime, per, pb, mktcap, nmc, turnoverratio (the original docstring shows a sample table of roughly 3760 rows of these columns)
akshare/stock/zh_stock_a_sina.py
stock_zh_a_spot
fellowfun/akshare
python
def stock_zh_a_spot() -> pd.DataFrame: '\n 从新浪财经-A股获取所有A股的实时行情数据, 重复运行本函数会被新浪暂时封 IP\n http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk\n :return: pandas.DataFrame\n symbol code name trade pricechange changepercent buy 0 sh600000 600000 浦发银行 12.920 -0.030 -0.232 12.920\n 1 sh600004 600004 白云机场 18.110 -0.370 -2.002 18.110\n 2 sh600006 600006 东风汽车 4.410 -0.030 -0.676 4.410\n 3 sh600007 600007 中国国贸 17.240 -0.360 -2.045 17.240\n 4 sh600008 600008 首创股份 3.320 -0.030 -0.896 3.310\n ... ... ... ... ... ... ...\n 3755 sh600096 600096 云天化 5.270 -0.220 -4.007 5.270\n 3756 sh600097 600097 开创国际 10.180 -0.120 -1.165 10.180\n 3757 sh600098 600098 广州发展 6.550 -0.040 -0.607 6.540\n 3758 sh600099 600099 林海股份 6.540 -0.150 -2.242 6.540\n 3759 sh600100 600100 同方股份 8.200 -0.100 -1.205 8.200\n sell settlement open high low volume amount 0 12.930 12.950 12.950 13.100 12.860 46023920 597016896\n 1 18.120 18.480 18.510 18.510 17.880 24175071 437419344\n 2 4.420 4.440 4.490 4.490 4.410 4304900 19130233\n 3 17.280 17.600 17.670 17.670 17.220 684801 11879731\n 4 3.320 3.350 3.360 3.360 3.300 8284294 27579688\n ... ... ... ... ... ... ...\n 3755 5.280 5.490 5.490 5.500 5.220 16964636 90595172\n 3756 10.190 10.300 10.220 10.340 10.090 1001676 10231669\n 3757 6.550 6.590 6.560 6.620 6.500 1996449 13098901\n 3758 6.580 6.690 6.650 6.680 6.530 1866180 12314997\n 3759 8.210 8.300 8.300 8.310 8.120 12087236 99281447\n ticktime per pb mktcap nmc turnoverratio\n 0 15:00:00 6.984 0.790 3.792289e+07 3.631006e+07 0.16376\n 1 15:00:07 32.927 2.365 3.747539e+06 3.747539e+06 1.16826\n 2 15:00:02 15.926 1.207 8.820000e+05 8.820000e+05 0.21525\n 3 15:00:02 22.390 2.367 1.736555e+06 1.736555e+06 0.06798\n 4 15:00:07 22.912 1.730 1.887569e+06 1.600444e+06 0.17185\n ... ... ... ... ... ...\n 3755 15:00:00 56.728 1.566 7.523847e+05 6.963668e+05 1.28386\n 3756 15:00:00 17.552 1.434 2.452734e+05 2.303459e+05 0.44268\n 3757 15:00:00 25.476 1.059 1.785659e+06 1.785659e+06 0.07323\n 3758 15:00:00 540.496 3.023 1.433045e+05 1.433045e+05 0.85167\n 3759 15:00:07 -6.264 1.465 2.430397e+06 2.430397e+06 0.40782\n ' big_df = pd.DataFrame() page_count = _get_zh_a_page_count() zh_sina_stock_payload_copy = zh_sina_a_stock_payload.copy() for page in tqdm(range(1, (page_count + 1)), desc='Please wait for a moment'): zh_sina_stock_payload_copy.update({'page': page}) r = requests.get(zh_sina_a_stock_url, params=zh_sina_stock_payload_copy) data_json = demjson.decode(r.text) big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True) return big_df
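A usage sketch for the spot-quote record above, assuming an akshare version that still exposes stock_zh_a_spot at the package level; as the docstring warns, repeated calls may get the caller's IP temporarily blocked by Sina.

```python
import akshare as ak

spot_df = ak.stock_zh_a_spot()   # fetches every A-share page, showing a progress bar
print(spot_df.shape)
print(spot_df[["symbol", "name", "trade", "changepercent"]].head())
```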
def stock_zh_a_daily(symbol: str='sz000613', adjust: str='qfq') -> pd.DataFrame: '\n 新浪财经-A股-个股的历史行情数据, 大量抓取容易封IP\n :param symbol: sh600000\n :type symbol: str\n :param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据; hfq-factor: 返回后复权因子; hfq-factor: 返回前复权因子\n :type adjust: str\n :return: specific data\n :rtype: pandas.DataFrame\n ' res = requests.get(zh_sina_a_stock_hist_url.format(symbol)) js_code = execjs.compile(hk_js_decode) dict_list = js_code.call('d', res.text.split('=')[1].split(';')[0].replace('"', '')) data_df = pd.DataFrame(dict_list) data_df['date'] = data_df['date'].str.split('T', expand=True).iloc[:, 0] data_df.index = pd.to_datetime(data_df['date']) del data_df['date'] data_df = data_df.astype('float') r = requests.get(zh_sina_a_stock_amount_url.format(symbol, symbol)) amount_data_json = demjson.decode(r.text[r.text.find('['):(r.text.rfind(']') + 1)]) amount_data_df = pd.DataFrame(amount_data_json) amount_data_df.index = pd.to_datetime(amount_data_df.date) del amount_data_df['date'] temp_df = pd.merge(data_df, amount_data_df, left_index=True, right_index=True, how='left') temp_df.fillna(method='ffill', inplace=True) temp_df = temp_df.astype(float) temp_df['amount'] = (temp_df['amount'] * 10000) temp_df['turnover'] = (temp_df['volume'] / temp_df['amount']) temp_df.columns = ['open', 'high', 'low', 'close', 'volume', 'outstanding_share', 'turnover'] if (adjust == ''): return temp_df if (adjust == 'hfq'): res = requests.get(zh_sina_a_stock_hfq_url.format(symbol)) hfq_factor_df = pd.DataFrame(eval(res.text.split('=')[1].split('\n')[0])['data']) hfq_factor_df.columns = ['date', 'hfq_factor'] hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date) del hfq_factor_df['date'] temp_df = pd.merge(temp_df, hfq_factor_df, left_index=True, right_index=True, how='left') temp_df.fillna(method='ffill', inplace=True) temp_df = temp_df.astype(float) temp_df['open'] = (temp_df['open'] * temp_df['hfq_factor']) temp_df['high'] = (temp_df['high'] * temp_df['hfq_factor']) temp_df['close'] = (temp_df['close'] * temp_df['hfq_factor']) temp_df['low'] = (temp_df['low'] * temp_df['hfq_factor']) return temp_df.iloc[:, :(- 1)] if (adjust == 'qfq'): res = requests.get(zh_sina_a_stock_qfq_url.format(symbol)) qfq_factor_df = pd.DataFrame(eval(res.text.split('=')[1].split('\n')[0])['data']) qfq_factor_df.columns = ['date', 'qfq_factor'] qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date) del qfq_factor_df['date'] temp_df = pd.merge(temp_df, qfq_factor_df, left_index=True, right_index=True, how='left') temp_df.fillna(method='ffill', inplace=True) temp_df = temp_df.astype(float) temp_df['open'] = (temp_df['open'] / temp_df['qfq_factor']) temp_df['high'] = (temp_df['high'] / temp_df['qfq_factor']) temp_df['close'] = (temp_df['close'] / temp_df['qfq_factor']) temp_df['low'] = (temp_df['low'] / temp_df['qfq_factor']) return temp_df.iloc[:, :(- 1)] if (adjust == 'hfq-factor'): res = requests.get(zh_sina_a_stock_hfq_url.format(symbol)) hfq_factor_df = pd.DataFrame(eval(res.text.split('=')[1].split('\n')[0])['data']) hfq_factor_df.columns = ['date', 'hfq_factor'] hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date) del hfq_factor_df['date'] return hfq_factor_df if (adjust == 'qfq-factor'): res = requests.get(zh_sina_a_stock_qfq_url.format(symbol)) qfq_factor_df = pd.DataFrame(eval(res.text.split('=')[1].split('\n')[0])['data']) qfq_factor_df.columns = ['date', 'qfq_factor'] qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date) del qfq_factor_df['date'] return qfq_factor_df
1,219,581,013,612,928,500
Sina Finance - A-share - historical daily quotes for a single stock; heavy scraping can get the caller's IP temporarily banned :param symbol: sh600000 :type symbol: str :param adjust: empty by default: return unadjusted data; qfq: return forward-adjusted data; hfq: return backward-adjusted data; hfq-factor: return the backward adjustment factors; qfq-factor: return the forward adjustment factors :type adjust: str :return: specific data :rtype: pandas.DataFrame
akshare/stock/zh_stock_a_sina.py
stock_zh_a_daily
fellowfun/akshare
python
def stock_zh_a_daily(symbol: str='sz000613', adjust: str='qfq') -> pd.DataFrame: '\n 新浪财经-A股-个股的历史行情数据, 大量抓取容易封IP\n :param symbol: sh600000\n :type symbol: str\n :param adjust: 默认为空: 返回不复权的数据; qfq: 返回前复权后的数据; hfq: 返回后复权后的数据; hfq-factor: 返回后复权因子; hfq-factor: 返回前复权因子\n :type adjust: str\n :return: specific data\n :rtype: pandas.DataFrame\n ' res = requests.get(zh_sina_a_stock_hist_url.format(symbol)) js_code = execjs.compile(hk_js_decode) dict_list = js_code.call('d', res.text.split('=')[1].split(';')[0].replace('"', )) data_df = pd.DataFrame(dict_list) data_df['date'] = data_df['date'].str.split('T', expand=True).iloc[:, 0] data_df.index = pd.to_datetime(data_df['date']) del data_df['date'] data_df = data_df.astype('float') r = requests.get(zh_sina_a_stock_amount_url.format(symbol, symbol)) amount_data_json = demjson.decode(r.text[r.text.find('['):(r.text.rfind(']') + 1)]) amount_data_df = pd.DataFrame(amount_data_json) amount_data_df.index = pd.to_datetime(amount_data_df.date) del amount_data_df['date'] temp_df = pd.merge(data_df, amount_data_df, left_index=True, right_index=True, how='left') temp_df.fillna(method='ffill', inplace=True) temp_df = temp_df.astype(float) temp_df['amount'] = (temp_df['amount'] * 10000) temp_df['turnover'] = (temp_df['volume'] / temp_df['amount']) temp_df.columns = ['open', 'high', 'low', 'close', 'volume', 'outstanding_share', 'turnover'] if (adjust == ): return temp_df if (adjust == 'hfq'): res = requests.get(zh_sina_a_stock_hfq_url.format(symbol)) hfq_factor_df = pd.DataFrame(eval(res.text.split('=')[1].split('\n')[0])['data']) hfq_factor_df.columns = ['date', 'hfq_factor'] hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date) del hfq_factor_df['date'] temp_df = pd.merge(temp_df, hfq_factor_df, left_index=True, right_index=True, how='left') temp_df.fillna(method='ffill', inplace=True) temp_df = temp_df.astype(float) temp_df['open'] = (temp_df['open'] * temp_df['hfq_factor']) temp_df['high'] = (temp_df['high'] * temp_df['hfq_factor']) temp_df['close'] = (temp_df['close'] * temp_df['hfq_factor']) temp_df['low'] = (temp_df['low'] * temp_df['hfq_factor']) return temp_df.iloc[:, :(- 1)] if (adjust == 'qfq'): res = requests.get(zh_sina_a_stock_qfq_url.format(symbol)) qfq_factor_df = pd.DataFrame(eval(res.text.split('=')[1].split('\n')[0])['data']) qfq_factor_df.columns = ['date', 'qfq_factor'] qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date) del qfq_factor_df['date'] temp_df = pd.merge(temp_df, qfq_factor_df, left_index=True, right_index=True, how='left') temp_df.fillna(method='ffill', inplace=True) temp_df = temp_df.astype(float) temp_df['open'] = (temp_df['open'] / temp_df['qfq_factor']) temp_df['high'] = (temp_df['high'] / temp_df['qfq_factor']) temp_df['close'] = (temp_df['close'] / temp_df['qfq_factor']) temp_df['low'] = (temp_df['low'] / temp_df['qfq_factor']) return temp_df.iloc[:, :(- 1)] if (adjust == 'hfq-factor'): res = requests.get(zh_sina_a_stock_hfq_url.format(symbol)) hfq_factor_df = pd.DataFrame(eval(res.text.split('=')[1].split('\n')[0])['data']) hfq_factor_df.columns = ['date', 'hfq_factor'] hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date) del hfq_factor_df['date'] return hfq_factor_df if (adjust == 'qfq-factor'): res = requests.get(zh_sina_a_stock_qfq_url.format(symbol)) qfq_factor_df = pd.DataFrame(eval(res.text.split('=')[1].split('\n')[0])['data']) qfq_factor_df.columns = ['date', 'qfq_factor'] qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date) del qfq_factor_df['date'] return qfq_factor_df
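A usage sketch for the daily-history record above, again assuming an akshare version with this exact signature: an empty adjust string returns raw prices, "qfq"/"hfq" return forward-/backward-adjusted prices, and "qfq-factor"/"hfq-factor" return only the adjustment factors.

```python
import akshare as ak

raw_df = ak.stock_zh_a_daily(symbol="sh600000", adjust="")        # unadjusted OHLCV
hfq_df = ak.stock_zh_a_daily(symbol="sh600000", adjust="hfq")     # backward-adjusted prices
factor_df = ak.stock_zh_a_daily(symbol="sh600000", adjust="hfq-factor")

print(raw_df.tail(3))
print(factor_df.tail(3))
```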
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None) -> None: 'Old way of setting up HomematicIP Cloud lights.' pass
-2,221,725,257,671,890,000
Old way of setting up HomematicIP Cloud lights.
homeassistant/components/homematicip_cloud/light.py
async_setup_platform
0x00-0xFF/home-assistant
python
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None) -> None: pass
async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities) -> None: 'Set up the HomematicIP Cloud lights from a config entry.' hap = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]] entities = [] for device in hap.home.devices: if isinstance(device, AsyncBrandSwitchMeasuring): entities.append(HomematicipLightMeasuring(hap, device)) elif isinstance(device, AsyncBrandSwitchNotificationLight): entities.append(HomematicipLight(hap, device)) entities.append(HomematicipNotificationLight(hap, device, device.topLightChannelIndex)) entities.append(HomematicipNotificationLight(hap, device, device.bottomLightChannelIndex)) elif isinstance(device, (AsyncDimmer, AsyncPluggableDimmer, AsyncBrandDimmer, AsyncFullFlushDimmer)): entities.append(HomematicipDimmer(hap, device)) if entities: async_add_entities(entities)
481,496,749,042,861,000
Set up the HomematicIP Cloud lights from a config entry.
homeassistant/components/homematicip_cloud/light.py
async_setup_entry
0x00-0xFF/home-assistant
python
async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities) -> None: hap = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]] entities = [] for device in hap.home.devices: if isinstance(device, AsyncBrandSwitchMeasuring): entities.append(HomematicipLightMeasuring(hap, device)) elif isinstance(device, AsyncBrandSwitchNotificationLight): entities.append(HomematicipLight(hap, device)) entities.append(HomematicipNotificationLight(hap, device, device.topLightChannelIndex)) entities.append(HomematicipNotificationLight(hap, device, device.bottomLightChannelIndex)) elif isinstance(device, (AsyncDimmer, AsyncPluggableDimmer, AsyncBrandDimmer, AsyncFullFlushDimmer)): entities.append(HomematicipDimmer(hap, device)) if entities: async_add_entities(entities)
def _convert_color(color: tuple) -> RGBColorState: '\n Convert the given color to the reduced RGBColorState color.\n\n RGBColorStat contains only 8 colors including white and black,\n so a conversion is required.\n ' if (color is None): return RGBColorState.WHITE hue = int(color[0]) saturation = int(color[1]) if (saturation < 5): return RGBColorState.WHITE if (30 < hue <= 90): return RGBColorState.YELLOW if (90 < hue <= 160): return RGBColorState.GREEN if (150 < hue <= 210): return RGBColorState.TURQUOISE if (210 < hue <= 270): return RGBColorState.BLUE if (270 < hue <= 330): return RGBColorState.PURPLE return RGBColorState.RED
3,999,648,746,070,601,000
Convert the given color to the reduced RGBColorState color. RGBColorState contains only 8 colors including white and black, so a conversion is required.
homeassistant/components/homematicip_cloud/light.py
_convert_color
0x00-0xFF/home-assistant
python
def _convert_color(color: tuple) -> RGBColorState: '\n Convert the given color to the reduced RGBColorState color.\n\n RGBColorStat contains only 8 colors including white and black,\n so a conversion is required.\n ' if (color is None): return RGBColorState.WHITE hue = int(color[0]) saturation = int(color[1]) if (saturation < 5): return RGBColorState.WHITE if (30 < hue <= 90): return RGBColorState.YELLOW if (90 < hue <= 160): return RGBColorState.GREEN if (150 < hue <= 210): return RGBColorState.TURQUOISE if (210 < hue <= 270): return RGBColorState.BLUE if (270 < hue <= 330): return RGBColorState.PURPLE return RGBColorState.RED
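To show how the hue thresholds above partition the color wheel, here is a self-contained re-implementation that returns plain strings instead of RGBColorState members, so it runs without the homematicip package; the thresholds are copied from the record above.

```python
# Stand-in re-implementation of the hue bucketing above, for illustration only.
def convert_color(color):
    if color is None:
        return "WHITE"
    hue, saturation = int(color[0]), int(color[1])
    if saturation < 5:           # near-grey colors collapse to white
        return "WHITE"
    if 30 < hue <= 90:
        return "YELLOW"
    if 90 < hue <= 160:
        return "GREEN"
    if 150 < hue <= 210:         # the 150-160 overlap goes to GREEN, checked first
        return "TURQUOISE"
    if 210 < hue <= 270:
        return "BLUE"
    if 270 < hue <= 330:
        return "PURPLE"
    return "RED"                 # hues <= 30 or > 330 wrap around to red

assert convert_color(None) == "WHITE"
assert convert_color((240, 100)) == "BLUE"   # pure blue hue
assert convert_color((0, 3)) == "WHITE"      # low saturation
assert convert_color((350, 80)) == "RED"     # wrap-around near 360 degrees
```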
def __init__(self, hap: HomematicipHAP, device) -> None: 'Initialize the light device.' super().__init__(hap, device)
4,148,022,420,929,488,400
Initialize the light device.
homeassistant/components/homematicip_cloud/light.py
__init__
0x00-0xFF/home-assistant
python
def __init__(self, hap: HomematicipHAP, device) -> None: super().__init__(hap, device)
@property def is_on(self) -> bool: 'Return true if device is on.' return self._device.on
-2,283,132,927,271,933,000
Return true if device is on.
homeassistant/components/homematicip_cloud/light.py
is_on
0x00-0xFF/home-assistant
python
@property def is_on(self) -> bool: return self._device.on
async def async_turn_on(self, **kwargs) -> None: 'Turn the device on.' (await self._device.turn_on())
2,166,206,960,677,107,000
Turn the device on.
homeassistant/components/homematicip_cloud/light.py
async_turn_on
0x00-0xFF/home-assistant
python
async def async_turn_on(self, **kwargs) -> None: (await self._device.turn_on())
async def async_turn_off(self, **kwargs) -> None: 'Turn the device off.' (await self._device.turn_off())
155,385,039,799,394,780
Turn the device off.
homeassistant/components/homematicip_cloud/light.py
async_turn_off
0x00-0xFF/home-assistant
python
async def async_turn_off(self, **kwargs) -> None: (await self._device.turn_off())
@property def device_state_attributes(self) -> Dict[(str, Any)]: 'Return the state attributes of the generic device.' state_attr = super().device_state_attributes current_power_w = self._device.currentPowerConsumption if (current_power_w > 0.05): state_attr[ATTR_CURRENT_POWER_W] = round(current_power_w, 2) state_attr[ATTR_TODAY_ENERGY_KWH] = round(self._device.energyCounter, 2) return state_attr
-3,098,059,166,993,918,000
Return the state attributes of the generic device.
homeassistant/components/homematicip_cloud/light.py
device_state_attributes
0x00-0xFF/home-assistant
python
@property def device_state_attributes(self) -> Dict[(str, Any)]: state_attr = super().device_state_attributes current_power_w = self._device.currentPowerConsumption if (current_power_w > 0.05): state_attr[ATTR_CURRENT_POWER_W] = round(current_power_w, 2) state_attr[ATTR_TODAY_ENERGY_KWH] = round(self._device.energyCounter, 2) return state_attr
def __init__(self, hap: HomematicipHAP, device) -> None: 'Initialize the dimmer light device.' super().__init__(hap, device)
4,226,430,284,465,216,000
Initialize the dimmer light device.
homeassistant/components/homematicip_cloud/light.py
__init__
0x00-0xFF/home-assistant
python
def __init__(self, hap: HomematicipHAP, device) -> None: super().__init__(hap, device)
@property def is_on(self) -> bool: 'Return true if device is on.' return ((self._device.dimLevel is not None) and (self._device.dimLevel > 0.0))
-6,862,420,167,665,377,000
Return true if device is on.
homeassistant/components/homematicip_cloud/light.py
is_on
0x00-0xFF/home-assistant
python
@property def is_on(self) -> bool: return ((self._device.dimLevel is not None) and (self._device.dimLevel > 0.0))
@property def brightness(self) -> int: 'Return the brightness of this light between 0..255.' return int(((self._device.dimLevel or 0.0) * 255))
4,879,828,942,923,381,000
Return the brightness of this light between 0..255.
homeassistant/components/homematicip_cloud/light.py
brightness
0x00-0xFF/home-assistant
python
@property def brightness(self) -> int: return int(((self._device.dimLevel or 0.0) * 255))
@property def supported_features(self) -> int: 'Flag supported features.' return SUPPORT_BRIGHTNESS
-7,275,260,559,451,487,000
Flag supported features.
homeassistant/components/homematicip_cloud/light.py
supported_features
0x00-0xFF/home-assistant
python
@property def supported_features(self) -> int: return SUPPORT_BRIGHTNESS
async def async_turn_on(self, **kwargs) -> None: 'Turn the light on.' if (ATTR_BRIGHTNESS in kwargs): (await self._device.set_dim_level((kwargs[ATTR_BRIGHTNESS] / 255.0))) else: (await self._device.set_dim_level(1))
5,651,431,970,317,736,000
Turn the light on.
homeassistant/components/homematicip_cloud/light.py
async_turn_on
0x00-0xFF/home-assistant
python
async def async_turn_on(self, **kwargs) -> None: if (ATTR_BRIGHTNESS in kwargs): (await self._device.set_dim_level((kwargs[ATTR_BRIGHTNESS] / 255.0))) else: (await self._device.set_dim_level(1))
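The dimmer records above translate between Home Assistant's 0-255 brightness scale and the device's 0.0-1.0 dim level; a tiny standalone sketch of that scaling (not part of the integration itself) is:

```python
def ha_brightness_to_dim_level(brightness: int) -> float:
    return brightness / 255.0      # as in async_turn_on above

def dim_level_to_ha_brightness(dim_level: float) -> int:
    return int(dim_level * 255)    # as in the brightness property: int() truncates

assert ha_brightness_to_dim_level(255) == 1.0
assert dim_level_to_ha_brightness(1.0) == 255
assert dim_level_to_ha_brightness(0.5) == 127   # truncation, not rounding
```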
async def async_turn_off(self, **kwargs) -> None: 'Turn the light off.' (await self._device.set_dim_level(0))
904,547,101,540,762,200
Turn the light off.
homeassistant/components/homematicip_cloud/light.py
async_turn_off
0x00-0xFF/home-assistant
python
async def async_turn_off(self, **kwargs) -> None: (await self._device.set_dim_level(0))
def __init__(self, hap: HomematicipHAP, device, channel: int) -> None: 'Initialize the dimmer light device.' self.channel = channel if (self.channel == 2): super().__init__(hap, device, 'Top') else: super().__init__(hap, device, 'Bottom') self._color_switcher = {RGBColorState.WHITE: [0.0, 0.0], RGBColorState.RED: [0.0, 100.0], RGBColorState.YELLOW: [60.0, 100.0], RGBColorState.GREEN: [120.0, 100.0], RGBColorState.TURQUOISE: [180.0, 100.0], RGBColorState.BLUE: [240.0, 100.0], RGBColorState.PURPLE: [300.0, 100.0]}
-936,554,559,333,744,100
Initialize the dimmer light device.
homeassistant/components/homematicip_cloud/light.py
__init__
0x00-0xFF/home-assistant
python
def __init__(self, hap: HomematicipHAP, device, channel: int) -> None: self.channel = channel if (self.channel == 2): super().__init__(hap, device, 'Top') else: super().__init__(hap, device, 'Bottom') self._color_switcher = {RGBColorState.WHITE: [0.0, 0.0], RGBColorState.RED: [0.0, 100.0], RGBColorState.YELLOW: [60.0, 100.0], RGBColorState.GREEN: [120.0, 100.0], RGBColorState.TURQUOISE: [180.0, 100.0], RGBColorState.BLUE: [240.0, 100.0], RGBColorState.PURPLE: [300.0, 100.0]}
@property def is_on(self) -> bool: 'Return true if device is on.' return ((self._func_channel.dimLevel is not None) and (self._func_channel.dimLevel > 0.0))
-6,904,967,177,971,977,000
Return true if device is on.
homeassistant/components/homematicip_cloud/light.py
is_on
0x00-0xFF/home-assistant
python
@property def is_on(self) -> bool: return ((self._func_channel.dimLevel is not None) and (self._func_channel.dimLevel > 0.0))
@property def brightness(self) -> int: 'Return the brightness of this light between 0..255.' return int(((self._func_channel.dimLevel or 0.0) * 255))
-5,342,752,628,957,432,000
Return the brightness of this light between 0..255.
homeassistant/components/homematicip_cloud/light.py
brightness
0x00-0xFF/home-assistant
python
@property def brightness(self) -> int: return int(((self._func_channel.dimLevel or 0.0) * 255))
@property def hs_color(self) -> tuple: 'Return the hue and saturation color value [float, float].' simple_rgb_color = self._func_channel.simpleRGBColorState return self._color_switcher.get(simple_rgb_color, [0.0, 0.0])
6,329,802,148,743,832,000
Return the hue and saturation color value [float, float].
homeassistant/components/homematicip_cloud/light.py
hs_color
0x00-0xFF/home-assistant
python
@property def hs_color(self) -> tuple: simple_rgb_color = self._func_channel.simpleRGBColorState return self._color_switcher.get(simple_rgb_color, [0.0, 0.0])
@property def device_state_attributes(self) -> Dict[(str, Any)]: 'Return the state attributes of the generic device.' state_attr = super().device_state_attributes if self.is_on: state_attr[ATTR_COLOR_NAME] = self._func_channel.simpleRGBColorState return state_attr
-7,103,013,381,797,680,000
Return the state attributes of the generic device.
homeassistant/components/homematicip_cloud/light.py
device_state_attributes
0x00-0xFF/home-assistant
python
@property def device_state_attributes(self) -> Dict[(str, Any)]: state_attr = super().device_state_attributes if self.is_on: state_attr[ATTR_COLOR_NAME] = self._func_channel.simpleRGBColorState return state_attr
@property def name(self) -> str: 'Return the name of the generic device.' return f'{super().name} Notification'
9,124,239,975,491,450,000
Return the name of the generic device.
homeassistant/components/homematicip_cloud/light.py
name
0x00-0xFF/home-assistant
python
@property def name(self) -> str: return f'{super().name} Notification'
@property def supported_features(self) -> int: 'Flag supported features.' return (SUPPORT_BRIGHTNESS | SUPPORT_COLOR)
8,128,663,612,521,723,000
Flag supported features.
homeassistant/components/homematicip_cloud/light.py
supported_features
0x00-0xFF/home-assistant
python
@property def supported_features(self) -> int: return (SUPPORT_BRIGHTNESS | SUPPORT_COLOR)
@property def unique_id(self) -> str: 'Return a unique ID.' return f'{self.__class__.__name__}_{self.post}_{self._device.id}'
-2,511,959,092,211,002,000
Return a unique ID.
homeassistant/components/homematicip_cloud/light.py
unique_id
0x00-0xFF/home-assistant
python
@property def unique_id(self) -> str: return f'{self.__class__.__name__}_{self.post}_{self._device.id}'
async def async_turn_on(self, **kwargs) -> None: 'Turn the light on.' hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color) simple_rgb_color = _convert_color(hs_color) brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness) if (not kwargs): brightness = 255 brightness = max(10, brightness) dim_level = (brightness / 255.0) transition = kwargs.get(ATTR_TRANSITION, 0.5) (await self._device.set_rgb_dim_level_with_time(channelIndex=self.channel, rgb=simple_rgb_color, dimLevel=dim_level, onTime=0, rampTime=transition))
-8,156,840,869,278,348,000
Turn the light on.
homeassistant/components/homematicip_cloud/light.py
async_turn_on
0x00-0xFF/home-assistant
python
async def async_turn_on(self, **kwargs) -> None: hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color) simple_rgb_color = _convert_color(hs_color) brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness) if (not kwargs): brightness = 255 brightness = max(10, brightness) dim_level = (brightness / 255.0) transition = kwargs.get(ATTR_TRANSITION, 0.5) (await self._device.set_rgb_dim_level_with_time(channelIndex=self.channel, rgb=simple_rgb_color, dimLevel=dim_level, onTime=0, rampTime=transition))
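The async_turn_on above hands the requested hue/saturation to a module-level _convert_color() helper that is not part of this excerpt. As a hedged sketch only — the helper's name and signature are taken from the call site, while its body and the plain string color names are assumptions standing in for homematicip.base.enums.RGBColorState — a nearest-hue lookup against the same palette as _color_switcher could look roughly like this:

# Hypothetical sketch of the module-level _convert_color() helper referenced above.
# Plain strings stand in for RGBColorState members so the sketch runs without the
# homematicip package; the palette mirrors the _color_switcher table from __init__.
_SIMPLE_COLORS = {
    "WHITE": (0.0, 0.0),
    "RED": (0.0, 100.0),
    "YELLOW": (60.0, 100.0),
    "GREEN": (120.0, 100.0),
    "TURQUOISE": (180.0, 100.0),
    "BLUE": (240.0, 100.0),
    "PURPLE": (300.0, 100.0),
}


def _convert_color(hs_color) -> str:
    """Map an (hue, saturation) tuple onto the nearest simple RGB color name."""
    if hs_color is None:
        return "WHITE"
    hue, saturation = hs_color
    # Very low saturation is treated as white, regardless of hue.
    if saturation < 5.0:
        return "WHITE"

    def hue_distance(name):
        # Distance on the 360-degree hue circle to the palette entry's hue.
        ref_hue = _SIMPLE_COLORS[name][0]
        diff = abs(hue - ref_hue) % 360.0
        return min(diff, 360.0 - diff)

    return min((n for n in _SIMPLE_COLORS if n != "WHITE"), key=hue_distance)


assert _convert_color((350.0, 80.0)) == "RED"
assert _convert_color((100.0, 2.0)) == "WHITE"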
async def async_turn_off(self, **kwargs) -> None: 'Turn the light off.' simple_rgb_color = self._func_channel.simpleRGBColorState transition = kwargs.get(ATTR_TRANSITION, 0.5) (await self._device.set_rgb_dim_level_with_time(channelIndex=self.channel, rgb=simple_rgb_color, dimLevel=0.0, onTime=0, rampTime=transition))
-6,279,083,896,082,220,000
Turn the light off.
homeassistant/components/homematicip_cloud/light.py
async_turn_off
0x00-0xFF/home-assistant
python
async def async_turn_off(self, **kwargs) -> None: simple_rgb_color = self._func_channel.simpleRGBColorState transition = kwargs.get(ATTR_TRANSITION, 0.5) (await self._device.set_rgb_dim_level_with_time(channelIndex=self.channel, rgb=simple_rgb_color, dimLevel=0.0, onTime=0, rampTime=transition))
@property def exists(self): '\n checks if the db exists and logs it\n\n Returns\n -------\n bool\n bool if the file exists or not\n ' if os.path.isfile(self.db_loc): log.info('database at %s, does EXIST', self.db_loc) return True else: log.info('database at %s does NOT EXIST', self.db_loc) return False
1,824,685,546,315,325,000
checks if the db exists and logs it Returns ------- bool bool if the file exists or not
antipetros_discordbot/utility/gidsql/db_action_base.py
exists
official-antistasi-community/Antipetros_Discord_Bot
python
@property def exists(self): '\n checks if the db exists and logs it\n\n Returns\n -------\n bool\n bool if the file exists or not\n ' if os.path.isfile(self.db_loc): log.info('database at %s, does EXIST', self.db_loc) return True else: log.info('database at %s does NOT EXIST', self.db_loc) return False
def discounted_reverse_cumsum(data, gamma: float): '\n Use a linear filter to compute the reverse discounted cumulative sum.\n\n .. note::\n `scipy.signal.lfilter` assumes an initialization with 0 by default.\n\n :param data: input data with samples along the 0 axis (e.g. time series)\n :param gamma: discount factor\n :return: cumulative sums for every step\n ' return signal.lfilter([1], [1, (- gamma)], data[::(- 1)], axis=0)[::(- 1)]
-5,288,915,096,824,507,000
Use a linear filter to compute the reverse discounted cumulative sum. .. note:: `scipy.signal.lfilter` assumes an initialization with 0 by default. :param data: input data with samples along the 0 axis (e.g. time series) :param gamma: discount factor :return: cumulative sums for every step
mushroom_rl/core/parallelization_tools/step_sequence.py
discounted_reverse_cumsum
nifunk/GNNMushroomRL
python
def discounted_reverse_cumsum(data, gamma: float): '\n Use a linear filter to compute the reverse discounted cumulative sum.\n\n .. note::\n `scipy.signal.lfilter` assumes an initialization with 0 by default.\n\n :param data: input data with samples along the 0 axis (e.g. time series)\n :param gamma: discount factor\n :return: cumulative sums for every step\n ' return signal.lfilter([1], [1, (- gamma)], data[::(- 1)], axis=0)[::(- 1)]
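The discounted_reverse_cumsum record above uses the standard IIR-filter trick: filtering the reversed sequence with signal.lfilter([1], [1, -gamma], ...) realizes the backward recursion c_t = r_t + gamma * c_{t+1}. A minimal self-contained check of that equivalence (the reward values are illustrative, not taken from the source):

# Numeric check of the lfilter-based reverse discounted cumsum.
import numpy as np
from scipy import signal


def discounted_reverse_cumsum(data, gamma: float):
    # Same one-liner as in the record above.
    return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]


rewards = np.array([1.0, 0.0, 2.0, 3.0])
gamma = 0.9

# Explicit backward recursion: c_t = r_t + gamma * c_{t+1}, with c_T = r_T.
expected = np.zeros_like(rewards)
running = 0.0
for t in reversed(range(len(rewards))):
    running = rewards[t] + gamma * running
    expected[t] = running

assert np.allclose(discounted_reverse_cumsum(rewards, gamma), expected)
# expected == [4.807, 4.23, 4.7, 3.0]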
def discounted_value(rollout: StepSequence, gamma: float): '\n Compute the discounted state values for one rollout.\n\n :param rollout: input data\n :param gamma: temporal discount factor\n :return: state values for every time step in the rollout\n ' rewards = [step.reward for step in rollout] return discounted_reverse_cumsum(rewards, gamma)
3,926,704,981,727,231,500
Compute the discounted state values for one rollout. :param rollout: input data :param gamma: temporal discount factor :return: state values for every time step in the rollout
mushroom_rl/core/parallelization_tools/step_sequence.py
discounted_value
nifunk/GNNMushroomRL
python
def discounted_value(rollout: StepSequence, gamma: float): '\n Compute the discounted state values for one rollout.\n\n :param rollout: input data\n :param gamma: temporal discount factor\n :return: state values for every time step in the rollout\n ' rewards = [step.reward for step in rollout] return discounted_reverse_cumsum(rewards, gamma)
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str]='torch'): '\n Compute the discounted state values for multiple rollouts.\n\n :param rollouts: input data\n :param gamma: temporal discount factor\n :param data_format: data format of the returned data ('torch' or 'numpy')\n :return: state values for every time step in the rollouts (concatenated sequence across rollouts)\n ' if (data_format == 'torch'): return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts]) elif (data_format == 'numpy'): return np.array([discounted_value(ro, gamma) for ro in rollouts]) else: raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
645,887,553,901,988,900
Compute the discounted state values for multiple rollouts. :param rollouts: input data :param gamma: temporal discount factor :param data_format: data format of the returned data ('torch' or 'numpy') :return: state values for every time step in the rollouts (concatenated sequence across rollouts)
mushroom_rl/core/parallelization_tools/step_sequence.py
discounted_values
nifunk/GNNMushroomRL
python
def discounted_values(rollouts: Sequence[StepSequence], gamma: float, data_format: Optional[str]='torch'): '\n Compute the discounted state values for multiple rollouts.\n\n :param rollouts: input data\n :param gamma: temporal discount factor\n :param data_format: data format of the returned data ('torch' or 'numpy')\n :return: state values for every time step in the rollouts (concatenated sequence across rollouts)\n ' if (data_format == 'torch'): return to.cat([to.from_numpy(discounted_value(ro, gamma).copy()).to(to.get_default_dtype()) for ro in rollouts]) elif (data_format == 'numpy'): return np.array([discounted_value(ro, gamma) for ro in rollouts]) else: raise pyrado.ValueErr(given=data_format, eq_constraint="'torch' or 'numpy'")
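A hedged usage sketch for discounted_values with data_format='numpy'. Real StepSequence rollouts are replaced by lightweight stand-ins that only expose the .reward attribute read by discounted_value; the FakeStep tuple and the numbers are assumptions for illustration:

from collections import namedtuple

import numpy as np
from scipy import signal

# Stand-in for a rollout step; only .reward is needed by discounted_value().
FakeStep = namedtuple("FakeStep", ["reward"])


def discounted_reverse_cumsum(data, gamma):
    return signal.lfilter([1], [1, -gamma], data[::-1], axis=0)[::-1]


def discounted_value(rollout, gamma):
    return discounted_reverse_cumsum([s.reward for s in rollout], gamma)


rollouts = [
    [FakeStep(1.0), FakeStep(0.0), FakeStep(1.0)],
    [FakeStep(0.5), FakeStep(0.5), FakeStep(0.5)],
]
# The numpy branch stacks one row of state values per rollout of equal length.
values = np.array([discounted_value(ro, 0.9) for ro in rollouts])
print(values.shape)  # (2, 3)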
def gae_returns(rollout: StepSequence, gamma: float=0.99, lamb: float=0.95): "\n Compute returns using generalized advantage estimation.\n\n .. seealso::\n [1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using\n Generalized Advantage Estimation', ICLR 2016\n\n :param rollout: sequence of steps\n :param gamma: temporal discount factor\n :param lamb: discount factor\n :return: estimated advantage\n " def _next_value(step: Step) -> float: ' Helper to return `next_value = 0` for last step ' if step.done: return 0.0 return step.next_value deltas = [((step.reward + (gamma * _next_value(step))) - step.value) for step in rollout] cumsum = discounted_reverse_cumsum(deltas, (gamma * lamb)) return cumsum
4,842,705,186,051,923,000
Compute returns using generalized advantage estimation. .. seealso:: [1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using Generalized Advantage Estimation', ICLR 2016 :param rollout: sequence of steps :param gamma: temporal discount factor :param lamb: discount factor :return: estimated advantage
mushroom_rl/core/parallelization_tools/step_sequence.py
gae_returns
nifunk/GNNMushroomRL
python
def gae_returns(rollout: StepSequence, gamma: float=0.99, lamb: float=0.95): "\n Compute returns using generalized advantage estimation.\n\n .. seealso::\n [1] J. Schulman, P. Moritz, S. Levine, M. Jordan, P. Abbeel, 'High-Dimensional Continuous Control Using\n Generalized Advantage Estimation', ICLR 2016\n\n :param rollout: sequence of steps\n :param gamma: temporal discount factor\n :param lamb: discount factor\n :return: estimated advantage\n " def _next_value(step: Step) -> float: ' Helper to return `next_value = 0` for last step ' if step.done: return 0.0 return step.next_value deltas = [((step.reward + (gamma * _next_value(step))) - step.value) for step in rollout] cumsum = discounted_reverse_cumsum(deltas, (gamma * lamb)) return cumsum
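The gae_returns record computes the TD residuals delta_t = r_t + gamma * V(s_{t+1}) - V(s_t), forcing the bootstrap value to 0 at a terminal step, and then applies the reverse discounted cumsum with factor gamma * lamb. A self-contained numeric check against the naive double sum A_t = sum_k (gamma * lamb)^k * delta_{t+k}; the stand-in step tuple and the values are illustrative, not from the source:

from collections import namedtuple

import numpy as np
from scipy import signal

FakeStep = namedtuple("FakeStep", ["reward", "value", "next_value", "done"])

gamma, lamb = 0.99, 0.95
rollout = [
    FakeStep(1.0, 0.5, 0.6, False),
    FakeStep(0.0, 0.6, 0.4, False),
    FakeStep(2.0, 0.4, 0.0, True),   # terminal step: bootstrap value forced to 0
]

# TD residuals, as in gae_returns above.
deltas = np.array([
    s.reward + gamma * (0.0 if s.done else s.next_value) - s.value for s in rollout
])
# GAE via the reverse discounted cumsum with factor gamma * lamb.
gae = signal.lfilter([1], [1, -(gamma * lamb)], deltas[::-1])[::-1]

# Naive double loop for comparison.
naive = np.array([
    sum((gamma * lamb) ** (k - t) * deltas[k] for k in range(t, len(deltas)))
    for t in range(len(deltas))
])
assert np.allclose(gae, naive)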
def __init__(self, rollout, index): '\n Constructor\n\n :param rollout: `StepSequence` object to which this step belongs\n :param index: index of this step in the rollout\n ' super(Step, self).__init__(rollout.__dict__, index) self._rollout = rollout
-7,175,570,219,185,015,000
Constructor :param rollout: `StepSequence` object to which this step belongs :param index: index of this step in the rollout
mushroom_rl/core/parallelization_tools/step_sequence.py
__init__
nifunk/GNNMushroomRL
python
def __init__(self, rollout, index): '\n Constructor\n\n :param rollout: `StepSequence` object to which this step belongs\n :param index: index of this step in the rollout\n ' super(Step, self).__init__(rollout.__dict__, index) self._rollout = rollout
def __init__(self, *, complete: Optional[bool]=True, rollout_info=None, data_format: Optional[str]=None, done: Optional[np.ndarray]=None, continuous: Optional[bool]=True, rollout_bounds=None, rewards: Sequence, observations: Sequence, actions: Sequence, **data): "\n Constructor\n\n :param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch\n :param rollout_info: data staying constant through the whole episode\n :param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.\n Will use Tensors if any data argument does, else ndarrays\n :param done: boolean ndarray, specifying for each step whether it led to termination.\n The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.\n :param continuous: true if the steps form one continuous sequence.\n :param rewards: sequence of reward values, determines sequence length\n :param observations: sequence of observation values, the length must be `len(rewards) + 1`\n :param actions: sequence of action values, the length must be `len(rewards)`\n :param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`\n " self.length = len(rewards) if (self.length == 0): raise pyrado.ShapeErr(msg='StepSequence cannot be empty!') self.rollout_info = rollout_info self.continuous = continuous if (data_format is None): for value in data.values(): if (isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor))): data_format = 'torch' break else: data_format = 'numpy' self._data_format = data_format missing_fields = (StepSequence.required_fields - data.keys()) if missing_fields: raise ValueError(f'Missing required data fields: {missing_fields}') self._data_names = [] self.add_data('rewards', rewards) self.add_data('observations', observations) self.add_data('actions', actions) for (name, value) in data.items(): self.add_data(name, value) if (done is None): done = np.zeros(self.length, dtype=np.bool) if (complete and continuous): done[(- 1)] = True else: done = np.asarray(done, dtype=np.bool) assert (done.shape[0] == self.length) self.done = done if continuous: if (rollout_bounds is None): rollout_bounds = [0] rollout_bounds.extend((np.flatnonzero(done) + 1)) if (not done[(- 1)]): rollout_bounds.append(self.length) else: for i in range((len(rollout_bounds) - 1)): assert (rollout_bounds[i] < rollout_bounds[(i + 1)]) assert (rollout_bounds[0] == 0) assert (rollout_bounds[(- 1)] == self.length) self._rollout_bounds = np.array(rollout_bounds) else: self._rollout_bounds = None
-5,813,278,499,522,838,000
Constructor :param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch :param rollout_info: data staying constant through the whole episode :param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays. Will use Tensors if any data argument does, else ndarrays :param done: boolean ndarray, specifying for each step whether it led to termination. The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`. :param continuous: true if the steps form one continuous sequence. :param rewards: sequence of reward values, determines sequence length :param observations: sequence of observation values, the length must be `len(rewards) + 1` :param actions: sequence of action values, the length must be `len(rewards)` :param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`
mushroom_rl/core/parallelization_tools/step_sequence.py
__init__
nifunk/GNNMushroomRL
python
def __init__(self, *, complete: Optional[bool]=True, rollout_info=None, data_format: Optional[str]=None, done: Optional[np.ndarray]=None, continuous: Optional[bool]=True, rollout_bounds=None, rewards: Sequence, observations: Sequence, actions: Sequence, **data): "\n Constructor\n\n :param complete: `False` if the rollout is incomplete, i.e. as part of a mini-batch\n :param rollout_info: data staying constant through the whole episode\n :param data_format: 'torch' to use Tensors, 'numpy' to use ndarrays.\n Will use Tensors if any data argument does, else ndarrays\n :param done: boolean ndarray, specifying for each step whether it led to termination.\n The last step of continuous rollouts, i.e. not mini-batches, is done if `complete` is `True`.\n :param continuous: true if the steps form one continuous sequence.\n :param rewards: sequence of reward values, determines sequence length\n :param observations: sequence of observation values, the length must be `len(rewards) + 1`\n :param actions: sequence of action values, the length must be `len(rewards)`\n :param data: additional data lists, their length must be `len(rewards)` or `len(rewards) + 1`\n " self.length = len(rewards) if (self.length == 0): raise pyrado.ShapeErr(msg='StepSequence cannot be empty!') self.rollout_info = rollout_info self.continuous = continuous if (data_format is None): for value in data.values(): if (isinstance(value, to.Tensor) or (isinstance(value, list) and isinstance(value[0], to.Tensor))): data_format = 'torch' break else: data_format = 'numpy' self._data_format = data_format missing_fields = (StepSequence.required_fields - data.keys()) if missing_fields: raise ValueError(f'Missing required data fields: {missing_fields}') self._data_names = [] self.add_data('rewards', rewards) self.add_data('observations', observations) self.add_data('actions', actions) for (name, value) in data.items(): self.add_data(name, value) if (done is None): done = np.zeros(self.length, dtype=np.bool) if (complete and continuous): done[(- 1)] = True else: done = np.asarray(done, dtype=np.bool) assert (done.shape[0] == self.length) self.done = done if continuous: if (rollout_bounds is None): rollout_bounds = [0] rollout_bounds.extend((np.flatnonzero(done) + 1)) if (not done[(- 1)]): rollout_bounds.append(self.length) else: for i in range((len(rollout_bounds) - 1)): assert (rollout_bounds[i] < rollout_bounds[(i + 1)]) assert (rollout_bounds[0] == 0) assert (rollout_bounds[(- 1)] == self.length) self._rollout_bounds = np.array(rollout_bounds) else: self._rollout_bounds = None
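A hedged usage sketch for the StepSequence constructor above, following the length rules stated in its docstring (len(observations) == len(rewards) + 1, len(actions) == len(rewards)). It assumes the class imports from the path recorded for this entry and that StepSequence.required_fields does not demand additional fields; all values are made up:

import numpy as np

# Path taken from this record's metadata; availability is an assumption.
from mushroom_rl.core.parallelization_tools.step_sequence import StepSequence

ro = StepSequence(
    rewards=np.array([0.0, 1.0, 0.5]),   # length N = 3, determines the sequence length
    observations=np.zeros((4, 2)),       # length N + 1
    actions=np.zeros((3, 1)),            # length N
    values=np.array([0.1, 0.2, 0.3]),    # extra keyword -> additional data field
)
assert ro.length == 3
assert ro.done[-1]            # complete & continuous => last step marked done
assert ro.rollout_count == 1  # a single sub-rollout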
@property def data_format(self) -> str: " Get the name of data format ('torch' or 'numpy'). " return self._data_format
-3,737,586,975,972,980,700
Get the name of data format ('torch' or 'numpy').
mushroom_rl/core/parallelization_tools/step_sequence.py
data_format
nifunk/GNNMushroomRL
python
@property def data_format(self) -> str: " " return self._data_format
@property def data_names(self) -> Sequence[str]: ' Get the list of data attribute names. ' return self._data_names
7,636,364,652,369,576,000
Get the list of data attribute names.
mushroom_rl/core/parallelization_tools/step_sequence.py
data_names
nifunk/GNNMushroomRL
python
@property def data_names(self) -> Sequence[str]: ' ' return self._data_names
@property def rollout_count(self): ' Count the number of sub-rollouts inside this step sequence. ' if (not self.continuous): raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.') return (len(self._rollout_bounds) - 1)
-8,265,467,451,147,833,000
Count the number of sub-rollouts inside this step sequence.
mushroom_rl/core/parallelization_tools/step_sequence.py
rollout_count
nifunk/GNNMushroomRL
python
@property def rollout_count(self): ' ' if (not self.continuous): raise pyrado.ValueErr(msg='Sub-rollouts are only supported on continuous data.') return (len(self._rollout_bounds) - 1)
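The rollout_count property relies on the rollout bounds computed in __init__ from the done flags. The following standalone illustration reproduces that bookkeeping with plain numpy so the partitioning is easy to see; it is a sketch of the mechanism, not the class's actual code:

import numpy as np

# Three sub-rollouts: two finished (done=True) and a trailing, unfinished one.
done = np.array([False, False, True, False, True, False])
length = len(done)

bounds = [0]
bounds.extend(int(i) + 1 for i in np.flatnonzero(done))  # a sub-rollout ends after each done step
if not done[-1]:
    bounds.append(length)                                # trailing, unfinished sub-rollout
# bounds == [0, 3, 5, 6]

rollout_count = len(bounds) - 1                          # matches the property above
sub_rollout_slices = [slice(bounds[i], bounds[i + 1]) for i in range(rollout_count)]
assert rollout_count == 3
assert sub_rollout_slices[0] == slice(0, 3)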