Dataset columns (name, type, and observed size range):

  query             string    9 to 3.4k characters
  document          string    9 to 87.4k characters
  metadata          dict
  negatives         sequence  4 to 101 items
  negative_scores   sequence  4 to 101 items
  document_score    string    3 to 10 characters
  document_rank     string    102 distinct values
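Each row below pairs a natural-language query with a positive code snippet (document) and a list of distractor snippets (negatives) together with their similarity scores. As a minimal sketch of loading rows with this schema through the Hugging Face datasets library (the repository path and split name here are placeholders, not taken from this dump):

# Sketch only: "org/sqs-code-retrieval" is a placeholder path and "train" an assumed split.
from datasets import load_dataset

ds = load_dataset("org/sqs-code-retrieval", split="train")
row = ds[0]
print(row["query"])              # natural-language intent
print(row["document"])           # positive code snippet
print(len(row["negatives"]))     # 4 to 101 distractor snippets per row
print(row["negative_scores"][0], row["document_score"], row["document_rank"])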
Row 1:

query: Prepare a list of all records from the payload to send to SQS

document:
def _payload_messages(payloads):
    return [
        message
        for payload in payloads
        for message in payload.sqs_messages
    ]

metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }

negatives:
[ "def create_records(data: List[str]) -> List[dict]:\n records = []\n for d in data:\n records.append(create_record(d))\n\n logger.debug(f\"Formed Kinesis Records batch for PutRecords API: {records}\")\n return records", "def get_events_batch() -> PayloadDictList:\n ...", "def _prepare_payload(self):\n\n requests_json = []\n for qry in self._current_query.queries:\n request = qry.build_request()\n requests_json.append(self._serialize_request(request, len(requests_json)))\n\n return {\"requests\": requests_json}", "def _message_batches(cls, records):\n # Dump the records to a list of minimal json\n records_json = [\n json.dumps(record, separators=(',', ':')) for record in records\n ]\n\n current_batch_size = 0\n current_batch = []\n for record in records_json:\n line_len = len(record)\n # Check if the max size of the batch has been reached or if the current\n # record will exceed the max batch size and start a new batch\n if ((len(current_batch) == cls.MAX_BATCH_COUNT) or\n (current_batch_size + line_len > cls.MAX_BATCH_SIZE)):\n yield current_batch[:]\n current_batch_size = 0\n del current_batch[:]\n\n if line_len > cls.MAX_BATCH_SIZE:\n LOGGER.error('Record too large (%d) to send to SQS:\\n%s', line_len, record)\n cls._log_failed(1)\n continue\n\n # Add the record to the batch\n current_batch_size += line_len\n current_batch.append(record)\n\n # yield the result of the last batch (no need to copy via slicing)\n if current_batch:\n yield current_batch", "def send(self, payloads):\n records = self._payload_messages(payloads)\n\n # SQS only supports up to 10 messages so do the send in batches\n for message_batch in self._message_batches(records):\n response = self._send_messages(message_batch)\n self._finalize(response, message_batch)", "def generate_payload(self):\n payload = []\n for i in range(self.elements_per_update):\n payload.append({\n \"uuid\": str(uuid.uuid4()),\n \"symbol\": self.symbols[i % len(self.symbols)],\n \"price\": random.randint(self.min_price, self.max_price)\n })\n return payload", "def process_data():\n for message in get_messages_from_sqs():\n try:\n message_content = json.loads(message.body)\n input_file = urllib.unquote_plus(message_content\n ['Records'][0]['s3']['object']\n ['key']).encode('utf-8')\n s3.download_file(input_bucket_name, input_file, input_file)\n output_file = os.path.join(output_dir, os.path.splitext(input_file)[0]+'.csv')\n parse_patient_data(input_file, output_file)\n upload_data(output_file)\n cleanup_files(input_file, output_file)\n except:\n message.change_visibility(VisibilityTimeout=0)\n continue\n else:\n message.delete()", "def lambda_handler(event, context):\n print('Received request')\n item = None\n\n mysql_host = '54.212.197.235'\n mysql_username = 'rts'\n mysql_password = 'SamWangRamsay520-S'\n mysql_dbname = 'rts_kinesis'\n mysql_tablename = 'benchmark_kinesis'\n\n print('Start connection')\n conn = mysql.connector.connect(host=mysql_host,\n user=mysql_username,\n passwd=mysql_password,\n db=mysql_dbname )\n print('End connection')\n '''Write the message to the mysql database'''\n cur = conn.cursor()\n\n #dynamo_db = boto3.resource('dynamodb')\n #table = dynamo_db.Table('benchmark_kinesis')\n _mysql_buffer = [] #ad-hoc message buffering for mysql, equivalent to dynamodb batch-write behavior\n _mysql_buffer_limit = 25\n records = [record for record in event['Records']]\n new_records = deaggregate_records(records)\n #decoded_record_data = [record['kinesis']['data'] for record in new_records]\n #deserialized_data = [decoded_record for 
decoded_record in records]\n #for data in decoded_record_data:\n for record in new_records:\n\t#d_record = \"%.15g\" % record['kinesis']['partitionKey']\n\t#con_time = \"%.15g\" % time.time()\n\tcreation_time = Decimal(record['kinesis']['partitionKey'])\n\tconsumer_time = Decimal(time.time())\n\tvalue = record['kinesis']['data']\n\t#cur.execute('INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)', (creation_time, consumer_time, value))\n sql = 'INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)'\n _mysql_buffer.append((creation_time, consumer_time, value))\n if len(_mysql_buffer) > _mysql_buffer_limit:\n cur.executemany(sql, _mysql_buffer)\n _mysql_buffer = []\n\t# Add a processed time so we have a rough idea how far behind we are\n #item['processed'] = datetime.datetime.utcnow().isoformat()\n\n conn.commit()\n conn.close()\n cur.close()\n # Print the last item to make it easy to see how we're doing\n #print(json.dumps(item))\n print('Number of records: {}'.format(str(len(new_records))))", "def test_serialize(self):\r\n self.service.listBatchSubscribe(3, [{'EMAIL': '[email protected]'},\r\n {'EMAIL': '[email protected]'}])\r\n self.expect('POST', '/?method=listBatchSubscribe',\r\n (('id', '3'), ('batch[0][EMAIL]', '[email protected]'),\r\n ('batch[1][EMAIL]', '[email protected]'), ('output', 'json'),\r\n ('double_optin', 'true'), ('update_existing', 'false'),\r\n ('replace_interests', 'true'), ('apikey', 'apikey')))", "def batch_push(self, payloads):\n body = json.dumps(payloads)\n\n status, response = self._request('POST', body, BATCH_PUSH_URL,\n 'application/json')\n if not status == 200:\n raise AirshipFailure(status, response)", "def _finalize_payloads(tracking_id, client_id, payloads, **extra_data):\n extra_payload = {\n 'v': '1', 'tid': tracking_id, 'cid': client_id, 'aip': '1'}\n\n for payload in payloads:\n final_payload = dict(payload)\n final_payload.update(extra_payload)\n final_payload.update(extra_data)\n yield final_payload", "def add_records(self, data: dict, execution_context: dict):", "def lambda_handler(event, context):\n for item in json.loads(event[\"Records\"][0][\"body\"]):\n item[\"id\"] = uuid.uuid1().bytes\n for key, value in item.items():\n if key == \"id\":\n item[key] = {\"B\": bytes(value)}\n elif key == \"fiscal_year\":\n item[key] = {\"N\": str(value)}\n elif key == \"emissions_mtco2e\":\n item[key] = {\"N\": str(value)}\n elif key == \"consumption\":\n item[key] = {\"N\": str(value)}\n else:\n item[key] = {\"S\": str(value)}\n\n time.sleep(0.001)\n\n dynamo.put_item(TableName=\"Greenhouse_gas_emissions\", Item=dict(item))", "def toQueue(data):\n\n for host in settings.OTHER_HOSTS:\n settings.SENDER[host['id']].queue.put(dict(**data))", "def _send_batch(self):\n batch = RPLogBatch(self._batch)\n http_request = HttpRequest(\n self.session.post, self._log_endpoint, files=batch.payload,\n verify_ssl=self.verify_ssl)\n batch.http_request = http_request\n self._worker.send(batch)\n self._batch = []\n self._payload_size = helpers.TYPICAL_MULTIPART_FOOTER_LENGTH", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def _prepare_entity_upload(batch):\n payload = batch\n required_keys = [\"entities\"]\n\n if isinstance(batch, list):\n # It's a list, so we're assuming it's a list of entities\n # TODO Incorporate AtlasEntity\n payload = 
{\"entities\": batch}\n elif isinstance(batch, dict):\n current_keys = list(batch.keys())\n\n # Does the dict entity conform to the required pattern?\n if not any([req in current_keys for req in required_keys]):\n # Assuming this is a single entity\n # TODO Incorporate AtlasEntity\n payload = {\"entities\": [batch]}\n elif isinstance(batch, AtlasEntity):\n payload = {\"entities\": [batch.to_json()]}\n\n return payload", "def parse_records(raw_records: list) -> Generator[str, None, None]:\n for record in iter_deaggregate_records(raw_records):\n logger.debug(f\"Raw Kinesis record: {record}\")\n\n # Kinesis data is base64 encoded\n raw_data = base64.b64decode(record[\"kinesis\"][\"data\"])\n\n # decompress data if raw data is gzip (log data from CloudWatch Logs subscription filters comes gzipped)\n # gzip magic number: 0x1f 0x8b\n if raw_data[0] == 0x1F and raw_data[1] == 0x8B:\n raw_data = gzip.decompress(raw_data)\n\n data = raw_data.decode()\n payloads = normalize_cloudwatch_messages(data)\n logger.debug(f\"Normalized payloads: {payloads}\")\n\n for payload in payloads:\n yield payload", "def skills_to_payloads(self):\n if isinstance(self.skill_set, str):\n query = self.skill_set\n else:\n query = ' '.join(self.skill_set)\n payload = {'q': query,\n 'l': ''}\n self.payload = payload", "def handler(message):\n records = message.collect()\n list_collect = []\n for record in records:\n # Parse record\n read = json.loads(record[1].decode('utf-8'))\n list_collect.append((read['text'],read['tags']))\n data = (clean(read['text']),read['tags'])\n job = read['index']\n\n data = spark.createDataFrame([data],['cleaned_body','tags'])\n data = model.transform(data)\n d = data.select('features','tags').collect()\n\n keys = retrieve_keys(d[0]['tags'])\n # look to optimize slice length based on keys and throughput\n slice_length = max(len(keys)//10000,min(len(keys)//49,200))\n print(slice_length)\n keys_sliced = [','.join(keys[i:i+slice_length]) for i in range(0,len(keys),slice_length)]\n keys = spark.createDataFrame(keys_sliced, StringType())\n score_udf = udf(lambda r: get_features(r,d[0]['features']), FloatType())\n keys = keys.withColumn('features', score_udf(keys['value'])).collect()\n # need to get top result from zadd\n report_to_redis(job)\n return", "def lambda_handler(event, context):\n\n for record in event['Records']:\n\n bucket = record['s3']['bucket']['name']\n key = unquote_plus(record['s3']['object']['key'])\n\n str_value = s3_utils.download_file_as_string(bucket, key)\n data = json.loads(str_value)\n\n normalized_data = {\n 'meta': {\n 'table': 'parcels',\n 'column_names': [\n 'dataset',\n 'as_of',\n 'apn',\n 'objectid',\n 'city',\n 'x_coordinate',\n 'y_coordinate',\n 'area',\n 'length'\n ]\n }\n }\n\n rows = []\n\n dataset = data['meta']['dataset']\n as_of = data['meta']['datetime']\n\n for r in data['results']:\n\n attr = r['attributes']\n\n temp_dict = {\n 'dataset': dataset,\n 'as_of': as_of,\n 'apn': attr.get('APN_SPACE'),\n 'objectid': attr.get('OBJECTID'),\n 'city': attr.get('CITY'),\n 'x_coordinate': attr.get('X'),\n 'y_coordinate': attr.get('Y'),\n 'area': attr.get('Shape.STArea()'),\n 'length': attr.get('Shape.STLength()')\n }\n\n rows.append(temp_dict)\n\n normalized_data['rows'] = rows\n \n bucket = 'gis-data-normalized'\n file_name = 'normalized_' + key\n s3_utils.upload_json_as_file(normalized_data, bucket, file_name)", "def _build_payload(self, dps):\n dp_count = len(dps)\n payload = []\n start = 0\n delta = 100\n end = delta if dp_count > delta else dp_count\n try:\n for x 
in range(0, int(dp_count / delta) + 1):\n gauges = []\n counters = []\n for dp in dps[start: end]:\n dp.dimensions['metric_source'] = constants.METRIC_SOURCE\n payload_obj = {\n 'metric': dp.metric_name,\n 'value': dp.value,\n 'dimensions': dp.dimensions,\n 'timestamp': dp.timestamp\n }\n if dp.metric_type == 'gauge':\n gauges.append(payload_obj)\n elif dp.metric_type == 'counter':\n counters.append(payload_obj)\n payload.append({\n 'gauges': gauges,\n 'counters': counters\n })\n start = end\n end = end + delta\n if end > dp_count:\n end = dp_count\n except Exception as e:\n self._logger.error(\"Exception while building payload : {0}\".format(e))\n\n return payload", "def sqs_messages(queue: str) -> Generator[Dict[str, Any], None, None]:\n\n while True:\n response = get_client(\"sqs\").receive_message(QueueUrl=queue)\n if \"Messages\" not in response:\n break\n msg = json.loads(response[\"Messages\"][0][\"Body\"])\n records = json.loads(msg[\"Message\"])\n retd = {}\n retd[\"key\"] = records[\"Records\"][0][\"s3\"][\"object\"][\"key\"]\n retd[\"bucket\"] = records[\"Records\"][0][\"s3\"][\"bucket\"][\"name\"]\n retd[\"ReceiptHandle\"] = response[\"Messages\"][0][\"ReceiptHandle\"]\n yield retd", "def bulk_create(cls, raw_list):\n\t\tresource_list = [cls(**item) for item in raw_list]\n\t\tdb.session.add_all(resource_list)\n\t\tdb.session.commit()\n\n\t\treturn resource_list", "def get_all_records(self, data: dict, execution_context: dict):", "def _itemsToResponse(self, items):\n itemsToSend = []\n count = 0\n if items:\n size = 0\n while size < self._maxSize:\n try:\n item = items.pop()\n except (KeyError, IndexError):\n # We're done.\n # Note: because records is an iterable (list or set)\n # we're catching both KeyError and IndexError.\n break\n size = size + len(item)\n itemsToSend.append(item)\n count += 1\n\n response = {\"items\": itemsToSend}\n\n if items:\n response[\"continuation\"] = self._storeContinuation(items, \"items\")\n\n return response", "def batch(self, reqs):\n return self.connection.batch_(reqs)", "def create(self, record_count, start_id, lock=None):\n\n message_reference_beginning = self.create_random_string(10)\n\n records = []\n\n for i in range(start_id, record_count + start_id):\n record = self.__create_record(i, message_reference_beginning)\n records.append(record)\n\n return records", "def smap(self, records, task):\n for key, json in records:\n record = happy.json.decode(json)\n if record.has_key(self.joinkey1):\n record['__joinorder__'] = 1\n task.collect(record[self.joinkey1], 1, happy.json.encode(record))\n if record.has_key(self.joinkey2):\n record['__joinorder__'] = 2\n task.collect(record[self.joinkey2], 2, happy.json.encode(record))", "def build_payload(parameters):\n payload = []\n for param in parameters:\n \"\"\"\n Do not include path parameters in the payload\n \"\"\"\n if param['paramType'] != 'path':\n field_name = clean_param(param['name'])\n field = flatten_param(field_name)\n if is_array_param(param):\n field_name += '[]'\n payload.append(\"'{0}': {1},\".format(field_name, check_param(field)))\n return payload", "def ingest_many(self, data):\n raise NotImplementedError()", "def scrape_nf_detail(event):\n for record in event:\n body = json.loads(record.body)\n nf_ids = body.get(\"nf_ids\")\n resource_type = body.get(\"resource_type\")\n batch = body.get(\"batch\")\n scrape_type = body.get(\"scrape_type\")\n s3_data = {}\n for nf_id in nf_ids:\n scraper = UnogsScraper(nf_id)\n s3_data[nf_id] = scraper.get_data()\n s3 = S3Client()\n # todo decouple 
s3 key and notification msg\n identifier = f\"{scrape_type}_data-{batch}\"\n key = s3.build_key(resource_type, identifier)\n s3.put(key, s3_data)\n send_sqs_msg(\n queue_name=NF_ETL_QUEUE,\n body={\n \"s3_paths\": [key],\n \"resource_type\": resource_type,\n },\n )", "def shopify_create_product_data_queue(self, instance, template_ids=''):\n instance.connect_in_shopify()\n only_alphabets = []\n if template_ids:\n # Below one line is used to find only character values from template ids.\n only_alphabets = re.findall(\"[a-zA-Z]+\", template_ids)\n if len(template_ids.split(',')) <= 50:\n # template_ids is a list of all template ids which response did not given by\n # shopify.\n template_ids = list(set(re.findall(re.compile(r\"(\\d+)\"),template_ids)))\n results = shopify.Product().find(ids=','.join(template_ids))\n if results:\n _logger.info('Length of Shopify Products %s import from instance name: %s' % (\n len(results), instance.name))\n template_ids = [template_id.strip() for template_id in template_ids]\n # Below process to identify which id response did not give by Shopify.\n [template_ids.remove(str(result.id)) for result in results if str(result.id) in template_ids]\n else:\n raise Warning(_('Please enter the product template ids 50 or less'))\n else:\n if not instance.shopify_last_date_product_import:\n results = shopify.Product().find(status='active', limit=250)\n if len(results) >= 250:\n results = self.shopify_list_all_products(results)\n #results = self.get_product(results)\n else:\n # updated_at_min =datetime.strptime(pytz.utc.localize(instance.shopify_last_date_product_import).astimezone(\n # pytz.timezone(instance.shopify_store_time_zone[12:] or 'UTC')).strftime(\n # '%Y-%m-%d %H:%M:%S'), \"%Y-%m-%d %H:%M:%S\")\n results = shopify.Product().find(status='active',\n updated_at_min=instance.shopify_last_date_product_import,limit=250) # Change by bhavesh jadav 13/12/2019 limit=250\n if len(results) >= 250:\n results=self.shopify_list_all_products(results)\n if results:\n instance.shopify_last_date_product_import = datetime.now()\n without_gift_card_products = []\n for result in results:\n if result.to_dict().get('variants')[0].get('fulfillment_service') != 'gift_card':\n without_gift_card_products.append(result)\n results = without_gift_card_products\n if not results:\n _logger.info(\n 'No Products found to be imported from Shopify.')\n return False\n _logger.info('Total synced products - {}'.format(len(results)))\n count = 0\n one_time_create = True\n product_queue_list = []\n for result in results:\n if one_time_create:\n product_queue_id = self.shopify_create_product_queue(instance)\n product_queue_list.append(product_queue_id.id)\n _logger.info('Shopify Product Queue created. 
Queue name is {}'.format(\n product_queue_id.name))\n one_time_create = False\n if template_ids or only_alphabets:\n product_queue_id.message_post(body=\"%s products are not imported\" %(','.join(template_ids+only_alphabets)))\n self.shopify_create_product_data_queue_line(result, instance, product_queue_id)\n count = count + 1\n if count == 100:\n count = 0\n one_time_create = True\n return product_queue_list", "def send_to_all(apigatewaymanagementapi, connection_ids, data):\n dynamodb = boto3.client('dynamodb')\n for connection_id in connection_ids:\n try:\n apigatewaymanagementapi.post_to_connection(Data=data, ConnectionId=connection_id['connectionId']['S'])\n except Exception as e:\n print(e)\n # Remove connection id from DDB\n dynamodb.delete_item(\n TableName=os.environ.get('CONNECTION_TABLE_NAME'),\n Key={'connectionId': {'S': connection_id['connectionId']['S']}}\n )", "def _create_batch_list(self):\n return [None] * self.bufsize", "def create_inbound(self, keys):", "def _flush_enqueued(self):\n\n msgs = self.RPC.query.all()\n for msg in msgs:\n if msg.enqueued:\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n msg.delete()", "def on_incoming_records(self, connection: ConnectionInterface) -> None:\n self.generate_metadata()\n\n df = connection.record_containers[0].build_dataframe()\n df[\"optional_value\"] = self.workflow_config[\"Value\"]\n\n self.output_anchor.push_records(\n generate_records_from_df(df, self.output_anchor.record_info)\n )\n\n connection.clear_records()", "def _recordsToResponse(self, records):\n fieldsList = []\n count = 0\n if records:\n size = 0\n while size < self._maxSize:\n try:\n record = records.pop()\n except (KeyError, IndexError):\n # We're done.\n # Note: because records is an iterable (list or set)\n # we're catching both KeyError and IndexError.\n break\n pickled = pickle.dumps(self.recordToDict(record))\n size = size + len(pickled)\n fieldsList.append(pickled)\n count += 1\n\n response = {\"items\": fieldsList}\n\n if records:\n response[\"continuation\"] = self._storeContinuation(records, \"records\")\n\n return response", "def add_elasticsearch_records(self, data_list):\n actions = [self.create_data_record(data_dict) for data_dict in data_list]\n self.actions_buffer.extend(actions)", "def write_multiple_records(self, table, records):\n payload = None\n response = []\n if not isinstance(table, str):\n raise ValueError(\"table must be a str.\")\n\n if isinstance(records, dict):\n # Single record\n response.append(self.insert_or_update(table, records))\n\n if isinstance(records, list):\n # Multiple records\n for record in records:\n response.append(self.insert_or_update(table, record))\n\n return response", "def populate_notifications_sql(request):\n input_json, output_json = request, {}\n try:\n for i in input_json['notification_id_list']:\n populate_notification_params = dict(zip(['super_notification_id', 'notification_status',\n 'profile_id', 'added_by', 'last_modified_by'],\n [i, 1, input_json['profile_id'],\n input_json['profile_id'], input_json['profile_id']]))\n serializer_var = serializer_save(IndividualNotificationsSerializer, populate_notification_params)\n output_json = dict(zip(['Status', 'Message', 'Payload'],\n ['Success', 'Notifications was populated successfully', None]))\n return 
output_json\n except Exception as ex:\n output_json = dict(\n zip(['Status', 'Message', 'Payload'], ['Failure', f'Unable to create Notification.{ex}', None]))\n return output_json", "def publish_list(self, messages: list) -> None:", "def publish_list(self, messages: list) -> None:", "def __prepare_smarty_request_list(self, address_list):\n single_request_batch_partition = Batch()\n addresses_per_request = 0\n request_list = []\n for address in address_list:\n if addresses_per_request == SmartyAddressService.MAX_ADDRESSES_PER_REQUEST:\n request_list.append(single_request_batch_partition)\n single_request_batch_partition = Batch()\n addresses_per_request = 0\n single_request_batch_partition.add(Lookup(address.input_string))\n self.__total_addresses_in_request_list += 1\n addresses_per_request+=1\n \n if addresses_per_request>0:\n request_list.append(single_request_batch_partition)\n return request_list", "def generate(entities_to_proceed):\n yield \"[\"\n for index, entity in enumerate(entities_to_proceed):\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.debug(\"processing entity : %s\", entity)\n else:\n logging.info(\"processing entity : %s\", entity.get(GUID_STR))\n\n if index > 0:\n yield \",\"\n booking_guid = entity.get(GUID_STR)\n iata = entity.get(IATA_STR)\n api_key = resolve_api_key(API_KEYS, iata)\n\n if not isinstance(api_key, str):\n entity[PROP] = []\n yield json.dumps(entity)\n continue\n url = URL_TEMPLATE.render(entity) + booking_guid + \"?api_key=\" + api_key\n if METHOD == \"get\":\n entity[PROP] = requests.get(url, headers=HEADERS).json()\n else:\n entity[PROP] = requests.request(METHOD, url, data=entity.get(\"payload\"),\n headers=HEADERS).json()\n yield json.dumps(entity)\n yield \"]\"", "def _PrepareRequestData(self, events, tmp_dir):\n request_body = []\n att_seq = 0\n for event in events:\n for att_id, att_path in event.attachments.items():\n att_newname = '%s_%03d' % (os.path.basename(att_path), att_seq)\n att_seq += 1\n if self._gpg:\n att_path = self._EncryptFile(att_path, tmp_dir)\n request_body.append((att_newname, open(att_path, 'rb')))\n event.attachments[att_id] = att_newname\n serialized_event = datatypes.Event.Serialize(event)\n if self._gpg:\n serialized_event = self._EncryptData(serialized_event)\n request_body.append(('event', serialized_event))\n return request_body", "def get_events_batch(self) -> PayloadDictList:\n batch = self.event_buffer\n self.event_buffer = []\n return batch", "def get_PUBMED_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n batch = Report.objects.filter(name=use_obj,institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def create_complete_payload(data,query_result_record_id): # pylint: disable=too-many-locals\r\n output_json = {}\r\n if len(query_result_record_id['ResultSet']['Rows']) > 1:\r\n print(\"inside if\")\r\n columns_list_dict = query_result_record_id['ResultSet']['Rows'][0]\r\n values_list_dict = query_result_record_id['ResultSet']['Rows'][1]\r\n column_list = columns_list_dict[\"Data\"]\r\n values_list = values_list_dict[\"Data\"]\r\n actual_dict={}\r\n for index,list_value in enumerate(zip(column_list[1:-1], 
values_list[1:-1])):#pylint: disable=unused-variable\r\n key = list_value[0][\"VarCharValue\"]\r\n value=\"\"\r\n if len(list_value[1]) != 0:\r\n value = list_value[1][\"VarCharValue\"]\r\n else:\r\n continue\r\n temp_dict = {key:value}\r\n actual_dict.update(temp_dict)\r\n #print(actual_dict)\r\n new_dict = {k.lower(): v for k, v in data.items()}\r\n print(new_dict)\r\n for data_keys in actual_dict:\r\n if data_keys.lower() not in new_dict:\r\n #print(data_keys.lower())\r\n temp = {data_keys:actual_dict[data_keys]}\r\n new_dict.update(temp)\r\n #print(data)\r\n #output_json = json.dumps(new_dict)\r\n output_json = new_dict\r\n else:\r\n print(\"inside else\")\r\n output_json = {}\r\n return output_json", "def put_records_batch(\n client, stream_name: str, records: list, max_retries: int, max_batch_size: int = 500\n) -> None or List[dict]:\n\n retry_list = []\n\n for batch_index, batch in enumerate(split_list(records, max_batch_size)):\n records_to_send = create_records(batch)\n retries_left = max_retries\n\n while len(records_to_send) > 0:\n kinesis_response = client.put_records(\n Records=records_to_send, StreamName=stream_name,\n )\n\n if kinesis_response[\"FailedRecordCount\"] == 0:\n break\n else:\n index: int\n record: dict\n for index, record in enumerate(kinesis_response[\"Records\"]):\n if \"ErrorCode\" in record:\n # original records list and response record list have same order, guaranteed:\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kinesis.html#Kinesis.Client.put_records\n logger.error(\n f\"A record failed with error: {record['ErrorCode']} {record['ErrorMessage']}\"\n )\n retry_list.append(records_to_send[index])\n\n records_to_send = retry_list\n retry_list = []\n\n if retries_left == 0:\n error_msg = (\n f\"No retries left, giving up on records: {records_to_send}\"\n )\n logger.error(error_msg)\n return records_to_send\n\n retries_left -= 1\n\n logger.info(f\"Waiting 500 ms before retrying\")\n time.sleep(0.5)\n\n return None", "def upload_table(tablebogus, n, append, bogus):\n # bucket_name = \"your-bucket-name\"\n # source_file_name = \"local/path/to/file\"\n # destination_blob_name = \"storage-object-name\"\n fake = Faker()\n storage_client = storage.Client()\n bucket = storage_client.bucket(\"brick-layer-testing\")\n outstring = \"\"\n for i in range(0, int(n)):\n populated = {}\n for record in tablebogus:\n # print(ast.literal_eval(record[\"args\"]))\n if record[\"dist\"] == \"fk\":\n record[\"fk_type\"] = getffktype(record[\"from\"].split(\".\")[0], record[\"from\"].split(\".\")[1])\n record[\"bq_type\"] = getfbqtype(record[\"from\"].split(\".\")[0], record[\"from\"].split(\".\")[1])\n record[\"fk_args\"] = getfargs(record[\"from\"].split(\".\")[0], record[\"from\"].split(\".\")[1])\n if (record[\"fk_type\"] == \"past_datetime\"):\n populated[record[\"field_name\"]] = getattr(fake, record[\"fk_type\"])(**ast.literal_eval(json.dumps(record[\"fk_args\"]))).isoformat()\n elif (record[\"fk_type\"] == \"random_element\"):\n populated[record[\"field_name\"]] = random.choice(ast.literal_eval(json.dumps(record[\"fk_args\"][\"elements\"][1:-1].split(\", \"))))\n elif (record[\"fk_type\"] == \"paragraph\"):\n record[\"fk_args\"] = {\n \"nb_sentences\": 3,\n \"variable_nb_sentences\": True\n }\n # print(json.dumps(record[\"fk_args\"]).replace(\"'\",'\"').replace(\"'\",'\"').replace(\"'\",'\"').replace(\"'\",'\"').replace(\"true\", \"True\"))\n populated[record[\"field_name\"]] = getattr(fake, 
record[\"fk_type\"])(**ast.literal_eval(json.dumps(record[\"fk_args\"]).replace(\"'\",'\"').replace(\"'\",'\"').replace(\"'\",'\"').replace(\"'\",'\"').replace(\"true\", \"True\")))\n # print(ast.literal_eval(record[\"args\"])[\"elements\"][1:-1].split(\", \"))\n # print(populated[record[\"name\"]])\n elif (record[\"fk_type\"] == \"longitude\" or record[\"fk_type\"] == \"latitude\"):\n populated[record[\"field_name\"]] = float(getattr(fake, record[\"fk_type\"])(**ast.literal_eval(json.dumps(record[\"fk_args\"]))))\n else:\n # print(record)\n populated[record[\"field_name\"]] = getattr(fake, record[\"fk_type\"])(**ast.literal_eval(json.dumps(record[\"fk_args\"])))\n outstring += json.dumps(populated) + \"\\n\"\n\n # purg_filename = tablebogus[0]['table_name']+datetime.now().isoformat()+\".txt\"\n # file = open(purg_filename, \"w\")\n # file.write(outstring)\n blob = bucket.blob(tablebogus[0]['table_name'])\n\n blob.upload_from_string(outstring)\n # os.remove(purg_filename)\n\n print(\n ''' \n -> {} uploaded to The Cloud.\n\n '''.format(\n tablebogus[0]['dataset']+\".\"+tablebogus[0]['table_name']\n )\n )\n\n from google.cloud import bigquery\n client = bigquery.Client()\n dataset_ref = tablebogus[0]['dataset']\n\n ids = []\n for i in list(client.list_datasets()):\n ids.append(i.dataset_id)\n if dataset_ref in ids:\n dataset_ref = client.dataset(dataset_ref)\n else:\n dataset_ref = client.create_dataset(dataset_ref) # Make an API request.\n # print(\"Created dataset {}.{}\".format(client.project, dataset.dataset_id))\n print(\" -> This is where I am :: \" + tablebogus[0]['table_name'])\n # dataset_ref = client.dataset(dataset_id)\n job_config = bigquery.LoadJobConfig()\n sch_lst = []\n for field in tablebogus:\n sch_lst.append(bigquery.SchemaField(field['field_name'], field['bq_type']))\n if append:\n job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND\n else:\n job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE\n job_config.schema = sch_lst\n job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON\n uri = \"gs://brick-layer-testing/\"+tablebogus[0]['table_name']\n\n load_job = client.load_table_from_uri(\n uri,\n dataset_ref.table(tablebogus[0]['table_name']),\n location=\"US\", # Location must match that of the destination dataset.\n job_config=job_config,\n ) # API request\n print(\" -> Starting to move shit over to BQ {}\".format(load_job.job_id))\n\n load_job.result() # Waits for table load to complete.\n # print(\"Job finished.\")\n\n destination_table = client.get_table(dataset_ref.table(tablebogus[0]['table_name']))\n print(\" -> There are {} bogus rows.\".format(destination_table.num_rows))\n\n blob = bucket.blob(tablebogus[0]['table_name'])\n blob.delete()\n print(\" -> Tidying up...\")\n # extract schema.json\n # make fakeout.json\n # upload to cloud storage\n # move from cloud storage to bq", "def transform_fair_data(raw_fair_data: List[dict]) -> List[EngagementRecord]:\n return [_transform_data_row(row) for row in raw_fair_data]", "async def get_all_record():\n # X_new = item.to_df()\n # item_str = item.to_string()\n # project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_all_records()\n return return_json", "def normalize_cloudwatch_messages(payload: str) -> List[str]:\n # Normalize messages from CloudWatch (subscription filters) and pass through anything else\n # https://docs.aws.amazon.com/ja_jp/AmazonCloudWatch/latest/logs/SubscriptionFilters.html\n\n logger.debug(f\"Normalizer 
input: {payload}\")\n\n if len(payload) < 1:\n logger.error(f\"Got weird record, skipping: {payload}\")\n return []\n\n # check if data is JSON and parse\n try:\n payload_json = loads(payload)\n if type(payload_json) is not dict:\n logger.error(f\"Top-level JSON data is not an object, giving up: {payload}\")\n return []\n\n except JSONDecodeError:\n return [payload]\n\n if \"messageType\" not in payload_json:\n return [payload]\n\n # messageType is present in payload, must be coming from CloudWatch\n logger.debug(\n f\"Got payload looking like CloudWatch Logs via subscription filters: {payload_json}\"\n )\n\n return extract_data_from_json_cwl_message(payload_json)", "def process(self, send_now=False):\n\t\tfinal_recipients = self.final_recipients()\n\t\tqueue_separately = (final_recipients and self.queue_separately) or len(final_recipients) > 20\n\t\tif not (final_recipients + self.final_cc()):\n\t\t\treturn []\n\n\t\tqueue_data = self.as_dict(include_recipients=False)\n\t\tif not queue_data:\n\t\t\treturn []\n\n\t\tif not queue_separately:\n\t\t\trecipients = list(set(final_recipients + self.final_cc() + self.bcc))\n\t\t\tq = EmailQueue.new({**queue_data, **{\"recipients\": recipients}}, ignore_permissions=True)\n\t\t\tsend_now and q.send()\n\t\telse:\n\t\t\tif send_now and len(final_recipients) >= 1000:\n\t\t\t\t# force queueing if there are too many recipients to avoid timeouts\n\t\t\t\tsend_now = False\n\t\t\tfor recipients in frappe.utils.create_batch(final_recipients, 1000):\n\t\t\t\tfrappe.enqueue(\n\t\t\t\t\tself.send_emails,\n\t\t\t\t\tqueue_data=queue_data,\n\t\t\t\t\tfinal_recipients=recipients,\n\t\t\t\t\tjob_name=frappe.utils.get_job_name(\n\t\t\t\t\t\t\"send_bulk_emails_for\", self.reference_doctype, self.reference_name\n\t\t\t\t\t),\n\t\t\t\t\tnow=frappe.flags.in_test or send_now,\n\t\t\t\t\tqueue=\"long\",\n\t\t\t\t)", "def _make_request(self, method, body):\n return self.client.insert_rows_json(self.table_ref, [row['json'] for row in body['rows']])", "def fill_item_list(self):\n return_list = []\n with Transaction().start(DBNAME, 1):\n self.productlist = self.Product.search([('description', '=', 'Stock'), ('type', '=', 'goods')])\n for i in self.productlist:\n return_list.append(i.template.name)\n return return_list", "def _get_items_as_rows(items_to_add: Sequence[Any], batch_number: int,\n result: str, operation: str,\n timestamp: str) -> List[Dict[str, Any]]:\n rows = []\n for item in items_to_add:\n rows.append({\n _ITEM_RESULTS_TABLE_COLUMN_ITEM_ID:\n item.item_id if isinstance(item, failure.Failure) else item,\n _ITEM_RESULTS_TABLE_COLUMN_BATCH_ID:\n batch_number,\n _ITEM_RESULTS_TABLE_COLUMN_OPERATION:\n operation,\n _ITEM_RESULTS_TABLE_COLUMN_RESULT:\n result,\n _ITEM_RESULTS_TABLE_COLUMN_ERROR:\n item.error_msg if isinstance(item, failure.Failure) else '',\n _ITEM_RESULTS_TABLE_COLUMN_TIMESTAMP:\n timestamp,\n })\n return rows", "def map(self, records, task):\n for key, json in records:\n record = happy.json.decode(json)\n if happy.flow.isIterable(self.aggkey):\n outkey = ''\n for ak in self.aggkey:\n if record.has_key(ak):\n outkey = outkey + record[ak] + \":\"\n task.collect(outkey, json) \n elif record.has_key(self.aggkey):\n if (record[self.aggkey]):\n task.collect(record[self.aggkey], json)", "def record(records: list,\n method=\"\",\n method_uuid=\"\",\n indicator=\"\",\n indicator_uuid=\"\",\n indicator_unit=\"\",\n flow=\"\",\n flow_uuid=\"\",\n flow_category=\"\",\n flow_unit=\"\",\n cas_number=\"\",\n location=\"\",\n location_uuid=\"\",\n factor=0.0) -> 
list:\n records.append([\n method,\n method_uuid,\n indicator,\n indicator_uuid,\n indicator_unit,\n flow,\n flow_uuid,\n flow_category,\n flow_unit,\n cas_number,\n location,\n location_uuid,\n factor])\n return records", "def store_documents(self, documents: list):\n results = app.Results()\n entries = [\n { \n 'Id': str(uuid1()),\n 'MessageBody': json.dumps(doc)\n }\n for doc in documents\n ]\n ids = [ e['Id'] for e in entries ]\n self.Logger.info(f'Store {ids} in sqs')\n self.Logger.debug(f'Saving {entries} in sqs {self.sqs_queue_url}')\n self.sqs_client.send_message_batch(\n QueueUrl=self.sqs_queue_url,\n Entries=entries\n )\n results.ActionStatus = 0\n results.Results = ids\n return results", "def bulkupload_entitie_records(self, entity_upload_parameters, tmp_file, progress=None):\n records = self.service_client.factory.create(\"ns2:ArrayOfstring\")\n tmp_csv_file = io.open(tmp_file, encoding='utf-8-sig')\n\n records.string = [x.strip() for x in tmp_csv_file.readlines()]\n \n try:\n #print(self.service_client)\n response = self.service_client.UploadEntityRecords(\n AccountId=self._authorization_data.account_id,\n EntityRecords=records,\n ResponseMode=entity_upload_parameters.response_mode\n )\n if self.need_to_fall_back_to_async(response):\n headers = self.service_client.get_response_header()\n operation = BulkUploadOperation(\n request_id=response.RequestId,\n authorization_data=self._authorization_data,\n poll_interval_in_milliseconds=self._poll_interval_in_milliseconds,\n environment=self._environment,\n tracking_id=headers['TrackingId'] if 'TrackingId' in headers else None,\n **self.suds_options\n )\n file_path = self.download_upload_result(operation, entity_upload_parameters, progress)\n return self.read_result_from_bulk_file(file_path)\n else:\n return self.read_bulkupsert_response(response) \n except Exception as ex:\n if 'OperationNotSupported' == operation_errorcode_of_exception(ex):\n return self.bulkupload_entities(entity_upload_parameters, tmp_file, progress)\n else:\n raise ex", "def push_bq_records(client, dataset, table, records, sleep = 300, max_batch = 100, print_failed_records = True, retry_on_fail = True):\n if len(records) == 0:\n return\n if len(records) > max_batch:\n split = len(records) // 2\n push_bq_records(client, dataset, table, records[0:split], sleep, max_batch)\n push_bq_records(client, dataset, table, records[split:], sleep, max_batch)\n else:\n try:\n succ = client.push_rows(dataset, table, records)\n if not succ:\n if retry_on_fail:\n print(\"Push to BigQuery table was unsuccessful. Waiting %s seconds and trying one more time.\" % sleep)\n time.sleep(sleep)\n push_bq_records(client, dataset, table, records, sleep, max_batch, print_failed_records, False)\n else:\n if print_failed_records:\n print(\"\\nRecord 0:\")\n print(records[0])\n if len(records) > 1:\n print(\"\\nRecord %s:\" % (len(records) - 1))\n print(records[len(records)-1])\n raise RuntimeError('Push to BigQuery table was unsuccessful. See above for sample record(s) if requested.')\n except BrokenPipeError:\n print(\"BrokenPipeError while pushing %s records. 
Waiting %s seconds and trying again.\" % (len(records), sleep)) \n time.sleep(sleep)\n push_bq_records(client, dataset, table, records, sleep, max_batch)", "def _send_data(self):\n \n # Do not send more than 100 datasets each time (totally arbitrary)\n MAX_DATA_SETS_PER_POST = 100\n data_to_send = self._data_buffer[0:MAX_DATA_SETS_PER_POST]\n data_to_keep = self._data_buffer[MAX_DATA_SETS_PER_POST:]\n\n # Prepare data string with the values in data buffer\n now = time.time()\n data_string = '[' \n for (timestamp, data) in data_to_send:\n data_string += '['\n data_string += str(round(timestamp-now,2))\n for sample in data:\n data_string += ','\n data_string += str(sample)\n data_string += '],'\n # Remove trailing comma and close bracket\n data_string = data_string[0:-1]+']'\n\n self._log.debug(\"Data string: \" + data_string)\n \n # Prepare URL string of the form\n # 'http://domain.tld/emoncms/input/bulk.json?apikey=\n # 12345&data=[[-10,10,1806],[-5,10,1806],[0,10,1806]]'\n url_string = self._settings['protocol'] + self._settings['domain'] + \\\n self._settings['path'] + \"/input/bulk_json?apikey=\" + \\\n self._settings['apikey'] + \"&data=\" + data_string\n self._log.debug(\"URL string: \" + url_string)\n\n # Send data to server\n self._log.info(\"Sending to \" + \n self._settings['domain'] + self._settings['path'])\n try:\n result = urllib2.urlopen(url_string, timeout=60)\n except urllib2.HTTPError as e:\n self._log.warning(\"Couldn't send to server, HTTPError: \" + \n str(e.code))\n except urllib2.URLError as e:\n self._log.warning(\"Couldn't send to server, URLError: \" + \n str(e.reason))\n except httplib.HTTPException:\n self._log.warning(\"Couldn't send to server, HTTPException\")\n except Exception:\n import traceback\n self._log.warning(\"Couldn't send to server, Exception: \" + \n traceback.format_exc())\n else:\n if (result.readline() == 'ok'):\n self._log.debug(\"Send ok\")\n # Send ok -> empty buffer\n self._data_buffer = data_to_keep\n return True\n else:\n self._log.warning(\"Send failure\")", "def create_batch(client, generator: DataGenerator):\n try:\n event_data_batch = client.create_batch()\n for device in generator.devices:\n # event_data_batch.add(EventData(gen.generate_payload(device)))\n event_data_batch.add(EventData(generator.generate_payload(device)))\n return event_data_batch\n except Exception as e:\n print(str(e))", "def produce_query_batches(self):\n pass", "def SendPayload(s, payload):\n for line in payload:\n s.send(line)\n RecvAndSleep(s)", "def prepare_data(qids_raw, conditions_raw, outputs_raw):\n\n qids = []\n conditions = []\n outputs = []\n dictionaries_standardization = []\n for qid_raw, condition_raw, output_raw in zip(qids_raw, conditions_raw, outputs_raw):\n qid, condition, output, dictionary = preprocess_sample(qid_raw, condition_raw, output_raw)\n qids.append(qid)\n conditions.append(condition)\n outputs.append(output)\n dictionaries_standardization.append(dictionary)\n\n return qids, conditions, outputs, dictionaries_standardization", "def generate_data(self):\n print(\"generate_data - init\")\n with open(self.input_file, \"r\") as f:\n\n # read JSON data from input file\n data = json.loads(f.read())\n\n for idx, row in enumerate(data): \n # serialize Python dict to string\n msg = self.serialize_json(row)\n #print(f\"Linha: {row}\")\n self.send(self.topic, msg)\n self.flush()\n #print(\"Sleeping\")\n time.sleep(1)", "def post_process(data):\n for record in data[\"Records\"]:\n for name, value in record.items():\n if type(value) == list:\n 
newlist = []\n for entry in value:\n newlist.append(post_process_pair(name, entry))\n record[name] = newlist\n else:\n record[name] = post_process_pair(name, value)", "def handle_offer(event, context):\n for record in event[\"Records\"]:\n print(record)\n update = record.get('dynamodb', {}).get('NewImage')\n if not update:\n print(\"No updates: {}\".format(record))\n return\n offer = translate_dynamo(update)\n\n print(\"Found offer: {}\".format(offer))\n offer_msg = {\n \"ID\": offer[\"offerID\"],\n \"prices\": offer[\"prices\"]\n }\n print(\"Sending Offer: {}\".format(offer_msg))\n ok, msg = adapter.send(\"B\", offer_msg)\n if not ok:\n print(msg)", "def lambda_handler(event, context):\n logger.debug(event)\n\n product_list = PRODUCT_LIST\n\n return {\n \"statusCode\": 200,\n \"headers\": HEADERS,\n \"body\": json.dumps({\"products\": product_list}),\n }", "def _es_push_results(self, query_name, records):\n logger.debug(f\"Pushing {query_name}: {records}\")\n for c in self.es_clients:\n c.send_to_es(query_name, records)", "def handler(event, context):\n message = [record['body'] for record in event.get('Records', [])]\n email_record = json.loads(message[0])[\"Records\"][0]\n\n new_email = [(email_record['s3']['bucket']['name'],\n urllib.parse.unquote(email_record['s3']['object']['key']))]\n\n if new_email:\n LOG.info(\"Changed/new object notification received from S3 bucket to the sqs queue\")\n for bucket, s3_key in new_email:\n LOG.info(\"Processing S3 bucket://%s/%s\", bucket, s3_key)\n email_body = S3.Object(bucket, s3_key).get()['Body'].read().decode('utf-8')\n\n # Process PBS job info and push the metadata doc to AWS ES\n _process_pbs_job_info(email_body)\n else:\n LOG.info(\"No new/updated email record found in the S3 bucket\")", "def run(self, event, context):\n logger.debug('Number of Records: %d', len(event.get('Records', [])))\n\n config = load_config()\n env = load_env(context)\n\n for record in event.get('Records', []):\n payload = StreamPayload(raw_record=record)\n classifier = StreamClassifier(config=config)\n classifier.map_source(payload)\n\n # If the kinesis stream or s3 bucket is not in our config,\n # go onto the next record\n if not payload.valid_source:\n continue\n\n if payload.service == 's3':\n self.s3_process(payload, classifier)\n elif payload.service == 'kinesis':\n self.kinesis_process(payload, classifier)\n elif payload.service == 'sns':\n self.sns_process(payload, classifier)\n else:\n logger.info('Unsupported service: %s', payload.service)\n\n # returns the list of generated alerts\n if self.return_alerts:\n return self.alerts\n # send alerts to SNS\n self.send_alerts(env, payload)", "def sQRecords(self, ts_start, limit):\n try: ts=int(ts_start)\n except: \n print \"** expecting integer 'ts_start'\"\n return\n \n try: l=int(limit)\n except: \n print \"** expecting integer 'limit'\"\n return\n \n #print \"sQRecords: ts_start: %s -- limit: %s\" % (ts_start, limit)\n self.setup()\n try: records=self.db.getRecords(ts, l)\n except: records=None\n \n rs=self.formatRecordSet(records)\n self.Records(rs)", "def send_multiple_data(self, data_array, trip_id):\n l = []\n for d in data_array:\n l.append((trip_id, json.dumps(d[0])))\n print l\n if len(l) == 0:\n return\n with closing(self.db.cursor()) as cursor:\n query = \"INSERT INTO Data (Trip, DataString) VALUES (%s, %s)\"\n cursor.executemany(query, l)\n self.db.commit()", "def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield 
{\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()", "def process_elements(self, events: Iterable[Dict[str, Any]]) -> List[EthereumEvent]:\n tx_hashes = list(OrderedDict.fromkeys([event['transactionHash'] for event in events]))\n ethereum_txs = self.index_service.txs_create_or_update_from_tx_hashes(tx_hashes) # noqa: F841\n ethereum_events = [EthereumEvent.objects.from_decoded_event(event) for event in events]\n return EthereumEvent.objects.bulk_create(ethereum_events, ignore_conflicts=True)", "def lambda_handler(event, context):\n result = []\n conn = pymysql.connect(rds_host, user=name, passwd=password, db=db_name, connect_timeout=5)\n with conn:\n cur = conn.cursor()\n homeID = event['homeID']\n select_part = \"SELECT inc.IncidentID, inc.DateRecorded, inc.BadIncidentFlag, inc.ImagePaths, inc.FriendlyMatchFlag, inc.MicrophonePath, inc.UltrasonicPath \"\n table_part = \"FROM IncidentData inc \"\n where_part = \"WHERE inc.AccountID=%s ORDER BY inc.DateRecorded DESC\" % (homeID)\n query = select_part + table_part + where_part\n #print(query)\n try:\n cur.execute(query)\n except Exception as e:\n cur.close()\n return {\n \"statusCode\": 413,\n \"error\" : str(e)\n }\n cols = cur.description \n result = [{cols[index][0]:col for index, col in enumerate(value)} for value in cur.fetchall()]\n print(result)\n print(result[0])\n cur.close()\n if len(result)>0:\n return {\n 'statusCode' : 200,\n 'message': \"Retrieved records successfully\",\n 'body' : '[' + str(result[0]) + ']'\n }\n else:\n return {\n 'statusCode' : 411,\n 'message': \"No records found\",\n 'body' : \" \"\n }", "def do_bulk(self, args):\n pass", "def postponed_send(self):\n\n for event in self._event_list:\n self._http_post([event], postpone=True)\n\n # clear event_list for future use\n self._event_list = []", "def handler(kinesis_records, context):\n data = kinesis_records[0].parse()\n detail = data.get('detail')\n return publish({\n \"eventSource\": data['source'],\n \"awsRegion\": data['region'],\n \"eventTime\": data['time'],\n \"eventName\": detail['eventName'],\n \"userIdentity\": {\n \"principalId\": detail['userIdentity']['principalId']\n },\n \"requestParameters\": {\n \"sourceIPAddress\": detail['sourceIPAddress']\n },\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"bucket\": {\n \"name\": detail['requestParameters']['bucketName'],\n \"arn\": detail['resources'][1]['ARN']\n },\n \"object\": {\n \"key\": detail['requestParameters']['key'],\n \"size\": detail['additionalEventData']['bytesTransferredIn']\n }\n }\n })", "def gen_metadata_msg_enqueue_tasks(queue_msg_list: List[str],\n queue_client_list: List[QueueClient],\n tc: TelemetryClient) -> None:\n\n tasks = []\n for idx, queue_msg in enumerate(queue_msg_list):\n output_obj = json.loads(queue_msg)\n\n queue_index = idx % len(queue_client_list)\n logging.debug(\n f\"{HEADER} Try to send message to ingest queue {queue_index}, queue_msg: {queue_msg}\")\n\n base64_message = base64.b64encode(queue_msg.encode('ascii')).decode('ascii')\n\n file_url = output_obj['data']['url']\n size = int(output_obj['data']['contentLength'])\n\n tc.track_event(METADATA_HANDLE_EVENT_NAME,\n {'FILE_URL': 
file_url},\n {METADATA_HANDLE_EVENT_NAME + '_SIZE': size,\n METADATA_HANDLE_EVENT_NAME + '_COUNT': 1})\n\n # round robin to enqueue message\n task = asyncio.ensure_future(send_queue_messages(\n queue_client_list[queue_index], base64_message, queue_msg))\n tasks.append(task)\n tc.flush()\n return tasks", "def cleanup_record(schema, record):\n if not isinstance(record, dict) and not isinstance(record, list):\n return record\n\n elif isinstance(record, list):\n nr = []\n for item in record:\n nr.append(cleanup_record(schema, item))\n return nr\n\n elif isinstance(record, dict):\n nr = {}\n for key, value in record.items():\n nkey = bigquery_transformed_key(key)\n nr[nkey] = cleanup_record(schema, value)\n return nr\n\n else:\n raise Exception(f\"unhandled instance of record: {record}\")", "def bulk(self) -> None:\n helpers.bulk(self.client, self.gen_business_data(BUSINESS_FP))\n helpers.bulk(self.client, self.gen_review_data(REVIEW_FP))\n helpers.bulk(self.client, self.gen_tip_data(TIP_FP))", "def handle_record_sequence(self, record_sequence):\n result = []\n for record in record_sequence:\n result.append(self.handle_record(record))\n return result", "def __init__(self, *records: ScalarSequence):\n self._records = [r for r in records if r]", "def bulk_transform(self, list_of_data):\n new_list = []\n\n for item in list_of_data:\n new_list.append(self.transform(item))\n\n return new_list", "def createFeedItems(self):\r\n for item in self.item_data:\r\n self.initCreateFeedItem(item)\r\n self.createItem(item)", "def _batch(self, batch_request_entries):\n necessary_keys = [\"id\", \"version\", \"method\", \"params\"]\n\n results = []\n\n for (idx, request) in enumerate(batch_request_entries):\n error = None\n result = None\n\n # assert presence of important details\n for necessary_key in necessary_keys:\n if not necessary_key in request.keys():\n raise FakeBitcoinProxyException(\"Missing necessary key {} for _batch request number {}\".format(necessary_key, idx))\n\n if isinstance(request[\"params\"], list):\n method = getattr(self, request[\"method\"])\n result = method(*request[\"params\"])\n else:\n # matches error message received through python-bitcoinrpc\n error = {\"message\": \"Params must be an array\", \"code\": -32600}\n\n results.append({\n \"error\": error,\n \"id\": request[\"id\"],\n \"result\": result,\n })\n\n return results", "def add_to_queue(self, items):\n\n for i in items:\n self.r.rpush(self.joblist, i)", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in results:\n data = {}\n data[\"qid\"] = (\"i-\" + str(result.parliamentary_item_id))\n if type(result)==domain.AgendaItem:\n g = u\" \" + result.group.type + u\" \" + result.group.short_name\n else:\n g = u\"\" # !+ g?\n data[\"subject\"] = result.short_name\n data[\"title\"] = result.short_name\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"%ss/obj-%i\" % (\n result.type, result.parliamentary_item_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n if type(result)==domain.Question:\n data[\"to\"] = result.ministry.short_name\n else:\n data[\"to\"]= u\"\"\n # remember original domain object\n data[\"id\"] = result.parliamentary_item_id\n data[\"_obj\"] = result\n # append processed 
result item\n data_list.append(data)\n self._data = data_list", "def bulk_create():\n logger.info(\"Creating persuasions in bulk\")\n try:\n request_data = json.loads(request.data)\n with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:\n {executor.submit(PersuasionServices.create, data): data for data in request_data}\n\n return jsonify(\n dict(status=\"success\", message=\"Your request is in the queue, persuasion will create shortly\"))\n except Exception as e:\n logger.error(\"Exception while creating persuasions in bulk - \" + repr(e))\n return jsonify(dict(status=\"failure\", error=repr(e)))", "def enqueue(self, content_object, start, end=None, batch_content_object=None,\n extra_params=None, send_expired=True):\n enqueued = []\n for ab_test in ABTest.objects.filter(stream=self):\n if not ab_test.is_enabled:\n continue\n message = ab_test.random_message()\n send_time = message.send_time(start, end)\n if send_time:\n if send_time <= datetime.datetime.now():\n if send_expired:\n message.send(content_object,\n blacklisted_emails=message.blacklisted_emails(),\n extra_params=extra_params)\n else:\n if batch_content_object:\n enqueued.append(Queue.objects.create(message=message,\n content_object=content_object, send_time=send_time,\n batch_content_object=batch_content_object))\n else:\n enqueued.append(Queue.objects.create(message=message,\n content_object=content_object, send_time=send_time))\n return enqueued", "def get_data(queue, item_count):\n return [loads(queue.get()) for _ in range(item_count)]", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in results:\n data ={}\n data[\"qid\"]= (\"m_\" + str(result.motion_id))\n data[\"subject\"] = u\"M \" + str(result.motion_number) + u\" \" + result.short_name\n data[\"title\"] = result.short_name\n if result.approval_date:\n data[\"result_item_class\"] = (\"workflow-state-\" + \n result.status + \"sc-after-\" + \n datetime.date.strftime(result.approval_date, \"%Y-%m-%d\"))\n else:\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"motions/obj-\" + str(result.motion_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n data[\"to\"] = \"\"\n data_list.append(data)\n self._data = data_list", "def emit(self, record):\n if self.list is not None:\n try:\n self.r.lpush(self.list, json.dumps(self.format(record)))\n except Exception:\n self.handleError(record)", "def parse_rrs(payload, offset, quantity):\n rrs = []\n for i in range(quantity):\n subtype = get_record_type(payload, offset)\n # print \"subtype \" + subtype\n rr, length = subtype.fromData(payload, offset)\n rrs.append(rr)\n offset += length\n \n return rrs, offset" ]
negative_scores:
[ "0.6159722", "0.59462416", "0.5789384", "0.5695923", "0.56770617", "0.5449017", "0.54378134", "0.54224366", "0.5411029", "0.53040165", "0.52972186", "0.529558", "0.52868986", "0.5283321", "0.52753913", "0.52752817", "0.5261119", "0.52518207", "0.5238154", "0.5229178", "0.52088416", "0.5186427", "0.51823896", "0.5179464", "0.5124709", "0.5102164", "0.5085752", "0.5081701", "0.5079915", "0.50737673", "0.50547355", "0.5051971", "0.5050031", "0.5036819", "0.503589", "0.50223374", "0.50211483", "0.49971056", "0.49969882", "0.49850872", "0.4984554", "0.49765477", "0.49711847", "0.49711847", "0.49640587", "0.49632683", "0.4960791", "0.49563244", "0.4946758", "0.49415308", "0.49361488", "0.49333298", "0.49226525", "0.4916225", "0.49098977", "0.49085104", "0.48997274", "0.4899388", "0.48941287", "0.48884824", "0.4884131", "0.48751453", "0.48526436", "0.48512796", "0.48407215", "0.48289248", "0.48141813", "0.4810478", "0.48103637", "0.480695", "0.48045242", "0.4803273", "0.48019758", "0.47998834", "0.47998616", "0.4799761", "0.47956818", "0.47908643", "0.47815922", "0.47778222", "0.47774982", "0.47667602", "0.47635493", "0.47602546", "0.47590736", "0.47584492", "0.47577044", "0.47567958", "0.47427252", "0.47375873", "0.4735361", "0.47339934", "0.4733671", "0.47276202", "0.47179294", "0.47157094", "0.4712978", "0.4703801", "0.4700686", "0.46971053" ]

document_score: 0.56031376

document_rank: 5
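The metadata attached to each row marks it for triplet-style contrastive training: the query is the anchor, the document is the positive, and each entry in negatives is a hard negative, with negative_scores giving the retrieval similarity of each distractor. As a rough sketch, assuming rows are available as Python dicts with the fields shown above (the function and variable names are illustrative):

# Sketch only: `rows` is assumed to be an iterable of dicts with the
# "query", "document" and "negatives" fields shown in this dump.
def build_triplets(rows):
    for row in rows:
        anchor = row["query"]
        positive = row["document"]
        for negative in row["negatives"]:
            yield (anchor, positive, negative)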
Row 2:

query: Send a list of records to SQS, batching as necessary

document:
def send(self, payloads):
    records = self._payload_messages(payloads)

    # SQS only supports up to 10 messages so do the send in batches
    for message_batch in self._message_batches(records):
        response = self._send_messages(message_batch)
        self._finalize(response, message_batch)

metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }

negatives:
[ "def _message_batches(cls, records):\n # Dump the records to a list of minimal json\n records_json = [\n json.dumps(record, separators=(',', ':')) for record in records\n ]\n\n current_batch_size = 0\n current_batch = []\n for record in records_json:\n line_len = len(record)\n # Check if the max size of the batch has been reached or if the current\n # record will exceed the max batch size and start a new batch\n if ((len(current_batch) == cls.MAX_BATCH_COUNT) or\n (current_batch_size + line_len > cls.MAX_BATCH_SIZE)):\n yield current_batch[:]\n current_batch_size = 0\n del current_batch[:]\n\n if line_len > cls.MAX_BATCH_SIZE:\n LOGGER.error('Record too large (%d) to send to SQS:\\n%s', line_len, record)\n cls._log_failed(1)\n continue\n\n # Add the record to the batch\n current_batch_size += line_len\n current_batch.append(record)\n\n # yield the result of the last batch (no need to copy via slicing)\n if current_batch:\n yield current_batch", "def put_records_batch(\n client, stream_name: str, records: list, max_retries: int, max_batch_size: int = 500\n) -> None or List[dict]:\n\n retry_list = []\n\n for batch_index, batch in enumerate(split_list(records, max_batch_size)):\n records_to_send = create_records(batch)\n retries_left = max_retries\n\n while len(records_to_send) > 0:\n kinesis_response = client.put_records(\n Records=records_to_send, StreamName=stream_name,\n )\n\n if kinesis_response[\"FailedRecordCount\"] == 0:\n break\n else:\n index: int\n record: dict\n for index, record in enumerate(kinesis_response[\"Records\"]):\n if \"ErrorCode\" in record:\n # original records list and response record list have same order, guaranteed:\n # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kinesis.html#Kinesis.Client.put_records\n logger.error(\n f\"A record failed with error: {record['ErrorCode']} {record['ErrorMessage']}\"\n )\n retry_list.append(records_to_send[index])\n\n records_to_send = retry_list\n retry_list = []\n\n if retries_left == 0:\n error_msg = (\n f\"No retries left, giving up on records: {records_to_send}\"\n )\n logger.error(error_msg)\n return records_to_send\n\n retries_left -= 1\n\n logger.info(f\"Waiting 500 ms before retrying\")\n time.sleep(0.5)\n\n return None", "def _send_messages(self, batched_messages):\n @backoff.on_predicate(backoff.fibo,\n lambda resp: len(resp.get('Failed', [])) > 0,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n max_value=self.MAX_BACKOFF_FIBO_VALUE,\n on_backoff=backoff_handler(debug_only=False),\n on_success=success_handler(),\n on_giveup=giveup_handler())\n @backoff.on_exception(backoff.expo, self.EXCEPTIONS_TO_BACKOFF,\n max_tries=self.MAX_BACKOFF_ATTEMPTS,\n on_backoff=backoff_handler(debug_only=False),\n on_success=success_handler(),\n on_giveup=giveup_handler())\n def _send_messages_helper(entries):\n \"\"\"Inner helper function for sending messages with backoff_handler\n\n Args:\n entries (list<dict>): List of SQS SendMessageBatchRequestEntry items\n \"\"\"\n LOGGER.info('Sending %d message(s) to %s', len(entries), self.queue.url)\n\n response = self.queue.send_messages(Entries=entries)\n\n if response.get('Successful'):\n LOGGER.info(\n 'Successfully sent %d message(s) to %s with MessageIds %s',\n len(response['Successful']),\n self.queue.url,\n ', '.join(\n '\\'{}\\''.format(resp['MessageId'])\n for resp in response['Successful']\n )\n )\n\n if response.get('Failed'):\n self._check_failures(response) # Raise an exception if this is our fault\n self._strip_successful_records(entries, response)\n\n return 
response\n\n message_entries = [\n {\n 'Id': str(idx),\n 'MessageBody': message\n } for idx, message in enumerate(batched_messages)\n ]\n\n # The try/except here is to catch any raised errors at the end of the backoff\n try:\n return _send_messages_helper(message_entries)\n except self.EXCEPTIONS_TO_BACKOFF:\n LOGGER.exception('SQS request failed')\n # Use the current length of the message_entries in case some records were\n # successful but others were not\n self._log_failed(len(message_entries))\n return", "def send_messages_to_ks(records: List[str], stream_name: str):\n log.info('Sending message to Kinesis Stream')\n client = boto3.client('kinesis')\n return client.put_records(\n Records=[\n {\n 'Data': record + '\\n',\n 'PartitionKey': '1'\n } for record in records],\n StreamName=stream_name\n )", "def batch_push(self, payloads):\n body = json.dumps(payloads)\n\n status, response = self._request('POST', body, BATCH_PUSH_URL,\n 'application/json')\n if not status == 200:\n raise AirshipFailure(status, response)", "def push_bq_records(client, dataset, table, records, sleep = 300, max_batch = 100, print_failed_records = True, retry_on_fail = True):\n if len(records) == 0:\n return\n if len(records) > max_batch:\n split = len(records) // 2\n push_bq_records(client, dataset, table, records[0:split], sleep, max_batch)\n push_bq_records(client, dataset, table, records[split:], sleep, max_batch)\n else:\n try:\n succ = client.push_rows(dataset, table, records)\n if not succ:\n if retry_on_fail:\n print(\"Push to BigQuery table was unsuccessful. Waiting %s seconds and trying one more time.\" % sleep)\n time.sleep(sleep)\n push_bq_records(client, dataset, table, records, sleep, max_batch, print_failed_records, False)\n else:\n if print_failed_records:\n print(\"\\nRecord 0:\")\n print(records[0])\n if len(records) > 1:\n print(\"\\nRecord %s:\" % (len(records) - 1))\n print(records[len(records)-1])\n raise RuntimeError('Push to BigQuery table was unsuccessful. See above for sample record(s) if requested.')\n except BrokenPipeError:\n print(\"BrokenPipeError while pushing %s records. 
Waiting %s seconds and trying again.\" % (len(records), sleep)) \n time.sleep(sleep)\n push_bq_records(client, dataset, table, records, sleep, max_batch)", "def _send_batch(self, service_checks: list):\n for service_check in service_checks:\n self._send(service_check)", "def _send_batch(self):\n batch = RPLogBatch(self._batch)\n http_request = HttpRequest(\n self.session.post, self._log_endpoint, files=batch.payload,\n verify_ssl=self.verify_ssl)\n batch.http_request = http_request\n self._worker.send(batch)\n self._batch = []\n self._payload_size = helpers.TYPICAL_MULTIPART_FOOTER_LENGTH", "def send_to_all(apigatewaymanagementapi, connection_ids, data):\n dynamodb = boto3.client('dynamodb')\n for connection_id in connection_ids:\n try:\n apigatewaymanagementapi.post_to_connection(Data=data, ConnectionId=connection_id['connectionId']['S'])\n except Exception as e:\n print(e)\n # Remove connection id from DDB\n dynamodb.delete_item(\n TableName=os.environ.get('CONNECTION_TABLE_NAME'),\n Key={'connectionId': {'S': connection_id['connectionId']['S']}}\n )", "def _send_batch(self, base_url, endpoint, batch, dataset_id=None, dataset_version=None, retries=0):\n try:\n params = {'data': base64.b64encode(json.dumps(batch).encode()).decode()}\n if dataset_id:\n params['dataset_id'] = dataset_id\n params['token'] = self.token\n if dataset_version:\n params['dataset_version'] = dataset_version\n response = self.request(base_url, [endpoint], params, 'POST')\n msg = \"Sent \" + str(len(batch)) + \" items on \" + time.strftime(\"%Y-%m-%d %H:%M:%S\") + \"!\"\n Mixpanel.LOGGER.debug(msg)\n return response\n except BaseException as be:\n Mixpanel.LOGGER.debug('Exception in _send_batch')\n Mixpanel.LOGGER.debug(be)\n Mixpanel.LOGGER.warning(\"Failed to import batch, dumping to file import_backup.txt\")\n with open('import_backup.txt', 'a+') as backup:\n json.dump(batch, backup)\n backup.write('\\n')", "def _dispatch_batches(self, base_url, endpoint, item_list, prep_args, dataset_id=None, dataset_version=None):\n pool = ThreadPool(processes=self.pool_size)\n batch = []\n\n # Decide which _prep function to use based on the endpoint\n if endpoint == 'import' or endpoint == 'import-events':\n prep_function = Mixpanel._prep_event_for_import\n elif endpoint == 'engage' or endpoint == 'import-people':\n prep_function = Mixpanel._prep_params_for_profile\n else:\n Mixpanel.LOGGER.warning(\n 'endpoint must be \"import\", \"engage\", \"import-events\" or \"import-people\", found: ' + str(endpoint))\n return\n\n if base_url == self.BETA_IMPORT_API:\n batch_size = 1000\n else:\n batch_size = 50\n\n for item in item_list:\n if prep_args is not None:\n # Insert the given item as the first argument to be passed to the _prep function determined above\n prep_args[0] = item\n params = prep_function(*prep_args)\n if params:\n batch.append(params)\n else:\n batch.append(item)\n\n if len(batch) == batch_size:\n # Add an asynchronous call to _send_batch to the thread pool\n pool.apply_async(self._send_batch, args=(base_url, endpoint, batch, dataset_id, dataset_version),\n callback=Mixpanel._response_handler_callback)\n batch = []\n\n # If there are fewer than batch_size updates left ensure one last call is made\n if len(batch):\n # Add an asynchronous call to _send_batch to the thread pool\n pool.apply_async(self._send_batch, args=(base_url, endpoint, batch, dataset_id, dataset_version),\n callback=Mixpanel._response_handler_callback)\n pool.close()\n pool.join()", "def batch_process(self, message_list, action, userId='me'):\n\n 
list_of_ids = []\n\n for key, value in message_list.items():\n list_of_ids.append(value)\n\n chunks = [list_of_ids[x:x+1000] for x in range(0, len(list_of_ids), 1000)]\n\n for page in range(0, len(chunks)):\n if action.lower() == 'archive':\n resource = getattr(self.connection.users().messages(), 'batchModify')\n body = { \n \"ids\": chunks[page],\n \"removeLabelIds\": [\"INBOX\"],\n }\n else:\n resource = getattr(self.connection.users().messages(), 'batchDelete')\n body = { \n \"ids\": chunks[page],\n }\n\n dynamic_request = resource(userId=userId, body=body)\n response = dynamic_request.execute()\n print(f'[√] Bulk Action: SUCCESS {len(chunks[page])} Messages have been {action}d! - {page}')\n print(f'[√] Bulk Action: SUCCESS Total Number of Processed Messages: {len(list_of_ids)}')\n return True", "def beat_inbox_sms_bulk():\n receipt_id_sms, list_of_sms_notifications = sms_bulk.poll()\n\n while list_of_sms_notifications:\n save_smss.apply_async((None, list_of_sms_notifications, receipt_id_sms), queue=QueueNames.BULK_DATABASE)\n current_app.logger.info(f\"Batch saving with Bulk Priority: SMS receipt {receipt_id_sms} sent to in-flight.\")\n receipt_id_sms, list_of_sms_notifications = sms_bulk.poll()", "def batch(self, reqs):\n return self.connection.batch_(reqs)", "def ExecuteBatchQueue(self):\n\t\tself.client.ExecuteBatch(self.batch_queue, 'https://www.google.com/m8/feeds/contacts/default/full/batch')\n\t\tself.ClearBatchQueue();", "def apns_send_bulk_message(registration_ids, data, **kwargs):\n\tsocket = _apns_create_socket(APNS_SOCKET)\n\tfor registration_id in registration_ids:\n\t\t_apns_send(registration_id, data, socket=socket, **kwargs)\n\n\tsocket.close()", "async def mass_send(self, messages: List[Sms]) -> List[int]:\n raise NotImplementedError", "def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield {\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()", "def send_message_bulk(session_ids, message, status=200):\n for session_id in session_ids:\n TalkBackEvent.from_session_id(session_id).send_message(message, status)", "def send_batch(cls, subject, body, recipients, chunk_size=settings.MAILGUN_BATCH_CHUNK_SIZE):\n\n body, recipients = cls._recipient_override(body, recipients)\n responses = []\n\n recipients = iter(recipients)\n chunk = list(islice(recipients, chunk_size))\n while len(chunk) > 0:\n params = dict(\n to=chunk,\n subject=subject,\n text=body\n )\n params['recipient-variables'] = json.dumps({email: {} for email in chunk})\n responses.append(cls._mailgun_request(requests.post, 'messages', params))\n chunk = list(islice(recipients, chunk_size))\n\n return responses", "def put_ids_to_queue(ids_list):\n LOGGER.debug('pushing %s ads to the queue', len(ids_list))\n for advert_id in ids_list:\n fetch_single_advert.delay(advert_id)", "def flush_batch(self, batch):\n inserts = []\n replacements = []\n\n for action_type, data in batch:\n if action_type == processor.INSERT:\n inserts.append(data)\n elif action_type == processor.REPLACE:\n replacements.append(data)\n\n if inserts:\n write_rows(\n 
self.clickhouse,\n self.dist_table_name,\n inserts\n )\n\n if self.metrics:\n self.metrics.timing('inserts', len(inserts))\n\n if replacements:\n for key, replacement in replacements:\n self.producer.produce(\n self.replacements_topic,\n key=six.text_type(key).encode('utf-8'),\n value=json.dumps(replacement).encode('utf-8'),\n on_delivery=self.delivery_callback,\n )\n\n self.producer.flush()", "def send_to_kafka(rows):\n producer = connect_kafka_producer()\n for row in rows:\n print(row.asDict())\n producer.send(TOPIC_NAME, value=row.asDict())\n producer.flush()", "def beat_inbox_email_bulk():\n receipt_id_email, list_of_email_notifications = email_bulk.poll()\n\n while list_of_email_notifications:\n save_emails.apply_async((None, list_of_email_notifications, receipt_id_email), queue=QueueNames.BULK_DATABASE)\n current_app.logger.info(f\"Batch saving with Bulk Priority: email receipt {receipt_id_email} sent to in-flight.\")\n receipt_id_email, list_of_email_notifications = email_bulk.poll()", "def process_data():\n for message in get_messages_from_sqs():\n try:\n message_content = json.loads(message.body)\n input_file = urllib.unquote_plus(message_content\n ['Records'][0]['s3']['object']\n ['key']).encode('utf-8')\n s3.download_file(input_bucket_name, input_file, input_file)\n output_file = os.path.join(output_dir, os.path.splitext(input_file)[0]+'.csv')\n parse_patient_data(input_file, output_file)\n upload_data(output_file)\n cleanup_files(input_file, output_file)\n except:\n message.change_visibility(VisibilityTimeout=0)\n continue\n else:\n message.delete()", "def bulk_index_records(records):\n indexer = RecordIndexer()\n\n click.echo('Bulk indexing {} records...'.format(len(records)))\n indexer.bulk_index([str(r.id) for r in records])\n indexer.process_bulk_queue()\n click.echo('Indexing completed!')", "def queue_emails(spoofs, message, count):\n\t\tqueues = []\n\t\tnumber_of_spoofs = len(spoofs)\n\t\tmessages_per_queue = count // number_of_spoofs\n\t\textra_to_distribute = count - (messages_per_queue * number_of_spoofs)\n\t\tbatch = Batch(size=count, complete=0)\n\t\tbatch.save()\n\t\tpk = batch.pk\n\n\t\t# going deep into each queue\n\t\tfor x in range(number_of_spoofs):\n\n\t\t\tspoof = spoofs[x]\n\t\t\tmessage['From'] = spoof.username\n\t\t\tqueue = Queue(spoof.username, connection=Redis())\n\t\t\tqueues.append(queue)\n\n\t\t\tfor y in range(messages_per_queue):\n\t\t\t\tqueue.enqueue_call(func=send, args=spoof.task_arguments + (message, pk))\n\n\t\t# panning across each queue\n\t\tfor x in range(extra_to_distribute):\n\t\t\tspoof = spoofs[x]\n\t\t\tmessage['From'] = spoof.username\n\t\t\tqueue = queues[x]\n\t\t\tqueue.enqueue_call(func=send ,args=(spoof.task_arguments + (message, pk)))\n\n\t\treturn pk", "def do_bulk(self, args):\n pass", "def postponed_send(self):\n\n for event in self._event_list:\n self._http_post([event], postpone=True)\n\n # clear event_list for future use\n self._event_list = []", "def ExecuteBatch(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _send_bulk_mail(\n recipient_ids, sender_id, intent, email_subject, email_html_body,\n sender_email, sender_name, instance_id=None):\n _require_sender_id_is_valid(intent, sender_id)\n\n recipients_settings = user_services.get_users_settings(recipient_ids)\n recipient_emails = [user.email for user in recipients_settings]\n\n cleaned_html_body = html_cleaner.clean(email_html_body)\n if cleaned_html_body != email_html_body:\n log_new_error(\n 'Original email HTML body does not 
match cleaned HTML body:\\n'\n 'Original:\\n%s\\n\\nCleaned:\\n%s\\n' %\n (email_html_body, cleaned_html_body))\n return\n\n raw_plaintext_body = cleaned_html_body.replace('<br/>', '\\n').replace(\n '<br>', '\\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\\n<p>')\n cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)\n\n def _send_bulk_mail_in_transaction(instance_id=None):\n \"\"\"Sends the emails in bulk to the recipients.\"\"\"\n sender_name_email = '%s <%s>' % (sender_name, sender_email)\n\n email_services.send_bulk_mail(\n sender_name_email, recipient_emails, email_subject,\n cleaned_plaintext_body, cleaned_html_body)\n\n if instance_id is None:\n instance_id = email_models.BulkEmailModel.get_new_id('')\n email_models.BulkEmailModel.create(\n instance_id, recipient_ids, sender_id, sender_name_email, intent,\n email_subject, cleaned_html_body, datetime.datetime.utcnow())\n\n transaction_services.run_in_transaction(\n _send_bulk_mail_in_transaction, instance_id)", "def _send_bulk_mail_in_transaction(instance_id=None):\n sender_name_email = '%s <%s>' % (sender_name, sender_email)\n\n email_services.send_bulk_mail(\n sender_name_email, recipient_emails, email_subject,\n cleaned_plaintext_body, cleaned_html_body)\n\n if instance_id is None:\n instance_id = email_models.BulkEmailModel.get_new_id('')\n email_models.BulkEmailModel.create(\n instance_id, recipient_ids, sender_id, sender_name_email, intent,\n email_subject, cleaned_html_body, datetime.datetime.utcnow())", "def emit(self, span_datas):\n spans = []\n for span_data in span_datas:\n start_timestamp_mus = timestamp_to_microseconds(span_data.start_time)\n end_timestamp_mus = timestamp_to_microseconds(span_data.end_time)\n duration_mus = end_timestamp_mus - start_timestamp_mus\n\n start_time_ms = start_timestamp_mus // 1000\n duration_ms = duration_mus // 1000\n\n span = Span(\n name=span_data.name,\n tags=span_data.attributes,\n guid=span_data.span_id,\n trace_id=span_data.context.trace_id,\n parent_id=span_data.parent_span_id,\n start_time_ms=start_time_ms,\n duration_ms=duration_ms,\n )\n\n spans.append(span)\n\n try:\n response = self.client.send_batch(spans, self._common)\n except Exception:\n _logger.exception(\"New Relic send_spans failed with an exception.\")\n return\n\n if not response.ok:\n _logger.error(\n \"New Relic send_spans failed with status code: %r\", response.status\n )\n\n return response", "def _batch_write(self):\n if self.to_put:\n db.put(self.to_put)\n self.to_put = []\n if self.to_delete:\n db.delete(self.to_delete)\n self.to_delete = []", "def batch(self):\n return self._client.batch()", "def gcm_send_bulk_message(setting_type, registration_ids, data, collapse_key=None, delay_while_idle=False, time_to_live=0):\n\n\targs = data, collapse_key, delay_while_idle, time_to_live\n\n\t# GCM only allows up to 1000 reg ids per bulk message\n\t# https://developer.android.com/google/gcm/gcm.html#request\n\tmax_recipients = SETTINGS.get(\"GCM_MAX_RECIPIENTS\")\n\tif len(registration_ids) > max_recipients:\n\t\tret = []\n\t\tfor chunk in _chunks(registration_ids, max_recipients):\n\t\t\tret.append(_gcm_send_json(setting_type, chunk, *args))\n\t\treturn ret\n\n\treturn _gcm_send_json(setting_type, registration_ids, *args)", "def _es_push_results(self, query_name, records):\n logger.debug(f\"Pushing {query_name}: {records}\")\n for c in self.es_clients:\n c.send_to_es(query_name, records)", "def bulk_queue_graphile_worker_jobs(jobs: Sequence[GraphileWorkerJob]):\n values: List[str] = []\n 
params: List[Any] = []\n for job in jobs:\n values.append(\"(%s, %s::json, %s::timestamptz, %s, %s::jsonb)\")\n params.append(job.task_identifier)\n params.append(json.dumps(job.payload))\n params.append(job.run_at.isoformat())\n params.append(job.max_attempts)\n params.append(json.dumps(job.flags) if job.flags else None)\n _execute_graphile_worker_query(BULK_INSERT_JOBS_SQL.format(values=\", \".join(values)), params=params)", "def bulkupload_entitie_records(self, entity_upload_parameters, tmp_file, progress=None):\n records = self.service_client.factory.create(\"ns2:ArrayOfstring\")\n tmp_csv_file = io.open(tmp_file, encoding='utf-8-sig')\n\n records.string = [x.strip() for x in tmp_csv_file.readlines()]\n \n try:\n #print(self.service_client)\n response = self.service_client.UploadEntityRecords(\n AccountId=self._authorization_data.account_id,\n EntityRecords=records,\n ResponseMode=entity_upload_parameters.response_mode\n )\n if self.need_to_fall_back_to_async(response):\n headers = self.service_client.get_response_header()\n operation = BulkUploadOperation(\n request_id=response.RequestId,\n authorization_data=self._authorization_data,\n poll_interval_in_milliseconds=self._poll_interval_in_milliseconds,\n environment=self._environment,\n tracking_id=headers['TrackingId'] if 'TrackingId' in headers else None,\n **self.suds_options\n )\n file_path = self.download_upload_result(operation, entity_upload_parameters, progress)\n return self.read_result_from_bulk_file(file_path)\n else:\n return self.read_bulkupsert_response(response) \n except Exception as ex:\n if 'OperationNotSupported' == operation_errorcode_of_exception(ex):\n return self.bulkupload_entities(entity_upload_parameters, tmp_file, progress)\n else:\n raise ex", "def test_cart_item_write_batch_lambda(self, dynamo):\n dynamo.return_value = self.dynamo_accessor.dynamo_client\n\n cart_id = '123'\n write_params = {\n 'entity_type': 'samples',\n 'catalog': self.catalog,\n 'filters': {},\n 'cart_id': cart_id,\n 'batch_size': 1000\n }\n write_response = self.app_module.cart_item_write_batch(write_params, None)\n inserted_items = self.dynamo_accessor.query(table_name=config.dynamo_cart_item_table_name,\n key_conditions={'CartId': cart_id})\n\n self.assertEqual(write_response['count'], len(list(inserted_items)))", "def store_documents(self, documents: list):\n results = app.Results()\n entries = [\n { \n 'Id': str(uuid1()),\n 'MessageBody': json.dumps(doc)\n }\n for doc in documents\n ]\n ids = [ e['Id'] for e in entries ]\n self.Logger.info(f'Store {ids} in sqs')\n self.Logger.debug(f'Saving {entries} in sqs {self.sqs_queue_url}')\n self.sqs_client.send_message_batch(\n QueueUrl=self.sqs_queue_url,\n Entries=entries\n )\n results.ActionStatus = 0\n results.Results = ids\n return results", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def _send_data(self):\n \n # Do not send more than 100 datasets each time (totally arbitrary)\n MAX_DATA_SETS_PER_POST = 100\n data_to_send = self._data_buffer[0:MAX_DATA_SETS_PER_POST]\n data_to_keep = self._data_buffer[MAX_DATA_SETS_PER_POST:]\n\n # Prepare data string with the values in data buffer\n now = time.time()\n data_string = '[' \n for (timestamp, data) in data_to_send:\n data_string += '['\n data_string += str(round(timestamp-now,2))\n for sample in data:\n data_string += ','\n data_string += str(sample)\n data_string += '],'\n # 
Remove trailing comma and close bracket\n data_string = data_string[0:-1]+']'\n\n self._log.debug(\"Data string: \" + data_string)\n \n # Prepare URL string of the form\n # 'http://domain.tld/emoncms/input/bulk.json?apikey=\n # 12345&data=[[-10,10,1806],[-5,10,1806],[0,10,1806]]'\n url_string = self._settings['protocol'] + self._settings['domain'] + \\\n self._settings['path'] + \"/input/bulk_json?apikey=\" + \\\n self._settings['apikey'] + \"&data=\" + data_string\n self._log.debug(\"URL string: \" + url_string)\n\n # Send data to server\n self._log.info(\"Sending to \" + \n self._settings['domain'] + self._settings['path'])\n try:\n result = urllib2.urlopen(url_string, timeout=60)\n except urllib2.HTTPError as e:\n self._log.warning(\"Couldn't send to server, HTTPError: \" + \n str(e.code))\n except urllib2.URLError as e:\n self._log.warning(\"Couldn't send to server, URLError: \" + \n str(e.reason))\n except httplib.HTTPException:\n self._log.warning(\"Couldn't send to server, HTTPException\")\n except Exception:\n import traceback\n self._log.warning(\"Couldn't send to server, Exception: \" + \n traceback.format_exc())\n else:\n if (result.readline() == 'ok'):\n self._log.debug(\"Send ok\")\n # Send ok -> empty buffer\n self._data_buffer = data_to_keep\n return True\n else:\n self._log.warning(\"Send failure\")", "def store_documents(self, documents: list):\n requests = [\n {'PutRequest': {'Item': Item}} \n for Item in documents\n ]\n ticks = [d['symbol'] for d in documents]\n size = getsizeof(requests)\n exceptions = self.dynamo_client.exceptions\n errors = (exceptions.ProvisionedThroughputExceededException)\n\n self.Logger.info(\n f'Writing batch of {ticks} into dynamodb '\n f'with size {size} bytes',\n extra={\"message_info\": {\"Type\": \"DynamoDB write\", \"Tickers\": ticks, \"Size\": size}}\n )\n \n try:\n response = self.dynamo_resource.batch_write_item(\n RequestItems={self.table_name: requests},\n ReturnConsumedCapacity = 'INDEXES')\n \n self.Logger.debug(f'{response}')\n \n if response['UnprocessedItems']:\n raise RuntimeError('UnprocessedItems in batch write')\n except errors as ex:\n raise app.AppException(ex, f'dynamodb throughput exceed')\n\n return True", "def create_records(data: List[str]) -> List[dict]:\n records = []\n for d in data:\n records.append(create_record(d))\n\n logger.debug(f\"Formed Kinesis Records batch for PutRecords API: {records}\")\n return records", "def BeginExecuteBatch(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def save_multiple_records(self, records, collection_name):\n\n try:\n self.logger.info('in save_multiple_records()')\n collection = self.get_db()[collection_name]\n record_ids = collection.insert_many(records)\n self.logger.info('out save_multiple_records()')\n return record_ids\n except Exception as e:\n self.logger.error(f'Error occurred while saving multiple records {e}')", "def write(self, batch):\n cursor = connection.cursor()\n while batch:\n values = [\n \"(%s, %s)\" % (\n psycopg2.Binary(k),\n psycopg2.Binary(v)\n ) for k, v in batch[:self.BATCH_SIZE]]\n sql = \"INSERT INTO %s(k,v) VALUES %s\" % (self.table, \",\".join(values))\n batch = batch[self.BATCH_SIZE:]\n cursor.execute(sql)\n cursor.execute(\"COMMIT\")", "def send_data(queue, data):\n for obj in data:\n queue.put(dumps(obj, protocol=-1))", "def batch_write(client, resources, batch_size=MAX_DYNAMO_BATCH_SIZE, batch_counter_step=MAX_DYNAMO_BATCH_SIZE):\n idx = 0\n item_count = 0\n\n batch = defaultdict(list)\n for idx, 
batch_resources in enumerate(chunk(resources, batch_size)):\n batch.clear()\n for resource in batch_resources:\n batch[getmeta(resource).table_name(client)].append(\n {'PutRequest': {'Item': resource.to_dynamo_dict(skip_null_fields=True)}}\n )\n item_count += 1\n\n if (idx % batch_counter_step) == 0:\n logger.info(\"Loading batch: %s\", idx)\n\n client.batch_write_item(RequestItems=batch)\n\n logger.info(\"Loaded %s records in %s batches.\", item_count, idx + 1)", "def process(self, send_now=False):\n\t\tfinal_recipients = self.final_recipients()\n\t\tqueue_separately = (final_recipients and self.queue_separately) or len(final_recipients) > 20\n\t\tif not (final_recipients + self.final_cc()):\n\t\t\treturn []\n\n\t\tqueue_data = self.as_dict(include_recipients=False)\n\t\tif not queue_data:\n\t\t\treturn []\n\n\t\tif not queue_separately:\n\t\t\trecipients = list(set(final_recipients + self.final_cc() + self.bcc))\n\t\t\tq = EmailQueue.new({**queue_data, **{\"recipients\": recipients}}, ignore_permissions=True)\n\t\t\tsend_now and q.send()\n\t\telse:\n\t\t\tif send_now and len(final_recipients) >= 1000:\n\t\t\t\t# force queueing if there are too many recipients to avoid timeouts\n\t\t\t\tsend_now = False\n\t\t\tfor recipients in frappe.utils.create_batch(final_recipients, 1000):\n\t\t\t\tfrappe.enqueue(\n\t\t\t\t\tself.send_emails,\n\t\t\t\t\tqueue_data=queue_data,\n\t\t\t\t\tfinal_recipients=recipients,\n\t\t\t\t\tjob_name=frappe.utils.get_job_name(\n\t\t\t\t\t\t\"send_bulk_emails_for\", self.reference_doctype, self.reference_name\n\t\t\t\t\t),\n\t\t\t\t\tnow=frappe.flags.in_test or send_now,\n\t\t\t\t\tqueue=\"long\",\n\t\t\t\t)", "def submit_to_queue(queue_df, conn, table_name):\n queue_df.to_sql(con=conn, name=table_name, if_exists='replace', index=False)\n print 'Inserted ' + str(len(queue_df)) + ' records to the task_queue'", "def batch_process(self, delete_list=[], update_list=[]):\n self.request_url = \"{0}/{1}\".format(self.API_URL, self.USER_BULK_ENDPOINT)\n payload = {\n 'updated': update_list,\n 'deleted': delete_list,\n }\n\n r = self.requests.post(\n self.request_url,\n data=json.dumps(payload),\n headers=self.default_headers,\n timeout=30\n )\n\n return r.status_code, r.json()", "def batch_process_async(self, delete_list=[], update_list=[]):\n headers = update_dict(self.default_headers, {self.API_VERSION_HEADER: self.API_VERSIONS[\"v2\"]})\n self.request_url = \"{0}/{1}\".format(self.API_URL, self.USER_BULK_ENDPOINT)\n\n payload = {\n 'updated': update_list,\n 'deleted': delete_list,\n }\n\n r = self.requests.post(self.request_url, data=json.dumps(payload), headers=headers, timeout=30)\n\n return r.status_code, r.json()", "def _send_messages_helper(entries):\n LOGGER.info('Sending %d message(s) to %s', len(entries), self.queue.url)\n\n response = self.queue.send_messages(Entries=entries)\n\n if response.get('Successful'):\n LOGGER.info(\n 'Successfully sent %d message(s) to %s with MessageIds %s',\n len(response['Successful']),\n self.queue.url,\n ', '.join(\n '\\'{}\\''.format(resp['MessageId'])\n for resp in response['Successful']\n )\n )\n\n if response.get('Failed'):\n self._check_failures(response) # Raise an exception if this is our fault\n self._strip_successful_records(entries, response)\n\n return response", "def batch_execute(self, conn):\n def batches(data, batch_size) -> list:\n \"\"\"Return batches of length `batch_size` from any object that\n supports iteration without knowing length.\"\"\"\n rv = []\n for idx, line in enumerate(data):\n if idx != 0 and idx % 
batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv\n\n columns = ColumnCollection(self.columns)\n if self.header:\n self.columns = [columns.get(h) for h in next(self.data)]\n columns = ColumnCollection(self.columns)\n\n total = 0\n query = BulkInsertQuery(self.table, columns)\n for batch in batches(self.data, self.batch_size):\n total += query.execute(conn, batch) or 0\n yield total", "def batch_add_documents(rows: tuple, client: firestore.Client, context: Context = None) -> bool:\n if len(rows) < 1:\n return\n logger.debug('Beginning batch add.')\n\n batch = client.batch()\n col = client.collection(config.FIRESTORE_IDENTITY_POOL)\n batch_size = 0\n\n for row in rows:\n doc_id = row.pop('mpi')\n doc_ref = col.document(doc_id)\n batch.set(doc_ref, row)\n batch_size += 1\n \n try:\n batch.commit()\n logger.info(f\"Committed batch of {batch_size} records\")\n return True\n except Exception as e:\n logger.error(e)\n return False", "def post(self, batch):\n num_jobs = len(batch)\n plural = \"\" if num_jobs == 1 else \"s\"\n log.info(\"> Sending batch request with %s job%s\", num_jobs, plural)\n data = []\n for i, job in enumerate(batch):\n if job.finished:\n raise Finished(job)\n else:\n job.finished = True\n log.info(\"> {%s} %s\", i, job)\n data.append(dict(job, id=i))\n response = self.resource.post(data)\n log.info(\"< Received batch response for %s job%s\", num_jobs, plural)\n return response", "def process_es_bulk(pub_list, es):\n bulk_response = es.bulk(\n body=''.join(pub_list),\n refresh='wait_for',\n request_timeout=3600,\n )\n if bulk_response.get('errors'):\n logger.error('failed on bulk indexing:\\n%s',\n bulk_response)\n raise IndexingErrorException()\n return len(pub_list)", "def write_multiple_records(self, table, records):\n payload = None\n response = []\n if not isinstance(table, str):\n raise ValueError(\"table must be a str.\")\n\n if isinstance(records, dict):\n # Single record\n response.append(self.insert_or_update(table, records))\n\n if isinstance(records, list):\n # Multiple records\n for record in records:\n response.append(self.insert_or_update(table, record))\n\n return response", "def add_to_queue(self, items):\n\n for i in items:\n self.r.rpush(self.joblist, i)", "def jsonrpc_puttxn_batch(self, txns, broadcast = True):\n if ADD_NETWORK_DELAY:\n time.sleep(random.uniform(NETWORK_DELAY_MIN, NETWORK_DELAY_MAX))\n\n if self.node.storage.txns_received == 0:\n self.node.storage.time_measurement = time.time()\n self.node.storage.txns_received += 1\n if broadcast:\n self.node.storage.broadcast_txn_batch(txns)\n for txn in txns:\n self.jsonrpc_puttxn(txn, broadcast = False)", "def _send_pending_messages():\n\n queryset = models.Message.objects.filter(status=models.STATUS_PENDING)\\\n .order_by(\"-priority\", \"created_at\")\n\n connection = _get_real_backend()\n paginator = Paginator(list(queryset), getattr(settings, \"DJMAIL_MAX_BULK_RETRY_SEND\", 10))\n\n for page_index in paginator.page_range:\n connection.open()\n for message_model in paginator.page(page_index).object_list:\n email = message_model.get_email_message()\n sended = connection.send_messages([email])\n\n if sended == 1:\n message_model.status = models.STATUS_SENT\n message_model.sent_at = timezone.now()\n else:\n message_model.retry_count += 1\n\n message_model.save()\n connection.close()", "def send_msgs():\n\n scheduled = ScheduledMessage.query.filter( (ScheduledMessage.send_date<=datetime.datetime.now()) & (ScheduledMessage.sent=='f') ).all()\n print \"scheduled msgs = \", scheduled\n\n 
for msg in scheduled:\n user = User.query.filter_by(user_id=msg.user_id).one()\n contact = Contact.query.filter_by(contact_id=msg.contact_id).one()\n messages = Message.query.filter((Message.created_by==user.user_id) | (Message.created_by==1)).all()\n random_int = random.randint(0, len(messages) - 1)\n msg_text = messages[random_int].msg_text\n gmail.SendMessage(user.email, contact.email, 'Hey', msg_text, msg_text)\n msg.sent = True\n # schedule next message\n next_msg = ScheduledMessage(user_id=user.user_id, \n contact_id=contact.contact_id,\n send_date=msg.send_date + datetime.timedelta(days=contact.contact_period),\n sent=False)\n db.session.add(next_msg)\n db.session.commit()\n print \"sent message\"\n\n return \"All scheduled messages sent.\"", "def test_send_queued_mail(self):\n # Make sure that send_queued_sms with empty queue does not raise error\n call_command('send_queued_sms')\n\n # Make sure bulk sms runs successfully\n smses = []\n for i in range(0, 300):\n # create 3 failed sms\n if i % 100 == 0:\n sms = SMS(to='+6280000000000', status=STATUS.queued, backend_alias='error')\n else:\n sms = SMS(to='+6280000000000', status=STATUS.queued, backend_alias='dummy')\n smses.append(sms)\n\n SMS.objects.bulk_create(smses)\n\n call_command('send_queued_sms')\n\n self.assertEqual(SMS.objects.filter(status=STATUS.sent).count(), 297)\n self.assertEqual(SMS.objects.filter(status=STATUS.failed).count(), 3)", "def test_adding_a_batch(created_job, bulk_request):\n bulk_request.reset_mock()\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <batchInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>BATCHONE</id>\n <jobId>THEJOBID</jobId>\n <state>Queued</state>\n </batchInfo>\n '''\n\n fake_data = [('1', '2'), ('3', '4')]\n created_job.add_batch(['Id', 'Name'], iter(fake_data))\n\n assert created_job.pending_batches == ['BATCHONE']\n\n bulk_request.assert_called_once_with(\n 'post',\n 'https://salesforce/services/async/34.0/job/THEJOBID/batch',\n content_type='text/csv; charset=UTF-8',\n data=mock.ANY\n )\n\n data = bulk_request.call_args[1]['data']\n assert b''.join(data) == b'Id,Name\\r\\n1,2\\r\\n3,4\\r\\n'", "def batch_index(self, records_uuids, request_timeout=None):\n LOGGER.info(f\"Starting task `batch_index for {len(records_uuids)} records\")\n return InspireRecordIndexer().bulk_index(records_uuids, request_timeout)", "def SendPayload(s, payload):\n for line in payload:\n s.send(line)\n RecvAndSleep(s)", "def write_batch(self, batch):\n for item in batch:\n self.write_buffer.buffer(item)\n key = self.write_buffer.get_key_from_item(item)\n if self.write_buffer.should_write_buffer(key):\n self._write_current_buffer_for_group_key(key)\n self.increment_written_items()\n self._check_items_limit()", "def _send(self, batch):\n return self.agent.emitBatch(batch)", "def send_bulk_transactional_sms(self, phone_numbers, message):\n provider = self._pick_provider()\n return provider.send_bulk_transactional_sms(phone_numbers, message)", "def _flush_batch(self) -> None:\n batch_len = len(self._current_batch)\n if batch_len == 0:\n self.logger.debug('Nothing to flush.')\n return\n\n self.logger.debug(f'Flushing batch size {batch_len}')\n\n with self.LOCK:\n to_process_batch = list(self._current_batch)\n self._current_batch = list()\n\n log_event = EventFactory.create_log_event(to_process_batch, self.logger)\n\n self.notification_center.send_notifications(enums.NotificationTypes.LOG_EVENT, log_event)\n\n if log_event is None:\n self.logger.exception('Error 
dispatching event: Cannot dispatch None event.')\n return\n\n try:\n self.event_dispatcher.dispatch_event(log_event)\n except Exception as e:\n self.logger.error(f'Error dispatching event: {log_event} {e}')", "def send_all(messages: List[Message], smtp_url: str) -> None:\n with smtplib.SMTP(smtp_url) as smtp:\n for message in messages:\n smtp.send_message(message.as_mime())", "def lambda_handler(event, context):\n print('Received request')\n item = None\n\n mysql_host = '54.212.197.235'\n mysql_username = 'rts'\n mysql_password = 'SamWangRamsay520-S'\n mysql_dbname = 'rts_kinesis'\n mysql_tablename = 'benchmark_kinesis'\n\n print('Start connection')\n conn = mysql.connector.connect(host=mysql_host,\n user=mysql_username,\n passwd=mysql_password,\n db=mysql_dbname )\n print('End connection')\n '''Write the message to the mysql database'''\n cur = conn.cursor()\n\n #dynamo_db = boto3.resource('dynamodb')\n #table = dynamo_db.Table('benchmark_kinesis')\n _mysql_buffer = [] #ad-hoc message buffering for mysql, equivalent to dynamodb batch-write behavior\n _mysql_buffer_limit = 25\n records = [record for record in event['Records']]\n new_records = deaggregate_records(records)\n #decoded_record_data = [record['kinesis']['data'] for record in new_records]\n #deserialized_data = [decoded_record for decoded_record in records]\n #for data in decoded_record_data:\n for record in new_records:\n\t#d_record = \"%.15g\" % record['kinesis']['partitionKey']\n\t#con_time = \"%.15g\" % time.time()\n\tcreation_time = Decimal(record['kinesis']['partitionKey'])\n\tconsumer_time = Decimal(time.time())\n\tvalue = record['kinesis']['data']\n\t#cur.execute('INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)', (creation_time, consumer_time, value))\n sql = 'INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)'\n _mysql_buffer.append((creation_time, consumer_time, value))\n if len(_mysql_buffer) > _mysql_buffer_limit:\n cur.executemany(sql, _mysql_buffer)\n _mysql_buffer = []\n\t# Add a processed time so we have a rough idea how far behind we are\n #item['processed'] = datetime.datetime.utcnow().isoformat()\n\n conn.commit()\n conn.close()\n cur.close()\n # Print the last item to make it easy to see how we're doing\n #print(json.dumps(item))\n print('Number of records: {}'.format(str(len(new_records))))", "def send_messages(self, partition, *msg):\n if self.async:\n for m in msg:\n self.queue.put((partition, create_message(m)))\n resp = []\n else:\n messages = [create_message(m) for m in msg]\n req = ProduceRequest(self.topic, partition, messages)\n try:\n resp = self.client.send_produce_request([req], acks=self.req_acks,\n timeout=self.ack_timeout)\n except Exception as e:\n log.exception(\"Unable to send messages\")\n raise e\n return resp", "def _http_post(\n self, batched_event_list, validation_hit=False, postpone=False, date=None\n ):\n self._check_date_not_in_future(date)\n status_code = None # Default set to know if batch loop does not work and to bound status_code\n\n # set domain\n domain = self._base_domain\n if validation_hit is True:\n domain = self._validation_domain\n logger.info(f\"Sending POST to: {domain}\")\n\n # loop through events in batches of 25\n batch_number = 1\n for batch in batched_event_list:\n url = f\"{domain}?measurement_id={self.measurement_id}&api_secret={self.api_secret}\"\n request = {\"client_id\": self.client_id, \"events\": batch}\n self._add_user_props_to_hit(request)\n\n # make adjustments for postponed 
hit\n request[\"events\"] = (\n {\"name\": batch[\"name\"], \"params\": batch[\"params\"]}\n if (postpone)\n else batch\n )\n\n if date is not None:\n logger.info(f\"Setting event timestamp to: {date}\")\n assert (\n postpone is False\n ), \"Cannot send postponed historical hit, ensure postpone=False\"\n\n ts = self._datetime_to_timestamp(date)\n ts_micro = self._get_timestamp(ts)\n request[\"timestamp_micros\"] = int(ts_micro)\n logger.info(f\"Timestamp of request is: {request['timestamp_micros']}\")\n\n if postpone:\n # add timestamp to hit\n request[\"timestamp_micros\"] = batch[\"_timestamp_micros\"]\n\n req = urllib.request.Request(url)\n req.add_header(\"Content-Type\", \"application/json; charset=utf-8\")\n jsondata = json.dumps(request)\n json_data_as_bytes = jsondata.encode(\"utf-8\") # needs to be bytes\n req.add_header(\"Content-Length\", len(json_data_as_bytes))\n result = urllib.request.urlopen(req, json_data_as_bytes)\n\n status_code = result.status\n logger.info(f\"Batch Number: {batch_number}\")\n logger.info(f\"Status code: {status_code}\")\n batch_number += 1\n\n return status_code", "def _send_batch_predict_multi_request(\n input_data: [],\n data_type: str,\n sc: SeldonClient,\n retries: int,\n batch_id: str,\n payload_type: str,\n) -> [str]:\n\n indexes = [x[0] for x in input_data]\n\n seldon_puid = input_data[0][1]\n instance_ids = [f\"{seldon_puid}-item-{n}\" for n, _ in enumerate(input_data)]\n loaded_data = [json.loads(data[2]) for data in input_data]\n\n predict_kwargs = {}\n tags = {\n \"batch_id\": batch_id,\n }\n predict_kwargs[\"meta\"] = tags\n predict_kwargs[\"headers\"] = {SELDON_PUID_HEADER: seldon_puid}\n\n try:\n # Process raw input format\n if data_type == \"raw\":\n raw_data, payload_type, raw_input_tags = _extract_raw_data_multi_request(\n loaded_data, predict_kwargs[\"meta\"]\n )\n predict_kwargs[\"raw_data\"] = raw_data\n else:\n # Initialise concatenated array for data\n arrays = [np.array(arr) for arr in loaded_data]\n for arr in arrays:\n if arr.shape[0] != 1:\n raise ValueError(\n \"When using mini-batching each row should contain single instance.\"\n )\n concat = np.concatenate(arrays)\n predict_kwargs[\"data\"] = concat\n logger.debug(f\"calling sc.predict with {predict_kwargs}\")\n except Exception as e:\n error_resp = {\n \"status\": {\"info\": \"FAILURE\", \"reason\": str(e), \"status\": 1},\n \"meta\": tags,\n }\n logger.error(f\"Exception: {e}\")\n str_output = json.dumps(error_resp)\n return [str_output]\n\n try:\n for i in range(retries):\n try:\n seldon_payload = sc.predict(**predict_kwargs)\n assert seldon_payload.success\n response = seldon_payload.response\n break\n except (requests.exceptions.RequestException, AssertionError) as e:\n logger.error(\n f\"Exception: {e}, retries {i+1} / {retries} for batch_id(s)={indexes}\"\n )\n if i == (retries - 1):\n raise\n\n except Exception as e:\n output = []\n for batch_index, batch_instance_id in zip(indexes, instance_ids):\n error_resp = {\n \"status\": {\"info\": \"FAILURE\", \"reason\": str(e), \"status\": 1},\n \"meta\": dict(\n batch_index=batch_index, batch_instance_id=batch_instance_id, **tags\n ),\n }\n logger.error(f\"Exception: {e}\")\n output.append(json.dumps(error_resp))\n return output\n\n # Take the response create new responses for each request\n responses = []\n\n # If tensor then prepare the ndarray\n if payload_type == \"tensor\":\n tensor = np.array(response[\"data\"][\"tensor\"][\"values\"])\n shape = response[\"data\"][\"tensor\"][\"shape\"]\n tensor_ndarray = 
tensor.reshape(shape)\n\n for i in range(len(input_data)):\n try:\n new_response = copy.deepcopy(response)\n if data_type == \"raw\":\n # This is for tags from model to take priority (match BATCH_SIZE: 1 behaviour)\n new_response[\"meta\"][\"tags\"] = {\n **raw_input_tags[i],\n **new_response[\"meta\"][\"tags\"],\n }\n if payload_type == \"ndarray\":\n # Format new responses for each original prediction request\n new_response[\"data\"][\"ndarray\"] = [response[\"data\"][\"ndarray\"][i]]\n new_response[\"meta\"][\"tags\"][\"batch_index\"] = indexes[i]\n new_response[\"meta\"][\"tags\"][\"batch_instance_id\"] = instance_ids[i]\n\n responses.append(json.dumps(new_response))\n elif payload_type == \"tensor\":\n # Format new responses for each original prediction request\n new_response[\"data\"][\"tensor\"][\"shape\"][0] = 1\n new_response[\"data\"][\"tensor\"][\"values\"] = np.ndarray.tolist(\n tensor_ndarray[i]\n )\n new_response[\"meta\"][\"tags\"][\"batch_index\"] = indexes[i]\n new_response[\"meta\"][\"tags\"][\"batch_instance_id\"] = instance_ids[i]\n responses.append(json.dumps(new_response))\n else:\n raise RuntimeError(\n \"Only `ndarray` and `tensor` input are currently supported for batch size greater than 1.\"\n )\n except Exception as e:\n error_resp = {\n \"status\": {\"info\": \"FAILURE\", \"reason\": str(e), \"status\": 1},\n \"meta\": tags,\n }\n logger.error(\"Exception: %s\" % e)\n responses.append(json.dumps(error_resp))\n\n return responses", "def retrieve(self) -> Iterator[SQSMessage]:\n while True:\n try:\n sqs = SQSClientFactory(boto3).from_env()\n\n res = sqs.receive_message(\n QueueUrl=self.queue_url,\n WaitTimeSeconds=3,\n MaxNumberOfMessages=10,\n )\n\n messages = res.get(\"Messages\", [])\n if not messages:\n LOGGER.info(\"queue was empty\")\n\n s3_events = [SQSMessage(msg) for msg in messages]\n for sqs_message in s3_events:\n yield sqs_message\n\n sqs.delete_message(\n QueueUrl=self.queue_url,\n ReceiptHandle=sqs_message.receipt_handle,\n )\n\n except Exception as e:\n LOGGER.error(traceback.format_exc())\n time.sleep(2)", "def put_record(self, tag, json_str):\n a = 0\n while a < 2000:\n if a % 100 == 0 and a != 0:\n logger.info(\"A batch of 100 simple json records have been sent\")\n self.firehose_client.put_record(DeliveryStreamName=self.get_stream_name(tag),\n Record={\n 'Data': json_str\n }\n )\n a = a + 1\n logger.info(\"Records were placed successfully!!\")", "def _batch_insert(bq_client, table, rows):\n total_rows = len(rows)\n inserted_rows = 0\n batch = 1\n logger.info(\"Inserting %d rows into table %s\", total_rows,\n table.full_table_id)\n while inserted_rows < total_rows:\n start = (batch - 1) * MAX_BQ_INSERT_SIZE\n end = batch * MAX_BQ_INSERT_SIZE\n batch_rows = rows[start:end]\n inserted_rows += len(batch_rows)\n errors = _insert_rows(bq_client, table, batch_rows)\n if errors:\n print_bq_insert_errors(batch_rows, errors)\n logger.error(\n \"The program has been terminated due to BigQuery insertion \"\n \"errors.\")\n exit(1)\n else:\n logger.info(\"Batch %d: inserted rows %d to %d\", batch, start + 1,\n min(end, len(rows)))\n batch += 1\n logger.info(\"All rows inserted.\")", "def _chunk_send(self, metrics):\n messages = self._create_messages(metrics)\n request = self._create_request(messages)\n packet = self._create_packet(request)\n\n response = None\n\n for host_addr in self.zabbix_uri:\n logger.debug('Sending data to %s', host_addr)\n\n # create socket object\n connection_ = socket.socket()\n if self.socket_wrapper:\n connection = 
self.socket_wrapper(connection_)\n else:\n connection = connection_\n\n connection.settimeout(self.timeout)\n\n try:\n # server and port must be tuple\n connection.connect(host_addr)\n connection.sendall(packet)\n except socket.timeout:\n logger.error('Sending failed: Connection to %s timed out after %d seconds', host_addr, self.timeout)\n connection.close()\n continue\n except socket.error as err:\n # In case of error we should close connection, otherwise\n # we will close it after data will be received.\n logger.warning('Sending failed: %s', getattr(err, 'msg', str(err)))\n connection.close()\n continue\n\n try:\n response = self._get_response(connection)\n\n logger.debug('%s response: %s', host_addr, response)\n except socket.error as err:\n logger.error('Sending failed: %s', getattr(err, 'msg', str(err)))\n raise socket.error(response)\n\n break\n\n if response is None:\n logger.error('Sending failed: no servers available')\n raise socket.error()\n\n if response and (\"response\" not in response or response.get('response') != 'success'):\n logger.debug('Response error: %s}', response)\n raise socket.error(response)\n\n return response", "def send_mass_messages(self, recipient_list, sender, message=\"\", subject=\"\"):\n try:\n for s in recipient_list:\n self.send_message(to=s, sender=sender, message=message, subject=subject)\n except TypeError:\n return -1\n return 1", "def update_batch(self, *args, **kwargs):\n pass", "def _batch(self, batch_request_entries):\n necessary_keys = [\"id\", \"version\", \"method\", \"params\"]\n\n results = []\n\n for (idx, request) in enumerate(batch_request_entries):\n error = None\n result = None\n\n # assert presence of important details\n for necessary_key in necessary_keys:\n if not necessary_key in request.keys():\n raise FakeBitcoinProxyException(\"Missing necessary key {} for _batch request number {}\".format(necessary_key, idx))\n\n if isinstance(request[\"params\"], list):\n method = getattr(self, request[\"method\"])\n result = method(*request[\"params\"])\n else:\n # matches error message received through python-bitcoinrpc\n error = {\"message\": \"Params must be an array\", \"code\": -32600}\n\n results.append({\n \"error\": error,\n \"id\": request[\"id\"],\n \"result\": result,\n })\n\n return results", "def _batch_request(self, jobs):\n return generate_batch_request(jobs, self._batch_request_size)", "def enqueue(self, content_object, start, end=None, batch_content_object=None,\n extra_params=None, send_expired=True):\n enqueued = []\n for ab_test in ABTest.objects.filter(stream=self):\n if not ab_test.is_enabled:\n continue\n message = ab_test.random_message()\n send_time = message.send_time(start, end)\n if send_time:\n if send_time <= datetime.datetime.now():\n if send_expired:\n message.send(content_object,\n blacklisted_emails=message.blacklisted_emails(),\n extra_params=extra_params)\n else:\n if batch_content_object:\n enqueued.append(Queue.objects.create(message=message,\n content_object=content_object, send_time=send_time,\n batch_content_object=batch_content_object))\n else:\n enqueued.append(Queue.objects.create(message=message,\n content_object=content_object, send_time=send_time))\n return enqueued", "def _send(self, msg, adb_info):\n packed = msg.pack()\n _LOGGER.debug(\"bulk_write(%d): %r\", len(packed), packed)\n self._transport.bulk_write(packed, adb_info.transport_timeout_s)\n\n if msg.data:\n _LOGGER.debug(\"bulk_write(%d): %r\", len(msg.data), msg.data)\n self._transport.bulk_write(msg.data, 
adb_info.transport_timeout_s)", "def send_bulk_transactional_sms(self, phone_numbers, message):\n if not settings.CAN_SEND_SMS: # So that we do not send SMS while development\n return\n if not phone_numbers:\n logger.warning('No phone number received for meaasge: {0}'.format(message))\n raise MissingPhoneNumberException('No phone number received to send the SMS to')\n request_data = {\n 'From': self.exo_phone,\n \"To[]\": phone_numbers,\n 'Body': message\n }\n logger.info('Sending SMS to {0}. SMS content {1}'.format(phone_numbers, message))\n sms_response = requests.post(self.EXOTEL_SMS_URL.format(self.sid, self.token), data=request_data).json()\n logger.info(sms_response)\n for res in sms_response:\n if res.get('RestException'):\n logger.warn('SMS sending failed. Rsponse from exotel - {0}'.format(sms_response))\n elif res.get('SMSMessage') and res['SMSMessage']['Status'] \\\n not in self.EXOTEL_SUCCESS_STATUS_LIST:\n raise MessageSendingFailed('The service provider failed to send the SMS')", "def update_all_queues(batchserver_name):\n server,created = getBatchServer(batchserver_name)\n if server.queues_lastupdate and (datetime.datetime.now()-server.queues_lastupdate).total_seconds()<GlobalConfiguration.objects.get(pk=1).max_lastupdate:\n logging.debug(\"Queue info is new enough for server: %s\" % batchserver_name)\n return\n\n conn = pbs.pbs_connect(batchserver_name.encode('iso-8859-1', 'replace'))\n if conn==-1:\n logging.error(\"Cannot connect to %s - live data will be missing\" % server.name)\n return\n statqueues = pbs.pbs_statque(conn, \"\" , [], \"\")\n pbs.pbs_disconnect(conn)\n if conn==-1:\n logging.error(\"Cannot connect to %s - live data will be missing\" % server.name)\n return\n \n for sq in statqueues:\n queue,created = getQueue(sq.name, server)\n attr_dict = dict([ (x.name,x.value) for x in sq.attribs])\n update_one_queue_from_pbs_data(queue, attr_dict)\n queue.save()\n server.queues_lastupdate = datetime.datetime.now()\n server.save()", "def bulk_update(self, request):\n serializer = MasterySerializer(\n data=request.data,\n many=True,\n )\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def _send_multiple(self, what, values, address):\n\n print('_send_multiple: please override me.')", "def bulk_index(data):\n\n def bulk_api_string(item):\n return f\"{{\\\"index\\\":{{}}\\n{json.dumps(item)}\"\n\n body = '\\n'.join([bulk_api_string(item) for item in data]) + '\\n'\n\n return make_request(\n requests.post,\n url=f\"{connection.hostname}:{connection.port}/{connection.index}/_bulk\",\n headers={'Content-Type': 'application/json'},\n auth=auth,\n data=body\n )", "def sqs_messages(queue: str) -> Generator[Dict[str, Any], None, None]:\n\n while True:\n response = get_client(\"sqs\").receive_message(QueueUrl=queue)\n if \"Messages\" not in response:\n break\n msg = json.loads(response[\"Messages\"][0][\"Body\"])\n records = json.loads(msg[\"Message\"])\n retd = {}\n retd[\"key\"] = records[\"Records\"][0][\"s3\"][\"object\"][\"key\"]\n retd[\"bucket\"] = records[\"Records\"][0][\"s3\"][\"bucket\"][\"name\"]\n retd[\"ReceiptHandle\"] = response[\"Messages\"][0][\"ReceiptHandle\"]\n yield retd", "def _send_messages(number_range, partition=0, topic=topic, producer=kafka_producer, request=request):\n messages_and_futures = [] # [(message, produce_future),]\n for i in number_range:\n # request.node.name provides the test name (including parametrized values)\n 
encoded_msg = '{}-{}-{}'.format(i, request.node.name, uuid.uuid4()).encode('utf-8')\n future = kafka_producer.send(topic, value=encoded_msg, partition=partition)\n messages_and_futures.append((encoded_msg, future))\n kafka_producer.flush()\n for (msg, f) in messages_and_futures:\n assert f.succeeded()\n return [msg for (msg, f) in messages_and_futures]", "def batch_submit_messages(self, nmessages, submission_timespan,\n alwaysdrop_rate) -> None:\n # pylint: disable=too-many-locals\n assert self.rcomm\n assert self.chan\n assert alwaysdrop_rate <= 1\n assert submission_timespan >= 0\n\n _LOGGER.info(\"SUBMIT Beginning to submit %d messages\", nmessages)\n\n\n def delayed_send(msgtype, msgbody):\n def really_send():\n self.invoker.invoke(self.chan.send_message, msgtype, msgbody)\n delay = self.rand.random() * submission_timespan # seconds\n _TRACE(\"SCHEDULING_SEND delay: %f\", delay)\n self.scheduler.schedule(delay, really_send)\n\n # Send messages at random times\n for _ in range(nmessages):\n id_ = self.nextid.next()\n alwaysdrop = self.rand.random() < alwaysdrop_rate\n mrec = new_message_record(id_, alwaysdrop)\n self.msgmap.set(id_, mrec)\n if alwaysdrop:\n self.droppedmsgs.appendleft(mrec)\n delayed_send(mrec.msgtype, mrec.msgbody)\n\n def receive_all():\n # Pick up all messages received so far and verify them...\n def process():\n for rmsg in _utils.getsome(self.chan.poll_received_message):\n self.process_received_message(rmsg)\n self.invoker.invoke(process)\n\n # Poll for received messages at random times\n recv_attempts = max(nmessages // 100, 10)\n max_receivedelay = submission_timespan + self.transport.get_maxdelay()\n for i in range(recv_attempts):\n delay = self.rand.random() * submission_timespan\n # Special case of i == 0 - we schedule our final receive attempt to\n # be AFTER the last packet is actually sent by the transport (after\n # a potential delay)\n extra_delay = 1 # TODO: need to compute this more carefully\n if i == 0:\n delay = max_receivedelay + extra_delay\n self.scheduler.schedule(delay, receive_all)", "def queue_enqueue(queue, targets):\n\n enqueued = []\n for target in map(lambda x: x.strip(), targets):\n if target != '':\n enqueued.append({'target': target, 'queue_id': queue.id})\n if enqueued:\n db.session.execute(Target.__table__.insert().values(enqueued))\n db.session.commit()", "def toQueue(data):\n\n for host in settings.OTHER_HOSTS:\n settings.SENDER[host['id']].queue.put(dict(**data))", "def _send_multiple(self, what, values, address, **kwargs):\n\n tag_string = ''\n tag_string = EnipProtocol._tuple_to_cpppo_tag_multiple(what, values)\n\n cmd = shlex.split(\n self._client_cmd +\n '--log ' + self._client_log +\n '--address ' + address +\n ' ' + tag_string\n )\n\n try:\n client = subprocess.Popen(cmd, shell=False)\n client.wait()\n\n except Exception as error:\n print('ERROR enip _send multiple: '), error", "def upload_bulk_sms_file(batch_id, file_path):\n batch = Batch.objects.get(id=batch_id)\n batch.add_messages(read_messages_from_file(file_path))\n batch.status = Batch.PENDING\n batch.save()", "def send_messages(self, bot, update, messages):\n\n for msg in messages:\n self.send_message(bot, update, msg)" ]
[ "0.6739715", "0.66852796", "0.6401511", "0.63491935", "0.6338009", "0.63261616", "0.63134557", "0.63052964", "0.6195632", "0.6183819", "0.6173889", "0.61398053", "0.6103908", "0.6066584", "0.6057908", "0.60467565", "0.6021644", "0.6018628", "0.5985069", "0.5927403", "0.5885165", "0.5869879", "0.5855847", "0.5767411", "0.5758047", "0.57554823", "0.57550955", "0.57363623", "0.5708484", "0.5701976", "0.57012355", "0.5684697", "0.565922", "0.5657735", "0.56512046", "0.5645165", "0.5641704", "0.5632202", "0.56140053", "0.5613709", "0.5607816", "0.5598084", "0.557629", "0.55746514", "0.5565334", "0.5563441", "0.5559816", "0.5547086", "0.5546746", "0.5543068", "0.55396235", "0.553209", "0.5529634", "0.55139416", "0.55101085", "0.55065876", "0.54924697", "0.549165", "0.5490277", "0.54796547", "0.5476211", "0.5476134", "0.54742336", "0.5473384", "0.5471772", "0.5470549", "0.5470405", "0.54673", "0.5465479", "0.5458125", "0.5431261", "0.54302937", "0.5423882", "0.5401668", "0.5392642", "0.5391397", "0.53897715", "0.5362518", "0.53449893", "0.5343151", "0.5324251", "0.5317391", "0.53140104", "0.5300177", "0.52954084", "0.5290528", "0.52901185", "0.52874804", "0.5285043", "0.5281696", "0.52802306", "0.5267148", "0.5264967", "0.5264702", "0.52582806", "0.5257433", "0.52489144", "0.52485824", "0.5247174", "0.5241101" ]
0.7458315
0
Method to follow another user, that is, to create a unidirectional link from one user to the other.
def follow(self, user_index, following_index): if user_index >= self.num_users or following_index >= self.num_users: raise ValueError( f"Number of users is {self.num_users}, but indices " f"{user_index} and {following_index} were requested." ) if self.users_hat[following_index, user_index] == 0: self.users_hat[following_index, user_index] = 1 elif self.is_verbose(): self.log(f"User {following_index} was already following user {user_index}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def follow_user(cls, user, following):\r\n pass", "def follow_user(cls, user, following):\n pass", "def follow(self, follower, followee):\n pass", "def follow(self, other):\n\t\tif not self.follows(other):\n\t\t\tself.followed.append(other)", "def follow(request, usertofollow):\n to_follow = Member.objects.get(user__username=usertofollow)\n user = Member.objects.get(user=request.user)\n user.following.add(to_follow)\n user.save()\n return redirect(request.META['HTTP_REFERER'])", "def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n return self", "def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n return self", "def follow(self, followerId, followeeId):\n\n # 把 followeeId append到他的 follow 属性中\n if followerId == followeeId: # 不能自己关注自己\n return\n # 实例化一个user(followerID)\n follower = UserInfo()\n follower.user_id = followerId \n follower.follows.append(followeeId) \n self.user_pool[followerId] = follower", "def follow(source_id, destination_id):\n if source_id == destination_id:\n return \"You can't follow yourself!\"\n\n Forward.objects.get_or_create(source_id=source_id,\n destination_id=destination_id)\n Backward.objects.get_or_create(destination_id=destination_id,\n source_id=source_id)", "def follow_user(username):\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n db = get_db()\n db.execute('insert into follower (who_id, whom_id) values (?, ?)',\n [session['user_id'], whom_id])\n db.commit()\n flash('You are now following \"%s\"' % username)\n return redirect(url_for('user_timeline', username=username))", "def follow(self, followerId: int, followeeId: int) -> None:\n if followerId == followeeId:\n return\n if followerId not in self.users.keys():\n self.users[followerId] = user()\n if followeeId not in self.users.keys():\n self.users[followeeId] = user()\n self.users[followerId].followees[followeeId] = self.users[followeeId]", "def follow(self, user):\n if not self.is_following(user):\n f = Follow(follower=self, followed=user)\n db.session.add(f)", "def follow_user(self, user):\n self.nav_user(user)\n follow_button = self.driver.find_element_by_xpath(\n \"//button[contains(text(), 'Follow')]\")\n follow_button.click()\n time.sleep(1)\n self.driver.get(self.base_url)", "def follow(self, followerId, followeeId):\r\n if followerId != followeeId:\r\n self.follows[followerId].add(followeeId)", "def author_following(self):\n\t\tpass", "def follow(self, followerId: int, followeeId: int) -> None:\n if followerId == followeeId: return\n self.users[followerId].add(followeeId)", "def followUser(following):\n\n cur, user_id, con = initialise(3, True)\n cur.execute(\"INSERT INTO followers (user, following) VALUES ((SELECT username FROM users WHERE id = ?), ?)\", (user_id, following))\n finish(con)", "async def follow(self, TargetId: int):\n data = {\n 'targetUserId': TargetId\n }\n e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/follow', method='post',\n data=data)\n return e", "def follow(self, followerId: int, followeeId: int) -> None:\n self.user_followed[followerId].append(followeeId)", "def remote_follow(request):\n remote_user = request.POST.get(\"remote_user\")\n try:\n if remote_user[0] == \"@\":\n remote_user = remote_user[1:]\n remote_domain = remote_user.split(\"@\")[1]\n except (TypeError, IndexError):\n remote_domain = None\n\n wf_response = subscribe_remote_webfinger(remote_user)\n user = get_object_or_404(models.User, 
id=request.POST.get(\"user\"))\n\n if wf_response is None:\n data = {\n \"account\": remote_user,\n \"user\": user,\n \"error\": \"not_supported\",\n \"remote_domain\": remote_domain,\n }\n return TemplateResponse(request, \"ostatus/subscribe.html\", data)\n\n if isinstance(wf_response, WebFingerError):\n data = {\n \"account\": remote_user,\n \"user\": user,\n \"error\": str(wf_response),\n \"remote_domain\": remote_domain,\n }\n return TemplateResponse(request, \"ostatus/subscribe.html\", data)\n\n url = wf_response.replace(\"{uri}\", urllib.parse.quote(user.remote_id))\n return redirect(url)", "def add_follow(follow_id):\n\n want_to_follow_user = User.query.get_or_404(follow_id)\n if want_to_follow_user.private:\n # =========== NEED TO IMPLEMENT ====================\n # send them a request to follow\n want_to_follow_user.from_users.append(g.user) \n db.session.commit()\n flash(\"Your request has been sent\", \"success\")\n return redirect(f\"/users/{g.user.id}/following\")\n\n g.user.following.append(want_to_follow_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")", "def follow_user(username):\n user_ID = before_request()\n user_ID = None\n if user_ID != None:\n user_ID = str(g.user['_id'])\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n mongo.db.users.update({'_id': g.user['_id']}, {\n '$push': {'follows': whom_id}})\n flash('You are now following \"%s\"' % username)\n if redis_obj.get(user_ID):\n return redirect(url_for('user_timeline', username=username, userId=pickle.loads(r.get(user_ID))))\n else:\n redis_obj.delete(str(g.user['_id']))\n print \"Invalidating cache after Follow\"\n return redirect(url_for('user_timeline', username=username))", "def follow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.users:\n self._create_user(followeeId)\n if followerId not in self.users:\n self._create_user(followerId)\n self.users[followerId].add(followeeId)", "def follow(self, followerId, followeeId):\n if followerId in self.follows:\n self.follows[followerId].add(followeeId)\n else:\n self.follows[followerId] = set([followeeId])", "def follow_by_id(self, uid: int) -> None:\n self.api.follow(uid)", "def post(self, request, username):\n\n # Retrieve the user from the user table if the user exists\n try:\n user_details = User.objects.get(username=username)\n current_user = request.user\n # If a user is trying to follow themselves then stop the request\n if user_details.profile.id == current_user.profile.id:\n return Response(\n {\"errors\": FOLLOW_USER_MSGS['CANNOT_FOLLOW_SELF']},\n status=status.HTTP_400_BAD_REQUEST\n )\n # Otherwise follow the author the current user has indicated\n current_user.profile.follows.add(user_details.profile)\n\n # notify user of new follower\n send_notifications(request,\n notification_type=\"user_followed\",\n instance=current_user,\n recipients=[user_details])\n\n # Get the following & followers username list\n # And the following & followers count for the current user\n user_following_data = get_user_following_data(current_user)\n return Response(\n {\n \"message\": FOLLOW_USER_MSGS['USER_FOLLOW_SUCCESSFUL'],\n \"following\": user_following_data[\"following\"],\n \"followers\": user_following_data[\"followers\"],\n \"followingCount\": user_following_data[\"followingCount\"],\n \"followersCount\": user_following_data[\"followersCount\"]\n },\n status=status.HTTP_201_CREATED\n )\n except User.DoesNotExist:\n return Response(\n {\"errors\": 
FOLLOW_USER_MSGS['USER_NOT_FOUND']},\n status=status.HTTP_404_NOT_FOUND\n )", "async def follow(follow):\n await follow.edit(\n f\"`FOLLOW {DEFAULTUSER} ON` \\n\\n\"\n f\"[InstaGram](https://www.instagram.com/mayur_karaniya) \\n\\n\"\n f\"[FaceBook](https://www.facebook.com/mkaraniya) \\n\\n\"\n f\"[YouTube](https://www.youtube.com/channel/UCeKQxQK7XZ3jGi3541uWATg?sub_confirmation=1) \"\n )", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followerId not in self.users.keys() or followeeId not in self.users.keys():\n return\n if followeeId not in self.users[followerId].followees.keys():\n return\n self.users[followerId].followees.pop(followeeId)\n\n\n\n # Your Twitter object will be instantiated and called as such:\n # obj = Twitter()\n # obj.postTweet(userId,tweetId)\n # param_2 = obj.getNewsFeed(userId)\n # obj.follow(followerId,followeeId)\n # obj.unfollow(followerId,followeeId)", "def user_follow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.append(following)\n db.session.commit()\n return {'followed': True}", "def follow_someone(screen_name):\n twitter.create_friendship(screen_name=screen_name)", "def follow(whomUserName,whoUserName):\n\n whomuser = query_db('select * from user where username = ?',\n [whomUserName], one=True)\n whouser = query_db('select * from user where username = ?',\n [whoUserName], one=True)\n\n\n followed = query_db('''select 1 from follower where\n follower.who_id = ? and follower.whom_id = ?''',\n [whouser['user_id'], whomuser['user_id']],one=True) is not None\n\n if whouser is None:\n return jsonify({'message':'User trying to follow another user which does not exist'}),404\n\n if whomuser is None:\n return jsonify({'message':'User getting followed does not exist yet'}),404\n\n if not followed:\n db = get_db()\n\n db.execute('''insert into follower (\n who_id, whom_id) values (?, ?)''',\n [whouser['user_id'], whomuser['user_id']])\n db.commit()\n flash('Operation successful')\n return jsonify({'message': 'Successfully following'}), 201\n else:\n return jsonify({'message':'Specified user is already following another user'}),403", "def set_follow(self, follow):\n self.follow = follow", "def follow_user(self, target):\n try:\n if self.api.me().friends_count > 1990:\n return\n except Exception, e:\n print e\n\n \"Rate limit exceeded. 
Clients may not make more than 350 requests per hour.\"\n if \"Clients\" in str(e):\n continue\n # import pdb; pdb.set_trace()\n return\n\n try:\n self.api.create_friendship(target.hunted.screen_name)\n self.log.debug(\"Followed: %s\" % target.hunted.screen_name)\n except Exception, e:\n self.log.exception(\"Could not follow %s\" %\n target.hunted.screen_name)\n else:\n # Write record of new follow to db\n target.status = Target.PURGATORY\n target.save()", "def test_following_existing_user(self):\n response = self.client.post(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def follow(self, followerId: int, followeeId: int) -> None:\n self.followees[followerId].add(followeeId)", "def follow(request):\n username = request.POST[\"user\"]\n to_follow = get_user_from_username(request.user, username)\n clear_cache(request.user, to_follow)\n\n follow_request, created = models.UserFollowRequest.objects.get_or_create(\n user_subject=request.user,\n user_object=to_follow,\n )\n\n if not created:\n # this request probably failed to connect with the remote\n # that means we should save to trigger a re-broadcast\n follow_request.save()\n\n if is_api_request(request):\n return HttpResponse()\n return redirect(to_follow.local_path)", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.users:\n self._create_user(followeeId)\n if followerId not in self.users:\n self._create_user(followerId)\n if followerId != followeeId and followeeId in self.users[followerId]:\n self.users[followerId].remove(followeeId)", "def follow(self, followerId: 'int', followeeId: 'int') -> 'None':\n self.followees[followerId].add(followeeId)", "def follow_reciprocated(self, target):\n if random.randint(1, 1000) == 1: # 1 in 20 are public @replies\n self.tweet_user(target)\n else:\n try:\n self.dm_user(target)\n except:\n pass", "def follow_user(self:'InstaClient', user:str, nav_to_user:bool=True):\n # Check User Vadility\n profile = self.get_profile(user)\n if not profile:\n raise InvalidUserError(user)\n\n # Navigate to User Page\n self._nav_user(user, check_user=False)\n \n if self._check_existence(EC.presence_of_element_located((By.XPATH, Paths.MESSAGE_USER_BTN))):\n # User already followed\n pass\n else:\n follow_button = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.FOLLOW_BTN)), url=ClientUrls.NAV_USER.format(user))\n self._press_button(follow_button)\n profile.requested_by_viewer = True\n return profile", "def unfollow(self, other):\n\t\tif self.follows(other):\n\t\t\tself.followed.remove(other)", "def follow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.followList.get(followerId, [followerId]):\n self.followList[followerId] = self.followList.get(followerId, [followerId]) + [followeeId]\n # print(self.followList)", "def unfollow(request, usertostopfollow):\n stop_follow = Member.objects.get(user__username=usertostopfollow)\n user = Member.objects.get(user=request.user)\n user.following.remove(stop_follow)\n user.save()\n return redirect(request.META['HTTP_REFERER'])", "def user_follow_users(self, ids=None, **kwargs):\n return self._put(\n API.MY_FOLLOWING.value, type=\"user\", ids=\",\".join(ids or []), **kwargs\n )", "def follow(self, followerId: int, followeeId: int) -> None:\n self.follow_map[followerId].add(followeeId)", "def unsafe_follow_by_username(self, username: str) -> None:\n uid = 
self.username_to_id(username)\n self.api.follow(uid)", "def auto_follow_followers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n not_following_back = followers - following\n\n for user_id in not_following_back:\n try:\n t.friendships.create(user_id=user_id, follow=False)\n except Exception as e:\n print(\"error: %s\" % (str(e)))", "def unfollow_user(username):\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n db = get_db()\n db.execute('delete from follower where who_id=? and whom_id=?',\n [session['user_id'], whom_id])\n db.commit()\n flash('You are no longer following \"%s\"' % username)\n return redirect(url_for('user_timeline', username=username))", "def new_unfollow(self, user_id, user_name):\n url_unfollow = self.url_unfollow % (user_id)\n try:\n unfollow = self._send_post_request(url_unfollow)\n if unfollow.status_code == 200:\n self.unfollow_counter += 1\n log_string = \"Unfollow: %s #%i.\" % (user_name,\n self.unfollow_counter)\n self.log.debug(log_string)\n return unfollow\n except:\n self.log.debug(\"Exept on unfollow!\")\n return False", "def add_following(self, user_id):\n sleep(360) # too much follows => function ban\n self.following.append(user_id)\n return perform_with_ran_delay(self.instagram.follow, user_id)", "def add_to_following(sender, instance, created, **kwargs):\r\n sender_= instance.sender\r\n receiver_ = instance.receiver\r\n if instance.status == 'accepted':\r\n sender_.following.add(receiver_.user)", "def follow_project(cls, user, project):\r\n pass", "def unfollow(self, user):\n if self.is_following(user):\n self.followed.remove(user)\n return self", "def follow(self, followerId, followeeId):\n if followerId not in self.follow_map:\n self.follow_map[followerId] = set()\n \n self.follow_map[followerId].add(followeeId)", "def follow(current_user,user_id):\n if request.method == \"POST\":\n #follee = request.get_json('user_id')\n if User.query.filter_by(userid= user_id):\n follow = Follows(userid =user_id, follower_id =current_user.userid)\n db.session.add(follow)\n db.session.commit()\n return jsonify({'message' :'You are now following'})\n return jsonify({'message' :'User doesnt exist..Try again'})\n return jsonify({'errors' : 'Method Invalid'})", "def unfollow(self, followerId: int, followeeId: int) -> None:\n following = self.user_followed[followerId]\n if followeeId in following:\n following.remove(followeeId)\n self.user_followed[followerId] = following", "def follow_project(cls, user, project):\n pass", "def follow(request):\n user_id = request.POST.get('user_id')\n action = request.POST.get('action')\n if user_id and action:\n try:\n user = User.objects.get(id=user_id)\n if request.user == user:\n return JsonResponse({'status':0,'msg':'You can\\'t follow yourself'})\n if action == 'follow':\n if request.user.is_following(user):\n return JsonResponse({'status':0,'msg':'already following'})\n else:\n request.user.follow(user)\n return JsonResponse({'status':1,'msg':'followed'})\n else:\n if request.user.is_following(user):\n request.user.unfollow(user)\n return JsonResponse({'status':1,'msg':'un-followed'})\n else:\n return JsonResponse({'status':0,'msg':'not following'})\n except User.DoesNotExist:\n return JsonResponse({'status':0,'msg':'user not found'})\n return JsonResponse({'status':0})", "def add_follow(follow_id):\n followed_user = User.query.get_or_404(follow_id)\n if not g.user or g.user.id == 
follow_id or followed_user.is_blocking(g.user):\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n g.user.following.append(followed_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")", "def follow(user, people):\n api = get_api(user)\n current_screen_name = api.VerifyCredentials().GetScreenName()\n\n # don't let a user follow themselves\n screen_names = [person.twitter_screen_name for person in people]\n if current_screen_name in screen_names: screen_names.remove(current_screen_name)\n\n followed = []\n not_followed = []\n\n for screen_name in screen_names:\n try:\n api.CreateFriendship(screen_name=screen_name)\n followed.append(screen_name)\n except twitter.TwitterError:\n not_followed.append(screen_name)\n\n return 'followed %s people' % len(followed)", "async def twitter_follow(self, ctx, handle):\n discord_channel = ctx.message.channel\n\n # Check for required permissions\n if not discord_channel.permissions_for(discord_channel.server.me).embed_links:\n raise TwitterError('\\N{WARNING SIGN} The `Embed Links` permission in this channel is required to display tweets properly. \\N{WARNING SIGN}')\n\n sane_handle = handle.lower().lstrip('@')\n conf = dutils.get(self.conf.follows, screen_name=sane_handle)\n if conf is None:\n # New Twitter channel, retrieve the user info\n partial = functools.partial(self.api.get_user, screen_name=sane_handle)\n try:\n user = await self.bot.loop.run_in_executor(None, partial)\n except tweepy.TweepError as e:\n if e.api_code == 50:\n raise TwitterError('User \"{}\" not found.'.format(handle)) from e\n else:\n log.error(str(e))\n raise TwitterError('Unknown error, this has been logged.') from e\n\n # The Twitter API does not support following protected users\n # https://dev.twitter.com/streaming/overview/request-parameters#follow\n if user.protected:\n raise TwitterError('This channel is protected and cannot be followed.')\n\n # Register the new channel\n conf = FollowConfig(user.id_str, user.screen_name)\n self.conf.follows.append(conf)\n\n try:\n # Restart the stream\n await self.stream.start()\n except tweepy.TweepError as e:\n self.conf.follows.remove(conf)\n log.error(str(e))\n raise TwitterError('Unknown error, this has been logged.') from e\n elif dutils.get(conf.discord_channels, id=discord_channel.id):\n raise TwitterError('Already following {} on this channel.'.format(handle))\n\n # Add new Discord channel\n conf.discord_channels.append(ChannelConfig(discord_channel.id))\n self.conf.save()\n await self.bot.say('\\N{OK HAND SIGN}')", "async def link(self, ctx):\n if not is_linked(ctx.author.id):\n token = str(uuid.uuid4())\n valid_until = int((datetime.utcnow() + timedelta(days=1)).timestamp())\n add_token(ctx.author.display_name, ctx.author.id, token, valid_until, str(ctx.author.avatar_url))\n web_base_url = get_setting('web_base_url')\n await ctx.author.send(f\"Please visit {web_base_url}/link/{token} to link your Spotify account. 
\"\n f\"This link will expire after 24 hours.\")\n if ctx.guild is not None:\n await ctx.message.add_reaction('📬')\n else:\n await ctx.reply(\"You have already linked a spotify account!\")", "def post(self, request, username):\n user_exists = User.objects.filter(username=username).exists()\n if not user_exists:\n return Response(\n {'error': 'user with that name was not found'},\n status.HTTP_404_NOT_FOUND)\n # we check if the user is already followed\n followed_user = User.objects.get(username=username)\n already_followed = Follow.is_user_already_followed(\n followed_user_id=followed_user.id,\n user_id=self.request.user.id\n )\n if already_followed:\n return Response({'error': 'user already followed'},\n status.HTTP_400_BAD_REQUEST)\n if followed_user.id == self.request.user.id:\n return Response({'error': \"you cannot follow yourself.\"},\n status.HTTP_400_BAD_REQUEST)\n data = {\n \"followed_user\": followed_user.id,\n \"user\": self.request.user.id}\n serializer = FollowSerializer(data=data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response({'message': 'user followed successfully'},\n status.HTTP_201_CREATED)", "def userFollowers(nick):\n if (len(nick) != 1):\n print \"Has d'introduir només un nick\"\n return\n i.userFollow(nick[0])", "def save_model(self, request, obj, form, change):\n From = User.objects.get(id=obj.From.id)\n To = User.objects.get(id=obj.To.id)\n From.following_numIn()\n To.followed_numIn()\n obj.save()", "def user_follow(self, user_id: int) -> bool:\n assert self.user_id, \"Login required\"\n user_id = int(user_id)\n if user_id in self._users_following.get(self.user_id, []):\n self.logger.debug(\"User %s already followed\", user_id)\n return False\n data = self.with_action_data({\"user_id\": user_id})\n result = self.private_request(f\"friendships/create/{user_id}/\", data)\n if self.user_id in self._users_following:\n self._users_following.pop(self.user_id) # reset\n return result[\"friendship_status\"][\"following\"] is True", "def unfollow(self, user_index, following_index):\n if user_index >= self.num_users or following_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user_index} and {following_index} were requested.\"\n )\n if self.users_hat[following_index, user_index] == 1:\n self.users_hat[following_index, user_index] = 0\n elif self.is_verbose():\n self.log(f\"User {following_index} was not following user {user_index}\")", "def player_simple2(user, user2):\n link = reverse('wouso.interface.profile.views.user_profile', args=(user.id,))\n\n if user == user2:\n name = _('You')\n else:\n name = unicode(user)\n\n return u'<a href=\"%s\">%s</a>' % (link, name)", "def follow(self, followerId: int, followeeId: int) -> None:\n # Time Complexity: O(1)\n if followerId != followeeId:\n if followerId not in self.followees:\n self.followees[followerId] = set()\n\n self.followees[followerId].add(followeeId)", "def unfollow(self, followerId: int, followeeId: int) -> None:\n self.users[followerId].discard(followeeId)", "def reply_this(self, user, text):\n parent = self.get_parent()\n reply_news = News.objects.create(\n user=user, content=text, reply=True, parent=parent\n )\n notification_handler(\n user,\n parent.user,\n Notification.REPLY,\n action_object=reply_news,\n id_value=str(parent.uuid_id),\n key=\"social_update\",\n )", "def follow_route(request):\n\n db_conn = request['db_conn']\n current_user = get_current_user(request)\n if not current_user:\n return abort(401)\n 
follow_data = dict(**request['params'])\n follow_data['user_id'] = current_user['id']\n follow, errors = insert_follow(follow_data, db_conn)\n if errors:\n return 400, {\n 'errors': errors,\n 'ref': '4Qn9oWVWiGKvXSONQKHSy1T6'\n }\n return 200, {'follow': deliver_follow(follow, access='private')}", "def remote_follow_page(request):\n user = get_user_from_username(request.user, request.GET.get(\"user\"))\n data = {\"user\": user}\n return TemplateResponse(request, \"ostatus/remote_follow.html\", data)", "def follow(self, request, pk):\n logged_in_photographer = get_object_or_404(Photographer,\n user=request.user)\n photographer_to_follow = get_object_or_404(Photographer, id=pk)\n logged_in_photographer.follow_handshake(photographer_to_follow)\n return Response({'status': status.HTTP_200_OK})", "def test_user_already_followed(self):\n self.authorize_user(self.user)\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.register_user(self.user1)\n self.client.post(self.follow_url, format='json')\n response2 = self.client.post(self.follow_url, format='json')\n self.assertEqual(response2.content,\n b'{\"detail\": {\"error\": \"user already followed\"}}')\n self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)", "def unfollowUser(following):\n \n cur, user_id, con = initialise(3, True)\n cur.execute(\"DELETE FROM followers WHERE user = (SELECT username FROM users WHERE id = ?) AND following = ?\", (user_id, following))\n finish(con)", "def post(self, request, *args, **kwargs):\n user_articles = []\n\n following_username = token_payload(request)\n\n followed_username = self.kwargs.get(self.look_url_kwarg)\n try:\n followed_user = User.objects.get(username=followed_username)\n\n serializer = self.serializer_class(\n data={\n \"following_username\": following_username,\n \"followed_username\": followed_username\n })\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n # bio of followed_user\n followed_user_id = followed_user.id\n followed_user_profile = Profile.objects.get(\n profile_user=followed_user_id)\n serializer_profile = self.serializer_classprofile(\n followed_user_profile)\n\n # articles of followed user\n followed_user_articles = Article.objects.filter(\n author=followed_user_id)\n\n for article in followed_user_articles:\n serializer_article = self.serializer_classarticle(\n article) # pragma: no cover\n\n return Response({\n \"following_user\": following_username,\n \"followed_user\": serializer_profile.data,\n \"followed_articles\": user_articles\n },\n status=status.HTTP_201_CREATED)\n except User.DoesNotExist:\n raise APIException({\n 'error': 'User does not exist'\n })", "def follow(user, actor, send_action=True):\n follow,created = Follow.objects.get_or_create(user=user, object_id=actor.pk,\n content_type=ContentType.objects.get_for_model(actor))\n if send_action and created:\n action.send(user, verb=_('started following'), target=actor)\n return follow", "def follow_friend():\n print \"followuser\"\n username = request.args.get('username')\n print \"JSON Data\", username\n # username= req_data[username]\n whom_id = get_user_id(username)\n print \"whom_id:\", whom_id\n if whom_id is None:\n abort(404)\n follow_query(whom_id)\n flash('You are now following \"%s\"' % username)\n name = {'name of following user': username}\n R_SERVER.delete(user_timeline_key)\n return jsonify(Username=name, Status_code=status.HTTP_200_OK)", "def set_follower(self, follower):\n self.follower = follower", "def follow(username):\n 
follow_form = FollowForm()\n unfollow_form = UnfollowForm()\n\n if follow_form.validate_on_submit():\n try:\n current_user.follow(username)\n flash('Followed {}'.format(username))\n except ValueError as excep:\n flash(str(excep))\n return render_template('user_history.html',\n username=username,\n follow_form=follow_form,\n unfollow_form=unfollow_form)\n\n return render_template('user_history.html',\n username=username,\n follow_form=follow_form,\n unfollow_form=unfollow_form)", "def test_is_following(self):\n\n self.u1.following.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_following(self.u2))\n self.assertFalse(self.u2.is_following(self.u1))", "def link(ctx, note1, note2):\n directory = ctx.obj[\"config\"][\"owner\"][\"dir\"]\n\n note1, note2 = Note(directory, note1), Note(directory, note2)\n\n if note1.filename == note2.filename:\n Utils.display_error(\n \"Cannot create a link between a note and itself.\", \"yellow\")\n\n with open(note1.path, \"a\") as file:\n file.write(\"[{}]({})\\n\".format(note2.filename, note2.filename))\n\n with open(note2.path, \"a\") as file:\n file.write(\"[{}]({})\\n\".format(note1.filename, note1.filename))\n\n click.secho(\"Success! {} <-> {}\".format(note1.filename,\n note2.filename), fg=\"green\")", "def test_is_followed_by(self):\n\n self.u1.followers.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_followed_by(self.u2))\n self.assertFalse(self.u2.is_followed_by(self.u1))", "def make_link(self, first_id, second_id):\n self.task_controller.make_link(first_id, second_id)", "def checkUsers(u1, u2):\n if type(u1) == int:\n try:\n user1 = mt.User.objects.filter(Id = u1).get()\n except:\n\n return None\n else:\n user1 = u1\n try:\n user2 = mt.User.objects.filter(Id = u2).get()\n except:\n return user1\n mt.follows(\n followee= user1,\n follower= user2\n ).save()\n return user1", "def show_following(user_id):\n\n\n user = User.query.get_or_404(user_id)\n return render_template('users/following.html', user=user)", "def unfollow_user(username):\n user_ID = before_request()\n user_ID = None\n if user_ID != None:\n user_ID = str(g.user['_id'])\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n mongo.db.users.update({'_id': g.user['_id']}, {\n '$pull': {'follows': whom_id}})\n flash('You are no longer following \"%s\"' % username)\n if redis_obj.get(user_ID):\n return redirect(url_for('user_timeline', username=username, userId=pickle.loads(redis_obj.get(user_ID))))\n else:\n redis_obj.delete(session['user_id'])\n print \"Invalidating cache after Unfollow\"\n return redirect(url_for('user_timeline', username=username))", "def test_following_non_existing_user(self):\n response = self.client.post(\n reverse(\n 'follow',\n kwargs={'username': 'NotThere'}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "async def unfollow(self, TargetId: int):\n data = {\n 'targetUserId': TargetId\n }\n e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/unfollow', method='post',\n data=data)\n return e", "def unfollow(self, followerId, followeeId):\n if followerId in self.follows:\n if followeeId in self.follows[followerId]:\n self.follows[followerId].remove(followeeId)", "def unfollow_user(request, course_id, followed_user_id):\r\n user = cc.User.from_django_user(request.user)\r\n followed_user = cc.User.find(followed_user_id)\r\n user.unfollow(followed_user)\r\n return JsonResponse({})", "def test_follow_with_auth(self):\n 
self.authorize_user(self.user)\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.register_user(self.user1)\n response = self.client.post(self.follow_url, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def unfollow(self, user):\n f = self.followed.filter_by(followed_id=user.id).first()\n if f:\n db.session.delete(f)", "def users_followers(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/followers.html', user=user)", "def unfollow(self, followerId, followeeId):\r\n self.follows[followerId].discard(followeeId)", "def subscribe_user_follow(self,\n from_id: Union[str, None],\n to_id: Union[str, None],\n callback_func: Union[Callable[[UUID, dict], None], None]) -> Tuple[bool, UUID]:\n param_dict = {\"first\": 1,\n \"from_id\": from_id,\n \"to_id\": to_id}\n url = build_url(TWITCH_API_BASE_URL + \"users/follows\", param_dict, remove_none=True)\n uuid = get_uuid()\n return self._generic_subscribe('/users/follows', url, uuid, callback_func), uuid", "def share_link(cls, user, link):", "def share_link(cls, user, link):", "def make_link(first, second):\n manager = Actions()\n manager.make_link(first, second)" ]
[ "0.7963307", "0.7816804", "0.74662054", "0.7300386", "0.7281261", "0.7074805", "0.7074805", "0.6866235", "0.68443894", "0.68353206", "0.6758741", "0.67564857", "0.67033875", "0.6687655", "0.6658397", "0.6617403", "0.6589921", "0.65871906", "0.65700394", "0.6544247", "0.6530751", "0.65226394", "0.6491002", "0.6427437", "0.64006996", "0.6385767", "0.6385197", "0.6375482", "0.63655144", "0.63522816", "0.63401425", "0.63367814", "0.6328521", "0.6294988", "0.6231784", "0.62308735", "0.6218363", "0.62143487", "0.6203529", "0.6181932", "0.61740357", "0.61589116", "0.6148494", "0.61390054", "0.6121527", "0.60985017", "0.6088886", "0.608812", "0.60880333", "0.6061088", "0.60407174", "0.604048", "0.6027422", "0.60068315", "0.60025746", "0.5986469", "0.59827185", "0.5972224", "0.5967686", "0.59586847", "0.59549236", "0.59493583", "0.5948346", "0.5937541", "0.5933459", "0.5930473", "0.59247667", "0.5924303", "0.5913927", "0.58974355", "0.58960134", "0.58844054", "0.58731073", "0.5866933", "0.5838765", "0.583809", "0.5823972", "0.58219004", "0.5818363", "0.5816773", "0.581368", "0.5788361", "0.57725036", "0.5766397", "0.5763242", "0.5754636", "0.57515615", "0.5743327", "0.57345605", "0.5731445", "0.57231253", "0.5716732", "0.570934", "0.5708939", "0.57062364", "0.5694452", "0.5692263", "0.5676764", "0.5676764", "0.5666776" ]
0.6643213
15
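The follow document in the row above assumes a host object that provides num_users, a users_hat matrix, and is_verbose/log helpers. A minimal, self-contained sketch of such a host is given below purely as an illustration; the SocialGraph class name, the NumPy-backed matrix, and the stub logging helpers are assumptions introduced here and are not part of the dataset row.

import numpy as np

class SocialGraph:
    def __init__(self, num_users):
        self.num_users = num_users
        # users_hat[following_index, user_index] == 1 records a directed follow link,
        # mirroring the indexing convention of the follow document above.
        self.users_hat = np.zeros((num_users, num_users), dtype=int)

    def is_verbose(self):
        # Stub: always log; a real host would expose a configurable verbosity flag.
        return True

    def log(self, message):
        print(message)

    def follow(self, user_index, following_index):
        # Same logic as the follow document in the row above.
        if user_index >= self.num_users or following_index >= self.num_users:
            raise ValueError(
                f"Number of users is {self.num_users}, but indices "
                f"{user_index} and {following_index} were requested."
            )
        if self.users_hat[following_index, user_index] == 0:
            self.users_hat[following_index, user_index] = 1
        elif self.is_verbose():
            self.log(f"User {following_index} was already following user {user_index}")

graph = SocialGraph(num_users=3)
graph.follow(0, 2)                 # creates the one-way link
assert graph.users_hat[2, 0] == 1
graph.follow(0, 2)                 # second call only logs; the matrix is unchanged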
Method to unfollow another user, that is, to delete the unidirectional link that goes from one user to the other.
def unfollow(self, user_index, following_index): if user_index >= self.num_users or following_index >= self.num_users: raise ValueError( f"Number of users is {self.num_users}, but indices " f"{user_index} and {following_index} were requested." ) if self.users_hat[following_index, user_index] == 1: self.users_hat[following_index, user_index] = 0 elif self.is_verbose(): self.log(f"User {following_index} was not following user {user_index}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unfollow(self, other):\n\t\tif self.follows(other):\n\t\t\tself.followed.remove(other)", "def unfollow(self, user):\n f = self.followed.filter_by(followed_id=user.id).first()\n if f:\n db.session.delete(f)", "def unfollow(request, usertostopfollow):\n stop_follow = Member.objects.get(user__username=usertostopfollow)\n user = Member.objects.get(user=request.user)\n user.following.remove(stop_follow)\n user.save()\n return redirect(request.META['HTTP_REFERER'])", "def unfollow(self, user):\n if self.is_following(user):\n self.followed.remove(user)\n return self", "def unfollowUser(following):\n \n cur, user_id, con = initialise(3, True)\n cur.execute(\"DELETE FROM followers WHERE user = (SELECT username FROM users WHERE id = ?) AND following = ?\", (user_id, following))\n finish(con)", "def user_unfollow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.remove(following)\n db.session.commit()\n return {'unfollowed': True}", "def unfollow_user(username):\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n db = get_db()\n db.execute('delete from follower where who_id=? and whom_id=?',\n [session['user_id'], whom_id])\n db.commit()\n flash('You are no longer following \"%s\"' % username)\n return redirect(url_for('user_timeline', username=username))", "def unfollow(self, followerId: int, followeeId: int) -> None:\n self.users[followerId].discard(followeeId)", "def unfollow(self, followerId, followeeId):\r\n self.follows[followerId].discard(followeeId)", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followerId not in self.users.keys() or followeeId not in self.users.keys():\n return\n if followeeId not in self.users[followerId].followees.keys():\n return\n self.users[followerId].followees.pop(followeeId)\n\n\n\n # Your Twitter object will be instantiated and called as such:\n # obj = Twitter()\n # obj.postTweet(userId,tweetId)\n # param_2 = obj.getNewsFeed(userId)\n # obj.follow(followerId,followeeId)\n # obj.unfollow(followerId,followeeId)", "def unfollow(self, followerId: int, followeeId: int) -> None:\n following = self.user_followed[followerId]\n if followeeId in following:\n following.remove(followeeId)\n self.user_followed[followerId] = following", "def unfollow(self, followerId, followeeId):\n if followerId in self.follows:\n if followeeId in self.follows[followerId]:\n self.follows[followerId].remove(followeeId)", "def unfollow_user(request, course_id, followed_user_id):\r\n user = cc.User.from_django_user(request.user)\r\n followed_user = cc.User.find(followed_user_id)\r\n user.unfollow(followed_user)\r\n return JsonResponse({})", "def unfollow_user(username):\n user_ID = before_request()\n user_ID = None\n if user_ID != None:\n user_ID = str(g.user['_id'])\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n mongo.db.users.update({'_id': g.user['_id']}, {\n '$pull': {'follows': whom_id}})\n flash('You are no longer following \"%s\"' % username)\n if redis_obj.get(user_ID):\n return redirect(url_for('user_timeline', username=username, userId=pickle.loads(redis_obj.get(user_ID))))\n else:\n redis_obj.delete(session['user_id'])\n print \"Invalidating cache after Unfollow\"\n return redirect(url_for('user_timeline', username=username))", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followerId != followeeId and followeeId in 
self.followList.get(followerId, []):\n self.followList[followerId].remove(followeeId)\n # print(self.followList)", "def unfollow(source_id, destination_id):\n Forward.objects.filter(source_id=source_id,\n destination_id=destination_id).delete()\n Backward.objects.filter(destination_id=destination_id,\n source_id=source_id).delete()", "async def unlink(self, ctx):\n # Remove all link tokens and spotify details for this user\n remove_tokens(ctx.author.id)\n remove_spotify_details(ctx.author.id)\n await ctx.reply(\"All your linked accounts were removed, if you had any!\")", "def unfollow(alias):\n s = db.Series.alias_lookup(alias)\n s.following = False\n db.session.commit()\n output.series('Removing follow for {}'.format(s.name))", "def unfollow_profile(self):\n self.find_clickable_element(self.ISFOLLOWED_BTN).click()", "def unfollow(self, name):\r\n url = '{0}/{1}/{2}'.format(self.get_url(), 'following', name)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def unfollow(self, followerId, followeeId):\n # 在user_pool 中查询这个用户 follower\n if self.user_pool[followerId]:\n # 如果在用户的关注列表中才删除\n if followeeId in self.user_pool[followerId].follows:\n self.user_pool[followerId].follows.remove(followeeId)", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.users:\n self._create_user(followeeId)\n if followerId not in self.users:\n self._create_user(followerId)\n if followerId != followeeId and followeeId in self.users[followerId]:\n self.users[followerId].remove(followeeId)", "def unfollow(user, actor, send_action=False):\n Follow.objects.filter(user = user, object_id = actor.pk,\n content_type = ContentType.objects.get_for_model(actor)).delete()\n if send_action:\n action.send(user, verb=_('stopped following'), target=actor)", "def unfollow(self, followerId: int, followeeId: int) -> None:\n self.followees[followerId].discard(followeeId)", "def unfollow_me(self):\n return self.follow_me('unfollow_topic')", "def delete(self, request, username):\n\n # Retrieve the user from the user table if the user exists\n try:\n user_to_unfollow = User.objects.get(username=username)\n current_user = request.user\n # If a user is trying to unfollow themselves then stop the request\n if user_to_unfollow.profile.id == current_user.profile.id:\n return Response(\n {\"errors\": FOLLOW_USER_MSGS['CANNOT_UNFOLLOW_SELF']},\n status=status.HTTP_400_BAD_REQUEST\n )\n # Check if the user to be unfollowed\n # is in the current users following list\n try:\n profile_id = user_to_unfollow.profile.id\n user_being_followed = CustomFollows.objects.get(\n to_profile_id=profile_id,\n from_profile_id=current_user.profile.id\n )\n # If not tell the user the request can't happen\n # Because they don't follow the user\n except Exception as e:\n return Response(\n {\n \"errors\": FOLLOW_USER_MSGS['USER_UNFOLLOWED_ALREADY']\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n # Otherwise unfollow the user as requested\n current_user.profile.follows.remove(user_to_unfollow.profile)\n # Get the following & followers username list\n # And the following & followers count for the current user\n user_following_data = get_user_following_data(current_user)\n return Response(\n {\n \"message\": FOLLOW_USER_MSGS['USER_UNFOLLOW_SUCCESSFUL'],\n \"following\": user_following_data[\"following\"],\n \"followers\": user_following_data[\"followers\"],\n \"followingCount\": user_following_data[\"followingCount\"],\n \"followersCount\": user_following_data[\"followersCount\"]\n },\n status=status.HTTP_200_OK\n 
)\n # End request if we cannot find the user we want to unfollow.\n except User.DoesNotExist:\n return Response(\n {\"errors\": FOLLOW_USER_MSGS['USER_NOT_FOUND']},\n status=status.HTTP_404_NOT_FOUND\n )", "def unfollow(self, followerId, followeeId):\n if followerId in self.follow_map and followeeId in self.follow_map[followerId]:\n self.follow_map[followerId].remove(followeeId)", "def user_unfollow(self, user_id: int) -> bool:\n assert self.user_id, \"Login required\"\n user_id = int(user_id)\n data = self.with_action_data({\"user_id\": user_id})\n result = self.private_request(f\"friendships/destroy/{user_id}/\", data)\n if self.user_id in self._users_following:\n self._users_following[self.user_id].pop(user_id, None)\n return result[\"friendship_status\"][\"following\"] is False", "def unfollow_friend(username):\n\n if not g.user:\n print \"401\"\n abort(401)\n whom_id = get_user_id(username)\n print whom_id\n if whom_id is None:\n abort(404)\n unfollow_query(whom_id)\n flash('You are no longer following \"%s\"' % username)\n name = {'name of unfollowing user': username}\n ############### REDIS cache invalidate #####################\n R_SERVER.delete(user_timeline_key)\n return jsonify(Username=name, Status_code=status.HTTP_200_OK)", "def unfollow(self,id):\n # DELETE /followings/$id\n debugMain('unfollow')\n resource = '/followings/%s'%id\n requestUrl = self.apiRootUrls[0] + resource\n debugRequest('unfollowing: %s'%requestUrl)\n r = self.session.delete(requestUrl)\n \n debugDetail('request headers:')\n debugJson(r.request.headers)\n debugDetail()\n debugDetail(' -- -- -- --')\n debugDetail()\n debugDetail('response headers:')\n debugJson(r.headers)\n debugDetail()\n \n if r.status_code is not 200:\n debugError('failed to unfollow.')\n debugDetail()\n return False\n return True", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followeeId in self.follow_map[followerId]:\n self.follow_map[followerId].remove(followeeId)", "def unfollow(self, followerId: 'int', followeeId: 'int') -> 'None':\n self.followees[followerId].discard(followeeId)", "async def unfollow(self, TargetId: int):\n data = {\n 'targetUserId': TargetId\n }\n e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/unfollow', method='post',\n data=data)\n return e", "def test_unfollow_user_without_auth(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.register_user(self.user1)\n self.client.post(self.follow_url, format='json')\n response = self.client.delete(self.unfollow_url,\n data=self.followed_user)\n self.assertEqual(response.data['detail'],\n \"Authentication credentials were not provided.\")\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_un_following_existing_user(self):\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def unfollow(self, followerId: int, followeeId: int) -> None:\n # Time Complexity: O(1)\n if followerId in self.followees and followeeId in self.followees[followerId]:\n self.followees[followerId].remove(followeeId)", "def unfollow_route(request, follow_id):\n\n db_conn = request['db_conn']\n current_user = get_current_user(request)\n if not current_user:\n return abort(401)\n follow = get_follow({'id': follow_id}, db_conn)\n if not follow:\n return abort(404)\n if follow['user_id'] != current_user['id']:\n return 
abort(403)\n errors = delete_follow(follow['id'], db_conn)\n if errors:\n return 400, {\n 'errors': errors,\n 'ref': 'iGmpx8UwoFcKNmSKq9Aocy1a'\n }\n return 200, {}", "def delete_model(self, request, obj):\n From = User.objects.get(id=obj.From.id)\n To = User.objects.get(id=obj.To.id)\n From.following_numDe()\n To.followed_numDe()\n obj.delete()", "def removeFollower(self,id):\n # DELETE /followers/$id\n pass", "def unfriend(self, remove):\n remover_friends_list = self # person terminating the friendship \n \n # remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n\n #remove friend from removee friend list\n friends_list = FriendList.objects.get(user=removee)\n friend_list.remove_friend(self.user)", "def new_unfollow(self, user_id, user_name):\n url_unfollow = self.url_unfollow % (user_id)\n try:\n unfollow = self._send_post_request(url_unfollow)\n if unfollow.status_code == 200:\n self.unfollow_counter += 1\n log_string = \"Unfollow: %s #%i.\" % (user_name,\n self.unfollow_counter)\n self.log.debug(log_string)\n return unfollow\n except:\n self.log.debug(\"Exept on unfollow!\")\n return False", "def unfollowing_and_removing(self, user_id):\n if self.unfollowing(user_id):\n ind = [i for i, j in enumerate(self.monitored_users) if j.get('user', '') == user_id]\n if ind:\n self.monitored_users.remove(self.monitored_users[ind[0]])", "def unfollow_user(self:'InstaClient', user:str, nav_to_user=True, check_user=True):\n profile = self.get_profile(user)\n if not profile:\n raise InvalidUserError(user)\n\n LOGGER.debug('INSTACLIENT: User <{}> is valid'.format(user))\n self._nav_user(user, check_user=False)\n \n if profile.requested_by_viewer:\n requested_btn = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.REQUESTED_BTN)))\n self._press_button(requested_btn)\n confirm_unfollow = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.CONFIRM_UNFOLLOW_BTN)))\n self._press_button(confirm_unfollow)\n LOGGER.debug(f'Cancelled Follow Request for user <{user}>')\n \n elif self._check_existence(EC.presence_of_element_located((By.XPATH, Paths.UNFOLLOW_BTN))):\n unfollow_btn = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.UNFOLLOW_BTN)))\n self._press_button(unfollow_btn)\n time.sleep(1)\n confirm_unfollow = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.CONFIRM_UNFOLLOW_BTN)))\n self._press_button(confirm_unfollow)\n LOGGER.debug('INSTACLIENT: Unfollowed user <{}>'.format(user))", "def stop_following(follow_id):\n\n followed_user = User.query.get(follow_id)\n g.user.following.remove(followed_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")", "def unlink(self, link_id):", "def unfollow(self, followerId, star):\n if followerId in self.followstar and star in self.followstar[followerId]:\n self.followstar[followerId].remove(star)", "def test_unfollow_user_successfully(self):\n self.authorize_user(self.user)\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.register_user(self.user1)\n self.client.post(self.follow_url, format='json')\n response = self.client.delete(self.unfollow_url,\n data=self.followed_user)\n self.assertEqual(response.content,\n b'{\"message\":\"user unfollowed\"}')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def unfollow(username):\n follow_form = FollowForm()\n unfollow_form = UnfollowForm()\n\n if unfollow_form.validate_on_submit():\n try:\n current_user.unfollow(username)\n 
flash('Unfollowed {}'.format(username))\n except ValueError as excep:\n flash(str(excep))\n return render_template('user_history.html',\n username=username,\n follow_form=follow_form,\n unfollow_form=unfollow_form)\n\n return render_template('user_history.html',\n username=username,\n follow_form=follow_form,\n unfollow_form=unfollow_form)", "def unfriend(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.delete_friend(user_id, target_id)", "def delete(self, request, *args, **kwargs):\n\n following_username = token_payload(request)\n\n followed_username = self.kwargs.get(self.look_url_kwarg)\n try:\n followed_user = User.objects.get(username=followed_username)\n followed_user_id = followed_user.id\n\n is_following = FollowUser.objects.filter(\n following_username=following_username)\n for user in is_following:\n if user.following_username == following_username and user.followed_username == followed_username:\n returned_user = FollowUser.objects.get(pk=user.id)\n returned_user.delete()\n\n followed_user_profile = Profile.objects.get(\n profile_user=followed_user_id)\n serializer_profile = self.serializer_classprofile(\n followed_user_profile)\n return Response({\n \"user\": following_username,\n \"unfollowing\": serializer_profile.data\n })\n return Response({\n \"message\": 'User has been unfollowed or You are unfollowing a user you were not orignally following'\n }, status=status.HTTP_400_BAD_REQUEST)\n except:\n return Response({\n \"message\": 'User does not exist'\n }, status=status.HTTP_400_BAD_REQUEST)", "def delete_follow_request(request):\n username = request.POST[\"user\"]\n requester = get_user_from_username(request.user, username)\n\n follow_request = get_object_or_404(\n models.UserFollowRequest, user_subject=requester, user_object=request.user\n )\n follow_request.raise_not_deletable(request.user)\n\n follow_request.delete()\n return redirect(f\"/user/{request.user.localname}\")", "async def twitter_unfollow(self, ctx, handle):\n sane_handle = handle.lower().lstrip('@')\n conf = dutils.get(self.conf.follows, screen_name=sane_handle)\n chan_conf = dutils.get(conf.discord_channels, id=ctx.message.channel.id) if conf is not None else None\n\n if chan_conf is None:\n raise TwitterError('Not following {} on this channel.'.format(handle))\n\n # Remove the Discord channel from the Twitter channel conf\n conf.discord_channels.remove(chan_conf)\n if not conf.discord_channels:\n # If there are no more Discord channel to feed, unfollow the Twitter channel\n self.conf.follows.remove(conf)\n del conf\n\n # Update the tweepy stream\n if len(self.conf.follows) > 0:\n await self.stream.start()\n else:\n self.stream.stop()\n\n self.conf.save()\n\n await self.bot.say('\\N{OK HAND SIGN}')", "def delete(self, request, username):\n followed_user_exists = User.objects.filter(username=username).exists()\n if not followed_user_exists:\n return Response({'error': 'user not found'},\n status.HTTP_404_NOT_FOUND)\n followed_user = User.objects.get(username=username)\n user_exists = Follow.is_user_already_followed(\n followed_user_id=followed_user.id,\n user_id=request.user.id\n )\n if user_exists:\n instance = Follow.objects.filter(\n user=self.request.user.id, followed_user=followed_user.id\n )\n instance.delete()\n return Response({'message': 'user unfollowed'},\n 
status.HTTP_200_OK)\n return Response({'message': 'user not in followers'},\n status.HTTP_404_NOT_FOUND)", "def unfriend(self, removee):\n remover_friends_list = self # person terminating the friendship\n # Remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n # Remove friend from removee's friend list\n friends_list = FriendList.objects.get(user=removee)\n friends_list.remove_friend(self.user)", "def stop_following(follow_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n followed_user = User.query.get_or_404(follow_id)\n g.user.following.remove(followed_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")", "def auto_unfollow_nonfollowers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n # put user IDs here that you want to keep following even if they don't\n # follow you back\n users_keep_following = set([])\n\n not_following_back = following - followers\n\n # make sure the \"already followed\" file exists\n if not os.path.isfile(ALREADY_FOLLOWED_FILE):\n with open(ALREADY_FOLLOWED_FILE, \"w\") as out_file:\n out_file.write(\"\")\n\n # update the \"already followed\" file with users who didn't follow back\n already_followed = set(not_following_back)\n af_list = []\n with open(ALREADY_FOLLOWED_FILE) as in_file:\n for line in in_file:\n af_list.append(int(line))\n\n already_followed.update(set(af_list))\n del af_list\n\n with open(ALREADY_FOLLOWED_FILE, \"w\") as out_file:\n for val in already_followed:\n out_file.write(str(val) + \"\\n\")\n\n for user_id in not_following_back:\n if user_id not in users_keep_following:\n t.friendships.destroy(user_id=user_id)\n print(\"unfollowed %d\" % (user_id))", "def test_unfollowing_yourself(self):\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': self.follower['user'].get('username')}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)", "def unsubscribe_user_follow(self, uuid: UUID) -> bool:\n return self._generic_unsubscribe('/users/follows', uuid)", "def user_playlist_unfollow(self, user, playlist_id, **kwargs):\n return self._delete(\n \"users/%s/playlists/%s/followers\" % (user, playlist_id), **kwargs\n )", "def test_unfollowing_non_existing_user(self):\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': 'NotThere'}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def unfollow_commentable(request, course_id, commentable_id):\r\n user = cc.User.from_django_user(request.user)\r\n commentable = cc.Commentable.find(commentable_id)\r\n user.unfollow(commentable)\r\n return JsonResponse({})", "def on_deleted_follow(sender, instance: models_actstream.Follow, **kwargs):\n content_type = ContentType.objects.get_for_id(instance.content_type_id)\n log.debug(\"Unfollowing %s %s\" % (content_type.name, instance.object_id))\n dillo.tasks.feeds.repopulate_timeline_content(\n instance.content_type_id, instance.object_id, instance.user_id, 'unfollow'\n )", "def btn_unfollow_clicked(self, widget, data=None):\n print \"unfollow clicked\"", "def remove(self, user_id):\n pass", "def fb_deauth(self, request):\n signed_request = request.data.get('signed_request')\n if signed_request:\n parsed_signed_request = facebook_controller.parse_signed_request(signed_request)\n facebook_user_id = parsed_signed_request.get('user_id')\n if facebook_user_id:\n 
facebook_controller.delete_linked_facebook_account(facebook_user_id)\n return Response('OK')", "def unfriend(self, removee):\n\t\tremover_friend_list = self # person terminating the friendship\n\n\t\t# Remove friend from friend request\n\t\tremover_friend_list.remove_friend(removee)\n\n\t\t# Remove friend from the removeee friend list\n\t\tfriends_list = FriendList.objects.get(user=removee)\n\t\tfriends_list.remove_friend(self.user)", "def backwards(self, orm):\n \n user_type = orm['auth.User']\n user = user_type.objects.get(username=self.suUsername)\n \n profile_type = orm['mooi.Profile']\n userProfile = profile_type.objects.get(user=user)\n \n userProfile.delete()\n user.delete()", "def unfollow_group(request, pk):\n group = get_object_or_404(Group, id=pk)\n\n # Check user is not member of the group\n if not group.members.filter(id=request.user.id).exists():\n actions.unfollow(request.user, group, send_action=False)\n request.user.userprofile.follow_groups.remove(group)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore.')\n # the group members can choose not follow the group anymore, but still\n # been the member\n else:\n actions.unfollow(request.user, group, send_action=False)\n messages.warning(\n request,\n 'Successed, you are not following this Group anymore. But you are still the one of the members of this group.')\n\n return redirect('groups:groups-detail', pk)", "def remove_link():", "def test_unfollowing_existing_user_not_authenticated(self):\n self.client.credentials()\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def delete_user():", "def clean(self):\n super().clean()\n if self.user2:\n self.orig_cloud.delete_user(self.user2.id)", "def remove(self, uid):\n marker = object()\n name = self._reverse.get(uid, marker)\n if name is not marker:\n del self._reverse[uid]\n try:\n del self._forward[name]\n except KeyError:\n # If it isn't there, good, that is the outcome we wanted,\n # right?\n pass", "def unfollow_thread(request, course_id, thread_id):\r\n user = cc.User.from_django_user(request.user)\r\n thread = cc.Thread.find(thread_id)\r\n user.unfollow(thread)\r\n return JsonResponse({})", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user():\n #TODO user delete\n pass", "def remove_relation(request, id):\n user = request.user\n relation = get_object_or_404(User, id=id)\n user.profile.relations.remove(relation)\n user.profile.friends.add(relation)\n messages.success(\n request,\n 'Family member removed to your friends list'\n )\n return redirect('profiles:my_friends')", "async def del_user(conn: LDAPConnection, user: dict, mailman: Client) -> None:\n await conn.delete(user[\"dn\"])\n uid = user[\"attributes\"][\"uid\"][0]\n rmtree(user[\"attributes\"][\"homeDirectory\"][0])\n rmtree(f\"/webtree/{uid[:1]}/{uid}\")\n mailing_list = mailman.get_list(\"announce-redbrick\")\n mailing_list.unsubscribe(f\"{uid}@redbrick.dcu.ie\")", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "def backwards(apps, schema_editor):\n Referral = apps.get_model(\"core\", \"Referral\")\n\n for referral in Referral.objects.all():\n 
referral.users.clear()\n referral.save()", "def unlink(self):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n self.client.post_linked_resource(\n self.resource, RelationType.UNLINK_FROM_TEMPLATE,\n EntityType.ROLE.value, None)", "def delete_user(network, user):\n if user in network:\n del network[user]\n for u in network:\n connections = get_connections(network, u)\n if user in connections:\n i = connections.index(user)\n del connections[i]\n return network", "def follow_user(cls, user, following):\r\n pass", "def del_user(self, username):\n pass", "def delete_user(self):\n\n User.user_list.remove(self)", "def delete_link(self, word):\n meaning = self.word2meaning[word]\n print(str(self.unique_id) + \" forgot \" +\n str(word) + \" for \" + str(meaning))\n del self.word2meaning[word]\n del self.meaning2word[meaning]\n del self.wordsuccess[word]\n\n # If the agent was the only one using the word, delete the word\n if len(self.model.vocabulary[meaning][word]) == 1:\n del self.model.vocabulary[meaning][word]\n # Else simply remove the agent\n else:\n self.model.vocabulary[meaning][word].remove(self.unique_id)", "def remove_user(self, u: \"Node\") -> None:\n\n if u in self.users_:\n self.users_[u] -= 1\n if self.users_[u] == 0:\n del self.users_[u]", "def reset():\n\n Follower.clear()", "def test_stream_stale_follows(self):\n self.user2.delete()\n self.assertNotIn('Two', str(user_stream(self.user1)))", "def follow_user(cls, user, following):\n pass", "def delete_user(self, user):\n self.delete(user)", "def remove_friends(self, user1_index, user2_index):\n if user1_index >= self.num_users or user2_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user1_index} and {user2_index} were requested.\"\n )\n if self.users_hat[user1_index, user2_index] == 1:\n self.users_hat[user1_index, user2_index] = 0\n elif self.is_verbose():\n self.log(f\"User {user2_index} was not following user {user1_index}\")\n if self.users_hat[user2_index, user1_index] == 1:\n self.users_hat[user2_index, user1_index] = 0\n elif self.is_verbose():\n self.log(f\"User {user1_index} was not following user {user2_index}\")", "def remove_candidate(self, user):\n self.assignment_related_users.filter(user=user).delete()\n inform_changed_data(self)", "def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def test_remove_followers(self):\n pass", "def remove_link(self,link,verbose=False):\n label, child = link\n self.outgoing.remove((label,child))\n child.incoming.remove((label,self))\n if verbose: print('removed', label, self.nodeid, child.nodeid)", "def rm_favoriting_user_id(self, circuit_id, user_id):\n key = ':'.join(\n [CIRCUIT_FAV_USRS_1, \n str(circuit_id), \n CIRCUIT_FAV_USRS_2]\n )\n self.RS.srem(key, user_id)" ]
[ "0.8147802", "0.8125675", "0.80680937", "0.80040616", "0.78747725", "0.7863218", "0.76762897", "0.7540846", "0.7521238", "0.74924225", "0.74637675", "0.7456486", "0.74547946", "0.73516536", "0.73467946", "0.7258564", "0.7226699", "0.719125", "0.7158494", "0.71420807", "0.71407115", "0.7123383", "0.7107736", "0.7101938", "0.7095888", "0.7058433", "0.70496774", "0.70361125", "0.70227575", "0.6988022", "0.698508", "0.69789916", "0.69541895", "0.6945841", "0.6936822", "0.6897518", "0.68944323", "0.6858234", "0.67758286", "0.6753741", "0.67476064", "0.67117876", "0.6686417", "0.66739917", "0.666484", "0.6653934", "0.6643302", "0.6625236", "0.6604231", "0.660413", "0.6596151", "0.65765953", "0.6552451", "0.6519006", "0.6511613", "0.6511075", "0.6482342", "0.6480878", "0.6456101", "0.6394447", "0.63816476", "0.6368419", "0.63683975", "0.6345274", "0.6283143", "0.62685585", "0.62296426", "0.62245697", "0.61911976", "0.6186014", "0.6185922", "0.61726135", "0.61710423", "0.61708087", "0.614958", "0.61441547", "0.61441547", "0.61441547", "0.6125494", "0.60909337", "0.6065649", "0.6056302", "0.6043886", "0.6018555", "0.60056573", "0.6005516", "0.60003483", "0.5997188", "0.59807247", "0.59756136", "0.59732234", "0.5951729", "0.59015304", "0.5901366", "0.5865764", "0.5859345", "0.58358806", "0.58320534", "0.58212763", "0.5806258" ]
0.7063021
25
Method to add a user as friends, that is, to create a bidirectional link that connects the two users.
def add_friends(self, user1_index, user2_index): if user1_index >= self.num_users or user2_index >= self.num_users: raise ValueError( f"Number of users is {self.num_users}, but indices " f"{user1_index} and {user2_index} were requested." ) if self.users_hat[user1_index, user2_index] == 0: self.users_hat[user1_index, user2_index] = 1 elif self.is_verbose(): self.log(f"User {user2_index} was already following user {user1_index}") if self.users_hat[user2_index, user1_index] == 0: self.users_hat[user2_index, user1_index] = 1 elif self.is_verbose(): self.log(f"User {user1_index} was already following user {user2_index}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_friend(self, User):\n if not User in self.friends.all():\n self.friend.add(User)\n #self.save()", "def addfriend(self, second_user_id):\n second_user = User.objects.get(id=second_user_id)\n new_friendship = Friendship.objects.create(friend_user=self, friend=second_user.gameplanuser)\n new_friendship.save()", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n # print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n # print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n\n return True", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def addFriendship(self, userID, friendID):\n # adding a edge between two vertices\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def create_friend(user_id, friend_user_id):\n\n friend = User_Friend(user_id=user_id, friend_user_id=friend_user_id)\n\n db.session.add(friend)\n db.session.commit()\n\n return friend", "def make_friend(user_id, friend_id):\n # Find out if the user exists\n user_a = user_grab(user_id)\n if user_a is None:\n return \"user not found\", 404\n\n # Find the other user\n user_b = user_grab(friend_id)\n if user_b is None:\n return \"user not found\", 404\n\n # Get their friend list\n friends_current = user_a.get(\"friends\")\n friends_updated = []\n if friends_current is not None:\n for friend in friends_current:\n if friend == friend_id:\n return user_b\n friends_updated = friends_current\n friends_updated.append(str(user_b['_id']))\n api_vars.users.update({'_id': ObjectId(user_id)},\n {'$set': {'friends': friends_updated}})\n return json.dumps(user_b)", "def add_relation(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.relations.add(friend)\n user.profile.friends.remove(friend)\n messages.success(\n request,\n 'Friend added to your family list'\n )\n return redirect('profiles:my_family')", "def add_friend():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n friend_to_add = 
get_id_from_username(request.form['add_user'])\n if not friend_to_add or friend_to_add==user_id:\n return redirect(url_for('message.converse'))\n add_friend_db(user_id, friend_to_add)\n return redirect(url_for('message.converse'))", "async def add(\n self,\n\t\tuser_id: Optional[int] = None,\n\t\ttext: Optional[str] = None,\n\t\tfollow: Optional[bool] = None,\n\t\t**kwargs\n ) -> friends.AddResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.add\", params)\n model = friends.AddResponse\n return model(**response).response", "def add_user(self, name):\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()", "def add_user(self, name):\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()", "def add_user(self, name):\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()", "def add_user(self, name):\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()", "def addUser(self, name):\n self.lastID += 1 # automatically increment the ID to assign the new user\n self.users[self.lastID] = User(name)\n self.friendships[self.lastID] = set()", "def addUser(self, name):\n self.lastID += 1 # automatically increment the ID to assign the new user\n self.users[self.lastID] = User(name)\n self.friendships[self.lastID] = set()", "def addUser(self, name):\n self.lastID += 1 # automatically increment the ID to assign the new user\n self.users[self.lastID] = User(name)\n self.friendships[self.lastID] = set()", "def addUser(self, name):\n self.lastID += 1 # automatically increment the ID to assign the new user\n self.users[self.lastID] = User(name)\n self.friendships[self.lastID] = set()", "def test_friends_symmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n\n u.friends.add(f)\n self.assertIs(u in f.friends.all(), True)\n self.assertIs(f in u.friends.all(), True)", "def add_friend(request, profile_pk, friend_pk):\n\n profile_object = Profile.objects.get(pk=profile_pk)\n friend_object = profile_object.get_friend_suggestions().get(pk=friend_pk)\n \n profile_object.friends.add(friend_object)\n profile_object.save()\n\n return redirect(reverse('show_profile_page', kwargs={'pk': profile_pk}))", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n 
return False\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)\n return True", "def add_friend(self, account):\n if not account in self.friends.all():\n self.friends.add(account)\n self.save()", "def test_requested_friends_asymmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n \n f.requested_friends.add(u)\n self.assertIs(u in f.requested_friends.all(), True)\n self.assertIs(f in u.requested_friends.all(), False)", "def add_friend(self, account):\n\t\tif not account in self.friends.all():\n\t\t\tself.friends.add(account)", "def addFriends(author):\n friends = author.friends.all()\n remote_friends = RemoteFriend.objects.all().filter(author=author)\n friend_list = list()\n if friends:\n for friend in friends:\n friend_dict = {'id': \"{}/api/{}\".format(DOMAIN, friend.id), 'host': friend.host_url,\n 'displayName': friend.username, 'url': \"{}/api/{}\".format(DOMAIN, friend.id)}\n friend_list.append(friend_dict)\n\n if remote_friends:\n for remote in remote_friends:\n friend_dict = {'id': remote.url, 'host': remote.host,\n 'displayName': remote.displayName, 'url': remote.url}\n friend_list.append(friend_dict)\n\n remote = check_remote_friends(author)\n friend_list += remote\n return friend_list", "def accept(self):\n receiver_friend_list = FriendList.objects.get(user=self.receiver)\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender)\n sender_friend_list = FriendList.objects.get(user=self.sender)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver)\n self.is_active = False\n self.save()", "def connect_user(self, user):\n\t\tis_user_added = False\n\t\tif not user in self.users.all():\n\t\t\tself.users.add(user)\n\t\t\tself.save()\n\t\t\tis_user_added = True\n\t\telif user in self.users.all():\n\t\t\tis_user_added = True\n\t\treturn is_user_added", "def accept(self):\n receiver_friend_list = FriendList.objects.filter(user_id=self.receiver_id)\n sender_friend_list = FriendList.objects.filter(user_id=self.sender_id)\n if(receiver_friend_list.exists()):\n receiver_friend_list = receiver_friend_list[0]\n else:\n receiver_friend_list = FriendList.objects.create(user_id=self.receiver_id)\n\n if(sender_friend_list.exists()):\n sender_friend_list = sender_friend_list[0]\n else:\n sender_friend_list = FriendList.objects.create(user_id=self.sender_id)\n\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender_id)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver_id)\n self.is_active = False\n self.save()", "def add_friend():\n\n\n user_id = session['user_id']\n add_friend = request.form.get(\"add-friend\")\n friend_id = request.form.get(\"friend_id\")\n friendship = Friendship.add_friend(user_id, friend_id)\n\n print \"This is the friend id\", friend_id\n\n return 'friend added'", "def add_friend_to_trip(request, trip_id, user_id):\n try:\n trip = Trip.objects.get(pk=trip_id)\n if request.user not in trip.users.all():\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n user = User.objects.get(pk=user_id)\n if user in trip.users.all():\n error_message = \"User already associated with trip\"\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n\n trip.users.add(user)\n 
except Trip.DoesNotExist:\n error_message = \"Trip does not exist\"\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n except User.DoesNotExist:\n error_message = \"User does not exist\"\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response(str(e), status=status.HTTP_400_BAD_REQUEST)\n\n return Response(status=status.HTTP_200_OK)", "def register_friend(cls, friend: Any) -> None:\n\n cls.__friends.add((cls, friend))", "def add_member(self, user):\n user_in = user.get_groups()\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n print('user is already a member')\n return False\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node", "def follow(request, usertofollow):\n to_follow = Member.objects.get(user__username=usertofollow)\n user = Member.objects.get(user=request.user)\n user.following.add(to_follow)\n user.save()\n return redirect(request.META['HTTP_REFERER'])", "async def add_list(\n self, name: str, user_ids: Optional[List[int]] = None, **kwargs\n ) -> friends.AddListResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.addList\", params)\n model = friends.AddListResponse\n return model(**response).response", "def set_friends(self, name, friend_names):\n\n person = self.nodes[name]\n\n for friend_name in friend_names:\n friend = self.nodes[friend_name]\n\n # Since adjacent is a set, we don't care if we're adding duplicates ---\n # it will only keep track of each relationship once. We do want to\n # make sure that we're adding both directions for the relationship.\n person.adjacent.add(friend)\n friend.adjacent.add(person)", "def follow_user(cls, user, following):\r\n pass", "def add_user_friendships(friend_page, acct):\n\n friends_list = [] # becomes a list of User objects\n # with db.session.begin():\n for friend in friend_page.user: # loops over page of 30 friends\n gr_id = int(friend.id.cdata.encode('utf8'))\n gr_url = friend.link.cdata.encode('utf8')\n name = friend.name.cdata.encode('utf8')\n image_url = friend.small_image_url.cdata.encode('utf8')\n\n try:\n # if user is already in db, add friendship only\n existing_user = User.query.filter_by(gr_id=gr_id).one()\n friends_list.append(existing_user)\n except:\n new_user = User(gr_id=gr_id, gr_url=gr_url,\n gr_name=name, image_url=image_url)\n db.session.add(new_user)\n print \"added new friend: \" + friend.name.cdata.encode('utf8')\n friends_list.append(new_user)\n\n print friends_list\n db.session.commit()\n\n # after adding missing users to db, add friendship between authorized account\n # and all friends\n for friend in friends_list:\n\n new_friend = Friendship(user_id=acct.user.user_id, friend_id=friend.user_id)\n old_friend = Friendship(user_id=friend.user_id, friend_id=acct.user.user_id)\n db.session.add(new_friend)\n db.session.add(old_friend)\n print \"Added friendship!\"\n\n db.session.commit()", "def add_follow(follow_id):\n\n want_to_follow_user = User.query.get_or_404(follow_id)\n if want_to_follow_user.private:\n # =========== NEED TO IMPLEMENT ====================\n # send them a request to follow\n want_to_follow_user.from_users.append(g.user) \n db.session.commit()\n flash(\"Your request has been sent\", \"success\")\n return redirect(f\"/users/{g.user.id}/following\")\n\n g.user.following.append(want_to_follow_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")", 
"def add_to_following(sender, instance, created, **kwargs):\r\n sender_= instance.sender\r\n receiver_ = instance.receiver\r\n if instance.status == 'accepted':\r\n sender_.following.add(receiver_.user)", "def follow_user(cls, user, following):\n pass", "def view_friends(request, username):\n user = get_object_or_404(user_model, username=username)\n qs = Friend.objects.select_related(\"UserProfile\").filter(to_user=user)\n friends = [u.from_user for u in qs]\n self = navbar(request.user.id)\n user1 = self.user.id\n for i in friends:\n to_user = i.id\n i.user2 = str(user1)+\"|\"+str(to_user)\n return render_to_response( 'view_friends.html', {'friends': friends, 'self':self})", "def add_friend(request):\n required_fields = ['source_user_id', 'dest_user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['source_user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Send friend request\n if not mock_db.add_friend(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when adding friend!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def request_to_be_friends(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.create_pending_friend_request(user_id, target_id)", "async def send_friend_request(self):\n\n logging.debug(\"Sending friend request to \" + self.username)\n\n if self.is_friend:\n raise ObjectErrors.AlreadyFriends(\n \"You are already friends with \" + self.display_name)\n\n await self.client.request.post(\n \"/user/%s/friendRequest\" % self.id)", "def add_following(self, user_id):\n sleep(360) # too much follows => function ban\n self.following.append(user_id)\n return perform_with_ran_delay(self.instagram.follow, user_id)", "def accept_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n if f_request.to_user == request.user:\n f_request.to_user.profile.friends.add(f_request.from_user)\n f_request.from_user.profile.friends.add(f_request.to_user)\n f_request.delete()\n messages.success(\n request,\n f'Your friend request was successfully accepted'\n )\n return redirect('profiles:my_friends')", "def add_member(self, user):\n if user is self.owner:\n raise ValidationError('A trip owner cannot also be a member.')\n # check the user is not already a member\n if self.members.filter(pk=user.pk).exists():\n return\n self.members.add(user)", "def forwards(apps, schema_editor):\n Referral = apps.get_model(\"core\", \"Referral\")\n\n for referral in Referral.objects.all():\n if hasattr(referral, \"user\"):\n referral.users.add(referral.user)\n referral.save()", "def addUser(self, user):\r\n self.users.append(user)\r\n return len(self.users)-1", "def test_remove_friends_symmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n\n u.friends.add(f)\n 
f.friends.remove(u)\n self.assertIs(u in f.friends.all(), False)\n self.assertIs(f in u.friends.all(), False)", "def add_user(self, user: User):\n raise NotImplementedError", "def add(self, user):\n int_id = user.get_int_id(self.rooms)\n self.rooms[user.room][\"users\"].append(user)\n\n # Games\n if self.rooms[user.room][\"isGame\"] == \"true\":\n user.send([\"jg\", int_id, user.room])\n # Rooms\n else:\n user.send([\"jr\", int_id, user.room, self.get_strings(user.room)])\n self.packet.send_room([\"ap\", int_id, user.get_string()], user.room)", "def add_user(self, user):\n\t\tself.users[user.username] = user", "def get_friends(user, data):\n setA = list(\n data.loc[data.user == user].user_friend_list.values)\n setB = list(\n data.loc[data.user_friend_list == user].user\n .values)\n friends = list(set(set(setA).union(setB)))\n return friends", "def register(self, user) -> None:\n self._all_members[user.name] = user\n if type(user).__name__ == 'LeaderUser':\n self._leaders.append(user)", "def get_friends(self):\n edges = DirectedUserToUserEdge.all().filter(\n 'owner_user_id =', self.key().id()).run()\n return db.get([db.Key.from_path('User', edge.friend_user_id) for edge in\n edges])", "def auto_follow_followers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n not_following_back = followers - following\n\n for user_id in not_following_back:\n try:\n t.friendships.create(user_id=user_id, follow=False)\n except Exception as e:\n print(\"error: %s\" % (str(e)))", "def _add_to_graph(self, user_profiles, new_infected_users):\n self.diffusion_tree.add_nodes_from(new_infected_users)\n parents = self._find_parents(user_profiles, new_infected_users)\n # connect parent(s) and child(ren)\n if parents is not None:\n edges = np.vstack((parents, new_infected_users)).T\n self.diffusion_tree.add_edges_from(edges)", "def list_users_friends(self):\n user = self.sp.user(self.user)\n return user", "def friends():\n friends = [u.to_dict() for u in g.user.get_friends()]\n return jsonify({'success': True, 'friends': friends})", "def remove_friends(self, user1_index, user2_index):\n if user1_index >= self.num_users or user2_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user1_index} and {user2_index} were requested.\"\n )\n if self.users_hat[user1_index, user2_index] == 1:\n self.users_hat[user1_index, user2_index] = 0\n elif self.is_verbose():\n self.log(f\"User {user2_index} was not following user {user1_index}\")\n if self.users_hat[user2_index, user1_index] == 1:\n self.users_hat[user2_index, user1_index] = 0\n elif self.is_verbose():\n self.log(f\"User {user1_index} was not following user {user2_index}\")", "def add_untracked_friends(self):\n\n self.log.debug(\"CHECK FOR UNTRACKED FRIENDS\")\n friends_ids_api = self.api.friends_ids()\n targets = Target.objects.filter(hunter=self.user)\\\n .exclude(status__in=Target.ON_DECK)\n friends_ids_django = [t.hunted.twitter_id for t in targets]\n untracked_friends_ids = \\\n filter(lambda x: unicode(x) not in friends_ids_django,\n friends_ids_api)\n\n untracked_friends, remainder = lookup_users_by_id(self.api,\n untracked_friends_ids)\n for untracked_friend in untracked_friends:\n \"\"\"These could be people who don't follow us, but we want to follow,\n for example to keep up with news of their company\"\"\"\n twitter_account, created = utils.get_or_create_twitter_account(\n untracked_friend)\n target, created 
= Target.objects.get_or_create(\n hunter=self.user, hunted=twitter_account)\n if created:\n target.reason = \"External add.\"\n target.status = Target.FOLLOWER\n target.save()\n self.log.debug(\" => add friend: %s\" % twitter_account.screen_name)\n else:\n self.log.debug(\" => we're following, but no reciprocation: %s\" % twitter_account.screen_name)", "def friends(user_id):\n user = user_grab(user_id)\n if user is None:\n return \"user not found\", 404\n friends = user.get(\"friends\")\n if friends is None:\n friends = []\n data_json = json.dumps({'friends': [str(friend) for friend in friends]})\n return data_json", "def add_connection(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return False\n if not user_B in network[user_A]['connections']:\n network[user_A]['connections'].append(user_B)\n return network", "def confirm_request_to_be_friends(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n\n if self.database.delete_pending_friend_request(user_id, target_id):\n return self.database.create_friend(user_id, target_id)\n return False", "def add(self, user: U) -> None:\n ...", "def connections2Neo(db, user, renderedTwits, friends=True):\n started = datetime.now()\n right_now = started.isoformat()\n \n users2Neo(db, renderedTwits)\n \n match = (\"MATCH (t:twitter_user {{screen_name: '{}'}}),\" +\n \" (f:twitter_user {{screen_name: d.screen_name}})\").format(user)\n\n if friends:\n merge = \"MERGE (t)-[:FOLLOWS]->(f)\"\n update = \"SET {}.friends_last_scraped = '{}'\".format('t'+user, right_now)\n else:\n merge = \"MERGE (t)<-[:FOLLOWS]-(f)\"\n update = \"SET {}.followers_last_scraped = '{}'\".format('t'+user, right_now)\n \n query = '\\n'.join(['UNWIND $data AS d', match, merge])\n \n data = [{'screen_name': twit.get('screen_name', False)}\n for twit in renderedTwits if twit.get('screen_name', False)]\n\n userNode = nodeRef(user, 'twitter_user', {'screen_name': user})\n update_query = '\\n'.join([mergeNode(userNode, match=True), update])\n\n neo_tx(db, update_query)\n neo_tx(db, query, data=data)\n\n how_long = (datetime.now() - started).seconds\n logging.info(\n '*** PUSHED %d CONNECTIONS FOR %s TO NEO IN %ds ***' %\n (len(renderedTwits), user, how_long))", "def add_user(self, user_id, user_point, do_update=True):\n \n self.n_users += 1;\n self.user_ids.append(user_id);\n self.user_points.append(user_point);\n \n if do_update:\n self.update();", "def add_connection(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return False\n if user_B not in network[user_A][0]:\n network[user_A][0].append(user_B)\n return network[user_A][0]", "def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n return self", "def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n return self", "def get_friends(self, user_id):\n # if user_id is alias, replace it with id\n if not self._is_positive_number(user_id):\n user_id = get_names_of_users(set([user_id]))[0].id\n api = pyvkontakte.VkontakteApi()\n return set(api.call('friends.get', user_id=user_id, v='5.8')['items'])", "def add_new_user(network, user, games):\n if not user in network:\n network[user] = {'connections': [], 'games': games}\n return network", "def add_user(self, user_id):\n\n if not str(user_id).isalnum():\n raise 
ValueError('Identifier must be the numerical user ID')\n\n # skip adding user if existing & detailed\n existing_user = self.get_user(user_id)\n if existing_user:\n if existing_user.get('detail') == 'full':\n log.info('Not adding user %s, already (full) in graph' % user_id)\n return existing_user\n if existing_user.get('detail') == 'basic':\n log.info('Not adding user %s, already (basic) in graph: updating' % user_id)\n return self.update_user(existing_user)\n\n\n log.info('Adding user %s to graph' % user_id)\n # get and assign user data to node\n props = self.fetch_user_data(user_id)\n user_node = self.gdb.node(**props)\n\n # add user node to indexes\n users = self.gdb.nodes.indexes.get('users')\n users['user_id'][props.get('id_str')] = user_node\n users['screen_name'][props.get('screen_name')] = user_node\n\n # add followers/following\n \n self.add_subscriptions(user_node)\n\n return user_node", "def create_friends(friend, friendors, create_post = True, visibility = ACL_DEFAULT):\n for friendor in friendors:\n friend.add_friend(friendor)\n friendor.add_friend(friend)\n # FriendRelationship.objects.create(friendor = friendor, friend = friend)\n\n if create_post:\n Post.objects.create(content = TEXT, author = friendor, visibility = visibility)", "def follow(self, user):\n if not self.is_following(user):\n f = Follow(follower=self, followed=user)\n db.session.add(f)", "def user_follow_users(self, ids=None, **kwargs):\n return self._put(\n API.MY_FOLLOWING.value, type=\"user\", ids=\",\".join(ids or []), **kwargs\n )", "def add_candidate(self, user):\n weight = (\n self.assignment_related_users.aggregate(models.Max(\"weight\"))[\"weight__max\"]\n or 0\n )\n defaults = {\"weight\": weight + 1}\n self.assignment_related_users.update_or_create(user=user, defaults=defaults)", "def fetch_friends(self, user, paginate=False):\n\n if USING_ALLAUTH:\n social_app = SocialApp.objects.get_current('facebook')\n oauth_token = SocialToken.objects.get(account=user, app=social_app).token\n else:\n social_auth_backend = FacebookBackend()\n\n # Get the access_token\n tokens = social_auth_backend.tokens(user)\n oauth_token = tokens['access_token']\n\n graph = facebook.GraphAPI(oauth_token)\n\n friends = graph.get_connections(\"me\", \"friends\")\n\n if paginate:\n total_friends = friends.copy()\n total_friends.pop('paging')\n while 'paging' in friends and 'next' in friends['paging'] and friends['paging']['next']:\n next_url = friends['paging']['next']\n next_url_parsed = urlparse.urlparse(next_url)\n query_data = urlparse.parse_qs(next_url_parsed.query)\n query_data.pop('access_token')\n for k, v in query_data.items():\n query_data[k] = v[0]\n friends = graph.get_connections(\"me\", \"friends\", **query_data)\n total_friends['data'] = sum([total_friends['data'], friends['data']], [])\n else:\n total_friends = friends\n\n return total_friends", "def follow(self, followerId, followeeId):\n\n # 把 followeeId append到他的 follow 属性中\n if followerId == followeeId: # 不能自己关注自己\n return\n # 实例化一个user(followerID)\n follower = UserInfo()\n follower.user_id = followerId \n follower.follows.append(followeeId) \n self.user_pool[followerId] = follower", "def add_user(self, attrs):\n pass", "def create_user_links(verbose=False):\n rg = global_ratings_graph()\n if verbose:\n print \"Ratings graph loaded.\"\n uids = rg.users()\n links = []\n for user in User.select():\n uid1 = \"u%s\" % user.user_id\n m1 = set(rg.user_movies(uid1))\n\n buddies = {}\n\n for uid2 in uids:\n if uid1 == uid2:\n continue\n\n m2 = 
set(rg.user_movies(uid2))\n\n intersection = m1.intersection(m2)\n if not intersection:\n continue\n\n union = m1.union(m2)\n\n buddies[uid2] = dict(\n # Jaccard index\n j=len(intersection)/float(len(union)),\n # Common movies count\n c=len(intersection),\n )\n\n links.append(dict(user=user, buddies=buddies))\n\n chunked_insert(UserLink, links)", "def user_follow(self, user_id: int) -> bool:\n assert self.user_id, \"Login required\"\n user_id = int(user_id)\n if user_id in self._users_following.get(self.user_id, []):\n self.logger.debug(\"User %s already followed\", user_id)\n return False\n data = self.with_action_data({\"user_id\": user_id})\n result = self.private_request(f\"friendships/create/{user_id}/\", data)\n if self.user_id in self._users_following:\n self._users_following.pop(self.user_id) # reset\n return result[\"friendship_status\"][\"following\"] is True", "def add_owner(self, user):\n user_in = user.get_groups()\n member = False\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n member = True\n ownership = Relationship(user.get(), 'owns', self.usergroup_node)\n graph.create(ownership)\n if not member:\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node", "def save_user(self):\n\n User.user_list.append(self)", "def add_user(self, user, id_bot=None):\n # salvo l'id dell'utente o del bot\n self.execute(TABELLE['id_users']['insert'], (user['id'],))\n # salvo le altre informazioni relative ad utenti o bot\n # queste informazioni potrebbero cambiare nel tempo, quindi\n # prima di tutto selezione le ultime informazioni note dal database\n # se sono uguali ignoro, altrimenti effettuo un inserimento\n user_db = self.get_user(user['id'])\n if self.different_user(user, user_db):\n self.execute(TABELLE['users']['insert'],\n (user['id'], user['username'], user['first_name'], user['last_name'], user['language_code']))\n if id_bot is not None:\n self.execute(TABELLE['bot_users']['insert'], (id_bot, user['id'], user['language_code']))", "def addUserId(self, user_id):\n self.__register_user_ids.add(user_id)", "def add_follow(follow_id):\n followed_user = User.query.get_or_404(follow_id)\n if not g.user or g.user.id == follow_id or followed_user.is_blocking(g.user):\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n g.user.following.append(followed_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")", "def add_all_friends(twitter, users):\n for u_dict in users:\n u_dict['friends'] = get_friends(twitter,u_dict['screen_name'])", "def add_new_user(network, user, games):\n if user not in network:\n network[user] = [[], games]\n return network", "def connect(request, pk=None):\n # check if user sent request to them self\n if int(request.user.id) == int(pk):\n return Response({'status': '400', 'code': 'E_SAME_USER',\n 'detail': code['E_SAME_USER']}, status=400)\n\n # Check both Users are valid\n from_user = get_or_none(User, pk=request.user.id)\n to_user = get_or_none(User, pk=pk)\n # Return Error Message User is not valid\n if from_user is None or to_user is None:\n return Response({'status': '400', 'code': 'E_USER_NOT_FOUND',\n 'detail': code['E_USER_NOT_FOUND']}, status=400)\n\n # check user have sent request before or not\n current_request = get_or_none(FriendRequest, from_user=from_user, to_user=to_user)\n # search current request in reverse way\n if current_request is None:\n current_request = get_or_none(FriendRequest, from_user=to_user, 
to_user=from_user)\n # Return Error Message request have sent before\n if current_request is not None:\n return Response({'status': '400', 'code': 'E_ALREADY_SEND_REQUEST',\n 'detail': code['E_ALREADY_SEND_REQUEST']}, status=400)\n # Check both users are connect or not\n current_connection = get_or_none(FriendConnect, user=from_user, friend=to_user)\n # Return Error Message both user are friend before\n if current_connection is not None:\n return Response({'status': '400', 'code': 'E_ALREADY_CONNECT',\n 'detail': code['E_ALREADY_CONNECT']}, status=400)\n # Save new request\n new_request = FriendRequest(from_user=from_user, to_user=to_user)\n new_request.save()\n # Check request is save success\n is_created = get_or_none(FriendRequest, from_user=from_user, to_user=to_user)\n # Return Error Message Request is not save\n if is_created is None:\n return Response({'status': '500', 'code': 'E_NOT_SAVE',\n 'detail': code['E_NOT_SAVE']}, status=500)\n # Return Message sent request success\n return Response({'status': '200', 'code': 'OK_SEND_FRIEND_REQUEST',\n 'detail': code['OK_SEND_FRIEND_REQUEST']}, status=201)", "def create_graph(users, friend_counts):\n ###TODO-- Completed\n G = nx.Graph()\n\n #For Filtering the Nodes\n #print(friend_counts)\n friend_nodes = [friend for friend in friend_counts if friend_counts[friend] > 1]\n candidate_nodes = [user['screen_name'] for user in users]\n\n #print(\"Nodes: \",len(friend_nodes), len(candidate_nodes))\n #Adding Nodes to graph\n G.add_nodes_from(friend_nodes + candidate_nodes)\n\n #Connecting the Nodes with Edges\n for candidate in users:\n for friend in friend_nodes:\n if friend in candidate['friends']:\n G.add_edge(candidate['screen_name'], friend)\n\n return G", "def get_user_friends(user_id):\n\n friends = db.session.query(User_Friend).filter(User_Friend.user_id==user_id).all() \n\n return friends", "def follow(self, followerId: int, followeeId: int) -> None:\n if followerId == followeeId: return\n self.users[followerId].add(followeeId)", "def save_user(self):\n User.user_list.append(self)", "def save_user(self):\n User.user_list.append(self)" ]
[ "0.7621462", "0.7019252", "0.69413483", "0.69305646", "0.69305646", "0.69305646", "0.6898999", "0.68216527", "0.67973375", "0.67016155", "0.6672114", "0.6542767", "0.6497178", "0.6497178", "0.6497178", "0.6497178", "0.6441999", "0.6441999", "0.6441999", "0.6441999", "0.6438662", "0.64343077", "0.6427127", "0.6427127", "0.6396734", "0.629882", "0.6285157", "0.616015", "0.6052063", "0.6042031", "0.6030429", "0.6030309", "0.601947", "0.60156846", "0.5974656", "0.5974556", "0.59515923", "0.5923835", "0.59031296", "0.589587", "0.58848363", "0.58709234", "0.5856122", "0.58339226", "0.58276594", "0.5790553", "0.5760359", "0.5751824", "0.57401067", "0.5708472", "0.5708374", "0.570405", "0.5700152", "0.5690787", "0.568561", "0.5670067", "0.56391764", "0.5632944", "0.56315976", "0.56304526", "0.56297517", "0.5614501", "0.56036747", "0.5595652", "0.55920494", "0.55884147", "0.55875283", "0.5587373", "0.5585077", "0.5567215", "0.5554397", "0.5550712", "0.55317765", "0.5525458", "0.5525458", "0.55198836", "0.5516966", "0.5505593", "0.549906", "0.5492659", "0.54818326", "0.5461817", "0.5455905", "0.5451705", "0.54412353", "0.5440351", "0.54309034", "0.5429561", "0.54224735", "0.5414548", "0.5412817", "0.54082334", "0.5405674", "0.5397396", "0.53817666", "0.53755254", "0.53614914", "0.53373504", "0.533069", "0.533069" ]
0.7181159
1
Method to remove a user from friends, that is, to remove a bidirectional link that connects the two users.
def remove_friends(self, user1_index, user2_index): if user1_index >= self.num_users or user2_index >= self.num_users: raise ValueError( f"Number of users is {self.num_users}, but indices " f"{user1_index} and {user2_index} were requested." ) if self.users_hat[user1_index, user2_index] == 1: self.users_hat[user1_index, user2_index] = 0 elif self.is_verbose(): self.log(f"User {user2_index} was not following user {user1_index}") if self.users_hat[user2_index, user1_index] == 1: self.users_hat[user2_index, user1_index] = 0 elif self.is_verbose(): self.log(f"User {user1_index} was not following user {user2_index}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unfriend(self, remove):\n remover_friends_list = self # person terminating the friendship \n \n # remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n\n #remove friend from removee friend list\n friends_list = FriendList.objects.get(user=removee)\n friend_list.remove_friend(self.user)", "def unfriend(self, removee):\n remover_friends_list = self # person terminating the friendship\n # Remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n # Remove friend from removee's friend list\n friends_list = FriendList.objects.get(user=removee)\n friends_list.remove_friend(self.user)", "def removeFriend(self, user):\n user = user if isinstance(user, MyPlexUser) else self.user(user)\n url = self.FRIENDUPDATE.format(userId=user.id)\n return self.query(url, self._session.delete)", "def unfriend(self, removee):\n\t\tremover_friend_list = self # person terminating the friendship\n\n\t\t# Remove friend from friend request\n\t\tremover_friend_list.remove_friend(removee)\n\n\t\t# Remove friend from the removeee friend list\n\t\tfriends_list = FriendList.objects.get(user=removee)\n\t\tfriends_list.remove_friend(self.user)", "def unfriend(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.delete_friend(user_id, target_id)", "def remove_friend():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n friend_to_remove = get_id_from_username(request.form['remove_user'])\n if not friend_to_remove or friend_to_remove==user_id:\n return redirect(url_for('message.converse'))\n remove_friend_db(user_id, friend_to_remove)\n return redirect(url_for('message.converse'))", "def remove_relation(request, id):\n user = request.user\n relation = get_object_or_404(User, id=id)\n user.profile.relations.remove(relation)\n user.profile.friends.add(relation)\n messages.success(\n request,\n 'Family member removed to your friends list'\n )\n return redirect('profiles:my_friends')", "def delete_user(network, user):\n if user in network:\n del network[user]\n for u in network:\n connections = get_connections(network, u)\n if user in connections:\n i = connections.index(user)\n del connections[i]\n return network", "def delete_friend(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.friends.remove(friend)\n friend.profile.friends.remove(user)\n messages.success(\n request,\n 'User deleted from your friends list'\n )\n return redirect('profiles:profile')", "def remove(self, user):\n self.packet.send_room([\"rp\", user.get_int_id(self.rooms),\n user.data.id], user.room)\n self.rooms[user.room][\"users\"].remove(user)", "def unfollow(self, user):\n if self.is_following(user):\n self.followed.remove(user)\n return self", "def unfollow(self, user):\n f = self.followed.filter_by(followed_id=user.id).first()\n if f:\n db.session.delete(f)", "def remove(self, user_id):\n pass", "async def unlink(self, ctx):\n # Remove all link tokens and spotify details for this user\n remove_tokens(ctx.author.id)\n remove_spotify_details(ctx.author.id)\n await ctx.reply(\"All your linked accounts were removed, if you had any!\")", "def test_remove_friends_symmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = 
AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n\n u.friends.add(f)\n f.friends.remove(u)\n self.assertIs(u in f.friends.all(), False)\n self.assertIs(f in u.friends.all(), False)", "def remove_candidate(self, user):\n self.assignment_related_users.filter(user=user).delete()\n inform_changed_data(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def user_unfollow(self, user_id: int) -> bool:\n assert self.user_id, \"Login required\"\n user_id = int(user_id)\n data = self.with_action_data({\"user_id\": user_id})\n result = self.private_request(f\"friendships/destroy/{user_id}/\", data)\n if self.user_id in self._users_following:\n self._users_following[self.user_id].pop(user_id, None)\n return result[\"friendship_status\"][\"following\"] is False", "def disconnect_user(self, user):\n\t\tis_user_removed = False\n\t\tif user in self.users.all():\n\t\t\tself.users.remove(user)\n\t\t\tself.save()\n\t\t\tis_user_removed = True\n\t\treturn is_user_removed", "def remove_user(self, u: \"Node\") -> None:\n\n if u in self.users_:\n self.users_[u] -= 1\n if self.users_[u] == 0:\n del self.users_[u]", "def unfollowUser(following):\n \n cur, user_id, con = initialise(3, True)\n cur.execute(\"DELETE FROM followers WHERE user = (SELECT username FROM users WHERE id = ?) AND following = ?\", (user_id, following))\n finish(con)", "def sipserver_user_remove(self, user: str) -> None:\n self.remove_endpoint_from_sipserver(endpoint=user)", "def remove_user(user):\n # user.confirmed = False\n # user = get_user_by_phone(phone_num)\n db.session.delete(user)\n db.session.commit()\n\n return user\n # DELETE FROM users WHERE user.phone_num == phone)", "def delete_user(self):\n\n User.user_list.remove(self)", "def unfollow(request, usertostopfollow):\n stop_follow = Member.objects.get(user__username=usertostopfollow)\n user = Member.objects.get(user=request.user)\n user.following.remove(stop_follow)\n user.save()\n return redirect(request.META['HTTP_REFERER'])", "async def delete(\n self, user_id: Optional[int] = None, **kwargs\n ) -> friends.DeleteResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.delete\", params)\n model = friends.DeleteResponse\n return model(**response).response", "def view_removeConnection(self, user, tagA, tagB):\r\n key = int(md5(tagA).hexdigest(), 16) ^ int(md5(tagB).hexdigest(), 16)\r\n\r\n try:\r\n connection = user.connections.pop(key)\r\n except KeyError:\r\n raise InvalidRequest('Can not disconnect two unconnected '\r\n 'interfaces.')\r\n\r\n connection.dontNotifyOnDeath(user.connectionDied)\r\n connection.destroy()\r\n\r\n # TODO: Return some info about success/failure of request\r", "def delete_user(self, user):\n self.delete(user)", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)", "def user_unfollow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.remove(following)\n db.session.commit()\n return {'unfollowed': True}", "def unfollowing_and_removing(self, user_id):\n if self.unfollowing(user_id):\n ind = [i for i, j in enumerate(self.monitored_users) if j.get('user', '') == user_id]\n if ind:\n 
self.monitored_users.remove(self.monitored_users[ind[0]])", "async def del_user(conn: LDAPConnection, user: dict, mailman: Client) -> None:\n await conn.delete(user[\"dn\"])\n uid = user[\"attributes\"][\"uid\"][0]\n rmtree(user[\"attributes\"][\"homeDirectory\"][0])\n rmtree(f\"/webtree/{uid[:1]}/{uid}\")\n mailing_list = mailman.get_list(\"announce-redbrick\")\n mailing_list.unsubscribe(f\"{uid}@redbrick.dcu.ie\")", "def remove_friend(request):\n required_fields = ['source_user_id', 'dest_user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['source_user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # delete friend from user profile\n if not mock_db.remove_friend(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when removing friend from the profile!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def unfriend(request, pk=None):\n # Check user id and friend id\n if int(request.user.id) == int(pk):\n return Response({'status': '400', 'code': 'E_SAME_USER',\n 'detail': code['E_SAME_USER']}, status=400)\n # Check 2 user is valid\n current_user = get_or_none(User, pk=request.user.id)\n friend = get_or_none(User, pk=pk)\n # if 1 or 2 user is not valid\n if current_user is None or friend is None:\n return Response({'status': '400', 'code': 'E_USER_NOT_FOUND',\n 'detail': code['E_USER_NOT_FOUND']}, status=400)\n # get connect of request user -> friend\n # from_user=friend.to_user, to_user=request.user\n current_connection = get_or_none(Friend, from_user=current_user, to_user=friend)\n if current_connection is None:\n return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # get connect of friend to request user\n # reverse_connection = get_or_none(FriendConnect, user=friend, friend=current_user)\n #if reverse_connection is None:\n # return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n # 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # Delete\n current_connection.delete()\n #reverse_connection.delete()\n # if every thing ok\n return Response({'status': '200', 'code': 'OK_UNFRIEND',\n 'detail': code['OK_UNFRIEND']}, status=200)", "def removeUserId(self, user_id):\n self.__register_user_ids.discard(user_id)", "def remove_user(self, user_id):\n if user_id in self:\n user = self[user_id]\n del self[user_id]\n return user", "async def remove(self, friend: Union[Player, 'Friend', str]):\n\t\tif not isinstance(friend, Friend):\n\t\t\tfriend = self.get_friend(friend)\n\n\t\t\tif friend is None:\n\t\t\t\treturn\n\n\t\tif friend.isSoulmate:\n\t\t\tsid = await self._client.sendCP(26, Packet())\n\t\t\tresult, error = 27, 36\n\t\telse:\n\t\t\tsid = await self._client.sendCP(20, Packet().writeString(friend.name.lower()))\n\t\t\tresult, error = 21, 30\n\n\t\tdef is_deletion(tc, packet):\n\t\t\treturn tc == result and packet.read32() == sid\n\n\t\ttc, packet = await self._client.wait_for('on_raw_cp', is_deletion, 
timeout=5)\n\t\tresult = packet.read8()\n\n\t\tif result != 1:\n\t\t\traise CommunityPlatformError(error, result)\n\n\t\tif friend not in self.friends:\n\t\t\treturn\n\n\t\tif friend == self.soulmate:\n\t\t\tself.soulmate = None\n\t\tself.friends.remove(friend)", "def unfollow_friend(username):\n\n if not g.user:\n print \"401\"\n abort(401)\n whom_id = get_user_id(username)\n print whom_id\n if whom_id is None:\n abort(404)\n unfollow_query(whom_id)\n flash('You are no longer following \"%s\"' % username)\n name = {'name of unfollowing user': username}\n ############### REDIS cache invalidate #####################\n R_SERVER.delete(user_timeline_key)\n return jsonify(Username=name, Status_code=status.HTTP_200_OK)", "def removeFollower(self,id):\n # DELETE /followers/$id\n pass", "def unfollow(self, followerId: int, followeeId: int) -> None:\n following = self.user_followed[followerId]\n if followeeId in following:\n following.remove(followeeId)\n self.user_followed[followerId] = following", "def unfollow_user(request, course_id, followed_user_id):\r\n user = cc.User.from_django_user(request.user)\r\n followed_user = cc.User.find(followed_user_id)\r\n user.unfollow(followed_user)\r\n return JsonResponse({})", "def unfollow(self, other):\n\t\tif self.follows(other):\n\t\t\tself.followed.remove(other)", "def fusion_api_remove_user(self, name=None, uri=None, api=None, headers=None):\n return self.user.delete(name, uri, api, headers)", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def unfollow_user(username):\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n db = get_db()\n db.execute('delete from follower where who_id=? and whom_id=?',\n [session['user_id'], whom_id])\n db.commit()\n flash('You are no longer following \"%s\"' % username)\n return redirect(url_for('user_timeline', username=username))", "def remove_user(users, curr_username, user_role, request_ip):\n #TODO: error checking\n log_connector.add_log('DELETE USER', \"Removed {} user(s)\".format(len(users)), curr_username, user_role, request_ip)\n user_connector.remove_user(users)", "def del_user(request):\r\n mdict = request.matchdict\r\n\r\n # Submit a username.\r\n del_username = mdict.get('username', None)\r\n\r\n if del_username is None:\r\n LOG.error('No username to remove.')\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: No username to remove.',\r\n })\r\n\r\n u = UserMgr.get(username=del_username)\r\n\r\n if not u:\r\n LOG.error('Username not found.')\r\n request.response.status_int = 404\r\n return _api_response(request, {\r\n 'error': 'User not found.',\r\n })\r\n\r\n try:\r\n # First delete all the tag references for this user's bookmarks.\r\n res = DBSession.query(Bmark.bid).filter(Bmark.username == u.username)\r\n bids = [b[0] for b in res]\r\n\r\n qry = bmarks_tags.delete(bmarks_tags.c.bmark_id.in_(bids))\r\n qry.execute()\r\n\r\n # Delete all of the bmarks for this year.\r\n Bmark.query.filter(Bmark.username == u.username).delete()\r\n DBSession.delete(u)\r\n return _api_response(request, {\r\n 'success': True,\r\n 'message': 'Removed user: ' + del_username\r\n })\r\n except Exception, exc:\r\n # There might be cascade issues or something that causes us to fail in\r\n # removing.\r\n LOG.error(exc)\r\n request.response.status_int = 500\r\n return _api_response(request, {\r\n 'error': 'Bad Request: ' + str(exc)\r\n })", "def delete_user():", "def _remove_connection(user, friend):\n # 
stat\n removed_count = 0\n updated_count = 0\n dep_count = 0\n # global flags\n updating = False\n updating_rev = False\n # conn's without shortest paths\n deps_lost = []\n deps_rev_lost = []\n # current\n current = Degree.objects.filter(from_user=user, to_user=friend)\n current_rev = Degree.objects.filter(to_user=user, from_user=friend)\n current.delete()\n current_rev.delete()\n\n\n # find all dependants\n # path traversing\n #deps = Degree.objects.extra(where=[\"path like '%%\"+str(user.id)+\"%%\"+str(friend.id)+\"%%'\"]).order_by('distance')\n #deps_rev = Degree.objects.extra(where=[\"path like '%%\"+str(friend.id)+\"%%\"+str(user.id)+\"%%'\"]).order_by('distance')\n deps = Degree.objects.filter(path__iregex=r'(,|\\A)%s,%s(,|\\Z)' % (user.id, friend.id))\n deps_rev = Degree.objects.filter(path__iregex=r'(,|\\A)%s,%s(,|\\Z)' % (friend.id, user.id))\n\n if deps.count():\n updating = True\n dep_count += deps.count()\n if deps_rev.count():\n updating_rev = True\n dep_count += deps_rev.count()\n\n \"\"\"\n logic here:\n ask all nighbours if they have path to old node\n if no, wait until loop is finished\n if someone made new connection\n look again\n \"\"\"\n deps = list(deps)\n deps_lost = list(deps)\n i=1\n while updating:\n i+=1\n # flag for global update\n # we need to run all dependants at least once\n y=0\n updating = False\n for dep in deps:\n #deps.remove(dep)\n neighs = Degree.objects.filter(from_user=dep.from_user, distance = 0)\n for neigh in neighs:\n try:\n shortest = Degree.objects.get(from_user=neigh.to_user, to_user=dep.to_user)\n # hooray! shortest pass\n #if '%s,%s' % (user.id, friend.id) not in shortest.path:\n if not re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (user.id, friend.id), shortest.path):\n # check length (and current path)\n #if '%s,%s' % (user.id, friend.id) in dep.path:\n if re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (user.id, friend.id), dep.path):\n # if we have wrong path\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n deps_lost.remove(dep)\n updating = True\n else:\n # path already updated from neighbour\n # we need to check current length\n if shortest.distance + 1 < dep.distance:\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n updating = True\n except Degree.DoesNotExist:\n continue\n except:\n logger = logging.getLogger(__name__)\n logger.warning('Error in updating, id of degree = %s' % dep.id)\n raise\n # if no shortest found\n # append to lost list\n #if not updated:\n #deps_lost.append(dep)\n\n deps_rev = list(deps_rev)\n deps_rev_lost = list(deps_rev)\n i=1\n while updating_rev:\n i+=1\n y=0\n updating_rev = False\n for dep in deps_rev:\n y+=1\n #deps_rev.remove(dep)\n neighs = Degree.objects.filter(from_user=dep.from_user, distance = 0)\n for neigh in neighs:\n try:\n shortest = Degree.objects.get(from_user=neigh.to_user, to_user=dep.to_user)\n #if '%s,%s' % (friend.id, user.id) not in shortest.path:\n if not re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (friend.id, user.id), shortest.path):\n #if '%s,%s' % (friend.id, user.id) in dep.path:\n if re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (friend.id, user.id), dep.path):\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n updated_count += 1\n updating_rev = True\n deps_rev_lost.remove(dep)\n else:\n if shortest.distance + 1 < dep.distance:\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 
1\n dep.save()\n updated_count += 1\n updating_rev = True\n except Degree.DoesNotExist:\n continue\n except:\n logger = logging.getLogger(__name__)\n logger.warning('Error in updating rev, id of degree = %s' % dep.id)\n raise\n\n\n #if not deps_lost and not deps_rev_lost:\n\n # remove all connections\n # we can't do that\n # since although we have losters\n # there still could be connection\n # exmple: triangle connection\n #removed_count += len(deps_lost)\n #for conn in deps_lost:\n #conn.delete()\n #deps_lost.remove(conn)\n #else:\n # restore original\n restored = 0\n neighs = Degree.objects.filter(from_user=user, distance = 0)\n for neigh in neighs:\n try:\n shortest = Degree.objects.get(from_user=neigh.to_user, to_user=friend)\n #if '%s,%s' % (user.id, friend.id) not in shortest.path:\n if not re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (user.id, friend.id), shortest.path):\n obj, created = Degree.objects.get_or_create(from_user=user, to_user=friend)\n if created:\n obj.path=\"%s,%s\" % (user.id, shortest.path)\n obj.distance = shortest.distance + 1\n obj.save()\n restored +=1\n else:\n if shortest.distance + 1 < obj.distance:\n obj.path=\"%s,%s\" % (user.id, shortest.path)\n obj.distance = shortest.distance + 1\n obj.save()\n except Degree.DoesNotExist:\n continue\n\n # reverse\n\n neighs = Degree.objects.filter(from_user=friend, distance = 0)\n for neigh in neighs:\n try:\n shortest = Degree.objects.get(from_user=neigh.to_user, to_user=user)\n #if '%s,%s' % (friend.id, user.id) not in shortest.path:\n if not re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (friend.id, user.id), shortest.path):\n obj, created = Degree.objects.get_or_create(from_user=friend, to_user=user)\n if created:\n obj.path=\"%s,%s\" % (friend.id, shortest.path)\n obj.distance = shortest.distance + 1\n obj.save()\n restored +=1\n else:\n if shortest.distance + 1 < obj.distance:\n obj.path=\"%s,%s\" % (friend.id, shortest.path)\n obj.distance = shortest.distance + 1\n obj.save()\n\n except Degree.DoesNotExist:\n continue\n\n # if we still have someone left\n # check last time through neigh\n if deps_lost:\n for dep in deps_lost:\n neighs = Degree.objects.filter(from_user=dep.from_user, distance = 0)\n for neigh in neighs:\n try:\n shortest = Degree.objects.get(from_user=neigh.to_user, to_user=dep.to_user)\n # hooray! 
shortest pass\n #if '%s,%s' % (user.id, friend.id) not in shortest.path:\n if not re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (user.id, friend.id), shortest.path):\n # check length (and current path)\n #if '%s,%s' % (user.id, friend.id) in dep.path:\n if re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (user.id, friend.id), dep.path):\n # if we have wrong path\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n # FIXME\n # THis will not remove all elements\n # maybe we can use\n # for item in mylist[:]:\n # but needs to be tested\n deps_lost.remove(dep)\n updating = True\n else:\n # path already updated from neighbour\n # we need to check current length\n if shortest.distance + 1 < dep.distance:\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n updating = True\n except Degree.DoesNotExist:\n continue\n\n if deps_rev_lost:\n for dep in deps_rev_lost:\n neighs = Degree.objects.filter(from_user=dep.from_user, distance = 0)\n for neigh in neighs:\n try:\n shortest = Degree.objects.get(from_user=neigh.to_user, to_user=dep.to_user)\n #if '%s,%s' % (friend.id, user.id) not in shortest.path:\n if not re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (friend.id, user.id), shortest.path):\n #if '%s,%s' % (friend.id, user.id) in dep.path:\n if re.match(r'.*(,|\\A)%s,%s(,|\\Z).*' % (friend.id, user.id), dep.path):\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n updated_count += 1\n updating_rev = True\n # FIXME\n # look above\n deps_rev_lost.remove(dep)\n else:\n if shortest.distance + 1 < dep.distance:\n dep.path = \"%s,%s\" % (dep.from_user.id, shortest.path)\n dep.distance = shortest.distance + 1\n dep.save()\n updated_count += 1\n updating_rev = True\n except Degree.DoesNotExist:\n continue\n\n # if we still have losters,\n # delete them\n\n if deps_lost:\n removed_count += len(deps_lost)\n for conn in deps_lost:\n conn.delete()\n #deps_lost.remove(conn)\n\n if deps_rev_lost:\n removed_count += len(deps_rev_lost)\n for conn in deps_rev_lost:\n\n conn.delete()\n #deps_rev_lost.remove(conn)\n\n logger = logging.getLogger(__name__)\n logger.warning('We removed: %s records, updated: %s, restored %s, dependants: %s/2 ' % (removed_count, updated_count, restored, dep_count))\n\n return True", "def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])", "def unfollow(self, followerId: int, followeeId: int) -> None:\n self.users[followerId].discard(followeeId)", "def remove_user(self, username):\n del self.user_table[username]", "def rm_favoriting_user_id(self, circuit_id, user_id):\n key = ':'.join(\n [CIRCUIT_FAV_USRS_1, \n str(circuit_id), \n CIRCUIT_FAV_USRS_2]\n )\n self.RS.srem(key, user_id)", "def delete_user():\n #TODO user delete\n pass", "def unlink(self, link_id):", "def unfollow(self, followerId, followeeId):\n if followerId in self.follows:\n if followeeId in self.follows[followerId]:\n self.follows[followerId].remove(followeeId)", "def del_user(self, username):\n pass", "def fb_deauth(self, request):\n signed_request = request.data.get('signed_request')\n if signed_request:\n parsed_signed_request = facebook_controller.parse_signed_request(signed_request)\n facebook_user_id = parsed_signed_request.get('user_id')\n if facebook_user_id:\n facebook_controller.delete_linked_facebook_account(facebook_user_id)\n return Response('OK')", "def unfollow(self, followerId, followeeId):\n # 在user_pool 中查询这个用户 
follower\n if self.user_pool[followerId]:\n # 如果在用户的关注列表中才删除\n if followeeId in self.user_pool[followerId].follows:\n self.user_pool[followerId].follows.remove(followeeId)", "def remove(self, redditor: str | praw.models.Redditor):\n data = {\"name\": str(redditor), \"type\": self.relationship}\n url = API_PATH[\"unfriend\"].format(subreddit=self.subreddit)\n self.subreddit._reddit.post(url, data=data)", "async def unfriend(self, TargetId: int):\n data = {\n 'targetUserId': TargetId\n }\n e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/unfriend', method='post',\n data=data)\n return e", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.users:\n self._create_user(followeeId)\n if followerId not in self.users:\n self._create_user(followerId)\n if followerId != followeeId and followeeId in self.users[followerId]:\n self.users[followerId].remove(followeeId)", "async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n await ctx.send(\"User is not blacklisted.\")\r\n else:\r\n self.settings['blacklist'].remove(user.id)\r\n await ctx.send(\"User removed from blacklist.\")", "def _RemoveUsers(self, remove_users):\n for username in remove_users:\n self.utils.RemoveUser(username)\n self.user_ssh_keys.pop(username, None)\n self.invalid_users -= set(remove_users)", "def remove(self, update, context):\n\n telegram_user = update.message.from_user\n if len(context.args) != 1:\n message = (\n \"To remove a subscriptions from your list please use /remove <entryname>. To see all your \"\n \"subscriptions along with their entry names use /list ! \"\n )\n update.message.reply_text(message)\n return\n\n entry = self.db.get_user_bookmark(telegram_id=telegram_user.id, alias=context.args[0])\n if entry:\n self.db.remove_user_bookmark(telegram_id=telegram_user.id, url=entry[0])\n message = \"I removed \" + context.args[0] + \" from your subscriptions!\"\n update.message.reply_text(message)\n else:\n message = (\n \"I can not find an entry with label \"\n + context.args[0]\n + \"in your subscriptions! Please check your subscriptions using /list and use the delete command \"\n \"again! 
\"\n )\n update.message.reply_text(message)", "def del_user(self, name):\n del self.users[irc.strings.IRCFoldedCase(modules.trim_nick(name))]", "def remove_users(caller, role, *users):\r\n # can always remove self (at this layer)\r\n if not(len(users) == 1 and caller == users[0]):\r\n _check_caller_authority(caller, role)\r\n role.remove_users(*users)", "def delete(self, request, username):\n\n # Retrieve the user from the user table if the user exists\n try:\n user_to_unfollow = User.objects.get(username=username)\n current_user = request.user\n # If a user is trying to unfollow themselves then stop the request\n if user_to_unfollow.profile.id == current_user.profile.id:\n return Response(\n {\"errors\": FOLLOW_USER_MSGS['CANNOT_UNFOLLOW_SELF']},\n status=status.HTTP_400_BAD_REQUEST\n )\n # Check if the user to be unfollowed\n # is in the current users following list\n try:\n profile_id = user_to_unfollow.profile.id\n user_being_followed = CustomFollows.objects.get(\n to_profile_id=profile_id,\n from_profile_id=current_user.profile.id\n )\n # If not tell the user the request can't happen\n # Because they don't follow the user\n except Exception as e:\n return Response(\n {\n \"errors\": FOLLOW_USER_MSGS['USER_UNFOLLOWED_ALREADY']\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n # Otherwise unfollow the user as requested\n current_user.profile.follows.remove(user_to_unfollow.profile)\n # Get the following & followers username list\n # And the following & followers count for the current user\n user_following_data = get_user_following_data(current_user)\n return Response(\n {\n \"message\": FOLLOW_USER_MSGS['USER_UNFOLLOW_SUCCESSFUL'],\n \"following\": user_following_data[\"following\"],\n \"followers\": user_following_data[\"followers\"],\n \"followingCount\": user_following_data[\"followingCount\"],\n \"followersCount\": user_following_data[\"followersCount\"]\n },\n status=status.HTTP_200_OK\n )\n # End request if we cannot find the user we want to unfollow.\n except User.DoesNotExist:\n return Response(\n {\"errors\": FOLLOW_USER_MSGS['USER_NOT_FOUND']},\n status=status.HTTP_404_NOT_FOUND\n )", "def view_remove_user(self, user, username):\r\n user.realm._checker.removeUser(username)", "def unfollow_user(username):\n user_ID = before_request()\n user_ID = None\n if user_ID != None:\n user_ID = str(g.user['_id'])\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n mongo.db.users.update({'_id': g.user['_id']}, {\n '$pull': {'follows': whom_id}})\n flash('You are no longer following \"%s\"' % username)\n if redis_obj.get(user_ID):\n return redirect(url_for('user_timeline', username=username, userId=pickle.loads(redis_obj.get(user_ID))))\n else:\n redis_obj.delete(session['user_id'])\n print \"Invalidating cache after Unfollow\"\n return redirect(url_for('user_timeline', username=username))", "def remove_user(self, user: discord.User) -> bool:\n\t\tif not self.user_has_entry(user):\n\t\t\treturn False\n\t\t\n\t\tdef data_interaction(cur: Cursor):\n\t\t\tsql = f\"DELETE FROM {StrikeConsts.STRIKE_TABLE} WHERE id=%s;\"\n\t\t\tcur.execute(sql, (user.id,))\n\t\t\t\n\t\t\treturn [True]\n\t\t\t\n\t\treturn self.connect_and_execute(data_interaction)[1][0]", "def remove_link(self,link,verbose=False):\n label, child = link\n self.outgoing.remove((label,child))\n child.incoming.remove((label,self))\n if verbose: print('removed', label, self.nodeid, child.nodeid)", "def destroy(self):\r\n for user in self._users.copy():\r\n user.destroy()\r\n\r\n assert 
len(self._users) == 0\r\n\r\n self._interface.unregisterConnection(self)\r\n self._interface = None\r\n\r\n self._protocol.unregisterConnection(self)\r\n self._protocol = None", "def remove_users(self, user_ids, nid=None):\n if self.cookies is None:\n raise NotAuthenticatedError(\"You must authenticate before making any other requests.\")\n\n nid = nid if nid else self._nid\n\n content_url = self.base_api_url\n content_params = {\"method\": \"network.update\"}\n content_data = {\n \"method\": \"network.update\",\n \"params\": {\n \"id\": nid,\n \"remove_users\": user_ids\n }\n }\n\n r = requests.post(\n content_url,\n data=json.dumps(content_data),\n params=content_params,\n cookies=self.cookies\n ).json()\n\n if r.get(u'error'):\n raise Exception(\"Could not remove users.\\n{}\".format(r))\n else:\n return r.get(u'result')", "async def removeuser(self, ctx, user: discord.Member):\n\n if check_key(user.id):\n delete_key(user.id)\n await self.bot.say(\"{}, you are way out of this league.\".format(user.mention))\n else:\n await self.bot.say(\"That user does not exist in this league.\")", "def unfollow(self, user_index, following_index):\n if user_index >= self.num_users or following_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user_index} and {following_index} were requested.\"\n )\n if self.users_hat[following_index, user_index] == 1:\n self.users_hat[following_index, user_index] = 0\n elif self.is_verbose():\n self.log(f\"User {following_index} was not following user {user_index}\")", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followerId not in self.users.keys() or followeeId not in self.users.keys():\n return\n if followeeId not in self.users[followerId].followees.keys():\n return\n self.users[followerId].followees.pop(followeeId)\n\n\n\n # Your Twitter object will be instantiated and called as such:\n # obj = Twitter()\n # obj.postTweet(userId,tweetId)\n # param_2 = obj.getNewsFeed(userId)\n # obj.follow(followerId,followeeId)\n # obj.unfollow(followerId,followeeId)", "def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followerId != followeeId and followeeId in self.followList.get(followerId, []):\n self.followList[followerId].remove(followeeId)\n # print(self.followList)", "def remove_member(self, group_id: str, user_id: str):\n # If successful, this method returns 204 No Content response code.\n # It does not return anything in the response body.\n # Using resp_type=\"text\" to avoid parsing error in the calling method.\n self.ms_client.http_request(\n method='DELETE',\n url_suffix=f'groups/{group_id}/members/{user_id}/$ref', resp_type=\"text\")", "def remove_member(self, db: Session, *, room: Room, user: User) -> Room:\n members = [x for x in room.members if x.id != user.id]\n return self.update(db=db, db_obj=room, obj_in={\"members\": members})", "def remove_user(self, username):\n\n row = self.c.execute(\"SELECT * FROM profiles WHERE name =?\",\n (username,))\n for i in row:\n user = i[1]\n print(user)\n if user == username:\n self.c.execute(\"SELECT id FROM profiles WHERE name=?\",\n (username,))\n i_d = self.c.fetchone()[0]\n self.c.execute(\"DELETE FROM events WHERE user_id=?\", (i_d,))\n self.c.execute(\"DELETE FROM profiles WHERE name=?\", 
(username,))\n self.conn.commit()\n return True\n else:\n print ('User not found.')", "def remove_user(user_id):\n response_object = {'status': 'success'}\n models.User.query.filter(models.User.id == user_id).delete()\n database.session.commit() # pylint: disable=no-member\n\n return jsonify(response_object)", "def remove(self, uid):\n marker = object()\n name = self._reverse.get(uid, marker)\n if name is not marker:\n del self._reverse[uid]\n try:\n del self._forward[name]\n except KeyError:\n # If it isn't there, good, that is the outcome we wanted,\n # right?\n pass", "def remove_friend(request):\n collected_values = {}\n\n if request.method != 'POST':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n \n uid = request.POST[\"user_id\"]\n oid = request.POST[\"oid\"]\n token = request.POST[\"token\"]\n\n # Check auth\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n user_raw_query = \"SELECT friends, friend_not_to_add from linx_luser WHERE user_id = {}\".format(uid)\n other_raw_query = \"SELECT friends, friend_not_to_add from linx_luser WHERE user_id = {}\".format(oid)\n with connection.cursor() as cursor:\n cursor.execute(user_raw_query)\n values = cursor.fetchall()\n user_friends = values[0][0]\n if user_friends == None:\n user_friends = \"\"\n user_blocked = values[0][1]\n if user_blocked == None:\n user_blocked = \"\"\n\n cursor.execute(other_raw_query)\n values = cursor.fetchall()\n other_friends = values[0][0]\n if other_friends == None:\n other_friends = \"\"\n other_blocked = values[0][1]\n if other_blocked == None:\n other_blocked = \"\"\n\n friendsr = user_friends.replace(\"[\", \"\").replace(\"]\", \"\")\n split_user_friends = friendsr.split(\",\")\n split_user_friends.remove(oid)\n new_user_friends = \"[\" + \",\".join(split_user_friends) + \"]\"\n \n block_listr = user_blocked.replace(\"[\", \"\").replace(\"]\", \"\")\n block_list = block_listr.split(\",\")\n if block_list is []:\n block_list = [oid]\n else:\n block_list.append(oid)\n new_user_block = \"[\" + \",\".join(block_list) + \"]\"\n\n ofriendsr = other_friends.replace(\"[\", \"\").replace(\"]\", \"\")\n other_friends = ofriendsr.split(\",\") \n other_friends.remove(uid)\n new_other_friends = \"[\" + \",\".join(other_friends) + \"]\"\n\n block_listr2 = other_blocked.replace(\"[\", \"\").replace(\"]\", \"\")\n block_list2 = block_listr2.split(\",\")\n if block_list2 is []:\n block_list2 = [uid]\n else:\n block_list2.append(uid)\n new_other_block = \"[\" + \",\".join(block_list2) + \"]\"\n \n user_raw_query2 = \"UPDATE linx_luser SET friends = \\'{}\\', friend_not_to_add = \\'{}\\' WHERE user_id = {}\".format(new_user_friends, new_user_block, uid)\n other_raw_query2 = \"UPDATE linx_luser SET friends = \\'{}\\', friend_not_to_add = \\'{}\\' WHERE user_id = {}\".format(new_other_friends, new_other_block, oid)\n\n cursor.execute(user_raw_query2)\n cursor.execute(other_raw_query2)\n\n collected_values[\"uid\"] = uid\n collected_values[\"oid\"] = oid\n collected_values[\"token\"] = token\n collected_values[\"raw_query_1\"] = user_raw_query2\n collected_values[\"raw_query_2\"] = other_raw_query2\n\n LOGGER.info(\"Block user request: %v\", collected_values)\n return JsonResponse(collected_values, status=200)", "def removeHomeUser(self, 
user):\n user = user if isinstance(user, MyPlexUser) else self.user(user)\n url = self.HOMEUSER.format(userId=user.id)\n return self.query(url, self._session.delete)", "def unregister_user(self, userID: str):\n requests.post('https://' + self.serverIp + '/unregister/' + userID, verify=False)", "def remove_edge(self, rtype, node1, node2):\n self.nodes[node1].remove_relation(rtype,node2)\n self.nodes[node2].remove_predecessor(rtype,node1)\n self.dirty = True", "def remove_link():", "def unfollow(source_id, destination_id):\n Forward.objects.filter(source_id=source_id,\n destination_id=destination_id).delete()\n Backward.objects.filter(destination_id=destination_id,\n source_id=source_id).delete()", "def removeUser(self, fullName):\n logger.debug(\"Func: removeUser\")\n\n # old Name removeUser\n currentDB = self._loadUsers()\n del currentDB[fullName]\n self._dumpJson(currentDB, self._pathsDict[\"usersFile\"])\n self._usersDict = currentDB\n return None, None", "def unfollow(self, followerId, followeeId):\n if followerId in self.follow_map and followeeId in self.follow_map[followerId]:\n self.follow_map[followerId].remove(followeeId)", "def delete_user(self, user_name):\n user = self.get_user(user_name)\n return self.client.delete_resource(user.get('href'))", "def remove_user(user_id):\n user = Users.query.get(user_id)\n if user_id in [0, 1]:\n return 'Removal of default User #%s (%s) is forbidden.' % (user_id, user.login), 'warning'\n db_session.delete(user)\n db_session.commit()\n return 'User #%s (%s) has been deleted.' % (user_id, user.login), 'success'", "def delUser(self, id):\n del self.users[id]\n if id in self._nameCache:\n del self._nameCache[self._nameCache[id]]\n del self._nameCache[id]\n if id in self._hostmaskCache:\n for hostmask in self._hostmaskCache[id]:\n del self._hostmaskCache[hostmask]\n del self._hostmaskCache[id]\n self.flush()" ]
[ "0.7632799", "0.7582847", "0.755684", "0.730245", "0.71368355", "0.7024519", "0.69181836", "0.676393", "0.6699931", "0.667411", "0.6658038", "0.6638588", "0.66359735", "0.6634647", "0.6563862", "0.6552684", "0.65465987", "0.65465987", "0.65465987", "0.6525971", "0.65162206", "0.6515509", "0.6469655", "0.64527446", "0.6427119", "0.6425656", "0.6416702", "0.63188636", "0.6305449", "0.62774086", "0.6250204", "0.62453765", "0.62435484", "0.6235968", "0.62180984", "0.62089235", "0.6197824", "0.6162416", "0.6143596", "0.6111347", "0.6103929", "0.60852206", "0.6072085", "0.60630864", "0.60530275", "0.6051188", "0.6032866", "0.6023754", "0.6014634", "0.6010318", "0.6008247", "0.5992088", "0.59890056", "0.5969203", "0.596582", "0.5944699", "0.5916369", "0.590533", "0.5896504", "0.58892727", "0.5888137", "0.58782095", "0.58777183", "0.58707124", "0.58657813", "0.5859262", "0.5842374", "0.5815432", "0.5777801", "0.5773379", "0.5770068", "0.57696295", "0.57684505", "0.575114", "0.57496977", "0.57489073", "0.5745378", "0.57450944", "0.57430524", "0.5725597", "0.57214576", "0.5704755", "0.5698378", "0.56896466", "0.5685091", "0.5676379", "0.5675734", "0.5672402", "0.5666067", "0.5665153", "0.5657567", "0.565544", "0.5654743", "0.56463766", "0.5640048", "0.56398934", "0.5639799", "0.5636122", "0.5628569", "0.5627973" ]
0.6983191
6
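Editorial note on the record above: the Degree-update code repeatedly filters on a comma-bounded pattern, r'(,|\A)%s,%s(,|\Z)', so an id pair only matches when the two ids are adjacent elements of the stored path string. A minimal, self-contained sketch of that idea (the ids and path values here are invented for illustration and are not taken from the dataset; the original wraps the same pattern in re.match(r'.*pattern.*', ...), which behaves like a search here):

import re

# The anchors (,|\A) and (,|\Z) force "a,b" to match only as adjacent
# path elements, so id 1 cannot accidentally match inside id 11.
def path_contains_edge(path, a, b):
    return re.search(r'(,|\A)%s,%s(,|\Z)' % (a, b), path) is not None

assert path_contains_edge("5,1,23,7", 1, 23)
assert not path_contains_edge("5,11,234,7", 1, 23)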
Connect to a specific port
def connect(): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.connect((src_addr, src_port)) except: print("Error connecting to {}:{}".format(src_addr, src_port)) return None try: print("Sending stream info") sock.sendall(struct.pack('<iBi', 5, 1, stream_id)); except: print("Error: Stream rejected") return None print("Successfully connected to host") return sock
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect(self, host, port):\n pass", "def connect(self, port=None, options=None):\n pass", "def connect(self,ip,port):\n return self.network.connect(ip,port)", "def connect(self, host=None, port=None):\n host = self.host if host is None else host\n port = self.port if port is None else port\n self.socket.connect(host, port)", "def tryconnect(name, port):\n return port_talker.TCPTalk(name, port, 2, '', None, 0, 1) # use ext. resolver", "def connect(self, connection_host, connection_port):\n self.connection.connect((connection_host, connection_port))", "def connect(self) -> None:\n self.s.connect((self.ip, self.port))", "def connect_to_server(host, port) -> socket.SocketIO:\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = (host, port)\n print('[CLIENT LOG] connecting to {} port {}'.format(host,port)) \n sock.connect(server_address)\n return sock", "def port():", "def make_connection( hostname, port = 4663 ):\n \tconnection = socket.socket();", "def connect(self):\n self.socket.connect((\"localhost\",self.PORT_NUM))", "def port_connection(self, sock):\n sock.bind(('', 0)) # Bind to OS-assigned available & random port.\n sock.listen(1)", "def connect(self,ip,port):\n import time\n import socket\n\n try:\n self.socket_reference.connect((ip, port))\n except socket.error:\n self.close()\n reload(socket)\n raise CommClientException(\"Cannot connect to \" + ip + \":\" + str(port))", "def connect(self,addr=None,port=None):\n\n self.type = 'connect'\n\n if addr != None:\n self.remote_location = (addr,int(port))\n try:\n s = socket(AF_INET,SOCK_STREAM)\n s.settimeout(1.0)\n s.connect(self.remote_location)\n self.status = 'connected'\n s.settimeout(0.0)\n self.sock = s\n except error as e:\n self.errno = e.errno\n self.status = 'closed'", "def init_tcp_conn(target: str, port: int) -> socket.socket:\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.settimeout(5)\n try:\n conn.connect((target, port))\n return conn\n except socket.timeout as e:\n print(e)\n return None", "def connect(self, device_ip, device_port=DEFAULT_PORT):\n return", "def connect(self, host, port):\n if self._connectedTo is not None:\n raise ValueError(\"Already connected\")\n self._connectedTo = (host, port)", "def socket_port(ip, port):\n socket.setdefaulttimeout(3) \n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = s.connect_ex((ip, port))\n if result == 0:\n print(ip, u':', port, u'port is occupied')\n return False\n return True\n except Exception as error:\n print('error:', error)\n return False", "def connect(self, host, port=6667):\n\t\tprint(host)\n\t\tprint(port)\n\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n\t\tself.s = ssl.wrap_socket(sock)\n\t\tself.s.connect((host, port))", "def connect(host, port = DEFAULT_SERVER_PORT):\n return factory.connect(host, port, SlaveService)", "def connect(host: str, port: int):\n print('Connecting to the server...')\n print(cmd.RESP_OK, type(cmd.RESP_OK))\n tn = telnetlib.Telnet(host = host, port = port)\n code, params = cmd.serv_read_resp(tn)\n if code != cmd.RESP_OK:\n print(f'Connection problem. {code, params}')\n exit(0)\n print(f'{params[0]}\\n')\n return tn", "def connect(self):\n \n try:\n self.__sock.connect((self.__host, self.__port))\n\n except socket.error,e:\n print 'Oops, unable to connect. 
Try again!',e\n sys.exit(1)", "def connect(self, address, port):\n address = socket.getfqdn(address)\n self.channel = \"http://\" + str(address) + \":\" + str(port)", "def connect_to_server(host, port):\n # Create a socket to use IPv4 and TCP stream communication\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Connect to the server\n client_socket.connect( (host, port) )\n return client_socket", "def _connect_to_mongo_port(port):\r\n \r\n sock = socket.socket()\r\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\r\n sock.settimeout(1)\r\n sock.connect((\"localhost\", int(port)))\r\n sock.close()", "def connect(self, cmd, window, **kwargs):\n try:\n kwargs['server']\n except KeyError:\n window.server_event('/%s syntax: /%s servername [port] [nickname]' % cmd)\n return\n try:\n kwargs['port']\n try: \n int(kwargs['port'])\n except ValueError:\n raise KeyError\n except KeyError:\n kwargs['port'] = 6667\n try:\n kwargs['nickname']\n except KeyError:\n kwargs['nickname'] = \"circe\"\n self.connection.connect(**kwargs)", "def connect( self, str_address, port_no ):\r\n\r\n self._socket.connect( str_address, port_no )\r\n\r\n # return None \r", "def connect(host, port, service=VoidService, config={}, ipv6=False, keepalive=False):\n s = SocketStream.connect(host, port, ipv6=ipv6, keepalive=keepalive)\n return connect_stream(s, service, config)", "def connect(self, host, port):\n\t\tif self.is_server:\n\t\t\traise socket.error(\"\"\"A server socket was used in place of a client\n\t\t\t\t\t\t\t socket for connecting\"\"\")\n\n\t\tself.socket.connect((host, port))\n\t\tself.socket_connected = True", "def connect(self, host, port):\n\n self.connect_count = self.RETRY_COUNT\n timeout = None if self.debug_mode else FvpConnector.MAX_IDLE_TIME\n\n while not self.has_connect_timed_out():\n try:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.settimeout(timeout)\n self.sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)\n self.sock.connect((host, port))\n return\n except ConnectionRefusedError:\n time.sleep(FvpConnector.RETRY_PERIOD)\n\n raise Exception(\"Failed to connect to FVP\")", "def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((self.host, PORT)) # probably throws errors\n self.connected = True", "def connect(self):\n try:\n self.sock.connect((self.hostname, self.port))\n print 'connected to ' + self.hostname\n except socket.gaierror as e:\n print(\"Recieved error when connecting to \" + str((self.hostname, self.port)))\n raise e", "def connect(self, host, port=6667, use_ssl=False):\n self.log('@ Connecting to %s port %d' % (host, port))\n\n self.sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n self.sk.connect((host, port))\n self.log('@ Connected')\n self.connected = True\n self.heartbeat.start()\n self._callback('on_connected')", "def _connect_socket(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((self.ip, self.port))\n print(\"Connected to %s at port %d\" % (self.ip, self.port))", "def connect(self, host=HOST, port=PORT, timeout=10):\r\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self._socket.connect((host, port))\r\n if timeout is not None:\r\n self._socket.settimeout(timeout)\r\n logger.info('Connected to: %s...', repr((host, port)))", "def connect(self):\n print(\"Connecting\")\n self.socket.connect((self.ip, self.port))\n self.startReading()", "def port_scan(host, port):\n # AF_INET specifies 
ipv4, SOCK_STREAM for TCP\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n sock.connect((host, port))\n except socket.error:\n return False\n except KeyboardInterrupt:\n utils.cprint('Scanning interrupted')\n sys.exit()\n except socket.gaierror:\n utils.cprint('Hostname could not be resolved')\n sys.exit()\n else:\n return port\n finally:\n sock.close()", "def connect(self):\n if not self._socket:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((self.host, self.port))\n self._socket.settimeout(0.0)", "def mpd_connect(host=_MPD_HOST, port=_MPD_PORT):\n \n _mpd_client.connect(host, port)", "def _connect(self, port):\n return propar.instrument(port)", "def checkPort(self, port, servicename, hint):\n print (\"Checking remote port %s/tcp (%s)\" % (port, servicename)).ljust(65, '.'),\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect( (self._target,int(port)) )\n s.close()\n print \"[ OK ]\"\n except Exception, err:\n print \"[ Failed ]\"\n print \"\\n***ERROR: %s\" % err\n print \"Port %s/tcp seems to be closed\" % port\n print hint\n sys.exit(0)", "def connect(self, host: str, port: int, timeout: float) -> None:\n self.socket.settimeout(timeout)\n self.socket.connect((host, port))\n self.socket.settimeout(0)", "def connect( self, port = 1883 ):\n logging.info( \"Connecting to broker {}:{}\".format( self.broker_ip, port ) )\n\n try:\n self.client.connect( self.broker_ip, port = port )\n time.sleep(2) # Wait to connect\n\n except Exception as error:\n print( error )", "def port(self) -> int:", "def connect(self) -> None:\n self.client_socket.connect((self.server_name, self.server_port))", "def connect_to_server(self):\n\n try:\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.connect((self.hostname, self.port))\n return client\n except Exception as e:\n print(\"Can't connect to server: \", e)\n sys.exit()", "def get_open_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((\"\", 0))\n s.listen(1)\n port = s.getsockname()[1]\n s.close()\n return port", "def connect_cups(hostname, port):\n cups.setServer(hostname)\n cups.setPort(port)\n return(cups.Connection())", "def connect(self, host, port):\n logging.debug(\"Connecting to %s:%i\", host, port)\n self._hasError = False\n self.tcpsocket = QTcpSocket()\n self.tcpsocket.error.connect(self.processError)\n self.tcpsocket.connected.connect(self._connected)\n self.tcpsocket.connected.connect(lambda: self._stopWaiting.emit())\n self.tcpsocket.readyRead.connect(self.receive)\n\n self.tcpsocket.connectToHost(host, port)\n self.waitForConnection()", "def open_tunnel(self, serial_no, port=19020):\n return self.open(ip_addr='tunnel:' + str(serial_no) + ':' + str(port))", "def connect(self, host, port, uri, timeout):\n _abstract()", "def connect(self, host, port, uri, timeout):\n _abstract()", "def run(port):\n run(host=config.HOST, port=port)", "def make_port(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((\"0.0.0.0\", 0))\n return s.getsockname()[1]", "def connect(self, ip: str, port: int=25565) -> bool:\n if self.is_auth:\n try: \n self.connection = Connection(ip, port, auth_token=self.authToken)\n self.connection.connect()\n # self.connection.register_packet_listener(self.chat, clientbound.play.ChatMessagePacket)\n self.is_connected = True\n except (YggdrasilError, Exception): return False\n \n return True\n \n return False", "def SCPI_sock_connect(ipaddress,port=5025):\n\n try:\n 
session=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n #session.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n #session.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, 0)\n session.connect((ipaddress,port))\n except IOError:\n print( \"Failed to connect to the instrument, pleace check your IP address\" )\n return\n return session", "def connect(self):\n self.client.connect(self.host, self.port)\n self.client.loop_forever()", "def createConnection(addr):\r\n\r\n # cast port number to integer\r\n addr = (addr[0],int(addr[1]))\r\n\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.settimeout(5)\r\n try:\r\n s.connect(addr)\r\n except (socket.timeout, ConnectionRefusedError):\r\n return None\r\n return s", "async def connect(\n self, host: str, port: int, use_tls: bool = False, loop=None\n ) -> None:\n\n self.logger.info('Connecting to {}:{}'.format(host, port))\n\n self.secure = use_tls\n connection = asyncio.open_connection(host, port, ssl=use_tls, loop=loop)\n try:\n self.reader, self.writer = await connection\n except Exception as exception:\n self.logger.error('Disconnected', exception)\n self.irc_disconnected(exception)\n return\n\n await self.connected()", "def connect(self):\n try:\n if not self.serial.isOpen():\n self.serial = serial.Serial(\n self.port, \n self.baudrate, \n timeout=self.timeout, \n rtscts=self.hardware_flagging, \n xonxoff=self.software_flagging\n )\n print(\"connected to %s\") % (self.port)\n except serial.SerialException as e:\n msg = \"unable to connect to %s\" % (self.port)\n raise Exception(msg, e)", "def connect(self):\n self.conn.connect()", "def connect(self, connID, addr):\r\n return self.callRemote('connect', connID, addr)", "def connect(self, timeout=1.0):\n if self.socket:\n self.socket.close()\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.settimeout(timeout)\n self.socket.connect((self.ip, self.port))", "def new_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n for i in range(12042, 16042):\n try:\n s.bind(('127.0.0.1', i))\n s.close()\n return i\n except socket.error, e:\n pass\n raise Exception('No local port available')", "def connect(self):\n\n import serial\n\n if self.addr == None:\n self.addr = self.get_EFu_addr()\n\n self.ser = serial.Serial(self.addr, 115200, timeout=1)\n if self.ser.isOpen():\n print('Opened port: {}'.format(self.addr))\n else:\n raise RuntimeError('Failed to open the serial port: {}'.format(self.addr))", "def is_port_listening(port):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n result = s.connect_ex((\"127.0.0.1\", port))\n return result == 0", "def connect(self, server, port):\n print 'connecting to \"%s\" on port \"%s\"...' 
% (server.name, port),\n self.server = server\n self.ctx = server.context()\n self.port = port\n p = self.packet()\n p.open(port)\n p.baudrate(38400)\n p.read() # clear out the read buffer\n p.timeout(TIMEOUT)\n yield p.send()", "def __TCP_connect(self, ip, port_number, message):\n # Initialize the TCP socket object based on different operating systems.\n # All systems except for 'Windows' will be treated equally.\n curr_os = platform.system()\n if curr_os == 'Windows':\n TCP_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n TCP_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n TCP_sock.settimeout(self.__timeout)\n else:\n TCP_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n TCP_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n TCP_sock.settimeout(self.__timeout)\n\n b_message = message.encode('utf-8', errors='replace')\n\n # Initialize a UDP socket to send scanning alert message if there exists an non-empty message\n UDP_sock = None\n try:\n if message:\n UDP_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n UDP_sock.sendto(b_message, (ip, int(port_number)))\n\n result = TCP_sock.connect_ex((ip, int(port_number)))\n if message and result == 0:\n TCP_sock.sendall(b_message)\n\n # If the TCP handshake is successful, the port is OPEN. Otherwise it is CLOSE\n if result == 0:\n return port_number, 'OPEN'\n else:\n return port_number, 'CLOSE'\n\n except socket_error as e:\n # Failed to perform a TCP handshake means the port is probably close.\n return port_number, 'CLOSE'\n finally:\n if UDP_sock:\n UDP_sock.close()\n TCP_sock.close()", "def startup(self, port, ip_address = None, callback = None):\n if self.socket is None:\n if ip_address is None:\n thread_target = self._wait_for_connection\n else:\n thread_target = self._connect_to_peer\n\n t = Thread(target = thread_target, args = (port, ip_address))\n t.start()\n\n if callback is None:\n t.join()\n else:\n callback()", "def connect(self, address, port_number):\n while True:\n try:\n print(\"Connecting to the game server...\")\n # Connection time out 15 seconds\n self.client_socket.settimeout(15)\n # Connect to the specified host and port\n self.client_socket.connect((address, int(port_number)))\n # Return True if connected successfully\n return True\n except:\n # Caught an error\n print(\"There was an error when trying to connect to \" + str(\n address) + \"::\" + str(port_number))\n self.__connect_failed__()\n return False", "def get_open_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((\"\", 0))\n o_port = sock.getsockname()[1]\n sock.close()\n return o_port", "def start_socket(ip, port):\n try:\n # initiate socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # connect to server\n print(\"socket connected at ip {} and port {}\".format(ip, port))\n sock.connect((ip, port))\n return sock\n except Exception as e:\n print(\"Error start_socket\", e)\n #exit()", "def connect(self, ip: str, port: Optional[int] = None, version: Optional[str] = None,\n auth: Optional[Dict[str, str]] = None) -> None:\n api_version = 'v1' if version > '8.5' else 'v0'\n default_port = 8443 if api_version == 'v1' else 8080\n port = port if port else default_port\n http_protocol = 'https' if api_version == 'v1' else 'http'\n connection_url = f'{http_protocol}://{ip}:{port}/'\n apikey = auth.get('apikey') if auth else None\n crt = auth.get('crt') if auth else None\n self.connection = IxRestUtils.Connection(connection_url, api_version, version, apikey, crt)\n self.session_url = 
IxLoadUtils.createSession(self.connection)", "def ConnectPort(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('connectPort', payload=payload, response_object=None)", "def waithp(host, port):\n debug(\"waithp({0},{1})\".format(safestr(host), safestr(port)))\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.connect((host, int(port)))\n except: # pylint: disable=bare-except\n a, b, c = sys.exc_info()\n traceback.print_exception(a, b, c)\n sock.close()\n raiseRecoverableError('Server at {0}:{1} is not ready'.format(safestr(host), safestr(port)))\n sock.close()", "def opensock(ipaddr,port):\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.connect((ipaddr,port))\n \n return s", "def tcp_socket_open(host, port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(1)\n try:\n return sock.connect_ex((host, port)) == 0\n except socket.timeout:\n return False", "def is_port_open(port):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n return sock.connect_ex(('127.0.0.1', port)) == 0", "def port_cmd(self):\n print_debug(\"Executing PORT\")\n # PORT creates a new connection from server to client.\n sock = new_socket()\n self.port_connection(sock)\n # Get required parameters for PORT command.\n port_params, host_ip, host_port = self.parse_port_req(sock)\n print_debug(\"PARAMS: \" + port_params)\n command = \"PORT %s\\r\\n\" % port_params\n msg_rec = self.send_and_log(self.s, command)\n print_debug(msg_rec)\n return msg_rec, sock", "def connect(\n host=\"localhost\",\n port=1113,\n discovery_host=None,\n discovery_port=2113,\n username=None,\n password=None,\n loop=None,\n) -> Client:\n discovery = get_discoverer(host, port, discovery_host, discovery_port)\n dispatcher = MessageDispatcher(loop)\n connector = Connector(discovery, dispatcher)\n\n credential = msg.Credential(username, password) if username and password else None\n\n return Client(connector, dispatcher, credential=credential)", "def _connect_to_peer(self, port, ip_address):\n getLogger(__name__).info(\"Attempting to connect to peer {}:{}...\"\n .format(ip_address, port))\n conn = self._create_new_socket()\n connected = False\n\n for i in range(self.CONNECT_ATTEMPTS):\n try:\n conn.connect((ip_address, port))\n connected = True\n break\n except (ConnectionRefusedError, OSError):\n getLogger(__name__).info(\"Attempt {}/{} failed\"\n .format(i + 1, self.CONNECT_ATTEMPTS))\n if i < self.CONNECT_ATTEMPTS:\n sleep(i + 1)\n\n if connected:\n self._set_socket(conn)\n getLogger(__name__).info(\"Connection established\")\n else:\n getLogger(__name__).info((\"Connection could not be established, \"\n \"starting in offline mode.\"))", "def _connect(self):\n hostport = self.getHost()\n channelOpenData = forwarding.packOpen_direct_tcpip((self.host, self.port), (hostport.host, hostport.port))\n self.connector.connection.openChannel(self, channelOpenData)", "def cmd_port (self, line):\r\n info = line[1].split (',')\r\n ip = '.'.join (info[:4])\r\n port = int(info[4])*256 + int(info[5])\r\n # how many data connections at a time?\r\n # I'm assuming one for now...\r\n # TODO: we should (optionally) verify that the\r\n # ip number belongs to the client. 
[wu-ftpd does this?]\r\n self.client_addr = (ip, port)\r\n self.respond ('200 PORT command successful.')", "def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n if self.print_send:\n print(' - connecting...')\n self.socket.settimeout(1)\n self.socket.connect(self.host_port)\n if self.print_send:\n print(' - connected')\n except socket.timeout:\n raise Timeout('Timeout connecting to projector')\n except Exception as err:\n raise Error('Connection failed', err)\n self.expect(b'PJ_OK')\n self.send(b'PJREQ')\n self.expect(b'PJACK')", "def port(self) -> int:\n if hasattr(self, \"_port\"):\n return self._port\n _args: list[Arg] = []\n _ctx = self._select(\"port\", _args)\n return _ctx.execute_sync(int)", "def pScan(ip, port):\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.settimeout(.01)\r\n try:\r\n sock.connect((ip, port))\r\n return True\r\n except socket.error:\r\n return False\r\n except socket.timeout:\r\n return False", "def port_show(switch, port):\n print client.port.show(switch, port)", "def connect():", "def __init__(self, port):\n self.port = port\n self.connection = serial.Serial(timeout=1)\n self.connection.port = self.port", "def setport(self, port):\n self.__port = port", "def init_serial(port):\n try:\n ser_port = serial.Serial(port, 9600)\n except:\n raise SerialPortError(\"Error opening \" + port)\n\n print(\"Serial port \"+ser_port.name+\" opened.\")\n return ser_port", "def get_open_port(host=\"localhost\"):\n temp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n temp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n temp_sock.bind((host, 0))\n port = temp_sock.getsockname()[1]\n temp_sock.close()\n del temp_sock\n return port", "def port(self, port):\n\n self._port = port", "def port(self, port):\n\n self._port = port", "def port(self, port):\n\n self._port = port", "async def connect(self, peer_ip, peer_port):\n peer_name = f\"{peer_ip}:{peer_port}\"\n try:\n reader, writer = await open_connection(peer_ip, peer_port)\n self.peers[peer_name] = {\n \"reader\": reader,\n \"writer\": writer,\n \"buffer\": ProtocolBuffer()\n }\n client_coro = create_task(self.connection_handler(peer_name))\n await client_coro\n except CancelledError:\n print(f\"Warning: Task handling connection to {peer_name} canceled.\")\n except NodeDisconnectException:\n print(f\"Warning: Peer {peer_name} disconnected\")\n await self.close_connection(peer_name)\n except ConnectionError:\n print(f\"Error: connection error for peer {peer_name}\")", "def setup_server(port=0, verbose=False):\r\n\r\n host = gethostname()\r\n sock = socket(AF_INET, SOCK_STREAM)\r\n try:\r\n sock.bind((host, port))\r\n except error as msg:\r\n raise error(\"Could not open Socket on server: \" + str(msg))\r\n sock.listen(5) # max num of queued connections usually [1..5]\r\n if verbose:\r\n print \"Server listening on %s\" % str(sock.getsockname())\r\n return sock", "def connect_to_server(self, host=HOST, port=PORT):\r\n\t\t# HOST = server.ipAddress\r\n\t\t# PORT = int(server.port)\r\n\t\t# self.tcpSocket.disconnectFromHost()\r\n\t\t# self.tcpSocket.waitForDisconnected ()\r\n\t\t# print(HOST, PORT)\r\n\t\t# self.__tcpSocket.connectToHost(host, port, QIODevice.ReadWrite)\r\n\t\tself.__tcpSocket.connectToHost(host, port, QIODevice.ReadWrite)\r\n\t\tif self.__tcpSocket.waitForConnected(5000):\r\n\t\t\tprint('Client connected to server.')\r\n\t\t\tself.connection_established.emit((host, port))\r\n\t\telse:\r\n\t\t\tself._window.open_dialog(\"Impossible 
de se connecter au serveur !\",\r\n\t\t\t\t\t\t\t\t\t \"Vérifiez que les paramètres que vous avez entré sont corrects et que le serveur est en fonctionnement.\",\r\n\t\t\t\t\t\t\t\t\t type=\"warning\")\r\n\t\t\tprint('Unable to connect...')", "def listen(self, port):\n return self", "def connect(self):\n self.sock = s.socket(s.AF_INET,s.SOCK_STREAM)\n self.sock.connect((self.remote_host,\n self.remote_port))", "def connect(self):\n # open serial port\n try:\n #device = self.get_device_name(self.serial_number)\n device = \"/dev/ttyAMA0\"\n self.serial.port = device\n # Set RTS line to low logic level\n self.serial.rts = False\n self.serial.open()\n except Exception as ex:\n self.handle_serial_error(ex)" ]
[ "0.81374085", "0.7722904", "0.7718337", "0.7456098", "0.7439767", "0.7409976", "0.740931", "0.7345239", "0.7266387", "0.7218907", "0.71471167", "0.71010596", "0.7090724", "0.7063016", "0.7051909", "0.70335734", "0.70265913", "0.70235753", "0.7016226", "0.700159", "0.69971234", "0.6990588", "0.6967241", "0.6921168", "0.6920926", "0.6847714", "0.68355143", "0.68092597", "0.68062377", "0.6786681", "0.6771137", "0.6758067", "0.67513156", "0.6742955", "0.67057425", "0.66641176", "0.66612655", "0.66612613", "0.66391915", "0.6636848", "0.66108817", "0.6607265", "0.6594342", "0.6588763", "0.6547977", "0.65433365", "0.6539315", "0.6533839", "0.65310293", "0.6496008", "0.64865506", "0.64865506", "0.64672184", "0.6462463", "0.6447766", "0.64458203", "0.644544", "0.6440098", "0.6428251", "0.6413622", "0.63922054", "0.63775676", "0.63756216", "0.6361805", "0.6361632", "0.63505733", "0.63401103", "0.63397944", "0.6332071", "0.6329067", "0.63281804", "0.63273656", "0.6318631", "0.63146484", "0.63090146", "0.6307092", "0.6303204", "0.6293788", "0.6286934", "0.6284496", "0.62835294", "0.62779856", "0.6275801", "0.6271211", "0.6264513", "0.6250935", "0.62439436", "0.6239937", "0.62322253", "0.6222897", "0.6219093", "0.6212701", "0.620438", "0.620438", "0.620438", "0.6203463", "0.6202735", "0.6198929", "0.6195213", "0.6193266", "0.6190864" ]
0.0
-1
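Editorial note on the "Connect to a specific port" record above: the positive document opens a TCP connection and then sends a little-endian header built with struct.pack('<iBi', 5, 1, stream_id). A hedged usage sketch, assuming the connect() helper shown in that record is in scope in the same module; the address, port and stream id below are made-up placeholders, not values from the record:

import struct

# Hypothetical values for the module-level globals connect() reads.
src_addr = "127.0.0.1"
src_port = 9000
stream_id = 2

# The header connect() sends: int32 little-endian 5, one byte 1, int32
# little-endian stream_id; 9 bytes total, no padding because of '<'.
assert len(struct.pack('<iBi', 5, 1, stream_id)) == 9

sock = connect()  # helper from the document above; returns None on failure
if sock is not None:
    sock.close()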
Experimental function to read each stream frame from the server
def recv_depth_frame(sock): (frame_size,) = struct.unpack("<i", recv_all(sock, 4)) return recv_all(sock, frame_size)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stream_frames(video_capture):", "def get_frame(self):\n\n self.load_network_stream()\n\n while True:\n try:\n if self.online:\n # Read next frame from stream and insert into deque\n status, frame = self.cap.read()\n if status:\n self.deque.appendleft(frame)\n else:\n self.cap.release()\n self.online = False\n print('Cannot read from Camera')\n else:\n # Attempt to reconnect\n print('attempting to reconnect', self.device)\n time.sleep(2)\n # time.sleep(0.02)\n except AttributeError:\n pass", "def gen_frame():\n while True:\n frame = camera_stream()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/png\\r\\n\\r\\n' + frame + b'\\r\\n') # concate frame one by one and show result", "def get_frames(self):\n video_getter = Thread(target=self.streamer)\n video_getter.daemon = True\n video_getter.start()", "def next_batch(self):\n\n while self.cap.isOpened():\n flag, frame = self.cap.read()\n yield frame", "def readFrames(video):\n frames = []\n while True:\n _, frame = video.read()\n\n if frame is None:\n break\n else:\n frames.append(frame)\n video.release()\n return frames", "def parse_frames(stream: BytesIO) -> Iterable[_Frame]:\n while True:\n old = stream.tell()\n try:\n yield _parse_frame(stream)\n except IncompleteData as exc:\n stream.seek(old)\n break", "def __next__(self):\n while True:\n self.stream_bytes += self.stream_conn.read(1024)\n first = bytearray(self.stream_bytes).find(b'\\xff\\xd8')\n last = bytearray(self.stream_bytes).find(b'\\xff\\xd9')\n if first != -1 and last != -1:\n jpg = self.stream_bytes[first:last + 2]\n self.stream_bytes = self.stream_bytes[last + 2:]\n image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), 0)\n self.total_frame += 1\n return image", "def read(self):\n data = self.stream.read(self.CHUNK)\n self.frames.append(data)\n self.frames.popleft()", "def _read_frames(self):\n cap = self._read_file()\n\n frame_list = []\n ret_list = []\n\n while True:\n ret, frame = cap.read()\n if ret:\n frame_list.append(np.array(frame))\n ret_list.append(ret)\n else:\n break\n if self.mode==\"np\":\n frame_list = np.array(frame_list)\n return frame_list", "def streamer(self):\n retry = 3\n print ('start streamer!')\n while self.container is None and 0 < retry:\n if not self.collect_frames:\n break\n #print (type(self.container))\n retry -= 1\n try:\n self.container = av.open(self.drone.get_video_stream())\n print('success')\n except av.AVError as ave:\n print(ave)\n print('retry...')", "def gen(self):\n\n # context = zmq.Context()\n # receiver = context.socket(zmq.PULL)\n self.receiver.connect(inference_url())\n\n while self.is_opened:\n ret = self.receiver.recv_pyobj()\n\n nparr = np.frombuffer(np.array(ret['data']), np.uint8)\n\n # logger.warning('Receive: %s', ret['ts'])\n # logger.warning('Time elapsed: %s', (time.time()-self.keep_alive))\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n # ret2 = receiver.recv_pyobj()\n # logger.warning(ret2['ts'])\n # logger.warning(ret2['shape'])\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n cv2.imencode('.jpg', img)[1].tobytes() + b'\\r\\n')\n self.receiver.close()", "async def _retrieve_frame(self, mode: BufferRetrieveMode) -> RawArray:", "def loop(self):\n data_received = 0\n data = ''\n start_time = time.time()\n current_start_time = time.time()\n current_frames_received = 0\n while not self.halt:\n # Collect data until we have a full frame.\n tmp_data = self.sock.recv(self.chunk_length)\n data += tmp_data\n if tmp_data.startswith(self.sync_start):\n # Reset data variables, to prepare for a 
new frame.\n data = ''\n data_received = 0\n switch = self.switch_resolution(tmp_data)\n if switch:\n current_start_time = time.time()\n current_frames_received = 0\n elif tmp_data.startswith(self.sync_kill):\n # The server is closing the connection.\n print 'Received kill frame...'\n self.halt = True\n elif self.width != 0 and self.height != 0:\n data_received += self.chunk_length\n # When we've received a frame or more (because of zero padding), \n # then we can finally show it.\n if data_received >= self.full_frame_length():\n data = data[:self.full_frame_length()]\n frame = self.data_to_frame(data)\n if self.output_file is None:\n self.show_video_image(frame)\n else:\n self.write_video_image(frame)\n # Update statistics\n self.received_frames += 1\n current_frames_received += 1\n self.output_statistics_during(start_time, \n current_start_time, \n current_frames_received\n )\n # Reset the values after we've received the whole frame.\n data_received = 0\n data = ''", "def process_stream(self, sid, f):\n http_response = wavehttp.get(\"/wave/wfe/channel?VER=6&RID=rpc&SID=\"+sid+\n \"&CI=0&AID=0&TYPE=xmlhttp&zx=\"+self.zx()+\"&t=1\")\n http_data = http_response.read(120)\n print http_data\n exit()\n connection.close()", "def server_streaming(self) -> global___Snippet.ServerStreaming:", "def get_data(self):\n global CAM\n while CAM.isOpened():\n _, frame = CAM.read()\n _, jpeg = cv2.imencode('.jpg', frame)\n encoded_img = \"data:image/jpg;base64,\" + str(base64.b64encode(jpeg.tobytes()).decode())\n SIO.emit('video_frame',\n {'frame': encoded_img},\n namespace='/live-stream')\n sleep(self.delay)", "def read(self):\n while True:\n size_bytes = self.connection.read(Frame.SIZE_WIDTH)\n # Read will return zero bytes when the other side of the connection\n # closes.\n if not size_bytes:\n break\n\n message_length = read_number_string(size_bytes, Frame.SIZE_WIDTH)\n\n chunk = self.connection.read(message_length - Frame.SIZE_WIDTH)\n if not chunk:\n raise ProtocolException(\n 'Expected %d bytes available, got none' % message_length\n )\n\n if len(chunk) != message_length - Frame.SIZE_WIDTH:\n raise ProtocolException(\n 'Expected %d bytes, got %d' %\n (len(chunk), message_length - Frame.SIZE_WIDTH)\n )\n\n yield Frame.decode(BytesIO(chunk), message_length)", "def update(self):\n print('VIDEO: Video Stream started')\n while True:\n if self.stopped:\n return\n (self.grabbed, self.frame) = self.stream.read()", "def ordinarilyGenerateFrames(self):\n for name, video in self._videos.items():\n print(f'Reading:{name}...')\n success, frame = video.read()\n while self.alive and success:\n yield frame\n success, frame = video.read()\n print('Reading Completed!')\n self._videos.clear()", "def audio_stream() -> typing.Iterable[bytes]:\n frames = frame_queue.get()\n while frames:\n yield frames\n frames = frame_queue.get()", "def gen_livestream():\n\n flag = True\n frame = _dog()\n while True:\n time.sleep(0.02)\n if app.images.qsize():\n image = app.images.get()\n if flag:\n image = base64_to_cv2(image)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n detector = dlib.get_frontal_face_detector()\n rects = detector(gray, 0)\n for (i, rect) in enumerate(rects):\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n \n for (x, y) in shape:\n cv2.circle(image, (x, y), 2, (0, 255, 0), -1)\n _, frame = cv2.imencode('.jpg', image)\n else:\n frame = _dog()\n # print(position)\n flag = not flag\n # yield ('Content-Type: image/jpeg\\r\\n\\r\\n' + base64.b64encode(frame).decode(\"utf-8\") + 
'\\r\\n')\n\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def read_raw_data_from_server():\n s = connect_to_eeg_server(enable_raw_output=True)\n loop = 50\n while loop > 0:\n buf = s.recv(1024)\n raw = str(buf).strip()\n print(raw)\n loop -= 1\n s.close()", "def get_still(self):\n _, frame = self.client.read()\n return frame", "def readVideo(self):\n vid = cv2.VideoCapture(self.fname)\n imgstack = []\n # grab = True\n grab, img = vid.read()\n while grab:\n imgstack.append(\n Frame(\n cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),\n self.starttime\n + datetime.timedelta(seconds=self.frame_dt * self.length),\n )\n )\n self.length += 1\n grab, img = vid.read()\n self.frames = imgstack", "def read(self):\r\n try:\r\n if not self.connected:\r\n self._connect()\r\n\r\n (length, encoding, chunked) = self._send_request()\r\n\r\n if chunked:\r\n data = self._read_chunked()\r\n else:\r\n data = self._read_num_bytes(length)\r\n\r\n if encoding == \"gzip\":\r\n data = self._unzip(data)\r\n\r\n data = json.loads(data)\r\n self.timestamp = int(data[1])\r\n if len(data[0]):\r\n if self.cipher:\r\n msg_list = [self._decrypt(m) for m in data[0]]\r\n else:\r\n msg_list = data[0]\r\n\r\n if len(data) > 2:\r\n chan_list = data[2].split(\",\")\r\n else:\r\n chan_list = [self.chan for m in msg_list]\r\n\r\n return zip(chan_list, msg_list)\r\n else:\r\n return []\r\n\r\n except:\r\n self.connected = False\r\n self.sock.close()\r\n raise", "def run(self):\n i = 0\n t = time.time()\n while True:\n i = i + 1\n ret, frame = self.stream.read()\n if (i == 20):\n self.fps = 20/(time.time() - t)\n t = time.time()\n i = 0\n #If a frame is None need to re-init it: \n # - close a stream;\n # - reopen it;\n # - read frame again\n if frame is None:\n self.stream.release()\n self.stream = cv2.VideoCapture(self.url)\n ret, frame = self.stream.read()\n text = time.strftime('%Y-%m-%d %H:%M:%S')\n if (self.fps > 0):\n text = text + ' FPS: ' + str(round(self.fps))\n self.frame = cv2.putText(frame, text, (10, int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)) - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)\n self.frameID = uuid.uuid4()", "def stream():\n while True:\n try:\n r = requests.post(\"http://streamer_0:5000/stream\", json={})\n break\n except requests.exceptions.ConnectionError:\n logging.error(\"Could not connect to server streamer_0, retrying\")\n time.sleep(2)\n continue\n logging.info(\"'http://streamer_0:5000/stream', response = {}\".format(r.status_code))\n if r.status_code != 200:\n time.sleep(2)\n stream()", "def getCamera1():\n for msg in camera1:\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpg\\r\\n\\r\\n' + base64.b64decode(msg.value['image_bytes']) + b'\\r\\n\\r\\n')", "def update(self):\n self.frame = self.video_stream.read()", "async def _read_frame(self):\n # Read the Frame start and header\n response = await self.sreader.read(len(_FRAME_START)+2)\n if self.debug:\n print('_read_frame: frame_start + header:', [hex(i) for i in response])\n\n if len(response) < (len(_FRAME_START) + 2) or response[:-2] != _FRAME_START:\n raise RuntimeError('Response does not begin with _FRAME_START!')\n \n # Read the header (length & length checksum) and make sure they match.\n frame_len = response[-2]\n frame_checksum = response[-1]\n if (frame_len + frame_checksum) & 0xFF != 0:\n raise RuntimeError('Response length checksum did not match length!')\n\n # read the frame (data + data checksum + end frame) & validate\n data = await self.sreader.read(frame_len+2)\n if self.debug:\n 
print('_read_frame: data: ', [hex(i) for i in data])\n \n checksum = sum(data) & 0xFF\n if checksum != 0:\n raise RuntimeError('Response checksum did not match expected value: ', checksum)\n\n if data[-1] != 0x00:\n raise RuntimeError('Response does not include Frame End')\n\n # Return frame data.\n return data[0:frame_len]", "def atari_frames_generator(env_name, ip):\n\n print(\"> Waiting for a stream of frames from:\", ip)\n\n # Set up a connection\n receiver = AtariFramesReceiver(env_name, ip)\n\n # Collect\n try:\n while True:\n yield receiver.receive(wait=True)\n\n except ConnectionAbortedError:\n raise StopIteration", "def getFrames():\n\t\tfor cam in Camera.CAMERAS: cam.getFrame()", "def start(self) -> None:\n data = b\"\"\n while True:\n # while loop to get size of receiving data\n while len(data) < self.payload_size:\n packet = self.client_socket.recv(4 * 1024) # 4KB\n if not packet:\n break\n data += packet\n # counting size of sending data\n packed_msg_size = data[: self.payload_size]\n # if in first while loop there was download part of data, need to add it on start\n data = data[self.payload_size :]\n msg_size = struct.unpack(\"Q\", packed_msg_size)[0]\n # receiving concrete data\n while len(data) < msg_size:\n data += self.client_socket.recv(4 * 1024)\n # getting all data for current state\n data_recv_pickled = data[:msg_size]\n # setting data to whats left for next state\n data = data[msg_size:]\n # unpickle what we got\n data_recv = pickle.loads(data_recv_pickled)\n # show image and if q pressed - stop\n cv2.imshow(\"RECEIVING VIDEO\", data_recv.frame)\n print(\n f\"[CLIENT] GOT IMAGE AT TIME: {data_recv.decision} | WITH PERCENTAGE: {data_recv.percentage}% | DELAY: {datetime.datetime.now() - data_recv.time_sended}\"\n )\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n # disconnect from server\n self.disconnect()", "def read_frame(self):\n _temp = self._read_unpack(4, lilendian=True)\n if _temp is None: raise EOFError\n\n _time = datetime.datetime.fromtimestamp(_temp)\n _tsss = _temp\n _tsus = self._read_unpack(4, lilendian=True)\n _ilen = self._read_unpack(4, lilendian=True)\n _olen = self._read_unpack(4, lilendian=True)\n\n frame = dict(\n frame_info = dict(\n ts_sec = _tsss,\n ts_usec = _tsus,\n incl_len = _ilen,\n orig_len = _olen,\n ),\n time = _time,\n number = self._fnum,\n time_epoch = f'{_tsss}.{_tsus} seconds',\n len = _ilen,\n cap_len = _olen,\n )\n\n length = frame['cap_len']\n return self._decode_next_layer(frame, length)", "def stream_timelapse(path):\n parts = path.split('.')[0].split('_')\n count = int(parts[2])\n filename = parts[0] + '_{0:03d}_{1:03d}.jpg'\n for i in range(count):\n frame = open(filename.format(i, count), 'rb').read()\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\nContent-Length: ' + \\\n str(len(frame)).encode() + b'\\r\\n\\r\\n' + frame + b'\\r\\n'\n time.sleep(0.5)", "def get_frames_from_video_capture(video_capture):\n while video_capture.isOpened():\n success, frame = video_capture.read()\n if not success:\n break\n else:\n yield frame", "def update(self):\r\n for f in self.stream:\r\n # if the thread indicator variable is set, stop the thread\r\n # and release camera resources\r\n if self.stopped:\r\n self.stream.close()\r\n self.rawCapture.close()\r\n self.camera.close()\r\n return\r\n # grab the frame from the stream and clear the stream in\r\n # preparation for the next frame\r\n frame = f.array\r\n self.rawCapture.truncate(0)\r\n with self.read_lock:\r\n self.frame = frame", "def run(self):\n if self.stream:\n 
while True:\n try:\n ret, frame = self.stream.read()\n if ret is True:\n # TODO: replace by a real function that send frame to detection model\n self.detection_model.send_image(image=frame)\n if self.show_in_window:\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n except KeyboardInterrupt:\n self.stream.release()\n cv2.destroyAllWindows()\n self.log.close()\n return None\n except Exception as e:\n self.stream.release()\n cv2.destroyAllWindows()\n self.log.write('Error:Unexpected Error happened:\\n {}'.format(e))\n self.log.close()\n return None\n else:\n self.log.write(\"Error initializing stream....\\n\")\n self.log.close()\n return None", "def video_thread():\n global last_frame\n # Creating stream capture object\n cap = cv2.VideoCapture('udp://' + drone.tello_ip + ':11111')\n\n while(True):\n _, last_frame = cap.read()\n cap.release()", "def gen(camera):\n while livestreamOn == True:\n frame = camera.get_frame()\n img = cv2.imencode('.jpg', frame)[1]\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + img.tobytes() + b'\\r\\n')", "def gen(camera):\n \n while True:\n \n \n \n frame = camera.get_frame()\n \n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def read(self):\n\n # if self.deque and self.online:\n if self.online:\n # Grab latest frame\n if self.enable_camera_reader_thread:\n frame = self.deque[-1]\n else:\n _, frame = self.cap.read()\n return frame\n else:\n print('Waiting for camera for 2 more secs')\n time.sleep(2)\n return None", "def process(self):\n frame_count = 0\n size = self.frame.size\n while True:\n try:\n for i in range(parallel.BUFFER_LENGTH):\n offset = i * size;\n self.manager.image[offset : offset + size] = self.frame.ravel()\n self.ret, self.frame = self.capture.read()\n if not self.ret:\n self.clear_buffer(offset=offset + size + 1)\n raise StopIteration\n if DEBUG_LEVEL > 2:\n cv.imshow(self.name, self.frame)\n frame_count += 1\n key = cv.waitKey(self.toggle)\n if key is 27:\n raise StopIteration\n return\n elif key is 32:\n self.toggle ^= 1\n self.manager.detect()\n self.barrier.wait()\n except StopIteration:\n # Handle dangling frames in buffer and return gracefully\n self.manager.detect()\n self.barrier.wait()\n self.cleanup()\n try:\n # Handle rangequits in Phase 1\n for rv in self.variables:\n for event in rv['events']:\n if event['event_subtype'] == \"Finish\":\n return self.variables\n return None\n except:\n # Phase 0 -- no handling\n return self.variables\n except:\n # Any other exception is bad!\n return None", "def read_frame(self):\n return self.decode_frame(self.grab_frame())", "def gen():\n while True:\n retval, frame = vc.read()\n\n if retval:\n #image_processing(frame)\n frame = cv2.imencode('.jpg', frame)[1].tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def getCamera2():\n for msg in camera2:\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpg\\r\\n\\r\\n' + base64.b64decode(msg.value['image_bytes']) + b'\\r\\n\\r\\n')", "def start_capture_stream(\n self, nframes, aoi, storemethod, *store_a, **store_kw):\n def int_action(*a):\n self.stop_capture_stream(0)\n self.connect() # connect if not yet connected\n if self.lib().gone_live(self.um):\n self.stop_capture_stream(-1)\n cmos, pcb = self.get_temps()\n self.lib().go_live(self.um)\n writer = self._Writer(nframes, storemethod, *store_a, **store_kw)\n reader = self._Reader(self, writer, nframes, aoi)\n self._streams[self.um] = (reader, writer)\n 
timeout = time.time()+3\n while not self.lib().gone_live(self.um):\n if timeout > time.time():\n raise CygnetExc(\"timeout on going live\")\n time.sleep(.1)\n reader.start()\n signal.signal(signal.SIGINT, int_action)\n if reader.is_alive():\n reader.ready.wait(1)\n if cygnet4k.debug:\n print(\"Video capture started.\")\n return cmos, pcb", "def start(self):\n\t\twhile self.capture_status:\n\t\t\t_, frame = self.cap.read()\n\t\t\tc_frame = frame[self.width / 2 - self.face_width / 2: self.width / 2 + self.face_width / 2,\n\t\t\t self.height / 2 - self.face_width / 2: self.height / 2 + self.face_height / 2, :]\n\t\t\tif not self.in_processing:\n\t\t\t\tself.frame = frame\n\t\t\t\tself.in_processing = True\n\t\t\tsleep(0.2)\n\t\tyield cv2.imdecode('png', c_frame)", "def get_frames(self):\n if not self.video:\n return []\n # We cannot validate shape on construction as that happens inside graph\n # mode as we construct from a tf.data.Dataset, so we validate here.\n self.video[0].validate_shape_and_dtype()\n return self.video", "def _read_loop(self):\n while True:\n self.read()", "def recv_chunk(self, data):", "def captureNextFrame(self):\r\n mainls = []\r\n\r\n\r\n ret, readFrame = self.capture.read()\r\n\r\n if (ret == True):\r\n self.currentFrame = cv2.cvtColor(readFrame, cv2.COLOR_BGR2RGB)\r\n self.faceDetection(self.currentFrame)\r\n self.currentFrame = self.bbFrame", "def _recv(self) -> List[np.ndarray]:", "def recv_frame(sock):\n (frame_size,) = struct.unpack(\"<i\", recv_all(sock, 4))\n return recv_all(sock, frame_size)", "def readFrame(self):\n\t\tsuccess, self.frameImage = self.vidcap.read()\n\t\treturn success, self.frameImage", "def iter_chunked(self, n: int) -> AsyncStreamIterator[bytes]:\n ...", "def getCamera3():\n for msg in camera3:\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpg\\r\\n\\r\\n' + base64.b64decode(msg.value['image_bytes']) + b'\\r\\n\\r\\n')", "def _stream(self):\n logger.info('getting meta-data')\n while not self.handle.has_metadata():\n time.sleep(0.1)\n\n #self.handle.rename_file(0, 'test.mp4')\n\n while not self.handle.is_seed():\n stat = self.handle.status()\n\n print 'downloading %.2f%%'%(stat.progress * 100)\n sys.stdout.flush()\n\n time.sleep(1)", "def packets_for_stream(fobj, offset):\n pcap = dpkt.pcap.Reader(fobj)\n pcapiter = iter(pcap)\n ts, raw = pcapiter.next()\n\n fobj.seek(offset)\n for p in next_connection_packets(pcapiter, linktype=pcap.datalink()):\n yield p", "def get_frame_read(self):\r\n if self.background_frame_read is None:\r\n if self.is_dummy:\r\n self.background_frame_read = BackgroundFrameRead(self, 0).start()\r\n else:\r\n self.background_frame_read = BackgroundFrameRead(self, self.get_udp_video_address()).start()\r\n return self.background_frame_read", "def gen_frames(camera):\n while True:\n success, frame = camera.read()\n if not success:\n break\n else:\n ret, buffer = opencv.imencode('.jpg', frame)\n frame = buffer.tobytes()\n yield (b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def __iter__(self):\n # Start streaming from file\n profile = self.pipeline.start(self.config)\n\n # Getting the depth sensor's depth scale (see rs-align example for explanation)\n #depth_sensor = profile.get_device().first_depth_sensor()\n #depth_scale = depth_sensor.get_depth_scale()\n\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n while True:\n # returns the next color/depth frame\n frames = self.pipeline.wait_for_frames()\n\n # Align the depth frame to color frame\n aligned_frames = 
align.process(frames)\n\n # Get aligned frames\n # aligned_depth_frame is a 640x480 depth image\n aligned_depth_frame = aligned_frames.get_depth_frame()\n color_frame = aligned_frames.get_color_frame()\n\n depth_image = np.asanyarray(aligned_depth_frame.get_data())\n color_image = np.asanyarray(color_frame.get_data())\n\n yield depth_image, color_image", "def client_streaming(self) -> global___Snippet.ClientStreaming:", "def read_chunks(self):\n buf = []\n chunks = []\n delays = []\n if not self.chunked:\n chunks.append(self.read())\n delays.append(0)\n else:\n start = TIMER()\n try:\n while True:\n line = self.fp.readline()\n chunk_size = self._read_chunk_size(line)\n if chunk_size is None:\n raise httplib.IncompleteRead(''.join(chunks))\n if chunk_size == 0:\n break\n delays.append(TIMER() - start)\n chunks.append(self._safe_read(chunk_size))\n self._safe_read(2) # skip the CRLF at the end of the chunk\n start = TIMER()\n\n # Ignore any trailers.\n while True:\n line = self.fp.readline()\n if not line or line == '\\r\\n':\n break\n finally:\n self.close()\n return chunks, delays", "def video_by_frame(video):\n cap = cv2.VideoCapture(video)\n\n while True:\n ret, im = cap.read()\n yield im", "def read(self) -> np.array:\n return self._stream.read(self._frame_size)", "def gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def recv_stream(self) -> Dict[str, Any]:\n while True:\n # Try to reassemble and return a stream\n for i in range(len(self.streams)):\n tcp = self.streams[i].reassemble()\n\n if tcp:\n del self.streams[i]\n return tcp\n\n # Receive the next packet\n packet = self.recv_raw()\n\n # Add to the correct stream\n new = True\n for stream in self.streams:\n if stream.add_packet(packet):\n new = False\n break\n\n # See if this is a new TCP stream\n if new:\n self.streams.append(TCPStream(packet))", "def streaming_buffer(self) -> 'outputs.StreamingbufferResponse':\n return pulumi.get(self, \"streaming_buffer\")", "def Parse(self):\n prev_percent_read = 0\n for packet in TS.next_packet(self._filename):\n #check_packet_formedness(packet)\n pei = TS.get_transport_error_indicator(packet)\n pusi = TS.get_payload_start(packet)\n pid = TS.get_pid(packet)\n tsc = TS.get_tsc(packet)\n\n # per .ts packet handler\n if self.OnTSPacket:\n self.OnTSPacket(packet)\n\n # Update a progress callback\n self._read_size += TS.PACKET_SIZE\n percent_read = ((self._read_size / float(self._total_filesize)) * 100)\n new_percent_read = int(percent_read * 100)\n if new_percent_read != prev_percent_read and self.Progress:\n self.Progress(self._read_size, self._total_filesize, percent_read)\n prev_percent_read = new_percent_read\n\n adaptation_field_control = TS.get_adaptation_field_control(packet)\n continuity_counter = TS.get_continuity_counter(packet)\n\n # put together PES from payloads\n payload = TS.get_payload(packet)\n if pusi == True:\n if not ES.pes_packet_check_formedness(payload):\n if pid in self._elementary_streams:\n self._elementary_streams[pid] = None\n continue\n pes_id = ES.get_pes_stream_id(payload)\n self._elementary_streams[pid] = payload\n else:\n if pid in self._elementary_streams:\n # TODO: check packet sequence counter\n if not self._elementary_streams[pid]:\n self._elementary_streams[pid] = \"\"\n self._elementary_streams[pid] += payload\n else:\n # TODO: throw. 
this situaiton means out of order packets\n pass\n if pid in self._elementary_streams and ES.pes_packet_complete(self._elementary_streams[pid]):\n # TODO: handle packet contents here (callback)\n es = self._elementary_streams[pid]\n if self.OnESPacket:\n header_size = ES.get_pes_header_length(es)\n self.OnESPacket(pid, es, header_size)", "def iterate_msgs(stream):\n while True:\n item = None\n buf = bytearray()\n stall_timer = None\n\n while True:\n # read bytes until item boundary reached\n buf += stream.read(1)\n if not buf:\n # check for stall (i.e. no data for 90 seconds)\n if not stall_timer:\n stall_timer = time.time()\n elif time.time() - stall_timer > TwitterAPI.STREAMING_TIMEOUT:\n raise TwitterConnectionError(\"Twitter stream stalled\")\n elif stall_timer:\n stall_timer = None\n if buf[-2:] == b\"\\r\\n\":\n item = buf[0:-2]\n if item.isdigit():\n # use byte size to read next item\n nbytes = int(item)\n item = None\n item = stream.read(nbytes)\n yield item\n else:\n yield b\"\" # keepalive\n break", "def gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def test_get_stream(self):\n pass", "def gen(camera):\n #time.sleep(3)\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def run(self):\n while not self.is_stop():\n content = self.render()\n fid, frame = content['fid'], content['container_frame']\n\n if not self.is_pause():\n # Send request\n request = { 'action': 'detect' }\n self.send(request)\n\n # Send raw frames to workers\n video_frames = []\n for panel in self.panel_to_channel.keys():\n media_frame = panel.media_cache\n media_frame = cv2.resize(media_frame, self.trans_resolution)\n frame_bytes = cv2.imencode('.jpg', media_frame)[1]\n video_frames.append({ 'panel': panel, 'frame_bytes': frame_bytes })\n self.parallel_send_videos(video_frames)\n\n # Catch response from remote worker\n response = self.recv()\n if response is None:\n break\n\n # Handle server response\n handler = self.event_handler[response['action']]\n new_content = handler(response)\n fid, frame = new_content['fid'], new_content['container_frame']\n last_frame = frame\n\n # Show applications\n cv2.imshow(self.winname, frame)\n cv2.setTrackbarPos(self.barname, self.winname, fid)\n\n # Handling keyboard events\n key = cv2.waitKey(1) & 0xff\n self.keyboaord_handler(key)\n\n 
cv2.destroyAllWindows()", "def gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + bytearray(frame) + b'\\r\\n')", "def next_batch(self, frame_skip_count=5):\n frame_count = 0\n frame_divisor = max(frame_skip_count + 1, 1)\n while True:\n ret, frame = self.cap.read()\n if ret:\n if frame_count % frame_divisor == 0:\n yield frame\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_count += 1\n else:\n break", "def __listener__(self):\n frame_interval = 0.1\n str_list = []\n c = ''\n while True:\n with Timeout(frame_interval, False):\n while True:\n try:\n c = self.ser.read()\n except:\n self.ser.close()\n self.make_connection.go()\n self.connection_made.wait()\n str_list.append(c)\n if c == \"\\n\" or c == '':\n break\n received = ''.join(str_list)\n str_list = []\n if received:\n for i in self.read_handlers:\n gevent.spawn(i, received)\n sleep(0.001)", "def gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n if not stop:\n time.sleep(frame_sleep)\n else:\n break", "def gen(camera, cam_id):\n while True:\n ##print('Send frame: ', cam_id)\n frame = camera.frames(cam_id)\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "async def _stream_next_event(stream):\n while True:\n last_new_line = False\n data = b\"\"\n\n while True:\n dat = await stream.read(1)\n if dat == b\"\\n\" and last_new_line:\n break\n data += dat\n last_new_line = dat == b\"\\n\"\n\n conv = data.decode(\"utf-8\").strip()[6:]\n\n if conv != \"ping\":\n break\n return json.loads(conv)", "def get_frame_sequence(captured_file):\n frame_seq = []\n get_all_frame = \"tshark -r {} -Y 'http.request || http.response' -T fields -e frame.number\".format(captured_file)\n frames = run_command(get_all_frame, True)\n for f in frames:\n fn = int(f.decode('utf8').rstrip('\\n'))\n frame_seq.append(HTTPNode(fn))\n \n return frame_seq", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def extract_frames():\n vc = cv2.VideoCapture(INPUT_FILE)\n c=1\n\n if vc.isOpened():\n rval , frame = vc.read()\n else:\n rval, frame = False, False\n\n while rval:\n # cv2.imwrite((MODIFIED_FRAMES_DIR + 'img' + str(c) + '.jpg'),frame)\n cv2.imwrite((MODIFIED_FRAMES_DIR + str(c) + '.jpg'),frame)\n c = c + 1\n cv2.waitKey(1)\n rval, frame = vc.read()\n vc.release()\n print(\"All frames extracted successfully...\")", "def _get_stream(\n session: \"Session\", url_tail: str, params: Optional[Dict[str, Any]] = None\n) -> Any:\n response = _get(session, url_tail, params, stream=True)\n response.raw.decode_content = True\n return response.raw", "def crosslyGenerateFrames(self):\n fail = set()\n try:\n while self.alive:\n for name, video in self._videos.items():\n video: cv2.VideoCapture\n success, frame = video.read()\n if self.longFirst:\n if len(fail) == len(self._videos): # 长视频优先,视频长度由最长决定\n return\n elif not success:\n print(f'Read {name} Over')\n fail.add(video)\n else:\n yield frame\n else:\n if success: # 短视频优先,视频长度由最短决定\n yield frame\n else:\n return\n print('Reading Completed!')\n except Exception as e:\n raise e\n finally:\n self.close()", "def receive_chunk(self):\n raw_chunk_size = b''\n raw_chunk_size_to_get = MAX_CHUNK_SIZE\n while len(raw_chunk_size) < raw_chunk_size_to_get:\n 
raw_chunk_size += self.receive_video_socket.recv(\n raw_chunk_size_to_get - len(raw_chunk_size))\n #try:\n chunk_size = int(raw_chunk_size.decode())\n left = chunk_size\n #except Exception as e:\n #print('raw chunk size is {} its length is {}'\n #.format(raw_chunk_size, len(raw_chunk_size)))\n #print(\"exception receive chunk 1: {}\".format(e))\n\n chunk = b''\n try:\n while left > END:\n chunk += self.receive_video_socket.recv(left)\n left = left - len(chunk)\n return chunk\n except Exception as e:\n print(\"exception receive chunk 1: {}\".format(e))\n self.receive_video_socket.close()", "def read(self):\n\n # ret, image = self.video.read()\n (self.grabbed, self.frame) = self.cap.read()\n image = self.frame\n\n if image is not None:\n \"\"\"Update FPS, and incode received frame. \"\"\"\n self.fps.update()\n # TODO: add self.fps.fps() to image, if flagged raised.\n\n # We are using Motion JPEG, but OpenCV defaults to cap raw images,\n # so we must encode it into JPEG in order to correctly display the\n # video stream.\n\n # display a piece of text to the frame (so we can benchmark\n # fairly against the fast method)\n self.fps.stop()\n cv2.putText(image, \"FPS (simple): {:.2f}\".format(self.fps.fps()), (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n self.frame = image.copy()\n\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()\n else:\n self.logger.debug(\"in 'get_frame', video.read not success\")", "def test_is_streaming(fprime_test_api):\n results = fprime_test_api.assert_telemetry_count(5, timeout=10)\n for result in results:\n msg = \"received channel {} update: {}\".format(result.get_id(), result.get_str())\n print(msg)\n fprime_test_api.assert_telemetry(\n \"sendBuffComp.SendState\", value=\"SEND_IDLE\", timeout=3\n )" ]
[ "0.7428881", "0.68029535", "0.67922986", "0.6777833", "0.6765679", "0.6748719", "0.67172873", "0.6695475", "0.6688924", "0.6657313", "0.6572944", "0.64670205", "0.64453244", "0.6411161", "0.63258195", "0.6290956", "0.62526816", "0.62319446", "0.6203036", "0.6193663", "0.61668545", "0.61452895", "0.6143986", "0.6131414", "0.61280745", "0.6122159", "0.6107584", "0.6060426", "0.603085", "0.6029249", "0.6008231", "0.60039663", "0.59873366", "0.5959356", "0.5912636", "0.58870864", "0.5856453", "0.58363587", "0.5832509", "0.58279985", "0.5820848", "0.5808883", "0.5804411", "0.577626", "0.5773251", "0.5773024", "0.57719105", "0.57698727", "0.5767955", "0.57622474", "0.5759026", "0.5758089", "0.5751348", "0.57465637", "0.57405674", "0.5731403", "0.57288665", "0.57264566", "0.5717613", "0.570989", "0.5703157", "0.5701865", "0.5685784", "0.5680683", "0.5670916", "0.5668574", "0.56625426", "0.565219", "0.5651413", "0.5649944", "0.56499285", "0.5647333", "0.5646201", "0.5646201", "0.5646201", "0.5646201", "0.5646201", "0.5646201", "0.5646201", "0.5646201", "0.5644579", "0.56435096", "0.56411827", "0.56399614", "0.5629782", "0.56286526", "0.5627207", "0.5624073", "0.5620585", "0.56126195", "0.56096566", "0.56096566", "0.56096566", "0.56096566", "0.56096566", "0.5606166", "0.5595241", "0.5593023", "0.559196", "0.5590966", "0.5583266" ]
0.0
-1
Render the Lilypond music expression lily using lilypond.
def render_lily(self, lily):
    shasum = "%s.png" % sha(lily.encode('utf-8')).hexdigest()
    relfn = posixpath.join(self.builder.imgpath, 'lily', shasum)
    outfn = path.join(self.builder.outdir, '_images', 'lily', shasum)
    if path.isfile(outfn):
        return relfn

    if hasattr(self.builder, '_lilypng_warned'):
        return None, None

    music = DOC_HEAD + self.builder.config.pnglily_preamble + lily
    if isinstance(music, unicode):
        music = music.encode('utf-8')

    # use only one tempdir per build -- the use of a directory is cleaner
    # than using temporary files, since we can clean up everything at once
    # just removing the whole directory (see cleanup_tempdir_lily)
    if not hasattr(self.builder, '_lilypng_tempdir'):
        tempdir = self.builder._lilypng_tempdir = tempfile.mkdtemp()
    else:
        tempdir = self.builder._lilypng_tempdir

    tf = open(path.join(tempdir, 'music.ly'), 'w')
    tf.write(music)
    tf.close()

    ensuredir(path.dirname(outfn))
    # use some standard lilypond arguments
    lilypond_args = [self.builder.config.pnglily_lilypond]
    #lilypond_args += ['-o', tempdir, '--png']
    lilypond_args += ['-dbackend=eps', '-dno-gs-load-fonts',
                      '-dinclude-eps-fonts', '-o', tempdir, '--png']
    # add custom ones from config value
    lilypond_args.extend(self.builder.config.pnglily_lilypond_args)
    # last, the input file name
    lilypond_args.append(path.join(tempdir, 'music.ly'))

    try:
        p = Popen(lilypond_args, stdout=PIPE, stderr=PIPE)
    except OSError, err:
        if err.errno != 2:   # No such file or directory
            raise
        self.builder.warn('lilypond command %r cannot be run (needed for music '
                          'display), check the pnglily_lilypond setting' %
                          self.builder.config.pnglily_lilypond)
        self.builder._lilypng_warned = True
        return None, None

    stdout, stderr = p.communicate()
    if p.returncode != 0:
        raise LilyExtError(u'lilypond exited with error:\n[stderr]\n%s\n'
                           '[stdout]\n%s' % (stderr.decode('utf-8'),
                                             stdout.decode('utf-8')))

    shutil.copyfile(path.join(tempdir, 'music.png'), outfn)
    #Popen(['mogrify', '-trim', outfn], stdout=PIPE, stderr=PIPE)

    return relfn
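For orientation alongside the document above: a minimal, self-contained sketch of the same approach, writing the snippet to a temporary .ly file and invoking the lilypond binary with --png, assuming lilypond is installed and on PATH; the helper name and paths below are illustrative only and not part of the original extension.

# Minimal sketch (assumption: `lilypond` is on PATH; names/paths are illustrative).
# Mirrors the Popen call in render_lily above: write music.ly, run lilypond with
# --png and -o pointing at the temp directory, then pick up music.png.
import subprocess
import tempfile
from os import path

def lily_to_png(snippet):
    tempdir = tempfile.mkdtemp()
    src = path.join(tempdir, 'music.ly')
    with open(src, 'w') as f:
        f.write(snippet)
    subprocess.check_call(['lilypond', '--png', '-o', tempdir, src])
    return path.join(tempdir, 'music.png')

On success the caller can copy or embed the returned PNG, which is essentially what render_lily does with shutil.copyfile.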
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display(self, format=\"png\"):\n from .core.transforms import lilypond\n seq = HSeq(self) | lilypond()\n\n lily_output = write_lilypond.lily_format(seq)\n if not lily_output.strip():\n #In the case of empty lily outputs, return self to get a textual display\n return self\n\n if format == \"png\":\n suffix = \".preview.png\"\n args = [\"lilypond\", \"--png\", \"-dno-print-pages\", \"-dpreview\"]\n elif format == \"svg\":\n suffix = \".preview.svg\"\n args = [\"lilypond\", \"-dbackend=svg\", \"-dno-print-pages\", \"-dpreview\"]\n\n f = tempfile.NamedTemporaryFile(suffix=suffix)\n basename = f.name[:-len(suffix)]\n args.extend([\"-o\" + basename, \"-\"])\n\n #Pass shell=True so that if your $PATH contains ~ it will\n #get expanded. This also changes the way the arguments get\n #passed in. To work correctly, pass them as a string\n p = sp.Popen(\" \".join(args), stdin=sp.PIPE, shell=True)\n stdout, stderr = p.communicate(\"{ %s }\" % lily_output)\n if p.returncode != 0:\n # there was an error\n #raise IOError(\"Lilypond execution failed: %s%s\" % (stdout, stderr))\n return None\n\n if not ipython:\n return f.read()\n if format == \"png\":\n return Image(data=f.read(), filename=f.name, format=\"png\")\n else:\n return SVG(data=f.read(), filename=f.name)", "def exec_lilypond(ly_string, filename, command):\n ly_string = '\\\\version \"2.10.33\"\\n' + ly_string\n if filename[-4:] in [\".pdf\", \".png\"]:\n filename = filename[:-4]\n try:\n f = open(filename + \".ly\", \"w\")\n f.write(ly_string)\n f.close()\n except:\n return False\n command = 'lilypond %s -dresolution=600 -o \"%s\" \"%s.ly\"' % (command, filename, filename)\n #print(\"Executing: %s\" % command)\n p = subprocess.Popen(command, shell=True).wait()\n os.remove(filename + \".ly\")\n return True", "def render(self):\n canvas_id = 'zdog_{}'.format(self.CANVAS_INDEX)\n illo_id = 'illo_{}'.format(self.CANVAS_INDEX)\n Scene.CANVAS_INDEX += 1\n\n html_lines = []\n\n js_lines = []\n\n euler = -rowan.to_euler(\n self.rotation, convention='xyz', axis_type='intrinsic')\n translation = self.translation*(1, -1, 1)\n\n pan_cfg = self.get_feature_config('pan')\n pan = pan_cfg.get('value', True) if pan_cfg is not None else False\n\n js_lines.append(\"\"\"\n let {illo_id} = new Zdog.Illustration({{\n element: '#{canvas_id}',\n zoom: {zoom},\n dragRotate: {rotation_enabled},\n rotate: {{x: {angle[0]}, y: {angle[1]}, z: {angle[2]}}},\n translate: {{x: {pos[0]}, y: {pos[1]}, z: {pos[2]}}},\n }});\n \"\"\".format(\n illo_id=illo_id, canvas_id=canvas_id, zoom=self.zoom*self.pixel_scale,\n angle=euler, pos=translation,\n rotation_enabled=('false' if pan else 'true')))\n\n config = self.get_feature_config('ambient_light')\n ambient_light = 0 if config is None else config.get('value', .4)\n\n config = self.get_feature_config('directional_light')\n directional_light = ([(0, 0, 0)] if config is None else\n config.get('value', [(0, 0, 0)]))\n directional_light = np.atleast_2d(directional_light)\n\n shapeIndex = 0\n for i, prim in enumerate(self._primitives):\n js_lines.extend(prim.render(\n rotation=self.rotation, illo_id=illo_id,\n name_suffix=i, ambient_light=ambient_light,\n directional_light=directional_light))\n\n (width, height) = map(int, self.size_pixels)\n html_lines.append(\"\"\"\n <canvas id=\"{canvas_id}\" width=\"{width}\" height=\"{height}\"></canvas>\n \"\"\".format(canvas_id=canvas_id, width=width, height=height))\n\n html_lines.append(\"\"\"<script>\n var fill_{canvas_id} = function() {{\n \"\"\".format(canvas_id=canvas_id))\n 
html_lines.append(LOCAL_HELPER_SCRIPT)\n html_lines.extend(js_lines)\n\n pan_snippet = \"\"\"\n new Zdog.Dragger({{\n startElement: {illo_id}.element,\n onDragStart: function( pointer, moveX, moveY) {{\n this.lastX = 0;\n this.lastY = 0;\n }},\n onDragMove: function( pointer, moveX, moveY ) {{\n let deltax = moveX - this.lastX;\n let deltay = moveY - this.lastY;\n let scale = 1.0/{illo_id}.zoom;\n {illo_id}.translate.x += deltax*scale;\n {illo_id}.translate.y += deltay*scale;\n this.lastX = moveX;\n this.lastY = moveY;\n }}\n }});\"\"\".format(illo_id=illo_id)\n if pan:\n html_lines.append(pan_snippet)\n\n html_lines.append(\"\"\"\n let this_canvas = document.querySelector(\"#{canvas_id}\");\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append(\"\"\"\n let animate_{canvas_id} = function() {{\n if(is_in_view(this_canvas))\n {{\n {illo_id}.updateRenderGraph();\n }}\n if(document.contains(this_canvas))\n {{\n requestAnimationFrame(animate_{canvas_id});\n }}\n }};\n animate_{canvas_id}();\"\"\".format(canvas_id=canvas_id, illo_id=illo_id))\n # remove the global reference to this function after using it\n html_lines.append('fill_{canvas_id} = null;'.format(canvas_id=canvas_id))\n html_lines.append('};') # end of fill_{canvas_id}\n # now call fill_{canvas_id}, possibly after loading zdog\n html_lines.append(\"\"\"\n if (typeof Zdog == 'undefined')\n {{\n var script = document.createElement('script');\n script.addEventListener('load', fill_{canvas_id}, false);\n script.src = 'https://unpkg.com/zdog@1/dist/zdog.dist.min.js';\n document.getElementsByTagName('head')[0].appendChild(script);\n }}\n else\n fill_{canvas_id}();\n \"\"\".format(canvas_id=canvas_id))\n html_lines.append('</script>')\n\n return '\\n'.join(html_lines)", "def render(self) -> None: # pragma: no cover\n top_level_dir = self.rendering_params['dir']\n now = datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S,%f\")\n nested_dir = os.path.join(top_level_dir, f\"result_{now}\")\n os.mkdir(nested_dir)\n\n midi_path = os.path.join(nested_dir, 'music.mid')\n midi_params = self.rendering_params['midi']\n measure = self.rendering_params['measure_in_seconds']\n create_midi_from_piece(self, midi_path, measure, **midi_params)\n\n events_path = os.path.join(nested_dir, 'sinethesizer_events.tsv')\n events_params = self.rendering_params['sinethesizer']\n create_events_from_piece(self, events_path, measure, **events_params)\n\n wav_path = os.path.join(nested_dir, 'music.wav')\n create_wav_from_events(events_path, wav_path)\n\n lilypond_path = os.path.join(nested_dir, 'sheet_music.ly')\n create_lilypond_file_from_piece(self, lilypond_path)\n create_pdf_sheet_music_with_lilypond(lilypond_path)", "def asLily(self):\n n = self.getNoteName()[0].lower()\n a = self._getLilyAccidental()\n o = self._getLilyOctave()\n d = self._getLilyDuration()\n s = self._getLilyDot()\n t = self._getLilyTie()\n return \"{}{}{}{}{}{}\".format(n, a, o, d, s, t)", "def hxlexpand():\n run_script(hxlexpand_main)", "def playOutput():\n global coordinates, lastPlayedCoordinates\n\n tempDir = \".bt_temp\"\n tempSongPath = tempDir + \"/lastPlayedSong.wav\"\n\n if (coordinates == []):\n return\n\n # If there have been no changes to the canvas, don't recreate the .wav files\n if (coordinates == lastPlayedCoordinates):\n if os.path.isfile(tempSongPath):\n call(['python','PlayMelody.py',tempSongPath])\n return\n\n lex = Lexer(coordinates)\n song = lex.compose_song()\n \n # Don't create a sub directory and just make them hidden files, this way no permission error\n\n # Delete 
the old one if it exists\n if os.path.exists(tempDir):\n shutil.rmtree(tempDir)\n # Create temporary directory to store intermediate files\n os.makedirs(tempDir)\n \n \n tempSongPath = tempDir + \"/lastPlayedSong.wav\"\n if os.path.exists(tempSongPath):\n shutil.rmtree(tempSongPath)\n\n createMelody(song, tempSongPath)\n\n call(['python','PlayMelody.py',tempSongPath])\n\n lastPlayedCoordinates = coordinates", "def render_example() -> str:\n return str(<Hello name=\"World\"/>)", "def render_ldl(variables, output):\n\n f = open(output, 'w')\n\n # Include header\n f.write(\"#include \\\"ldl.h\\\"\\n\\n\")\n\n # Write ldl_lsolve\n write_ldl_lsolve(f, variables)\n\n # Write ldl_ltsolve\n write_ldl_ltsolve(f, variables)\n\n # Write ldl_dinvsolve\n write_ldl_dinvsolve(f, variables)\n\n # Write ldl_perm\n write_ldl_perm(f, variables)\n\n # Write ldl_permt\n write_ldl_permt(f, variables)\n\n f.close()", "def bidi_streaming(self) -> global___Snippet.BidiStreaming:", "def add_song():\n return render_template('pong!')", "def ly(self, l: int, lfrac: float) -> float:\n self._check_lfrac(lfrac)\n self._raise_if_not_line(l)\n result = self._read_inline(f\"ly({l},{lfrac})\")\n return result", "def render(sim_file: str, only_stuck: bool) -> None:\n import DLA\n DLA.GREEN = (0, 0, 0) # type: ignore\n DLA.WHITE = (151, 151, 151, 150) # type: ignore\n from DLA import config\n config.USE_PYGAME = True # type: ignore\n from DLA import renderer\n renderer.render(Path(sim_file), only_stuck)", "def midi_to_lilypond_note(note):\n return all_notes[note+4]", "def create_artist_new_music_line(spotify_artist_music):\n body = ''\n for item in spotify_artist_music:\n if item['thumbnail']:\n artist_string = '<p><img src=\"{}\" width=\"{}\" height=\"{}\" /> {} released on {}--{}</p>\\n'\n body += artist_string.format(item['thumbnail'][0]['url'], item['thumbnail'][0]['width'],\n item['thumbnail'][0]['height'], item['name'], item['releaseDate'], item['url'])\n return body", "def all_notes():\n \n return render_template('all_notes.html',colors=music_color,)", "def __init__(self):\n inkex.Effect.__init__(self)\n\n self.doc_center = None\n self.normal_line = {\n 'stroke': '#000000', # black\n 'fill': 'none', # no fill - just a line\n 'stroke-width': '1' # can also be in form '2mm'\n }\n self.cut_line = {\n 'stroke': '#ff0000', # black\n 'fill': 'none', # no fill - just a line\n 'stroke-width': '0.1' # can also be in form '2mm'\n }\n self.doted_line = {\n 'stroke': '#000000', # black\n 'fill': 'none', # no fill - just a line\n 'stroke-width': '1', # can also be in form '2mm'\n 'stroke-linecap': 'butt',\n 'stroke-linejoin': 'miter',\n 'stroke-miterlimit': '10',\n 'stroke-dasharray': '9.883,9.883',\n 'stroke-dashoffset': '0'\n }\n\n # Define the list of parameters defined in the .inx file\n self.OptionParser.add_option(\"-t\", \"--type\", type=\"string\", dest=\"type\", default='perso',\n help=\"Type of template rendered\")\n self.OptionParser.add_option(\"-u\", \"--units\", type=\"string\", dest=\"units\", default='cm',\n help=\"User interface units\")\n self.OptionParser.add_option(\"--style\", type=\"string\", dest=\"style\", default='print',\n help=\"Style of the template\")\n self.OptionParser.add_option(\"-n\", \"--neck\", type=\"float\", dest=\"neck\", default=11,\n help=\"Width of the neck\")\n self.OptionParser.add_option(\"-s\", \"--shoulder\", type=\"float\", dest=\"shoulder\", default=44,\n help=\"Width shoulder to shoulder\")\n self.OptionParser.add_option(\"--hip\", type=\"float\", dest=\"hip\", default=89,\n 
help=\"Hip measurement\")\n self.OptionParser.add_option(\"-w\", \"--waist\", type=\"float\", dest=\"waist\", default=79,\n help=\"Waist measurement\")\n self.OptionParser.add_option(\"-c\", \"--chest\", type=\"float\", dest=\"chest\", default=97,\n help=\"Chest measurement\")\n self.OptionParser.add_option(\"--hsptochest\", type=\"float\", dest=\"hsp_chest\", default=21,\n help=\"Lenght HSP to chest\")\n self.OptionParser.add_option(\"--hsptowaist\", type=\"float\", dest=\"hsp_waist\", default=45,\n help=\"Lenght HSP to waist\")\n self.OptionParser.add_option(\"--hsptohip\", type=\"float\", dest=\"hsp_hip\", default=67,\n help=\"Lenght HSP to hip\")\n self.OptionParser.add_option(\"-b\", \"--bicep\", type=\"float\", dest=\"bicep\", default=23,\n help=\"Bicep measurement\")\n self.OptionParser.add_option(\"--upersleeve\", type=\"float\", dest=\"top_sleeve\", default=20,\n help=\"Top lenght of the sleeve\")\n self.OptionParser.add_option(\"--bottomsleeve\", type=\"float\", dest=\"bottom_sleeve\", default=17,\n help=\"Bottom lenght of the sleeve\")\n self.OptionParser.add_option(\"-e\", \"--ease\", type=\"float\", dest=\"ease\", default=5,\n help=\"Amount of ease\")\n self.OptionParser.add_option(\"--neck_front\", type=\"float\", dest=\"neck_front\", default=0,\n help=\"Height of the front neck drop\")\n self.OptionParser.add_option(\"--neck_rear\", type=\"float\", dest=\"neck_rear\", default=6,\n help=\"Height of the rear neck drop\")\n self.OptionParser.add_option(\"--shoulder_drop\", type=\"float\", dest=\"shoulder_drop\", default=3,\n help=\"height of the shoulder\")\n self.OptionParser.add_option(\"--grid\", type=\"inkbool\", dest=\"grid\", default=True,\n help=\"Display the Reference Grid \")\n self.OptionParser.add_option(\"--temp\", type=\"inkbool\", dest=\"temp\", default=True,\n help=\"Display the template\")\n self.OptionParser.add_option(\"--active-tab\", type=\"string\", dest=\"active_tab\",\n default='title', help=\"Active tab.\")", "def drawMusicLines():\n global c\n c.create_line(0 , 3, 800, 3, width=2)\n c.create_line(0 , 79, 800, 79, width=2)\n c.create_line(0 , 159, 800, 159, width=2)\n c.create_line(0 , 239, 800, 239, width=2)\n c.create_line(0 , 319, 800, 319, width=2)\n c.create_line(799 , 0, 799, 320, width=6)\n c.create_line(790 , 0, 790, 320, width=2)\n c.create_line(3, 0, 3, 320, width=2)", "def dspyRender(self):\n pass", "def bar_to_lilypond_notes(notes):\n lp_notes = []\n if notes[0] is None:\n notes = notes[1:]\n if notes[0] is None:\n lp_notes.append(\"r\")\n for n in notes:\n if n is None:\n continue\n if type(n) is list:\n lp_notes.append([midi_to_lilypond_note(x) for x in n])\n else:\n lp_notes.append(midi_to_lilypond_note(n))\n return lp_notes", "def astext(self):\n self.elements.update({\n 'body': u''.join(self.body),\n 'indices': self.generate_indices()\n })\n return self.render('beamer.tex_t', self.elements)", "def render(self):\r\n \r\n # --------------------------------\r\n # Set world-level Panda properties\r\n # --------------------------------\r\n\r\n # Create Ambient Light 1\r\n ambientLight = AmbientLight( 'ambientLight_1' )\r\n ambientLight.setColor( Vec4( 0.2, 0.2, 0.2, 1 ) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n ambientLightNP.setPos( 50, 50, 50)\r\n render.setLight(ambientLightNP)\r\n\r\n # Create Ambient Light 2\r\n ambientLight = AmbientLight( 'ambientLight_2' )\r\n ambientLight.setColor( Vec4(0.2, 0.2, 0.2, 1) )\r\n ambientLightNP = render.attachNewNode( ambientLight.upcastToPandaNode() )\r\n 
ambientLightNP.setPos( 50, -50, 50)\r\n render.setLight(ambientLightNP)\r\n# \r\n# # Directional light 01\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.8, 0.2, 0.2, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing backwards, towards the camera.\r\n# directionalLightNP.setHpr(180, 20, 0)\r\n# render.setLight(directionalLightNP)\r\n#\r\n# # Directional light 02\r\n# directionalLight = DirectionalLight( \"directionalLight\" )\r\n# directionalLight.setColor( Vec4( 0.2, 0.2, 0.8, 1 ) )\r\n# directionalLightNP = render.attachNewNode( directionalLight.upcastToPandaNode() )\r\n# # This light is facing forwards, away from the camera.\r\n# directionalLightNP.setHpr(0, -20, 0)\r\n# render.setLight(directionalLightNP)\r\n\r\n #create a directional light\r\n #light = DirectionalLight('my dlight')\r\n\r\n #create a point light\r\n light = PointLight('plight')\r\n #light.setColor(VBase4(0.2, 0.2, 0.2, 1))\r\n\r\n #The following line doesn't work in Panda3D 1.7.0\r\n #lightPath = render.attachNewNode(light.upcastToPandaNode())\r\n\r\n lightPath = render.attachNewNode(light)\r\n lightPath.setPos( 10, 10, 10)\r\n\r\n #lightPath.lookAt(objPath)\r\n\r\n #illuminate all\r\n render.setLight(lightPath)\r\n #illuminate only objPath objects\r\n #objPath.setLight(lightPath)\r\n\r\n #self.SetMouseControls(objPath)\r\n #self.setKeyboardControls()\r\n \r\n taskMgr.add(self.mouseControlsTask, 'mouseControlsTask')\r\n #taskMgr.add(self.cameraMovementTask, 'cameraMovementTask') \r\n\r\n base.setBackgroundColor( .0, .0, .0 )\r\n\r\n #taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")\r\n #core.cmd.exeCommand(\"LoadEdge\", obj, file_name+self.WingedEdgeExtensions[0], file_name+self.WingedEdgeExtensions[1], file_name+self.WingedEdgeExtensions[2], file_name+self.WingedEdgeExtensions[3])\r\n #self.model = importer.loadFile(fileName)\r\n #if self.model is None:\r\n # print \"Unsupported file\"\r\n # return\r", "def render(self):", "def mj_render(self):\n self.render(mode='human')", "def litchi(args):\n p = OptionParser(litchi.__doc__)\n opts, args, iopts = p.set_image_options(args, figsize=\"9x6\")\n\n if len(args) != 4:\n sys.exit(not p.print_help())\n\n datafile, bedfile, slayout, switch = args\n fig = plt.figure(1, (iopts.w, iopts.h))\n root = fig.add_axes([0, 0, 1, 1])\n\n Synteny(fig, root, datafile, bedfile, slayout, switch=switch)\n\n # legend showing the orientation of the genes\n draw_gene_legend(root, 0.4, 0.7, 0.82)\n\n # On the left panel, make a species tree\n fc = \"lightslategrey\"\n\n coords = {}\n xs, xp = 0.16, 0.03\n coords[\"lychee\"] = (xs, 0.37)\n coords[\"clementine\"] = (xs, 0.5)\n coords[\"cacao\"] = (xs, 0.6)\n coords[\"strawberry\"] = (xs, 0.7)\n coords[\"grape\"] = (xs, 0.8)\n xs -= xp\n coords[\"Sapindales\"] = join_nodes(root, coords, \"clementine\", \"lychee\", xs)\n xs -= xp\n coords[\"Rosid-II\"] = join_nodes(root, coords, \"cacao\", \"Sapindales\", xs)\n xs -= xp\n coords[\"Rosid\"] = join_nodes(root, coords, \"strawberry\", \"Rosid-II\", xs)\n xs -= xp\n coords[\"crown\"] = join_nodes(root, coords, \"grape\", \"Rosid\", xs, circle=False)\n\n # Names of the internal nodes\n for tag in (\"Rosid\", \"Rosid-II\", \"Sapindales\"):\n nx, ny = coords[tag]\n nx, ny = nx - 0.01, ny - 0.02\n root.text(nx, ny, tag, rotation=90, ha=\"right\", va=\"top\", color=fc)\n\n root.set_xlim(0, 1)\n root.set_ylim(0, 1)\n root.set_axis_off()\n\n pf = \"litchi\"\n image_name = pf + 
\".\" + iopts.format\n savefig(image_name, dpi=iopts.dpi, iopts=iopts)", "def _latex_(self):\n p = self._weight_rat.numer()\n q = self._weight_rat.denom()\n old = s = \"\\\\begin{verbatim}\\\\end{verbatim}\"\n new = \"\"\n # s=\"\\\\text{Space of Vector-Valued harmonic weak Maass forms on }\"\n # s+=latex(self.multiplier().group)+\" \\\\text{ of weight } \\\\frac{\"+str(p)+\"}{\"+str(q)+\"}\"\n # s+=\"\\\\text{and values in } \\\\mathbb{C}\\\\left[\\\\mathbb{Z}/\"+latex(2*self.multiplier().N)+\"\\\\mathbb{Z}\\\\right]\\\\text{.}\"\n # s+=\"$ \\\\text{ The representation is }\"+latex(self.multiplier())+\"\\\\text{.}\"\n s = \"\\\\begin{verbatim}\\\\end{verbatim}\"\n s += \" Space of Vector-Valued harmonic weak Maass forms on $\"\n s += latex(self.multiplier().group()) + \"$ of weight $\\\\frac{\" + str(p) + \"}{\" + str(q) + \"}$\"\n s += \"and values in $\\\\mathbb{C}\\\\left[\\\\mathbb{Z}/\" + latex(2 * self.multiplier().N) + \"\\\\mathbb{Z}\\\\right]$. \"\n s += \"The representation is \" + self.multiplier()._latex_().replace(old, new) + \".\"\n\n return s", "def createMelody(song, outputSongFileName, timing=4):\n wavInput = (())\n wavInput1 = (())\n wavInput2 = (())\n wavInput3 = (())\n\n # Remove the beginning and end portions of the canvas that are blank\n while song[0] == ['R','R','R','R']:\n del song[0]\n while song[-1] == ['R','R','R','R']:\n del song[-1]\n\n for notesList in song:\n\n remove_dup(notesList)\n\n notesNum = []\n for i in range(len(notesList)):\n if (notesList[i].upper() == 'R'):\n notesNum.append('')\n elif (notesList[i].upper() == 'A' or notesList[i].upper() == 'B'):\n notesNum.append('3')\n else:\n notesNum.append('4')\n\n wavInput = ((notesList[0].lower() + str(notesNum[0]), timing),) + wavInput\n wavInput1 = ((notesList[1].lower() + str(notesNum[1]), timing),) + wavInput1\n wavInput2 = ((notesList[2].lower() + str(notesNum[2]), timing),) + wavInput2\n wavInput3 = ((notesList[3].lower() + str(notesNum[3]), timing),) + wavInput3\n\n\n wavInput = wavInput[::-1]\n wavInput1 = wavInput1[::-1]\n wavInput2 = wavInput2[::-1]\n wavInput3 = wavInput3[::-1]\n\n wavNames = [\".wav1.wav\",\".wav2.wav\",\".wav3.wav\",\".wav4.wav\"]\n wavInputs = [wavInput,wavInput1,wavInput2,wavInput3]\n\n validWavInputs = []\n\n for i in range(len(wavInputs)):\n if isAllRests(wavInputs[i]) == False:\n validWavInputs.append(wavInputs[i])\n\n validWavNames = wavNames[:len(validWavInputs)]\n\n call(['python','GenerateWavFiles.py',str(validWavNames) + \"@\" + str(validWavInputs)])\n\n sounds = []\n for i in range(len(validWavNames)):\n sounds.append(AudioSegment.from_wav(validWavNames[i]))\n\n combined = sounds[0]\n for i in range(1, len(sounds)):\n combined = combined.overlay(sounds[i])\n\n combined.export(outputSongFileName, format='wav')", "def bands_nlp() -> Language:\n nlp = English()\n nlp.add_pipe(BandNameNerPipe(nlp, MusicBand.select(), \"band_ents\"))\n\n return nlp", "def render(self,screen):\n for boids in self.boid_list:\n boids.render(screen)", "def render(self, position, dimensions, filename):\n t = position[0]\n volume = np.zeros(dimensions,dtype=np.uint8)\n # render it!\n print(t, len(self.frames), self.frames)\n for i,f in enumerate(self.frames):\n f.render( position[1:],volume[t+i,:,:] )\n # save it\n tiff.imsave(filename, volume)", "def _lib_html(self, libname: str, history: List[str]) -> str:\n found = None\n for name, libdoc in self.docs(history).items():\n if libname.lower().strip() == name.lower().strip():\n found = libdoc\n break\n\n found = found or 
self._load_libdoc(libname)\n\n if found is None:\n return\n\n formatter = DocFormatter(found.keywords, found.doc, found.doc_format)\n libdoc_json = JsonConverter(formatter).convert(found)\n return LIB_TEMPLATE.render(libdoc=libdoc_json, **DOC_CONTEXT)", "def render(self, mode='human', close=False):\n self.gym.render(mode=mode, close=close)", "def pitch_name_to_lilypond(name):\n validate_pitch_name(name)\n octave = int(name.replace('#', '').replace('b', '')[1:])\n ref_octave = 3\n if octave > ref_octave:\n octave_str = '\\'' * (octave - ref_octave)\n elif octave < ref_octave:\n octave_str = ',' * abs(octave - ref_octave)\n else:\n octave_str = ''\n name = name[:name.index(str(octave))] + octave_str\n return name.replace('#', 'is').replace('b', 'es').lower()", "def _latex_(self):\n return \"\\\\textnormal{Decoder of } %s \\\\textnormal{ through } %s\" % (self.code(), self.original_decoder())", "def render(self):\n self.axial.Render()\n self.coronal.Render()\n self.sagittal.Render()\n #self.isosurface.Render()\n #self.rwi_pcp.Render()", "def renderNote(self):\n\t\tif self.activeNote:\n\t\t\tself.activeNote.render()", "def embed():", "def fromLily(cls, lilystring):\n if lilystring == None:\n return ''\n if len(lilystring) < 1:\n return ''\n name = cls._NoteNameFromLilyString(lilystring)\n if name == None:\n return None\n dur = cls._DecodeLilyDuration(lilystring)\n octave = cls._DecodeLilyOctave(lilystring)\n if name == 'r':\n note = Rest.new(dur)\n else:\n note = cls.new(name + str(octave), dur)\n tie = cls._DecodeLilyTie(lilystring)\n if tie:\n note.setTie(True)\n return note", "def make_lexicon_txt(self):\n raise NotImplementedError", "def graphs_kelly():\n return render_template(\"graphs-Kelly.html\")", "def generate():\n data = request.json\n melody_corpus, melody_set, notes_indices, indices_notes = model_data['corpus']\n temperature = float(data['temperature'])\n phrase_len = int(data['seed_length'])\n seq_len = int(data['seq_len'])\n model = model_data['models']['len_{}'.format(str(phrase_len))]\n songname = data['song_name']\n\n melody = generate_sequence(model, seq_len, melody_corpus, melody_set, phrase_len, notes_indices, indices_notes, temperature)\n stream = play_melody(melody)\n create_midi_from_stream(stream, songname)\n midi_upload_path = upload_to_s3_bucket('static/tmp/{}.mid'.format(songname), '{}.mid'.format(songname), AWS_BUCKET_NAME)\n png_path = create_png_from_stream(stream, songname)\n png_upload_path = upload_to_s3_bucket('static/tmp/{}.png'.format(songname), '{}.png'.format(songname), AWS_BUCKET_NAME)\n\n return jsonify(midi_s3_path=midi_upload_path, img_s3_path=png_upload_path)", "def RenderTeX(self, Q, size=None, dpi=50):\n code = Q.code\n name = Q.name\n tex = Q.tex\n if (defaults.use_tex):\n #try:\n if (1):\n # convert width/height in pixels to inches and add some padding\n if (size is None):\n w = defaults.tex_width\n h = defaults.tex_height\n else:\n w,h = size\n w *= defaults.tex_padding/dpi\n h *= defaults.tex_padding/dpi\n fig = Figure(figsize=(w,h))\n formula = FigureCanvas(self, -1, fig)\n\n fig.clear()\n fig.text(0.05, 0.25, tex, fontsize=defaults.tex_fontsize) # draw the LaTeX formula onto canvas\n formula.draw()\n #try:\n # filename = os.path.join(defaults.quantity_code_image_path, \"{}.png\".format(code))\n # png_image = wx.Image(filename, wx.BITMATP_TYPE_ANY).ConvertToBitmap()\n # formula = wx.StaticBitmap(self, -1, png_image) #, pos=position, size=size)\n else:\n #except:\n formula = wx.StaticText(self, -1, \"Error Importing Formula Image\")\n else:\n 
formula = wx.StaticText(self, -1, \"LaTeX was disabled\")\n\n return formula", "def _make_song_list_html(song_list):\n return '<p class=\"song_name\">' + '<br>'.join([f'{song[\"title\"]} <span class=\"artist_album\">{song[\"artist\"]} - {song[\"album\"]}</span>' for song in song_list]) + '</p>'", "def artist_page(name=None):\n\n # regular expressions: always almost never not a good idea\n match = search(r\"^artist-\\w{8}-\\w{4}-\\w{4}-\\w{4}-\\w{12}$\", name)\n\n if match:\n id = name\n artist_data = generate_artist_data(id, use_id=True)\n else:\n artist_data = generate_artist_data(name)\n id = artist_data[\"id\"]\n\n track_data = generate_track_data(id)\n similarity_data = generate_similar_artists(id)\n\n return render_template(\"artist.html\",\n data=artist_data, tdata=track_data, sim=similarity_data,\n pygmented={\n # \"adata\": highlight(str(artist_data), JsonLexer(), HtmlFormatter()),\n # \"tdata\": highlight(str(track_data), JsonLexer(), HtmlFormatter()),\n #\"sdata\": highlight(str(similarity_data), JsonLexer(), HtmlFormatter())\n \"adata\": str(artist_data),\n \"tdata\": str(track_data),\n \"sdata\": str(similarity_data)\n },\n json_uuid=url_for(\"static\",\n filename=\"json/\" + get_mood_json(track_data))\n )", "def render(self, mode='human'):", "def render_product(children):\r\n if len(children) == 1:\r\n return children[0]\r\n\r\n position = \"numerator\" # or denominator\r\n fraction_mode_ever = False\r\n numerator = []\r\n denominator = []\r\n latex = \"\"\r\n\r\n for kid in children:\r\n if position == \"numerator\":\r\n if kid.latex == \"*\":\r\n pass # Don't explicitly add the '\\cdot' yet.\r\n elif kid.latex == \"/\":\r\n # Switch to denominator mode.\r\n fraction_mode_ever = True\r\n position = \"denominator\"\r\n else:\r\n numerator.append(kid)\r\n else:\r\n if kid.latex == \"*\":\r\n # Switch back to numerator mode.\r\n # First, render the current fraction and add it to the latex.\r\n latex += render_frac(numerator, denominator) + r\"\\cdot \"\r\n\r\n # Reset back to beginning state\r\n position = \"numerator\"\r\n numerator = []\r\n denominator = []\r\n elif kid.latex == \"/\":\r\n pass # Don't explicitly add a '\\frac' yet.\r\n else:\r\n denominator.append(kid)\r\n\r\n # Add the fraction/numerator that we ended on.\r\n if position == \"denominator\":\r\n latex += render_frac(numerator, denominator)\r\n else:\r\n # We ended on a numerator--act like normal multiplication.\r\n num_latex = r\"\\cdot \".join(k.latex for k in numerator)\r\n latex += num_latex\r\n\r\n tall = fraction_mode_ever or any(k.tall for k in children)\r\n return LatexRendered(latex, tall=tall)", "def render_knowl_in_template(knowl_content, **kwargs):\n render_me = u\"\"\"\\\n {%% include \"knowl-defs.html\" %%}\n {%% from \"knowl-defs.html\" import KNOWL with context %%}\n {%% from \"knowl-defs.html\" import KNOWL_LINK with context %%}\n {%% from \"knowl-defs.html\" import KNOWL_INC with context %%}\n {%% from \"knowl-defs.html\" import TEXT_DATA with context %%}\n\n %(content)s\n \"\"\"\n knowl_content = md_preprocess(knowl_content)\n\n # markdown enabled\n render_me = render_me % {'content': md.convert(knowl_content)}\n # Pass the text on to markdown. Note, backslashes need to be escaped for\n # this, but not for the javascript markdown parser\n try:\n return render_template_string(render_me, **kwargs)\n except Exception as e:\n return \"ERROR in the template: %s. 
Please edit it to resolve the problem.\" % e", "def bb_artist(hit):\n try:\n artistid = hit.group(1)\n artist = Artist.objects.get(id=artistid)\n T = loader.get_template('webview/t/artist.html')\n C = Context({'A' : artist})\n return T.render(C)\n except:\n return \"[artist]%s[/artist]\" % artistid", "def visualiser(filename):\n\tcollection = mongo.db[filename]\n\tjsonEncoder = hepmcio_json.HepMCJSONEncoder()\n\thepMCDecoder = hepmcio_json.HepMCJSONDecoder()\n\tjsonDecoder = json.JSONDecoder()\n\t#Get first event data in file and decode to HepMCIO objects.\n\tevent = collection.find_one({\"type\":\"event\", \"no\":1}, {\"_id\":False})\n\tparticleJson = collection.find({\"type\":\"particle\", \"event\":event[\"barcode\"]}, {\"_id\":False})\n\tparticles = []\n\tfor particle in particleJson:\n\t\tparticles.append(jsonEncoder.encode(particle))\n\t\n\tvertices = []\n\tvertexJson = collection.find({\"type\":\"vertex\", \"event\":event[\"barcode\"]}, {\"_id\":False})\n\tfor vertex in vertexJson:\n\t\tvertices.append(jsonEncoder.encode(vertex))\n\tevent = jsonEncoder.encode(event)\n\t\n\t\n\t#Decode event to find interesting particles, i.e. particles above PT_CUTOFF and their ancestors..\n\teventObject = hepmcio_json.EventJSONObject(event, particles, vertices)\n\tdecodedEvent = hepMCDecoder.decode(eventObject)\n\n\tPT_CUTOFF = 0.0\n\tintParticles = [particle for particle in decodedEvent.particles.values() if particle.status!=1 and \\\n\t\tparticle.mom[0]**2 + particle.mom[1]**2 > PT_CUTOFF**2]\n\t#Build a single list from the individual particle ancestor lists.\n\tintParticleAncestors = reduce(operator.concat, [hepmcio.get_ancestors(particle)[:-1] for particle in intParticles])\n\n\tparticles = []\n\tfor particle in (intParticles + intParticleAncestors):\n\t\t#Encode particle to JSON, decode to Python dicts.\n\t\tparticles.append(jsonDecoder.decode(jsonEncoder.encode(particle)))\n\t#Decode to Python dicts.\n\tvertices = list(map(jsonDecoder.decode, vertices))\n\t\n\treturn render_template(\"visualiser.html\", title=\"Visualiser\", file=filename, particles=particles, vertices=vertices)", "def render( *args, **kwargs ):", "def render(self):\n\n self.desert_image.render()\n self.cannon_image.render()\n self.play_button.render()\n self.escape_label.render()", "def kindler (self,filename=''):\r\n\r\n import kindleflex as kindle\r\n from numbertools import rom_to_int, is_roman\r\n\r\n\r\n #To suspend spelling, while keeping default value\r\n check_spelling_was = self.check_spelling\r\n self.check_spelling = False\r\n\r\n\r\n YESTERMS_PLUS = YESTERMS+[' ',' ']\r\n\r\n #To load file\r\n while not filename:\r\n filename = input('FILENAME?')\r\n\r\n try:\r\n note_text = file_access.get_text_file(filename)\r\n except:\r\n display.noteprint(('ATTENTION','File cannot be found!'))\r\n note_text = ''\r\n if 'Highlight (' in note_text:\r\n #If there are highlights and notes\r\n\r\n note_obj = kindle.GetNotes(note_text)\r\n note_obj.set_for_kindle()\r\n\r\n else:\r\n note_obj = kindle.GetNotes(note_text)\r\n note_obj.set_for_kindle()\r\n note_obj.set_divider('Note -')\r\n\r\n note_iterator = note_obj.return_iterator()()\r\n active_qualities = set()\r\n\r\n\r\n if input('SHOW TEXT?') in YESTERMS_PLUS:\r\n print(note_text)\r\n\r\n for qual in note_obj.qualities:\r\n if input('Include sequence key for '+qual+'?') in YESTERMS_PLUS:\r\n active_qualities.add(qual)\r\n\r\n additional_keys = input('Add additional keys?') in YESTERMS_PLUS\r\n annotation_before = input('Add annotation before?') in YESTERMS_PLUS\r\n 
annotation_after = input('Add annotation after?') in YESTERMS_PLUS\r\n query_index = input('Query index position?') in YESTERMS_PLUS\r\n only_notes = input('Only include highlights with notes attached?')\r\n temp_c_i = input('Restrict to the following colors?')\r\n include_part = input('Include part?') in YESTERMS_PLUS\r\n if temp_c_i:\r\n colors_to_include = set(x.strip() for x in temp_c_i.split(','))\r\n else:\r\n colors_to_include = set()\r\n\r\n if not query_index:\r\n starting_index = ''\r\n while not starting_index:\r\n starting_index = input('Starting index position?')\r\n try:\r\n starting_index = Index(starting_index)\r\n except:\r\n pass\r\n else:\r\n starting_index = Index(-1)\r\n\r\n\r\n\r\n go_on = True\r\n note = ''\r\n count_down = 0\r\n automatic = False\r\n temp_i = ''\r\n display.noteprint(('NUMBER OF NOTES in COLLECTION',str(note_obj.size)))\r\n\r\n current_iteration=0\r\n while True:\r\n current_iteration+=1\r\n try:\r\n note = next(note_iterator)\r\n except:\r\n display.noteprint(('ATTENTION','FINISHED!'))\r\n break\r\n\r\n\r\n print(str(current_iteration)+'/'+str(note_obj.size))\r\n if count_down > 0:\r\n count_down -= 1\r\n\r\n else:\r\n\r\n new_keys = set()\r\n\r\n text = ''\r\n note_part = ''\r\n part = ''\r\n if 'TEXT' in note:\r\n text = note['TEXT']\r\n if 'NOTE' in note:\r\n note_part = note['NOTE']\r\n if 'highlightcolor' in note:\r\n highlight_color = note['highlightcolor']\r\n\r\n\r\n\r\n\r\n\r\n\r\n if not automatic and ((not colors_to_include or highlight_color in colors_to_include)\r\n and (not only_notes or note_part)):\r\n display.noteprint(('NUMBER OF NOTES in COLLECTION',str(note_obj.size)))\r\n display.noteprint(('CURRRENT POSITION',str(note_obj.position)))\r\n display.noteprint(('TEXT',text))\r\n display.noteprint(('NOTE',note_part))\r\n display.noteprint(('HIGHLIGHT COLOR',highlight_color))\r\n\r\n print_string = ''\r\n\r\n for qual in active_qualities:\r\n if qual in note:\r\n print_string += qual + ':' + note[qual] +', '\r\n if len(print_string)>1:\r\n print_string = print_string[0:-2]\r\n\r\n display.noteprint(('QAULITIES',print_string))\r\n\r\n temp_i = input('CREATE NOTE or quit to QUIT or NUMBERS to SKIP FORWARD or A(UTOMATIC) to add the rest of notes without querying')\r\n\r\n if temp_i.isnumeric():\r\n count_down = int(temp_i)\r\n if temp_i in ['A','AUTOMATIC']:\r\n\r\n automatic = True\r\n\r\n if temp_i in QUITTERMS:\r\n break\r\n\r\n\r\n elif (((not colors_to_include or highlight_color in colors_to_include)\r\n and (not only_notes or note_part))\r\n and (automatic or ((count_down == 0 and (temp_i in YESTERMS_PLUS or len(temp_i)>1 and temp_i[0]==' '))))):\r\n\r\n for qual in active_qualities:\r\n if qual in note:\r\n val = note[qual]\r\n if is_roman(val):\r\n val = str(rom_to_int(val))\r\n\r\n if qual == 'chapter':\r\n\r\n\r\n\r\n chapter_title = ''\r\n chapter = val\r\n\r\n if ':' in chapter:\r\n # THIS is specially designed for the kindle note format\r\n chapter_number, chapter_title = chapter.split(':')[0].strip(),chapter.split(':')[1].strip()\r\n if not chapter_number.isnumeric():\r\n try:\r\n chapter_number = str(rom_to_int(chapter_number.lower()))\r\n except:\r\n pass\r\n else:\r\n pass\r\n else:\r\n if not chapter.isnumeric() and not (part and include_part):\r\n\r\n part = chapter\r\n chapter_number = ''\r\n\r\n if chapter_number:\r\n new_keys.add('chapter@'+chapter_number)\r\n if chapter_title:\r\n new_keys.add('chaptertitle@'+chapter_title)\r\n else:\r\n new_keys.add(qual+'@'+val)\r\n\r\n if not automatic and additional_keys:\r\n 
for x in input('ADDITIONAL KEYS?').split(','):\r\n new_keys.add(x.strip())\r\n before, after = '',''\r\n if not automatic and annotation_before:\r\n before = input('ENTER ANNOTATION BEFORE?') +'/BREAK/'\r\n if not automatic and annotation_after:\r\n after = '/BREAK/' + input('ENTER ANNOTATION AFTER?')\r\n if note_part:\r\n note_part = '/BREAK/' + note_part\r\n\r\n\r\n\r\n if not automatic and query_index:\r\n new_index = None\r\n while not new_index:\r\n new_index = input('INDEX?')\r\n try:\r\n new_index = Index(new_index)\r\n except:\r\n pass\r\n else:\r\n new_index = starting_index\r\n\r\n final_text = before+text+after+note_part\r\n\r\n\r\n self.enter(ek=new_keys,\r\n et=final_text,\r\n right_at=query_index,\r\n ind=new_index)\r\n\r\n self.check_spelling = check_spelling_was", "def render(txt):\n\n # Removing links to other channels\n txt = re.sub(r'<#[^\\|]*\\|(.*)>', r'#\\g<1>', txt)\n\n # Removing links to other users\n txt = re.sub(r'<(@.*)>', r'\\g<1>', txt)\n\n # handle named hyperlinks\n txt = re.sub(r'<([^\\|]*)\\|([^\\|]*)>', r'<a href=\"\\g<1>\" target=\"blank\">\\g<2></a>', txt)\n\n # handle unnamed hyperlinks\n txt = re.sub(r'<([^a|/a].*)>', r'<a href=\"\\g<1>\" target=\"blank\">\\g<1></a>', txt)\n\n # handle ordered and unordered lists\n for delimeter in LIST_DELIMITERS:\n slack_tag = delimeter\n class_name = LIST_DELIMITERS[delimeter]\n\n # Wrap any lines that start with the slack_tag in <li></li>\n list_regex = u'(?:^|\\n){}\\s?(.*)'.format(slack_tag)\n list_repl = r'<li class=\"list-item-{}\">\\g<1></li>'.format(class_name)\n txt = re.sub(list_regex, list_repl, txt)\n\n # hanlde blockquotes\n txt = re.sub(u'(^|\\n)(?:&gt;){3}\\s?(.*)$', r'\\g<1><blockquote>\\g<2></blockquote>', txt, flags=re.DOTALL)\n txt = re.sub(u'(?:^|\\n)&gt;\\s?(.*)\\n?', r'<blockquote>\\g<1></blockquote>', txt)\n\n # handle code blocks\n txt = re.sub(r'```\\n?(.*)```', r'<pre>\\g<1></pre>', txt, flags=re.DOTALL)\n txt = re.sub(r'\\n(</pre>)', r'\\g<1>', txt)\n\n # handle bolding, italics, and strikethrough\n for wrapper in FORMATTERS:\n slack_tag = wrapper\n html_tag = FORMATTERS[wrapper]\n\n # Grab all text in formatted characters on the same line unless escaped\n regex = r'(?<!\\\\)\\{t}([^\\{t}|\\n]*)\\{t}'.format(t=slack_tag)\n repl = r'<{t}>\\g<1></{t}>'.format(t=html_tag)\n txt = re.sub(regex, repl, txt)\n\n # convert line breaks\n txt = txt.replace('\\n', '<br />')\n\n # clean up bad HTML\n parser = CustomSlackdownHTMLParser(txt)\n txt = parser.clean()\n\n # convert multiple spaces\n txt = txt.replace(r' ', ' &nbsp')\n\n return txt", "def render_tree(self, highlight=[]):\n # HTML\n WORD = '<span class=\"word-' + self.id + '-%s\">%s</span>'\n words = ' '.join(WORD % (i,w) for i,w in enumerate(self.words)) if self.words else ''\n html = open('%s/vis/tree-chart.html' % APP_HOME).read() % (self.id, self.id, words) \n display_html(HTML(data=html))\n\n # JS\n JS_LIBS = [\"http://d3js.org/d3.v3.min.js\"]\n js = open('%s/vis/tree-chart.js' % APP_HOME).read() % (self.id, json.dumps(self.to_json()), str(highlight))\n display_javascript(Javascript(data=js, lib=JS_LIBS))", "def render(self):\n if self.frame_pos:\n self.pos = [\n self.frame_pos[0] + self.position[0] - (self.size[0] / 2),\n self.frame_pos[1] + self.position[1] - (self.size[1] / 2),\n ]\n if self.variable_text:\n self.image = self.fontA.render(self.text, 1, self.color)", "def generate_playlist_display():\n if not g.ytpls:\n g.message = c.r + \"No playlists found!\"\n return logo(c.g) + \"\\n\\n\"\n g.rprompt = page_msg(g.current_page)\n\n 
cw = getxy().width\n fmtrow = \"%s%-5s %s %-12s %-8s %-2s%s\\n\"\n fmthd = \"%s%-5s %-{}s %-12s %-9s %-5s%s\\n\".format(cw - 36)\n head = (c.ul, \"Item\", \"Playlist\", \"Author\", \"Updated\", \"Count\", c.w)\n out = \"\\n\" + fmthd % head\n\n for n, x in enumerate(g.ytpls):\n col = (c.g if n % 2 == 0 else c.w)\n length = x.get('size') or \"?\"\n length = \"%4s\" % length\n title = x.get('title') or \"unknown\"\n author = x.get('author') or \"unknown\"\n updated = yt_datetime(x.get('updated'))[1]\n title = uea_pad(cw - 36, title)\n out += (fmtrow % (col, str(n + 1), title, author[:12], updated, str(length), c.w))\n\n return out + \"\\n\" * (5 - len(g.ytpls))", "def renderLadder(ladderHeight, interStep, riser):\n\n\t\t\t\t\t#building the ladder model and the ladder box\n\t\t\t\t\tladderModel = ladder.make_ladder(ladderHeight, interStep, riser)\n\t\t\t\t\twith open(\"lines/ladder.lines\", \"rb\") as ladderFile:\n\t\t\t\t\t\treader = csv.reader(ladderFile, delimiter=\",\")\n\t\t\t\t\t\trow = next(reader)\n\t\t\t\t\t\tladderModel = T([1,2])([float(row[0])*xfactor, float(row[1])*yfactor])(ladderModel)\n\t\t\t\t\tladderBOX = CUBOID([SIZE([1])(ladderModel)[0]/xfactor,SIZE([2])(ladderModel)[0]/yfactor, SIZE([3])(ladderModel)[0]/zfactor])\n\t\t\t\t\tladderBOX = T([1,2])([float(row[0])-SIZE([1])(ladderBOX)[0]/2., float(row[1])-SIZE([2])(ladderBOX)[0]/2.])(ladderBOX)\n\n\t\t\t\t\t#building roof model\n\t\t\t\t\tif isinstance(vertices, basestring):\n\t\t\t\t\t\twith open(\"lines/\" + vertices + \".lines\", \"rb\") as file:\n\t\t\t\t\t\t\treader = csv.reader(file, delimiter=\",\")\n\t\t\t\t\t\t\tnewVertices = []\n\t\t\t\t\t\t\tfor row in reader:\n\t\t\t\t\t\t\t\tnewVertices.append([float(row[0]), float(row[1])])\n\t\t\t\t\tif newVertices:\n\t\t\t\t\t\troofModel = roof.roofBuilder(newVertices, pitchAngle, height)\n\t\t\t\t\telse:\n\t\t\t\t\t\troofModel = roof.roofBuilder(vertices, pitchAngle, height)\n\t\t\t\t\troofModel = T([3])([nStorey*3/zfactor])(roofModel)\n\t\t\t\t\troofModel = S([1,2,3])([xfactor*1.09,yfactor*1.09,zfactor])(roofModel)\n\t\t\t\t\troofModel = T([1,2])([-SIZE([1])(roofModel)[0]*0.05,-SIZE([2])(roofModel)[0]*0.05]) (roofModel)\n\n\t\t\t\t\t#building full house model with windows and doors\n\t\t\t\t\tfullHouse = []\n\t\t\t\t\tfor story in range(nStorey):\n\t\t\t\t\t\thouseModel = house.build_house(story, windowModel, doorModel, ladderBOX)\n\t\t\t\t\t\tfullHouse.append(houseModel)\n\t\t\t\t\t\tfullHouse.append(T([3])([3]))\n\t\t\t\t\tfullHouse = STRUCT(fullHouse)\n\n\t\t\t\t\t#returning the result\n\t\t\t\t\treturn STRUCT([roofModel, ladderModel, fullHouse])", "def displayMelody(self):\r\n print(self.notes)", "def playMelody(melody: str):\n pass", "def info(self):\n txt = \"\"\"Lick Index {s.name}\n wavelength units: {s.wavelength_unit}\n Index Band: {s.band}\n Blue continuum band: {s.blue}\n Red continuum band: {s.red}\n Measurement unit: {s.index_unit}\"\"\".format(s=self)\n print(txt)", "def info(self):\n txt = \"\"\"Lick Index {s.name}\n wavelength units: {s.wavelength_unit}\n Index Band: {s.band}\n Blue continuum band: {s.blue}\n Red continuum band: {s.red}\n Measurement unit: {s.index_unit}\"\"\".format(s=self)\n print(txt)", "def repr_html(self, ex: np.ndarray) -> str:\n if self.sample_rate:\n rate = self.sample_rate\n else:\n # We should display an error message once to warn the user the sample\n # rate was auto-infered. 
Requirements:\n # * Should appear only once (even though repr_html is called once per\n # examples)\n # * Ideally should appear on Colab (while `logging.warning` is hidden\n # by default)\n rate = 16000\n\n audio_str = utils.get_base64(\n lambda buff: _save_wav(buff, ex, rate)\n )\n return (\n f'<audio controls src=\"data:audio/ogg;base64,{audio_str}\" '\n ' controlsList=\"nodownload\" />'\n )", "async def music():\n widget = Widget()\n widget.icon = ' MPD '\n\n char_limit = 50\n\n async for song, state in _mpd_listener():\n if len(song) > char_limit:\n song = song[:char_limit - 1] + '…'\n\n if state == 'play':\n widget.icon_color() # Set defaults\n elif state == 'pause':\n widget.icon_color(background='#e7c547')\n else:\n song = ''\n\n widget.text = song\n\n yield widget", "def render(self, rstate):\n pass", "def compileIntro(self):\n out = audio.AudioQuantumList()\n intro = audio.AudioData(self.sample_path + self.template['intro'], sampleRate=44100, numChannels=2, verbose=False)\n \n # First 4 bars of song\n custom_bars = []\n\n if not self.beats or len(self.beats) < 16:\n # Song is not long or identifiable enough\n # Take our best shot at making something\n self.tempo = 60.0 * 16.0 / self.original.duration\n for i in xrange(0, 4):\n bar = []\n for j in xrange(0, 4):\n length = self.original.duration / 16.0\n start = ((i * 4) + j) * length\n bar.append(audio.AudioQuantum(start, length, None, 0, self.original.source))\n custom_bars.append(bar)\n else:\n for i in xrange(0, 4):\n custom_bars.append(self.beats[i*4:(i*4)+4])\n out.extend([x for bar in custom_bars for x in bar])\n\n # First beat of first bar x 4\n for i in xrange(0, 4):\n out.append(custom_bars[0][0])\n \n # First beat of second bar x 4\n for i in xrange(0, 4):\n out.append(custom_bars[1][0])\n\n beatone = custom_bars[2][0]\n beattwo = custom_bars[3][0]\n beatthree = custom_bars[3][2]\n \n # First beat of third bar x 8\n for x in xrange(0, 8):\n out.append(audio.AudioQuantum(beatone.start, beatone.duration/2, None, beatone.confidence, beatone.source))\n\n # First beat of fourth bar x 8\n for x in xrange(0, 8):\n out.append(audio.AudioQuantum(beattwo.start, beattwo.duration/4, None, beattwo.confidence, beattwo.source))\n\n # Third beat of fourth bar x 8\n for x in xrange(0, 8):\n out.append(audio.AudioQuantum(beatthree.start, beatthree.duration/4, None, beatthree.confidence, beatthree.source))\n \n if self.original.analysis.time_signature == 4:\n shifted = self.st.shiftTempo(audio.getpieces(self.original, out), self.template['tempo']/self.tempo)\n else:\n shifted1 = audio.getpieces(self.original, out)\n shifted = self.st.shiftTempo(shifted1, len(shifted1) / ((44100 * 16 * 2 * 60.0)/self.template['tempo']))\n shifted1.unload()\n if shifted.numChannels == 1: \n shifted = self.mono_to_stereo(shifted)\n return self.truncatemix(intro, shifted, self.mixfactor(out))", "def behind_the_music(request, template=\"media/behind_the_music.html\"):\n album = Album.objects.get(pk=3)\n album.images = album.image_set.all()\n album.videos = album.video_set.all()\n\n d = {\"album\": album}\n\n return render(request, template, d)", "def render_power(children):\r\n if len(children) == 1:\r\n return children[0]\r\n\r\n children_latex = [k.latex for k in children if k.latex != \"^\"]\r\n children_latex[-1] = children[-1].sans_parens\r\n\r\n raise_power = lambda x, y: u\"{}^{{{}}}\".format(y, x)\r\n latex = reduce(raise_power, reversed(children_latex))\r\n return LatexRendered(latex, tall=True)", "def render(self, time: float, frame_time: float):\n 
self.example.render(time, frame_time)", "def main():\n options = docopt(main.__doc__)\n mfile = MusicalCodeFile(options['--file'])\n if not options['--output']:\n mfile.play()\n else:\n mfile.save(options['--output'])\n if options['--output-ly']:\n mfile.save_lilypond(options['--output-ly'])", "def write_loop_text(self):\n\t\tself.write_components['loop'] = \"\"\"loop( ({sets})$({cond}), {loop})\n\t\t\"\"\".format( sets = ', '.join(self.shock_gm.database[self.loop_name].names),\n\t\t\t\t\tcond = self.shock_gm.database.get(self.loop_name).to_str,\n\t\t\t\t\tloop = self.loop_text)\n\t\treturn self.write_components['loop']", "def _latex_(self):\n from sage.misc.latex import latex\n if self.parent()._chart.manifold().options.textbook_output:\n return latex(ExpressionNice(self._express))\n else:\n return latex(self._express)", "def render_html(html_template, slides_src):\n return mako.template.Template(html_template, input_encoding='utf-8', output_encoding='utf-8').render(slides=slides_src)", "def render_dl(self, token: SyntaxTreeNode) -> None:\n node = nodes.definition_list(classes=[\"simple\", \"myst\"])\n self.add_line_and_source_path(node, token)\n with self.current_node_context(node, append=True):\n item = None\n for child in token.children or []:\n if child.type == \"dt\":\n item = nodes.definition_list_item()\n self.add_line_and_source_path(item, child)\n with self.current_node_context(item, append=True):\n term = nodes.term(\n child.children[0].content if child.children else \"\"\n )\n self.add_line_and_source_path(term, child)\n with self.current_node_context(term, append=True):\n self.render_children(child)\n elif child.type == \"dd\":\n if item is None:\n error = self.reporter.error(\n (\n \"Found a definition in a definition list, \"\n \"with no preceding term\"\n ),\n # nodes.literal_block(content, content),\n line=token_line(child),\n )\n self.current_node += [error]\n with self.current_node_context(item):\n definition = nodes.definition()\n self.add_line_and_source_path(definition, child)\n with self.current_node_context(definition, append=True):\n self.render_children(child)\n else:\n error_msg = self.reporter.error(\n (\n \"Expected a term/definition as a child of a definition list\"\n f\", but found a: {child.type}\"\n ),\n # nodes.literal_block(content, content),\n line=token_line(child),\n )\n self.current_node += [error_msg]", "def render_equations(self):\n\n #\n # Helpers\n #\n\n # This is always spawned as a separate process\n def make_images(q):\n eq = None\n while True:\n try:\n (eq, hash) = q.get(timeout=1)\n # create and save image\n dir = MEDIA_ROOT + \"/formulas/\"\n math2png([eq], dir, prefix=hash)\n except Empty:\n return\n\n q = Queue()\n p = Process(target=make_images, args=(q,))\n p.start()\n \n def __replace(m):\n formula = m.group(1)\n\n # hash the formula to make a unique url\n h = hashlib.sha1()\n h.update(formula)\n\n # use hexdigest because digest produces possibly unsafe characters\n hash = h.hexdigest() \n\n # This sends the formula to the other thread to render as an image\n q.put((formula, hash))\n \n # Notice the extra 1 before \".png\" that shows up in the hash for some\n # reason.\n return '<img src=\"%sformulas/%s1.png\" alt=\"%s\" />' \\\n % (MEDIA_URL, hash, formula)\n\n #\n # Endhelpers\n #\n\n svalue = re.sub(\n '\\$\\$(.*?)\\$\\$',\n __replace,\n self.body_display,\n re.DOTALL)\n\n # If you're brave, remove the following line, and the user will get their\n # response without having to wait for the images to render. 
Probably.\n p.join()\n\n self.body_display = svalue\n self.save()", "def render(self):\r\n super().render()\r\n layers, titles, lat, lon = self.make_layers()\r\n plots = []\r\n for i in range(len(layers)):\r\n p = figure(\r\n tools=self.tools, \r\n toolbar_location=self.toolbarLocation, \r\n plot_width=self.width, \r\n plot_height=self.height,\r\n x_range=(np.min(lon), np.max(lon)),\r\n y_range=(np.min(lat), np.max(lat)),\r\n title=titles[i]\r\n )\r\n p.xaxis.axis_label = self.xlabel\r\n p.yaxis.axis_label = self.ylabel\r\n colorMapper = LinearColorMapper(palette=self.cmap, low=self.vmin, high=self.vmax)\r\n p.image(\r\n image=[layers[i]], \r\n color_mapper=colorMapper, \r\n x=np.min(lon), \r\n y=np.min(lat), \r\n dw=np.max(lon)-np.min(lon), \r\n dh=np.max(lat)-np.min(lat)\r\n )\r\n\r\n p.add_tools(HoverTool(\r\n tooltips=[\r\n ('longitude', '$x'),\r\n ('latitude', '$y'),\r\n (self.variable + self.unit, '@image'),\r\n ],\r\n mode='mouse'\r\n )\r\n )\r\n\r\n colorBar = ColorBar(\r\n color_mapper=colorMapper, \r\n ticker=BasicTicker(),\r\n label_standoff=12, \r\n border_line_color=None, \r\n location=(0,0)\r\n )\r\n\r\n p.add_layout(colorBar, 'right')\r\n plots.append(p)\r\n \r\n \r\n if not inline(): output_file(get_figure_dir() + self.variable + \".html\", title=self.variable) \r\n show(column(plots))", "def mine():\n\n fig = new_slide()\n slide_heading(fig, 'Lesser-maintained parts')\n\n theta = np.linspace(0, 2*np.pi)\n x = np.cos(theta - np.pi/2)\n y = np.sin(theta - np.pi/2)\n z = theta\n\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n markerline, stemlines, baseline = ax.stem(\n x, y, z, linefmt='grey', markerfmt='D', bottom=np.pi)\n markerline.set_markerfacecolor('none')\n\n ax = fig.add_subplot(1, 2, 2)\n ax.axis('off')\n ax.imshow(imread('webagg.png'))\n\n yield fig", "def _lyrics_embed(colour, page: Dict[str, Any], data: Dict[str, Any]) -> discord.Embed:\n title = [\n x.get(\"value\")\n for x in data.get(\"names\")\n if x.get(\"language\") == LANGUAGE_MAP.get(page[\"cultureCode\"])\n ]\n em = discord.Embed(\n title=title[0] if title else data.get(\"defaultName\"),\n colour=colour,\n )\n em.set_thumbnail(url=data.get(\"thumbUrl\") or \"\")\n if data.get(\"id\"):\n em.url = f\"https://vocadb.net/S/{data['id']}\"\n em.description = page[\"value\"][:4090] if page.get(\"value\") else \"No lyrics found.\"\n if page.get(\"url\"):\n em.add_field(\n name=\"Source\",\n value=f\"[{page.get('source') or 'Source'}]({page['url']})\",\n )\n return em", "def embed_matplotlib(self):", "def litho_star(num_lines = 20,\n line_width = 2,\n diameter = 200,\n layer = 0):\n D = Device('litho_star')\n\n degree = 180 / num_lines\n R1 = rectangle(size = (line_width, diameter), layer = layer)\n for i in range(num_lines):\n r1 = D.add_ref(R1).rotate(degree * i)\n r1.center = (0,0)\n\n return(D)", "def _render(self):\n self._renderer.render_menu()\n pg.display.update()", "def graphing2():\n return render_template('graph2.html')", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def show(data, types=(\"inflated\", ), recache=False, cmap='RdBu_r', layout=None,\n autoclose=None, open_browser=None, port=None, pickerfun=None,\n template=\"mixer.html\", overlays_available=None,\n overlays_visible=('rois', 'sulci'), labels_visible=('rois', ),\n overlay_file=None, title='Brain', **kwargs):\n\n # populate default webshow args\n if autoclose is None:\n autoclose = options.config.get('webshow', 
'autoclose', fallback='true') == 'true'\n if open_browser is None:\n open_browser = options.config.get('webshow', 'open_browser', fallback='true') == 'true'\n\n data = dataset.normalize(data)\n if not isinstance(data, dataset.Dataset):\n data = dataset.Dataset(data=data)\n\n html = FallbackLoader([os.path.split(os.path.abspath(template))[0], serve.cwd]).load(template)\n db.auxfile = data\n\n #Extract the list of stimuli, for special-casing\n stims = dict()\n for name, view in data:\n if 'stim' in view.attrs and os.path.exists(view.attrs['stim']):\n sname = os.path.split(view.attrs['stim'])[1]\n stims[sname] = view.attrs['stim']\n\n package = Package(data)\n metadata = json.dumps(package.metadata())\n images = package.images\n subjects = list(package.subjects)\n\n ctmargs = dict(method='mg2', level=9, recache=recache,\n external_svg=overlay_file, overlays_available=overlays_available)\n ctms = dict((subj, utils.get_ctmpack(subj, types, **ctmargs))\n for subj in subjects)\n package.reorder(ctms)\n\n subjectjs = json.dumps(dict((subj, \"ctm/%s/\"%subj) for subj in subjects))\n db.auxfile = None\n\n if layout is None:\n layout = [None, (1, 1), (2, 1), (3, 1), (2, 2), (3, 2), (3, 2), (3, 3), (3, 3), (3, 3)][len(subjects)]\n\n linear = lambda x, y, m: (1.-m)*x + m*y\n mixes = dict(\n linear=linear,\n smoothstep=(lambda x, y, m: linear(x, y, 3*m**2 - 2*m**3)),\n smootherstep=(lambda x, y, m: linear(x, y, 6*m**5 - 15*m**4 + 10*m**3))\n )\n\n post_name = Queue()\n\n # Put together all view options\n my_viewopts = dict(options.config.items('webgl_viewopts'))\n my_viewopts['overlays_visible'] = overlays_visible\n my_viewopts['labels_visible'] = labels_visible\n my_viewopts['brightness'] = options.config.get('curvature', 'brightness')\n my_viewopts['smoothness'] = options.config.get('curvature', 'webgl_smooth')\n my_viewopts['contrast'] = options.config.get('curvature', 'contrast')\n\n for sec in options.config.sections():\n if 'paths' in sec or 'labels' in sec:\n my_viewopts[sec] = dict(options.config.items(sec))\n\n if pickerfun is None:\n pickerfun = lambda a, b: None\n\n class CTMHandler(web.RequestHandler):\n def get(self, path):\n subj, path = path.split('/')\n if path == '':\n self.set_header(\"Content-Type\", \"application/json\")\n self.write(open(ctms[subj]).read())\n else:\n fpath = os.path.split(ctms[subj])[0]\n mtype = mimetypes.guess_type(os.path.join(fpath, path))[0]\n if mtype is None:\n mtype = \"application/octet-stream\"\n self.set_header(\"Content-Type\", mtype)\n self.write(open(os.path.join(fpath, path), 'rb').read())\n\n class DataHandler(web.RequestHandler):\n def get(self, path):\n path = path.strip(\"/\")\n try:\n dataname, frame = path.split('/')\n except ValueError:\n dataname = path\n frame = 0\n\n if dataname in images:\n dataimg = images[dataname][int(frame)]\n if dataimg[1:6] == \"NUMPY\":\n self.set_header(\"Content-Type\", \"application/octet-stream\")\n else:\n self.set_header(\"Content-Type\", \"image/png\")\n\n if 'Range' in self.request.headers:\n self.set_status(206)\n rangestr = self.request.headers['Range'].split('=')[1]\n start, end = [ int(i) if len(i) > 0 else None for i in rangestr.split('-') ]\n\n clenheader = 'bytes %s-%s/%s' % (start, end or len(dataimg), len(dataimg) )\n self.set_header('Content-Range', clenheader)\n self.set_header('Content-Length', end-start+1)\n self.write(dataimg[start:end+1])\n else:\n self.write(dataimg)\n else:\n self.set_status(404)\n self.write_error(404)\n\n class StimHandler(web.StaticFileHandler):\n def initialize(self):\n 
pass\n\n def get(self, path):\n if path not in stims:\n self.set_status(404)\n self.write_error(404)\n else:\n self.root, fname = os.path.split(stims[path])\n super(StimHandler, self).get(fname)\n\n class StaticHandler(web.StaticFileHandler):\n def initialize(self):\n self.root = ''\n\n class MixerHandler(web.RequestHandler):\n def get(self):\n self.set_header(\"Content-Type\", \"text/html\")\n generated = html.generate(data=metadata,\n colormaps=colormaps,\n default_cmap=cmap,\n python_interface=True,\n leapmotion=True,\n layout=layout,\n subjects=subjectjs,\n viewopts=json.dumps(my_viewopts),\n title=title,\n **kwargs)\n #overlays_visible=json.dumps(overlays_visible),\n #labels_visible=json.dumps(labels_visible),\n #**viewopts)\n self.write(generated)\n\n def post(self):\n data = self.get_argument(\"svg\", default=None)\n png = self.get_argument(\"png\", default=None)\n with open(post_name.get(), \"wb\") as svgfile:\n if png is not None:\n data = png[22:].strip()\n try:\n data = binascii.a2b_base64(data)\n except:\n print(\"Error writing image!\")\n data = png\n svgfile.write(data)\n\n class JSMixer(serve.JSProxy):\n @property\n def view_props(self):\n \"\"\"An enumerated list of settable properties for views. \n There may be a way to get this from the javascript object, \n but I (ML) don't know how.\n\n There may be additional properties we want to set in views\n and animations; those must be added here.\n\n Old property list that used to be settable before webgl refactor:\n view_props = ['altitude', 'azimuth', 'target', 'mix', 'radius', 'pivot',\n 'visL', 'visR', 'alpha', 'rotationR', 'rotationL', 'projection',\n 'volume_vis', 'frame', 'slices']\n \"\"\"\n camera = getattr(self.ui, \"camera\")\n _camera_props = ['camera.%s' % k for k in camera._controls.attrs.keys()]\n surface = getattr(self.ui, \"surface\")\n _subject = list(surface._folders.attrs.keys())[0]\n _surface = getattr(surface, _subject)\n _surface_props = ['surface.{subject}.%s'%k for k in _surface._controls.attrs.keys()]\n _curvature_props = ['surface.{subject}.curvature.brightness',\n 'surface.{subject}.curvature.contrast',\n 'surface.{subject}.curvature.smoothness']\n return _camera_props + _surface_props + _curvature_props\n\n def _set_view(self, **kwargs):\n \"\"\"Low-level command: sets view parameters in the current viewer\n\n Sets each the state of each keyword argument provided. View parameters\n that can be set include all parameters in the data.gui in the html view.\n\n \"\"\"\n # Set unfolding level first, as it interacts with other arguments\n surface = getattr(self.ui, \"surface\")\n subject_list = surface._folders.attrs.keys()\n # Better to only self.view_props once; it interacts with javascript, \n # don't want to do that too often, it leads to glitches.\n vw_props = copy.copy(self.view_props)\n for subject in subject_list:\n if 'surface.{subject}.unfold' in kwargs:\n unfold = kwargs.pop('surface.{subject}.unfold')\n self.ui.set('surface.{subject}.unfold'.format(subject=subject), unfold)\n for k, v in kwargs.items():\n if not k in vw_props:\n print('Unknown parameter %s!'%k)\n continue\n else:\n self.ui.set(k.format(subject=subject) if '{subject}' in k else k, v)\n # Wait for webgl. Wait for it. .... 
WAAAAAIIIT.\n time.sleep(0.03)\n\n def _capture_view(self, frame_time=None):\n \"\"\"Low-level command: returns a dict of current view parameters\n\n Retrieves the following view parameters from current viewer:\n\n altitude, azimuth, target, mix, radius, visL, visR, alpha,\n rotationR, rotationL, projection, pivot\n\n Parameters\n ----------\n frame_time : scalar\n time (in seconds) to specify for this frame.\n \n Notes\n -----\n If multiple subjects are present, only retrieves view for first subject.\n \"\"\"\n view = {}\n subject = list(self.ui.surface._folders.attrs.keys())[0]\n for p in self.view_props:\n try:\n view[p] = self.ui.get(p.format(subject=subject) if '{subject}' in p else p)[0]\n # Wait for webgl.\n time.sleep(0.03)\n except Exception as err:\n # TO DO: Fix this hack with an error class in serve.py & catch it here\n print(err) #msg = \"Cannot read property 'undefined'\"\n #if err.message[:len(msg)] != msg:\n # raise err\n if frame_time is not None:\n view['time'] = frame_time\n return view\n\n def save_view(self, subject, name, is_overwrite=False):\n \"\"\"Saves current view parameters to pycortex database\n\n Parameters\n ----------\n subject : string\n pycortex subject id\n name : string\n name for view to store\n is_overwrite: bool\n whether to overwrite an extant view (default : False)\n\n Notes\n -----\n Equivalent to call to cortex.db.save_view(subject, vw, name)\n For a list of the view parameters saved, see viewer._capture_view\n \"\"\"\n db.save_view(self, subject, name, is_overwrite)\n\n def get_view(self, subject, name):\n \"\"\"Get saved view from pycortex database.\n\n Retrieves named view from pycortex database and sets current\n viewer parameters to retrieved values.\n\n Parameters\n ----------\n subject : string\n pycortex subject ID\n name : string\n name of saved view to re-load\n\n Notes\n -----\n Equivalent to call to cortex.db.get_view(subject, vw, name)\n For a list of the view parameters set, see viewer._capture_view\n \"\"\"\n view = db.get_view(self, subject, name)\n\n def addData(self, **kwargs):\n Proxy = serve.JSProxy(self.send, \"window.viewers.addData\")\n new_meta, new_ims = _convert_dataset(Dataset(**kwargs), path='/data/', fmt='%s_%d.png')\n metadata.update(new_meta)\n images.update(new_ims)\n return Proxy(metadata)\n\n def getImage(self, filename, size=(1920, 1080)):\n \"\"\"Saves currently displayed view to a .png image file\n\n Parameters\n ----------\n filename : string\n duh.\n size : tuple (x, y)\n size (in pixels) of image to save.\n \"\"\"\n post_name.put(filename)\n Proxy = serve.JSProxy(self.send, \"window.viewer.getImage\")\n return Proxy(size[0], size[1], \"mixer.html\")\n\n def makeMovie(self, animation, filename=\"brainmovie%07d.png\", offset=0,\n fps=30, size=(1920, 1080), interpolation=\"linear\"):\n \"\"\"Renders movie frames for animation of mesh movement\n\n Makes an animation (for example, a transition between inflated and\n flattened brain or a rotating brain) of a cortical surface. Takes a\n list of dictionaries (`animation`) as input, and uses the values in\n the dictionaries as keyframes for the animation.\n\n Mesh display parameters that can be animated include 'elevation',\n 'azimuth', 'mix', 'radius', 'target' (more?)\n\n\n Parameters\n ----------\n animation : list of dicts\n Each dict should have keys `idx`, `state`, and `value`.\n `idx` is the time (in seconds) at which you want to set `state` to `value`\n `state` is the parameter to animate (e.g. 
'altitude', 'azimuth')\n `value` is the value to set for `state`\n filename : string path name\n Must contain '%d' (or some variant thereof) to account for frame\n number, e.g. '/some/directory/brainmovie%07d.png'\n offset : int\n Frame number for first frame rendered. Useful for concatenating\n animations.\n fps : int\n Frame rate of resultant movie\n size : tuple (x, y)\n Size (in pixels) of resulting movie\n interpolation : {\"linear\", \"smoothstep\", \"smootherstep\"}\n Interpolation method for values between keyframes.\n\n Example\n -------\n # Called after a call of the form: js_handle = cortex.webgl.show(DataViewObject)\n # Start with left hemisphere view\n js_handle._setView(azimuth=[90], altitude=[90.5], mix=[0])\n # Initialize list\n animation = []\n # Append 5 key frames for a simple rotation\n for az, idx in zip([90, 180, 270, 360, 450], [0, .5, 1.0, 1.5, 2.0]):\n animation.append({'state':'azimuth', 'idx':idx, 'value':[az]})\n # Animate! (use default settings)\n js_handle.makeMovie(animation)\n \"\"\"\n # build up two variables: State and Anim.\n # state is a dict of all values being modified at any time\n state = dict()\n # anim is a list of transitions between keyframes\n anim = []\n setfunc = self.ui.set\n for f in sorted(animation, key=lambda x:x['idx']):\n if f['idx'] == 0:\n setfunc(f['state'], f['value'])\n state[f['state']] = dict(idx=f['idx'], val=f['value'])\n else:\n if f['state'] not in state:\n state[f['state']] = dict(idx=0, val=self.getState(f['state'])[0])\n start = dict(idx=state[f['state']]['idx'],\n state=f['state'],\n value=state[f['state']]['val'])\n end = dict(idx=f['idx'], state=f['state'], value=f['value'])\n state[f['state']]['idx'] = f['idx']\n state[f['state']]['val'] = f['value']\n if start['value'] != end['value']:\n anim.append((start, end))\n\n for i, sec in enumerate(np.arange(0, anim[-1][1]['idx']+1./fps, 1./fps)):\n for start, end in anim:\n if start['idx'] < sec <= end['idx']:\n idx = (sec - start['idx']) / float(end['idx'] - start['idx'])\n if start['state'] == 'frame':\n func = mixes['linear']\n else:\n func = mixes[interpolation]\n\n val = func(np.array(start['value']), np.array(end['value']), idx)\n if isinstance(val, np.ndarray):\n setfunc(start['state'], val.ravel().tolist())\n else:\n setfunc(start['state'], val)\n self.getImage(filename%(i+offset), size=size)\n\n def _get_anim_seq(self, keyframes, fps=30, interpolation='linear'):\n \"\"\"Convert a list of keyframes to a list of EVERY frame in an animation.\n\n Utility function called by make_movie; separated out so that individual\n frames of an animation can be re-rendered, or for more control over the\n animation process in general.\n\n \"\"\"\n # Misc. 
setup\n fr = 0\n a = np.array\n func = mixes[interpolation]\n #skip_props = ['surface.{subject}.right', 'surface.{subject}.left', ] #'projection',\n # Get keyframes\n keyframes = sorted(keyframes, key=lambda x:x['time'])\n # Normalize all time to frame rate\n fs = 1./fps\n for k in range(len(keyframes)):\n t = keyframes[k]['time']\n t = np.round(t/fs)*fs\n keyframes[k]['time'] = t\n allframes = []\n for start, end in zip(keyframes[:-1], keyframes[1:]):\n t0 = start['time']\n t1 = end['time']\n tdif = float(t1-t0)\n # Check whether to continue frame sequence to endpoint\n use_endpoint = keyframes[-1]==end\n nvalues = np.round(tdif/fs).astype(int)\n if use_endpoint:\n nvalues += 1\n fr_time = np.linspace(0, 1, nvalues, endpoint=use_endpoint)\n # Interpolate between values\n for t in fr_time:\n frame = {}\n for prop in start.keys():\n if prop=='time':\n continue\n if (start[prop] is None) or (start[prop] == end[prop]) or isinstance(start[prop], (bool, str)):\n frame[prop] = start[prop]\n continue\n val = func(a(start[prop]), a(end[prop]), t)\n if isinstance(val, np.ndarray):\n frame[prop] = val.tolist()\n else:\n frame[prop] = val\n allframes.append(frame)\n return allframes\n\n def make_movie_views(self, animation, filename=\"brainmovie%07d.png\", \n offset=0, fps=30, size=(1920, 1080), alpha=1, frame_sleep=0.05,\n frame_start=0, interpolation=\"linear\"):\n \"\"\"Renders movie frames for animation of mesh movement\n\n Makes an animation (for example, a transition between inflated and\n flattened brain or a rotating brain) of a cortical surface. Takes a\n list of dictionaries (`animation`) as input, and uses the values in\n the dictionaries as keyframes for the animation.\n\n Mesh display parameters that can be animated include 'elevation',\n 'azimuth', 'mix', 'radius', 'target' (more?)\n\n\n Parameters\n ----------\n animation : list of dicts\n This is a list of keyframes for the animation. Each keyframe should be\n a dict in the form captured by the ._capture_view method. NOTE: every\n view must include all view parameters. Additionally, there should be\n one extra key/value pair for \"time\". The value for time should be\n in seconds. The list of keyframes is sorted by time before applying,\n so they need not be in order in the input.\n filename : string path name\n Must contain '%d' (or some variant thereof) to account for frame\n number, e.g. '/some/directory/brainmovie%07d.png'\n offset : int\n Frame number for first frame rendered. Useful for concatenating\n animations.\n fps : int\n Frame rate of resultant movie\n size : tuple (x, y)\n Size (in pixels) of resulting movie\n interpolation : {\"linear\", \"smoothstep\", \"smootherstep\"}\n Interpolation method for values between keyframes.\n\n Notes\n -----\n Make sure that all values that will be modified over the course\n of the animation are initialized (have some starting value) in the first\n frame.\n\n Example\n -------\n # Called after a call of the form: js_handle = cortex.webgl.show(DataViewObject)\n # Start with left hemisphere view\n js_handle._setView(azimuth=[90], altitude=[90.5], mix=[0])\n # Initialize list\n animation = []\n # Append 5 key frames for a simple rotation\n for az, t in zip([90, 180, 270, 360, 450], [0, .5, 1.0, 1.5, 2.0]):\n animation.append({'time':t, 'azimuth':[az]})\n # Animate! 
(use default settings)\n js_handle.make_movie(animation)\n \"\"\"\n allframes = self._get_anim_seq(animation, fps, interpolation)\n for fr, frame in enumerate(allframes[frame_start:], frame_start):\n self._set_view(**frame)\n time.sleep(frame_sleep)\n self.getImage(filename%(fr+offset+1), size=size)\n time.sleep(frame_sleep)\n\n class PickerHandler(web.RequestHandler):\n def get(self):\n pickerfun(int(self.get_argument(\"voxel\")), int(self.get_argument(\"vertex\")))\n\n class WebApp(serve.WebApp):\n disconnect_on_close = autoclose\n def get_client(self):\n self.connect.wait()\n self.connect.clear()\n return JSMixer(self.send, \"window.viewer\")\n\n def get_local_client(self):\n return JSMixer(self.srvsend, \"window.viewer\")\n\n if port is None:\n port = random.randint(1024, 65536)\n\n server = WebApp([(r'/ctm/(.*)', CTMHandler),\n (r'/data/(.*)', DataHandler),\n (r'/stim/(.*)', StimHandler),\n (r'/mixer.html', MixerHandler),\n (r'/picker', PickerHandler),\n (r'/', MixerHandler),\n (r'/static/(.*)', StaticHandler)],\n port)\n\n server.start()\n print(\"Started server on port %d\"%server.port)\n url = \"http://%s%s:%d/mixer.html\"%(serve.hostname, domain_name, server.port)\n if open_browser:\n webbrowser.open(url)\n client = server.get_client()\n client.server = server\n return client\n else:\n try:\n from IPython.display import display, HTML\n display(HTML('Open viewer: <a href=\"{0}\" target=\"_blank\">{0}</a>'.format(url)))\n except:\n pass\n return server", "def test_custom_decorator_displaytex_ok(self):\n\n tex = \"\\left.\\frac{x^3}{3}\\right|_0^1\" # noqa: W605\n\n self.assertEqual(\n DOM.render(\n DOM.create_element(\n ashley_render_children,\n {\n \"block\": {\n \"key\": \"a215p\",\n \"text\": \"\",\n \"type\": \"atomic\",\n \"data\": {\"tex\": tex, \"type\": \"TEXBLOCK\"},\n }\n },\n )\n ),\n f'<span class=\"ashley-latex-display\">{tex}</span>',\n )", "def render(self, n: int):\n if int(n) == 1:\n self.layout.children = [self.figures[0]]\n elif int(n) == 2:\n self.layout.children = [self.figures[0], self.figures[1]]\n elif int(n) == 3:\n self.layout.children = [\n self.figures[0],\n self.figures[1],\n self.figures[2],\n ]", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def bar_to_lilypond_duration(durations):\n size = len(durations)\n durations = [d for d in durations if d is not None]\n # Convert to lilypond note durations\n def is_pow2(n):\n return ((n & (n-1)) == 0)\n def compute_lp_duration(d,s):\n # If it's a power of 2\n if is_pow2(d):\n return str(int(size/d))\n # If it's a multiple of 3\n if d%3 == 0:\n if is_pow2(int(d/3)):\n return str(int(size/(d/3*2)))+\".\"\n # Otherwise, it's a tied note. 
Split into factors.\n # Test all possible splittings\n for i in range(1,int(d/2)+1):\n d1 = compute_lp_duration(d-i,s)\n d2 = compute_lp_duration(i,s)\n if d1 is None or d2 is None:\n continue\n if type(d1) is not list:\n d1 = [d1]\n if type(d2) is not list:\n d2 = [d2]\n return d1+d2\n return None\n lp_durations = [compute_lp_duration(d,size) for d in durations]\n return lp_durations", "def bullet_list(self, on, **kw):\n tag = 'ul'\n if on:\n tagstr = self._open(tag, newline=1, **kw)\n else:\n tagstr = self._close(tag, newline=1)\n return tagstr", "def markov_story():\n return render_template(\"markovstory.html\")", "def __init__(self, camera=None, light=None, font=None, string=None,\r\n x=0.0, y=0.0, z=1.0,\r\n sx=DEFAULT_FONT_SCALE, sy=DEFAULT_FONT_SCALE,\r\n is_3d=True, size=DEFAULT_FONT_SIZE,\r\n rx=0.0, ry=0.0, rz=0.0, justify=\"C\"):\r\n if not is_3d:\r\n sy = sx = size * 4.0\r\n super(String, self).__init__(camera, light, \"\", x, y, z,\r\n rx, ry, rz, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0)\r\n\r\n if VERBOSE:\r\n print(\"Creating string ...\")\r\n\r\n self.verts = []\r\n self.texcoords = []\r\n self.norms = []\r\n self.inds = []\r\n temp_verts = []\r\n\r\n xoff = 0.0\r\n yoff = 0.0\r\n lines = 0\r\n if not isinstance(string, text_type):\r\n string = string.decode('utf-8')\r\n nlines = string.count(\"\\n\") + 1\r\n\r\n def make_verts(): #local function to justify each line\r\n if justify.upper() == \"C\":\r\n cx = xoff / 2.0\r\n elif justify.upper() == \"L\":\r\n cx = 0.0\r\n else:\r\n cx = xoff\r\n for j in temp_verts:\r\n self.verts.append([(j[0] - cx) * sx,\r\n (j[1] + nlines * font.height * GAP / 2.0 - yoff) * sy,\r\n j[2]])\r\n\r\n default = font.glyph_table.get(unichr(0), None)\r\n for i, c in enumerate(string):\r\n if c == '\\n':\r\n make_verts()\r\n yoff += font.height * GAP\r\n xoff = 0.0\r\n temp_verts = []\r\n lines += 1\r\n continue #don't attempt to draw this character!\r\n\r\n glyph = font.glyph_table.get(c, default)\r\n if not glyph:\r\n continue\r\n w, h, texc, verts = glyph\r\n for j in verts:\r\n temp_verts.append((j[0]+xoff, j[1], j[2]))\r\n xoff += w\r\n for j in texc:\r\n self.texcoords.append(j)\r\n self.norms.extend(_NORMALS)\r\n\r\n # Take Into account unprinted \\n characters\r\n stv = 4 * (i - lines)\r\n self.inds.extend([[stv, stv + 2, stv + 1], [stv, stv + 3, stv + 2]])\r\n\r\n make_verts()\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, self.verts, self.texcoords, self.inds, self.norms))\r\n self.buf[0].textures = [font]\r\n self.buf[0].unib[1] = -1.0", "def unrendered(self) -> str:", "def make_a_sound(): # document string\n print('quack')", "def dual_bullet():\n text = pygame.font.SysFont('arial', 10).render(\"x2\", True, (0, 0, 0))\n image = pygame.Surface((20, 20))\n image.fill(colour.SILVER)\n image.blit(text, (9, 6))\n pygame.draw.rect(image, colour.RED, (2, 4, 2, 12))\n pygame.draw.rect(image, colour.RED, (6, 4, 2, 12))\n return image", "def render(self, vertex_highlighting=False):\n pass", "def draw(self):\n rendered_string = \"\"\n\n # extract the wire labels as strings and get their maximum length\n wire_names = []\n padding = 0\n for i in range(self.full_representation_grid.num_wires):\n wire_name = str(self.active_wires.labels[i])\n padding = max(padding, len(wire_name))\n wire_names.append(wire_name)\n\n for i in range(self.full_representation_grid.num_wires):\n # format wire name nicely\n wire = self.full_representation_grid.wire(i)\n s = \" {:>\" + str(padding) + \"}: {}\"\n\n rendered_string += s.format(wire_names[i], 2 * 
self.charset.WIRE)\n\n for s in wire:\n rendered_string += s\n\n rendered_string += \"\\n\"\n\n for symbol, cache in [\n (\"U\", self.representation_resolver.unitary_matrix_cache),\n (\"H\", self.representation_resolver.hermitian_matrix_cache),\n (\"M\", self.representation_resolver.matrix_cache),\n ]:\n for idx, matrix in enumerate(cache):\n rendered_string += \"{}{} =\\n{}\\n\".format(symbol, idx, matrix)\n\n return rendered_string" ]
[ "0.62011886", "0.60662764", "0.5725134", "0.5563053", "0.5401194", "0.52068543", "0.51799726", "0.51762205", "0.5175516", "0.5059088", "0.4895712", "0.4883887", "0.48677018", "0.4829131", "0.47872004", "0.4744463", "0.47235727", "0.4710764", "0.47090602", "0.4685067", "0.46768898", "0.46594623", "0.46565187", "0.46457106", "0.46297425", "0.46215582", "0.46142733", "0.46125826", "0.45717314", "0.45708984", "0.45681348", "0.45562664", "0.45557863", "0.45492578", "0.45474437", "0.45463955", "0.45441613", "0.45385104", "0.45358238", "0.4522162", "0.45144373", "0.4509671", "0.45066777", "0.4496718", "0.4493671", "0.4489353", "0.4456605", "0.44555202", "0.44542745", "0.4433842", "0.44321206", "0.44311145", "0.44279268", "0.44236612", "0.4403171", "0.44018412", "0.43935972", "0.4381242", "0.4372178", "0.43615687", "0.43615687", "0.43586814", "0.4358441", "0.4356635", "0.43483162", "0.4344174", "0.43417203", "0.43384272", "0.43278837", "0.43258977", "0.43190396", "0.43141198", "0.43001202", "0.4296311", "0.42907694", "0.42898703", "0.42852262", "0.4284557", "0.428206", "0.42791796", "0.42789868", "0.42780876", "0.42780876", "0.42780876", "0.42780876", "0.42780876", "0.42780876", "0.42775333", "0.42745999", "0.42729855", "0.42726487", "0.42693782", "0.42686448", "0.42682552", "0.42670473", "0.42618662", "0.4250752", "0.42500603", "0.4246816", "0.42403805" ]
0.7580346
0
This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.
def plot_confusion_matrix(self, y_test, y_pred, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    target_names = ['Thông thường', 'Đầu cơ']
    cm = confusion_matrix(y_test, y_pred)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=0)
    plt.yticks(tick_marks, target_names)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        # print("Normalized confusion matrix")
    else:
        1
        # print('Confusion matrix, without normalization')
    # print(cm)
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
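A minimal usage sketch for the function above, assuming it is pasted into a plain Python script together with the imports it relies on; the label arrays below are invented for illustration, and None stands in for the unused self parameter.

import itertools

import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix

# Hypothetical toy labels for a binary task, matching the two target names above.
y_test = [0, 1, 1, 0, 1, 0, 1, 1]
y_pred = [0, 1, 0, 0, 1, 0, 1, 1]

# The stored function is declared as an instance method but never reads `self`,
# so None can be passed for that argument when it lives in a plain script.
plot_confusion_matrix(None, y_test, y_pred)  # cells show raw counts
plot_confusion_matrix(None, y_test, y_pred, normalize=True,
                      title='Normalized confusion matrix')  # cells show row-normalized rates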
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_confusion_matrix(cm, classes=[0,1], normalize=False, title='Confusion matrix', print_matrix=False):\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n if print_matrix:\n print(cm)", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',saveas='cm', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n\n plt.figure() \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n foo_fig = plt.gcf() # 'get current figure'\n# foo_fig.savefig('confusion_matrix.eps', format='eps', dpi=1000) \n foo_fig.savefig(saveas, dpi=1000, bbox_inches='tight')\n plt.show()", "def plot_confusion_matrix(cm, y_test, y_pred, class_names,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('\\n')\n print(\"Normalized confusion matrix\")\n else:\n print('\\n')\n print('Confusion matrix, without normalization')\n print_cm(cm, class_names)\n text_labels = [['True Negative', 'False Positive'],\n ['False Negative', 'True Positive']]\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i - 0.1, format(cm[i, j], fmt),\n verticalalignment='bottom',\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.text(j, i + 0.1, text_labels[i][j],\n verticalalignment='top',\n horizontalalignment=\"center\",\n fontsize=12,\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n # Print accuracy and precision\n print('Accuracy: ', accuracy_score(y_test, y_pred, normalize=True))\n print('Precision: ', precision_score(y_test, y_pred, average='macro'))\n print('Roc-Auc: ', roc_auc_score(y_test, y_pred))\n # Plot non-normalized confusion matrix", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n #cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n cm = cm.astype('float') / np.sum(cm.ravel())\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig=plt.figure\n plt.imshow(cm, interpolation='nearest', cmap=cmap )\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in 
itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n return fig", "def plot_confusion_matrix(\n y_true, y_pred, classes, normalize=True, title=\"Confusion matrix\", cmap=plt.cm.Blues\n):\n cm = confusion_matrix(y_true, y_pred)\n\n if normalize:\n cm = cm.astype(\"float\") / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(\"Confusion matrix, without normalization\")\n\n plt.imshow(cm, interpolation=\"nearest\", cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = \".2f\" if normalize else \"d\"\n thresh = cm.max() / 2.0\n for i, j in product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n )\n\n plt.tight_layout()\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n #print(\"Normalized confusion matrix\")\n #else:\n\n #print('Confusion matrix, without normalization')\n\n# print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n# plt.text(j, i, format(cm[i, j], fmt),\n# horizontalalignment=\"center\",\n# color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n cm = confusion_matrix(y_test,predictions)\n plt.figure()\n plot_confusion_matrix(cm, classes=[0,1,2], normalize=True,\n title='Confusion Matrix')", "def plot_confusion_matrix(y_test, y_pred, classes,\n normalize=True,\n title='Average accuracy \\n',\n cmap=plt.cm.Blues, verbose = 0, precision = 0):\n from sklearn.metrics import confusion_matrix\n import itertools\n \n cm = confusion_matrix(y_test, y_pred)\n accuracy = (np.sum(np.diag(cm)) / np.sum(cm)) * 100.0\n\n if normalize:\n cm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]) * 100.0\n if verbose == 1:\n print(\"Normalized confusion matrix\")\n else:\n if verbose 
== 1:\n print('Confusion matrix, without normalization')\n \n if verbose == 1:\n print(cm)\n\n plt.figure(figsize=(18, 9))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title.format_map({'acc':accuracy}), fontsize=25)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45, fontsize=20)\n plt.yticks(tick_marks, classes, fontsize=20)\n\n fmt = '{:.'+ '%d'%(precision) +'f} %' if normalize else '{:d}'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, fmt.format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\", fontsize=16)\n plt.tight_layout()\n plt.ylabel('True label', fontsize=20)\n plt.xlabel('Predicted label', fontsize=20)", "def plot_confusion_matrix(self, cm, classes, normalize, cmap=plt.cm.Blues, title='confusin Matrix'):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n tick_marks = np.arange(len(classes))\r\n\r\n self.subplt.set_xlabel(\"Predicted label\")\r\n self.subplt.set_ylabel(\"True Label\")\r\n self.subplt.set_title(\"Confusion Matrix\")\r\n self.subplt.set_xticks(tick_marks,classes)\r\n self.subplt.set_yticks(tick_marks,classes)\r\n\r\n self.canvas2.show()", "def showConfusionMatrix(self): \r\n sn.heatmap(self.conf_matrix, annot=True)\r\n plt.plot( label=\"Accuracy\")\r\n plt.plot( label=\"Error\")\r\n plt.figtext(0,0,'Accuracy: {}\\nError: {}\\nRecall: {}\\nPrecision: {}'.format(self.accuracy,\r\n self.error,\r\n self.recall,\r\n self.precision))\r\n plt.title('Confusion Matrix')\r\n plt.show()\r\n return None", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n \n \n plt.title(title)\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.colorbar()\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, 
np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, \"%.2f\" % cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment='center',\n color='white' if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n #based on http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n cmap=plt.cm.Blues\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n np.set_printoptions(precision=2)\n \n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, '%1.2f' % cm[i, j],\n horizontalalignment=\"center\",\n fontsize =12,\n color=\"white\" if cm[i, j] > thresh else \"black\")\n #plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes)) \n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 
2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.axis('auto')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n print('Confusion matrix')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(confusion_matrix, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(confusion_matrix, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n confusion_matrix = confusion_matrix.astype(\n 'float') / confusion_matrix.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(confusion_matrix)\n\n thresh = confusion_matrix.max() / 2.\n for i, j in itertools.product(range(confusion_matrix.shape[0]), range(confusion_matrix.shape[1])):\n plt.text(j, i, confusion_matrix[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if confusion_matrix[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n # print(cm)\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n confusion_matrix_dir = './confusion_matrix_plots'\n if not os.path.exists(confusion_matrix_dir):\n os.mkdir(confusion_matrix_dir)\n\n plt.cla()\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), 
range(cm.shape[1])):\n        plt.text(j, i, cm[i, j],\n                 horizontalalignment=\"center\",\n                 color=\"#BFD1D4\" if cm[i, j] > thresh else \"black\")\n\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n\n    if normalize:\n        plt.savefig(os.path.join(confusion_matrix_dir, 'normalized.jpg'))\n    else:\n        plt.savefig(os.path.join(confusion_matrix_dir, 'without_normalization.jpg'))", "def plot_confusion_matrix(cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):\n\n    plt.figure(figsize=(10,10))\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    plt.colorbar()\n    tick_marks = np.arange(len(classes))\n    plt.xticks(tick_marks, classes, rotation=45)\n    plt.yticks(tick_marks, classes)\n\n    if normalize:\n\n        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n        cm = np.around(cm, decimals=2)\n        cm[np.isnan(cm)] = 0.0\n        print(\"Normalized confusion matrix\")\n\n    else:\n        print('Confusion matrix, without normalization')\n\n    thresh = cm.max() / 2.\n\n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n        plt.text(j, i, cm[i, j],\n                 horizontalalignment=\"center\",\n                 color=\"white\" if cm[i, j] > thresh else \"black\")\n\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n    plt.show()", "def plot_confusion_matrix(cm, classes,\n                          normalize=False,\n                          title='Confusion matrix',\n                          cmap=plt.cm.Blues):\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    plt.colorbar()\n    tick_marks = np.arange(len(classes))\n    plt.xticks(tick_marks, classes, rotation=45)\n    plt.yticks(tick_marks, classes)\n\n    if normalize:\n        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n        print(\"Normalized confusion matrix\")\n    else:\n        print('Confusion matrix, without normalization')\n\n    thresh = cm.max() / 2.\n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n        t = \"(%.2f)\"%(cm[i, j])\n        #print t\n#        plt.text(j, i, t,\n#                 horizontalalignment=\"center\",\n#                 color=\"white\" if cm[i, j] > thresh else \"black\")\n\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n    plt.savefig('IOB-Confusion-Matrix-SVM.png')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n    if normalize:\n        # 1. find out how many samples per class have received their correct label\n        # i.e. the fraction of samples whose true class is k that were predicted as each class\n        # e.g. if 25 samples have true label 6 and 10 of them are predicted as class 7, the cell at true label = 6, predicted label = 7 equals 0.4\n        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n        # 2. get the precision (fraction of class-k predictions that have ground truth label k)\n        # i.e. compute the precision of the predictions\n        # e.g. 
if 12 samples are predicted as class k but only 9 of them truly belong to class k, the precision is 0.75\n        # cm = cm.astype('float') / cm.sum(axis=0)[:, np.newaxis]\n        \n        print(\"Normalized confusion matrix\")\n    else:\n        print('Confusion matrix, without normalization')\n\n    print(cm)\n\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    plt.colorbar()\n    tick_marks = np.arange(len(classes))\n    plt.xticks(tick_marks, classes, rotation=45)\n    plt.yticks(tick_marks, classes)\n\n    fmt = '.2f' if normalize else 'd'\n    thresh = cm.max() / 2.\n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n        plt.text(j, i, format(cm[i, j], fmt),\n                 horizontalalignment=\"center\",\n                 color=\"white\" if cm[i, j] > thresh else \"black\")\n\n    # plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n                          normalize=False,\n                          title='Confusion matrix',\n                          cmap=plt.cm.Blues):\n    if normalize:\n        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n        print(\"Normalized confusion matrix\")\n    else:\n        print('Confusion matrix, without normalization')\n\n    print(cm)\n\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    plt.colorbar()\n    # tick_marks = np.arange(len(classes))\n    # plt.xticks(tick_marks, classes, rotation=45)\n    # plt.yticks(tick_marks, classes)\n\n    # fmt = '.2f' if normalize else 'd'\n    # thresh = cm.max() / 2.\n    # for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n    #     plt.text(j, i, format(cm[i, j], fmt),\n    #              horizontalalignment=\"center\",\n    #              color=\"white\" if cm[i, j] > thresh else \"black\")\n\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n                          normalize=False,\n                          title='Confusion matrix',\n                          cmap=plt.cm.Blues):\n    if normalize:\n        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n        print(\"Normalized confusion matrix\")\n    else:\n        print('Confusion matrix, without normalization')\n    print(cm)\n    \n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    plt.colorbar()\n    tick_marks = np.arange(len(classes)) \n    plt.xticks(tick_marks, classes, rotation=45)\n    plt.yticks(tick_marks, classes)\n    \n    fmt = '.2f' if normalize else 'd'\n    thresh = cm.max() / 2.\n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n        plt.text(j, i, format(cm[i, j], fmt),\n                 horizontalalignment=\"center\",\n                 color=\"white\" if cm[i, j] > thresh else \"black\")\n    \n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n    plt.savefig('confusion_matrix.png')", "def plotConfusionMatrix(self, cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):\n    if normalize:\n        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n        print(\"Normalized confusion matrix\")\n    else:\n        print('Confusion matrix, without normalization')\n\n    print(cm)\n\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    plt.colorbar()\n    tick_marks = np.arange(len(classes))\n    plt.xticks(tick_marks, classes, rotation=45)\n    plt.yticks(tick_marks, classes)\n\n    fmt = '.2f' if normalize else 'd'\n    thresh = cm.max() / 2.\n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n        plt.text(j, i, format(cm[i, j], fmt),\n                 horizontalalignment=\"center\",\n                 color=\"white\" if cm[i, j] > thresh else \"black\")\n\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n    plt.tight_layout()\n    plt.savefig('confusion_matrix.png')", "def plot_confusion_matrix(cm, classes,\n                          normalize=False,\n                          title='Confusion matrix',\n                          cmap=plt.cm.Blues):\n    if 
normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion Matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion Matrix, without normalization')\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, weight='bold')\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if i == 0:\n plt.text(j-0.1, i+0.3, format(cm[i, j], fmt), color=\"white\" if cm[i, j] > thresh else \"black\")\n if i == 1:\n plt.text(j-0.1, i-0.2, format(cm[i, j], fmt), color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True Label', weight='bold')\n plt.xlabel('Predicted Label', weight='bold')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n #print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n #pdb.set_trace()\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def sam_plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n plots_dims = itertools.product(list(range(cm.shape[0])),\n list(range(cm.shape[1])))\n for i, j in plots_dims:\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion 
matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \n print(a)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=0.0, vmax=1.0)\n\n plt.title(title)\n\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.3f'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n # plt.title(title)\n # plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n # Tweak spacing to prevent clipping of tick-labels\n plt.subplots_adjust(bottom=0.2)", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n # print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.xlabel('Predicted label') \n plt.ylabel('True label') \n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, 
interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label') \n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Purples):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n # plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n # plt.grid('off')\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=True,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n# print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n return plt.gcf()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title + \"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(title + ' confusion matrix')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if 
normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=None):\n if normalize:\n # cm = cm.T\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n # cm = cm.T\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.figure(figsize=(4, 4))\n plt.imshow(cm, interpolation='nearest', cmap=cmap or plt.cm.Blues)\n plt.title(('Normalized ' if normalize else '') + title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(list(range(cm.shape[0])), list(range(cm.shape[1]))):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n #print(\"Normalized confusion matrix\")\n else:\n 1#print('Confusion matrix, without normalization')\n\n #print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, fontsize=14)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title,fontsize=20)\n# plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, fontsize=15)\n plt.yticks(tick_marks, classes,rotation=30,fontsize=15)\n\n fmt = '.2f'\n 
thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",fontsize=20)\n\n plt.tight_layout()\n plt.ylabel('True label',fontsize=20)\n plt.xlabel('Predicted label',fontsize=20)", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Confusion matrix\")\n else:\n print('Confusion matrix')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Greens):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, '%.02f'%cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else 
\"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"red\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n 
plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n 
print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n 
horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title.split('/')[-1])\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n if title:\n plt.savefig(title+'.png')\n\n plt.close()", "def plot_confusion_matrix(cm, classes=[],\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.figure()\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n #print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90)\n plt.yticks(tick_marks, classes)\n\n fmt = '.1f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n plt.savefig('Logistik.png')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='',\n cmap=plt.cm.Blues, file_name='cm_plot'):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\n plt.rcParams[\"font.size\"] = FONT_SIZE\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0)\n plt.yticks(tick_marks, classes)\n\n fmt = '.6f' if 
normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label', fontsize=FONT_SIZE)\n plt.xlabel('Predicted label', fontsize=FONT_SIZE)\n plt.subplots_adjust(bottom=0.13)\n with PdfPages(file_name) as pdf:\n pdf.savefig()\n plt.close()", "def plot_confusion_matrix(self):\r\n interp = ClassificationInterpretation.from_learner(self.learn)\r\n interp.plot_confusion_matrix()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n 
plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, 
classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), 
range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > 
thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes=None, normalize=False,\n title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n\n if classes:\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.show()", "def plot_confusion_matrix(cm,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n# print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, 
normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(cm)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(cm)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots(figsize=(8, 8))\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes)\n ax.set_title(title,size = 20)\n ax.set_ylabel('True label',size = 20)\n ax.set_xlabel('Predicted label',size = 20)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\",size = 18)\n plt.setp(ax.get_yticklabels(),size = 18)\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n name = OUTFOLDER + \"/confusion_matrix_batch%d_layers%d_epochs%d_f1%d\" % (BATCH_SIZE,LAYERS,EPOCHS,f1_mean_test*100)\n if normalize:\n name = name + \"_norm\"\n plt.savefig(name)\n plt.close()\n return ax", "def plot_confusion_matrix(cm, classes,\n\t\t\t\t\t\t normalize=False,\n\t\t\t\t\t\t title='Confusion matrix',\n\t\t\t\t\t\t cmap=plt.cm.Blues):\n\tif normalize:\n\t\tcm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\t\tprint(\"Normalized confusion matrix\")\n\telse:\n\t\tprint('Confusion matrix, without normalization')\n\n\tprint(cm)\n\n\tplt.imshow(cm, interpolation='nearest', cmap=cmap)\n\tplt.title(title)\n\tplt.colorbar()\n\ttick_marks = np.arange(len(classes))\n\tplt.xticks(tick_marks, classes, rotation=45)\n\tplt.yticks(tick_marks, classes)\n\n\tfmt = '.2f' if normalize else 'd'\n\tthresh = cm.max() / 2.\n\tfor i, j in product(range(cm.shape[0]), range(cm.shape[1])):\n\t\tplt.text(j, i, format(cm[i, j], fmt),\n\t\t\t\t horizontalalignment=\"center\",\n\t\t\t\t color=\"white\" if cm[i, j] > thresh else \"black\")\n\n\tplt.tight_layout()\n\tplt.ylabel('True label')\n\tplt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('../results/conf_matr.png')\n\n return cm", "def plot_confusion_matrix(cm, classes,\n\t\t\t\t\t\t\t normalize=False,\n\t\t\t\t\t\t\t title='Confusion matrix',\n\t\t\t\t\t\t\t cmap=plt.cm.Blues):\n\t\tif normalize:\n\t\t\tcm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\t\t\tprint(\"Normalized confusion matrix\")\n\t\telse:\n\t\t\tprint('Confusion matrix, without normalization')\n\n\t\tplt.imshow(cm, interpolation='nearest', cmap=cmap)\n\t\tplt.title(title)\n\t\tplt.colorbar()\n\t\ttick_marks = np.arange(len(classes))\n\t\tplt.xticks(tick_marks, classes, rotation=45)\n\t\tplt.yticks(tick_marks, classes)\n\n\t\tplt.tight_layout()\n\t\tplt.ylabel('True label')\n\t\tplt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / 
cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without 
normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n 
tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot(self):\n plt.imshow(self.cm, interpolation='nearest', cmap=self.cmap)\n plt.title(self.title)\n plt.colorbar()\n tick_marks = np.arange(len(self.classes))\n plt.xticks(tick_marks, self.classes, rotation=45)\n plt.yticks(tick_marks, self.classes)\n \n if self.normalize:\n self.cm = self.cm.astype('float') / self.cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(self.cm)\n \n thresh = self.cm.max() / 2.\n for i, j in itertools.product(range(self.cm.shape[0]), range(self.cm.shape[1])):\n plt.text(j, i, self.cm[i, j], horizontalalignment=\"center\", color=\"white\" if self.cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True Label')\n plt.xlabel('Predicted label')" ]
[ "0.8194862", "0.80949175", "0.8029915", "0.8019153", "0.79941195", "0.7991258", "0.7980955", "0.7976606", "0.79610753", "0.79590565", "0.79378676", "0.7934962", "0.7934504", "0.79313844", "0.7926313", "0.7924577", "0.79241234", "0.7923211", "0.7923023", "0.7921931", "0.7917871", "0.7916092", "0.79083747", "0.7907475", "0.79068965", "0.7904398", "0.7900711", "0.7900422", "0.7896704", "0.7894559", "0.7893862", "0.7891639", "0.78906786", "0.78895235", "0.7886698", "0.7884568", "0.78841054", "0.78773123", "0.78745896", "0.7869866", "0.7860299", "0.78572506", "0.7856715", "0.7853253", "0.7852508", "0.78493565", "0.78482205", "0.7847642", "0.7845746", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.7842821", "0.783704", "0.7836942", "0.7836734", "0.78358006", "0.78322923", "0.7831496", "0.78314656", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.7822435", "0.7822236", "0.7820784", "0.7820784", "0.7820304", "0.7817516", "0.78159386", "0.78157204", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7813965", "0.7813563" ]
0.0
-1
Get an AWS credential.
def get_aws_secret(role): global AWS_ID AWS_ID += 1 request.data return jsonify({ "request_id": f"a-request-id-{AWS_ID}", "lease_id": f"aws/creds/{role}/a-lease-id-{AWS_ID}", "renewable": True, "lease_duration": 3600, "data": { "access_key": "ASDF1234", "secret_key": "xljadslklk3mlkmlkmxklmx09j3990j", "security_token": None }, "wrap_info": None, "warnings": None, "auth": None })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aws_credentials(self) -> dict:\n response = self._session().get(self._cloud_access_url())\n if not response.ok:\n response.raise_for_status()\n cloud_access = response.json()\n creds = {\n 'aws_access_key_id': cloud_access['AccessKeyId'],\n 'aws_secret_access_key': cloud_access['SecretAccessKey'],\n 'aws_session_token': cloud_access['SessionToken'],\n }\n return creds", "def retrieve_credentials(credential_namespace, aws_region=None,\n aws_access_key_id=None, aws_secret_access_key=None):\n ssmname_to_key = _make_ssmname_to_key_map(credential_namespace)\n client = boto3.client('ssm', region_name=aws_region,\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key)\n response = client.get_parameters(Names=list(ssmname_to_key.keys()),\n WithDecryption=True)\n responsedict = _credentials_response_to_dict(ssmname_to_key, response)\n if not responsedict:\n raise LookupError('No credentials found for namespace:' + credential_namespace)\n return responsedict", "def get_credentials():\n store = Storage(CREDENTIAL_PATH)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, None)\n return credentials", "def get_credentials():\n return ServiceAccountCredentials.from_json_keyfile_dict(SERVICE_ACCOUNT, scopes = SCOPES)", "def get_appengine_credentials():\n return get_credentials()", "def get_credentials(self, oid=None):\n path = '/credentials'\n key = 'credentials'\n if oid is not None:\n path = '%s/%s' % (path, oid)\n key = 'credential'\n res = self.client.call(path, 'GET', data='', token=self.token)\n self.logger.debug('Get openstack credentials: %s' % truncate(res))\n try:\n return res[0][key]\n except:\n raise OpenstackError('No credentials found')", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['REGION'] = 'region'", "def get_credentials(self):\n home_dir = os.path.expanduser(\"~\")\n credential_dir = os.path.join(home_dir, \".credentials\")\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, \"autoto.json\")\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, self.auth_flags)\n print(\"Storing credentials to \" + credential_path)\n return credentials", "def get_credentials(self):\n return self.credentials", "def credential(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"credential\")", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'\n os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'", "def aws_credentials():\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"testing\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"testing\"\n os.environ[\"AWS_SESSION_TOKEN\"] = \"testing\"", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] 
= 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'", "def get_credentials():\n # Get the credential\n if os.path.exists(os.getenv(\"GCP_AUTOMATION_CONFIG\")):\n credential_location = os.getenv(\"GCP_AUTOMATION_CONFIG\")\n with open(credential_location) as f:\n credential_location = json.load(f)\n credential = credential_location['Config'][0]['Authentication']\n log.info(f\"Retrieved credentail location as {credential}\")\n else:\n raise ValueError(\"Error in get_credentials function when calling 'GCP_AUTOMATION_CONFIG'\")\n\n # Construct the credentials request\n try:\n # Turn provided string into a filepath\n credentials = service_account.Credentials.from_service_account_file(\n filename=credential,\n scopes=[\"https://www.googleapis.com/auth/cloud-platform\"],\n )\n log.info(\"Credentials object constructed from service account file\")\n return credentials\n except Exception as e:\n return e", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'\n os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'", "def get_credentials():\n credentials, _project_id = google.auth.default(scopes=SCOPES)\n\n # Credentials from the GCloud SDK, for example, do not implement Signing.\n assert isinstance(credentials, google.auth.credentials.Signing), \\\n \"Unsupported credential kind; credentials must implement Signing\"\n\n return credentials", "def aws_credentials():\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"testing\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"testing\"\n os.environ[\"AWS_SECURITY_TOKEN\"] = \"testing\"\n os.environ[\"AWS_SESSION_TOKEN\"] = \"testing\"", "def aws_credentials():\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"testing\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"testing\"\n os.environ[\"AWS_SECURITY_TOKEN\"] = \"testing\"\n os.environ[\"AWS_SESSION_TOKEN\"] = \"testing\"", "def _get_credentials(rse, endpoint):\n\n key = '%s_%s' % (rse, endpoint)\n result = REGION.get(key)\n if type(result) is NoValue:\n try:\n logging.debug(\"Loading account credentials\")\n result = config.get_rse_credentials(None)\n if result and rse in result:\n result = result[rse]\n result['is_secure'] = result['is_secure'][endpoint]\n REGION.set(key, result)\n else:\n raise Exception(\"Failed to load account credentials\")\n logging.debug(\"Loaded account credentials\")\n except KeyError as e:\n raise exception.CannotAuthenticate('RSE %s endpoint %s not in rse account cfg: %s' % (rse, endpoint, e))\n except:\n raise exception.RucioException(\"Failed to load credentials for RSE(%s) endpoint(%s), error: %s\" % (rse, endpoint, traceback.format_exc()))\n return result", "def credential(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"credential\")", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'credentialv_modify.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = 
client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n credential_dir = os.path.dirname(os.path.realpath(CLIENT_SECRET_FILE))\n credential_path = os.path.join(\n credential_dir, 'sheets.googleapis.com-endosys-events.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def _GetCredentials():\n return service_account.Credentials.from_service_account_file(\n KEY_FILE, scopes=_SCOPES)", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'drive-python-quickstart.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials(env=\"development\") -> dict:\n load_dotenv()\n credentials = {}\n\n credentials[\"AWS_ACCESS_KEY_ID\"] = os.getenv(\"DEV_AWS_ACCESS_KEY_ID\")\n credentials[\"AWS_SECRET_ACCESS_KEY\"] = os.getenv(\n \"DEV_AWS_SECRET_ACCESS_KEY\")\n credentials[\"AWS_REGION\"] = os.getenv(\"DEV_AWS_REGION\")\n\n if env == \"production\":\n credentials[\"AWS_ACCESS_KEY_ID\"] = os.getenv(\"PROD_AWS_ACCESS_KEY_ID\")\n credentials[\"AWS_SECRET_ACCESS_KEY\"] = os.getenv(\n \"PROD_AWS_SECRET_ACCESS_KEY\")\n credentials[\"AWS_REGION\"] = os.getenv(\"PROD_AWS_REGION\")\n\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'drive-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'drive-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n 
credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n store = Storage(CLIENT_CREDENTIALS_FILE)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + CLIENT_CREDENTIALS_FILE)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(resource_path(CLIENT_SECRET_FILE), SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def credential(self):\n return self._credential", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + 
credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'bis-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(config['client secret file'], SCOPES)\n flow.user_agent = APPLICATION_NAME\n if args:\n credentials = tools.run_flow(flow, store, args)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def aws_credentials() -> None:\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"testing\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"testing\"\n os.environ[\"AWS_SECURITY_TOKEN\"] = \"testing\"\n os.environ[\"AWS_SESSION_TOKEN\"] = \"testing\"", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = 
Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def read_aws_credentials(filename='.aws_credentials.json'):\n\n try:\n with open(filename) as json_data:\n credentials = json.load(json_data)\n\n for variable in ('access_key_id', 'secret_access_key', 'region'):\n if variable not in credentials.keys():\n msg = '\"{}\" cannot be found in {}'.format(variable, filename)\n raise KeyError(msg)\n \n except FileNotFoundError:\n try:\n credentials = {\n 'access_key_id': os.environ['AWS_ACCESS_KEY_ID'],\n 'secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY'],\n 'region': os.environ['AWS_REGION']\n }\n except KeyError:\n msg = 'no AWS credentials found in file or environment variables'\n raise RuntimeError(msg)\n\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(\n 
credential_dir, 'sheets.googleapis.com-python-quickstart.json'\n )\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'reseller-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def GetCredentials(self):\n return self._session.get(_CREDENTIAL_KEY, credentials.MapdCredentials())", "def get_client(access_key, secret_key, region='eu-west-1', service='ec2'):\n return boto3.client(\n service,\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n region_name=region\n )", "def credential(self):\n return self._tower.get_credential_by_id(self._data.get('credential'))", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'thejam_calendar.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def credentials(self) -> pulumi.Output[Optional['outputs.CredentialsResponse']]:\n return pulumi.get(self, \"credentials\")", "def get_credentials():\n home_dir = os.path.expanduser(os.getcwd())\n credential_dir = os.path.join(home_dir, '.credentials')\n print(credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials() -> client.Credentials:\n\n credential_path = os.path.join(HOME_DIR, \"google-credentials.json\")\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = 
client.flow_from_clientsecrets(os.path.join(HOME_DIR, CLIENT_SECRET_FILE), SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n # This attempts to open an authorization page in the default web browser, and asks the user\n # to grant the bot access to their data. If the user grants permission, the run_flow()\n # function returns new credentials.\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print(\"Storing credentials to \" + credential_path)", "def get_credentials(self):\n if getattr(self, 'credentials', None):\n return self.credentials\n\n scopes = settings.SCOPES\n client_secret_file = settings.CLIENT_SECRET_FILE\n application_name = 'Google Sheets API Python Quickstart'\n\n home_dir = os.path.expanduser(settings.CREDENTIALS_DIRECTORY)\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(client_secret_file, scopes)\n flow.user_agent = application_name\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n # print('Storing credentials to ' + credential_path)\n return credentials", "def find_credential(account):\n return Credentials.find_by_username(account)", "def get_credential(credential_id):\n session = db.get_session()\n try:\n return (session.query(network_models_v2.Credential).\n filter_by(credential_id=credential_id).one())\n except exc.NoResultFound:\n raise c_exc.CredentialNotFound(credential_id=credential_id)", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'google-photos-stats.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, flags)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credential(self, key):\n return self.creds.get(key, '')", "def credentials(self) -> Optional[pulumi.Input['CredentialsArgs']]:\n return pulumi.get(self, \"credentials\")", "def get_credentials():\n credential_dir = os.getcwd()\n credential_path = os.path.join(credential_dir,\n 'smarking_error_check.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.logSheets.json')\n\n store = 
Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n print('Storing credentials to ' + credential_path)\n return credentials", "def _request_sts_credentials(billing_account_id, options):\n try:\n\n # Setup Session\n session = boto3.session.Session()\n region_name = session.region_name\n partition = _get_partition(region_name)\n sts_client = session.client(\"sts\")\n\n role_name = options[\"--role-name\"]\n role_arn = f\"arn:{partition}:iam::{billing_account_id}:role/{role_name}\"\n response = sts_client.assume_role(\n RoleArn=role_arn,\n RoleSessionName=options[\"--session-name\"],\n DurationSeconds=int(options[\"--session-ttl\"]),\n )\n return response[\"Credentials\"]\n except ClientError as client_error:\n LOGGER.error(\"Failed to assume into role\")\n LOGGER.exception(client_error)\n raise", "def get_credentials():\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'appsactivity-python-showtime.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n print('Storing credentials to ' + credential_path)\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'calendar-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sally.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = 
oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not 
os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_storage_account_credential_output(credential_name: Optional[pulumi.Input[str]] = None,\n manager_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetStorageAccountCredentialResult]:\n ...", "def get_secret():\n\n secret_name = \"Jido-Active-Directory-Service-Account\"\n\n # Create a Secrets Manager client\n session = boto3.session.Session()\n client = session.client(\n service_name='secretsmanager',\n region_name= os.environ.get(\"AWS_DEFAULT_REGION\")\n )\n try:\n get_secret_value_response = client.get_secret_value(\n SecretId= secret_name\n )\n except ClientError as e:\n print(\"Error getting secret key!: \" + str(e))\n return None\n else:\n # Decrypts secret using the associated KMS CMK.\n if 'SecretString' in get_secret_value_response:\n return get_secret_value_response['SecretString']\n\n return None", "def get_client(self, service):\n if self.cfg.iam == \"\":\n return boto3.client(service, region_name=self.cfg.region)\n\n if self.cfg.credentials == {}:\n logger.info(\"assume Role: {}\".format(self.cfg.iam))\n sts_client = boto3.client(\"sts\")\n self.cfg.credentials = sts_client.assume_role(\n RoleArn=self.cfg.iam, RoleSessionName=\"ssm-run\")[\"Credentials\"]\n\n return boto3.client(\n service,\n region_name=self.cfg.region,\n aws_access_key_id=self.cfg.credentials[\"AccessKeyId\"],\n aws_secret_access_key=self.cfg.credentials[\"SecretAccessKey\"],\n aws_session_token=self.cfg.credentials[\"SessionToken\"])", "def find_aws_credentials(profile):\n if not profile:\n access_key = None\n secret_key = None\n region = None\n token = \"\"\n credentials = botocore.session.get_session().get_credentials()\n if credentials:\n access_key = credentials.access_key\n secret_key = credentials.secret_key\n region = credentials.region\n token = getattr(credentials, \"token\") or \"\"\n if not access_key or not secret_key:\n raise RuntimeError(\"No Default AWS profile set\")\n\n ret = {\n \"aws_access_key_id\": access_key,\n \"aws_secret_access_key\": secret_key,\n \"aws_session_token\": token,\n }\n # only add the region 
if it is defined\n if region:\n ret[\"region\"] = region\n\n return ret\n else:\n\n folder = os.path.join(os.path.expanduser(\"~\"), \".aws\")\n filename = os.path.join(folder, \"credentials\")\n cfg = configparser.ConfigParser()\n with open(filename) as fp:\n cfg.read_file(fp)\n ret = {}\n if profile not in cfg:\n raise RuntimeError(\n \"No AWS profile '%s' found in %s\" % (profile, filename)\n )\n for key in cfg[profile]:\n ret[key] = cfg[profile][key]\n return ret", "def credential_get(uniqueID: str):\n\n cert = safeisland.certificate(uniqueID)\n return {\"payload\": cert}", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'clockwise.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n credential_dir = os.path.realpath('.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path) # stores the users credentials --> TODO: put in database\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n\n credentials = tools.run_flow(flow, store, flags)\n\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self):\n try:\n with open(self.credentials_file, 'r') as fh_credentials:\n credentials_dict = json.loads(fh_credentials.read())\n return credentials_dict\n except IOError:\n self.reset_credentials()\n with open(self.credentials_file, 'r') as fh_credentials:\n return json.loads(fh_credentials.read())", "def GetAccountNameAndPassword(credential,\n credentials_path=DEFAULT_CREDENTIAL_PATH):\n if (credentials_path == DEFAULT_CREDENTIAL_PATH and not\n os.path.exists(DEFAULT_CREDENTIAL_PATH)):\n cloud_storage.GetIfChanged(\n DEFAULT_CREDENTIAL_PATH, DEFAULT_CREDENTIAL_BUCKET)\n\n with open(credentials_path, 'r') as f:\n credentials = json.load(f)\n c = credentials.get(credential)\n return c['username'], c['password']", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'grader.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, tools.argparser.parse_args(args=[]))\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self, **kwargs):\n creds_file = os.path.join(kwargs['user_dir'], 'credentials.json')\n\n # Getting credentials from Storage\n store = file.Storage(creds_file)\n creds = store.get()\n\n # 
Validating or refreshing credentials, if necessary\n if creds is None or creds.invalid:\n flow = client.flow_from_clientsecrets(self.client_secret_file,\n self.scopes)\n creds = tools.run_flow(flow, store)\n elif creds.access_token_expired:\n creds.refresh(httplib2.Http())\n else:\n pass\n\n return creds", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'admin-directory_v1-NestedGroupSync.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print 'Storing credentials to' + credential_path\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'fb-drive.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self):\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir, self.CRED_FILENAME)\r\n \r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def find_credential(account):\n return Credentials.find_credential(account)", "def get_credentials(client_secrets='client_secrets.json',\n scope_='https://www.googleapis.com/auth/drive',\n redirect_uri_='http://localhost:8080'):\n flow = client.flow_from_clientsecrets(client_secrets,\n scope=scope_,\n redirect_uri=redirect_uri_)\n credentials = tools.run_flow(flow, Store(), None)\n return credentials", "def getAwsKeypair(directory=None):\n if directory is None:\n directory = './'\n with open(directory + 'access.key', 'r+') as fp:\n access_key = fp.read()\n with open(directory + 'secret.key', 'r+') as fp:\n secret_key = fp.read()\n return (access_key, secret_key)", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not 
credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def _get_credential(self):\n creds = None\n\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = 
InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', self.config['SCOPES'])\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n self.service = build('drive', 'v3', credentials=creds)", "def get_credentials(servise: str) -> google.oauth2.credentials.Credentials:\n\n # SQL query to get the credentials for the current user from servise credentials table\n query = f\"\"\"\n SELECT token, token_uri, client_id, refresh_token, client_secret, scopes\n FROM {servise}_credentials\n WHERE user_id=?;\n \"\"\"\n\n # Get the credentials\n with connect(DATABASE) as db:\n credentials = db.execute(query, (session[\"user_id\"],)).fetchone()\n\n # Return None if it doesn't exist it the database\n if not credentials: return None\n\n # Transfer the credentials to a dictionary\n credentials_dict = {\n \"token\": credentials[0],\n \"token_uri\": credentials[1],\n \"client_id\": credentials[2],\n \"refresh_token\": credentials[3],\n \"client_secret\": credentials[4],\n \"scopes\": None if credentials[5] is None else credentials[5].split(\" \")\n }\n\n # Return a google Credentials object\n return google.oauth2.credentials.Credentials(**credentials_dict)", "def get_credentials():\n #home_dir = os.path.expanduser('~')\n home_dir = (HOME_DIR)\n credential_dir = os.path.join(home_dir, '.credentials')\n print(\"Credentials folder: \",credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def createaws() -> my_aws_api_library.MyAws:\r\n aws_cred_file_path = os.environ['AWS_CRED_FILE']\r\n comp_pubkey = os.environ['COMPANY_PUBKEY']\r\n my_aws = my_aws_api_library.MyAws(aws_cred_file_path, comp_pubkey)\r\n return my_aws", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'gmail-python-spam-filter.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials" ]
[ "0.696194", "0.6921474", "0.6762357", "0.6643356", "0.65502733", "0.650131", "0.6463692", "0.6458278", "0.64089775", "0.6393392", "0.6385138", "0.6384381", "0.63843215", "0.63843215", "0.63843215", "0.63800085", "0.636566", "0.63641655", "0.635698", "0.635698", "0.63561594", "0.6344541", "0.6325649", "0.6311994", "0.6308211", "0.6293804", "0.62622446", "0.6257438", "0.6257438", "0.625509", "0.62350065", "0.6229288", "0.62257236", "0.6218102", "0.6218102", "0.62164015", "0.62151337", "0.62149704", "0.6212991", "0.62004197", "0.62004197", "0.62004197", "0.62004197", "0.6198613", "0.6194289", "0.61884093", "0.61817646", "0.6178639", "0.617491", "0.6162791", "0.61565757", "0.6151678", "0.6150703", "0.614352", "0.61352175", "0.61207867", "0.60999763", "0.60991704", "0.6075154", "0.60659885", "0.6063711", "0.60429937", "0.60358477", "0.60342765", "0.60272175", "0.6025302", "0.6023661", "0.6023661", "0.6023661", "0.6023661", "0.6023661", "0.6013428", "0.6013428", "0.6012371", "0.5999484", "0.5993495", "0.5968403", "0.59521633", "0.59511906", "0.5950209", "0.5945191", "0.5938202", "0.59352106", "0.592888", "0.59274495", "0.59254664", "0.592495", "0.59124964", "0.59105647", "0.5908752", "0.58918303", "0.58918303", "0.58918303", "0.58918303", "0.5891011", "0.5889224", "0.5881913", "0.5870577", "0.58704644", "0.5862026" ]
0.6363484
18
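The credential-loading snippets above all follow the same oauth2client pattern: look for a cached credential file under ~/.credentials and run the interactive OAuth flow only when the cache is missing or invalid. A minimal sketch of that pattern, assuming the legacy oauth2client package and an illustrative client_secret.json file and scope (both are placeholders, not taken from the records above):

import os
from oauth2client import client, tools
from oauth2client.file import Storage

SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'  # assumed scope

def get_credentials():
    # Cache credentials under ~/.credentials so the consent flow runs only once.
    credential_dir = os.path.join(os.path.expanduser('~'), '.credentials')
    os.makedirs(credential_dir, exist_ok=True)
    credential_path = os.path.join(credential_dir, 'example-credentials.json')

    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
        flow.user_agent = 'Example App'
        credentials = tools.run_flow(flow, store)  # opens a browser for user consent
    return credentials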
Look up an auth token.
def look_up_a_token(): try: data = request.get_json(force=True) except Exception: data = None if data: tok = data['token'] else: tok = request.headers.get('TOK_ID') request.data try: creation_time = int(round(datetime.timestamp(tokens[tok]), 0)) issue_time = tokens[tok].isoformat() except Exception: _now = datetime.now(UTC) creation_time = int(round(datetime.timestamp(_now))) issue_time = _now.isoformat() tokens[tok] = _now expire_time = datetime.fromtimestamp(creation_time + 2764790) return jsonify({ "data": { "accessor": "8609694a-cdbc-db9b-d345-e782dbb562ed", "creation_time": creation_time, "creation_ttl": 2764800, "display_name": "fooname", "entity_id": "7d2e3179-f69b-450c-7179-ac8ee8bd8ca9", "expire_time": expire_time.isoformat(), "explicit_max_ttl": 0, "id": tok, "identity_policies": [ "dev-group-policy" ], "issue_time": issue_time, "meta": { "username": "tesla" }, "num_uses": 0, "orphan": True, "path": "auth/kubernetes/login", "policies": [ "default" ], "renewable": True, "ttl": 2764790 } })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_auth_token(self):\n\n __logger__.debug(\"Getting auth Token\")\n return self.keystone_client.auth_ref['token']['id']", "def _lookup_token(self):\n path = '/authn/{account}/{login}/authenticate'.format(\n account=self.account, login='admin'\n )\n res = self._post(path, data=self.api_token, skip_auth=True)\n return base64.b64encode(res.text)", "def find_token_for_authorization(authorization):\n return None", "def get_auth_token():\n if CFG.auth_enabled:\n auth_token = get_keystone_token()\n else:\n auth_token = 'notrealtoken'\n\n return auth_token", "def get_auth_token():\n auth_token_value = memcache.get('authtoken')\n if not auth_token_value:\n entity = Token.get_by_key_name(key_names = 'authtoken')\n if entity:\n auth_token_value= entity.value\n memcache.set('authtoken', auth_token_value)\n else:\n auth_token_value = None\n return auth_token_value", "def _get_token(self):\n return user.get_token()", "def getUser(self, authenticationToken):\r\n pass", "def _retrieve_token(request):\n auth_string = request.headers.get('Authorization')\n try:\n match = re.match(\"Bearer (.+)\", auth_string)\n except TypeError:\n match = None\n if match:\n return match.groups()[0]", "def auth_token(self):", "def auth_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_token\")", "def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')", "def token(cls, token):\n user_db = User.get_by('token', token)\n if not user_db:\n raise ValueError('Sorry, your token is either invalid or expired.')\n return token", "def get_by_auth_token(cls, user_id, token, subject='auth'):\n token_key = cls.token_model.get_key(user_id, subject, token)\n user_key = ndb.Key(cls, user_id)\n # Use get_multi() to save a RPC call.\n valid_token, user = ndb.get_multi([token_key, user_key])\n if valid_token and user:\n timestamp = int(time.mktime(valid_token.created.timetuple()))\n return user, timestamp\n return None, None", "def __get_authentication_token(self):\n cache = load_json(self._tokenPath)\n return cache[\"authentication_token\"]", "def get_token(id=None, name=None):\n\tif id is None and name is None:\n\t\tname = config['username']\n\treturn get_user(id=id, name=name, get_missing=False).token", "def get_user(self, token: str) -> Optional[User]:", "def get_user(self, token: str) -> Optional[User]:", "def get(uid: int, token_id: int):\n\n token = Token.get(uid, token_id).as_dto().to_primitive()\n\n if token:\n return token.to_primitive()\n else:\n raise NotFound(\"Token Not Found\")", "def get_token():\n params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']", "def get_by_auth_token(cls, user_id, token, subject='auth'):\n token_key = cls.token_model.get_key(user_id, subject, token)\n user_key = ndb.Key(cls, user_id)\n # Use get_multi() to save a RPC call.\n valid_token, user = ndb.get_multi([token_key, user_key])\n if valid_token and user:\n timestamp = int(time.mktime(valid_token.created.timetuple()))\n return user, timestamp\n\n return None, None", "def LookupToken(self, dmtoken):\n self.ReadClientStateFile()\n return self._registered_tokens.get(dmtoken, None)", "def getToken(self):\n query = \"SELECT token FROM token WHERE id = 1\"\n res = self.db.execute(query).fetchone()\n if res:\n return res[0]\n return False", "def get_token(self):\n self.token = self._session.fetch_token(\n token_url=CLOUD_URLS[\"get_token\"][1],\n client_id=self._client_id,\n 
client_secret=self._client_secret\n )", "async def token(request: Request):\n return get_token()", "def get_auth_token(self):\n return self.do_rpc('get_authorization',\n username=self._username,\n password=self._password)", "def find_auth_token(document_html):\n search_result = re.search(AUTH_TOKEN_REGEX, document_html)\n if search_result:\n return search_result.group('auth_token')", "def get_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover", "def verify_token(token):\n return AuthToken.query.filter_by(auth_token=token).first()", "def get_auth_token():\n token = g.user.generate_auth_token(24*3600)\n return jsonify({'user_id': g.user.id, 'token': token.decode('ascii')})", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def lookup_token(self, token=None, accessor=False, wrap_ttl=None):\n if token:\n if accessor:\n path = '/v1/auth/token/lookup-accessor/{0}'.format(token)\n return self._post(path, wrap_ttl=wrap_ttl).json()\n else:\n return self._get('/v1/auth/token/lookup/{0}'.format(token)).json()\n else:\n return self._get('/v1/auth/token/lookup-self', wrap_ttl=wrap_ttl).json()", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "def get_auth_token(cls, endpoint, headers):\n\n token = None\n scope = None\n resp = requests.post(endpoint, headers=headers)\n if resp.status_code == 200:\n auth_resp_json = resp.json()\n token = auth_resp_json[\"access_token\"]\n try:\n scope = auth_resp_json[\"scope\"]\n except KeyError:\n scope = None\n if resp.status_code == 401:\n token = \"BAD\"\n return token, scope", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n print(auth)\n\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n \n parts = auth.split()\n \n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "def _getAuth(self):\r\n parameters = {\r\n 'service' : 'reader',\r\n 'Email' : self.username,\r\n 'Passwd' : self.password,\r\n 'accountType' : 'GOOGLE'}\r\n req = requests.post(ClientAuthMethod.CLIENT_URL, data=parameters)\r\n if req.status_code != 200:\r\n raise IOError(\"Error getting the Auth token, have you entered a\"\r\n \"correct username and password?\")\r\n data = req.text\r\n #Strip newline and non token text.\r\n token_dict = dict(x.split('=') for x in data.split('\\n') if 
x)\r\n return token_dict[\"Auth\"]", "def check_token(token):\n return conn.hget('login:', token)", "def get_token(self):\n token_model = TokenModel.find_by_user_id(self.id)\n return token_model.token if token_model else None", "def get_oauth_token():\n return session.get('remote_oauth')", "def validate_token(self, token):\n\n try:\n if not token:\n raise AuthException(\"Needed a token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n # try to get from cache first\n now = time()\n token_info = self.token_cache.get(token)\n if token_info and token_info[\"expires\"] < now:\n # delete token. MUST be done with care, as another thread maybe already delete it. Do not use del\n self.token_cache.pop(token, None)\n token_info = None\n\n # get from database if not in cache\n if not token_info:\n token_info = self.db.get_one(\"tokens\", {\"_id\": token})\n if token_info[\"expires\"] < now:\n raise AuthException(\"Expired Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n\n return token_info\n\n except DbException as e:\n if e.http_code == HTTPStatus.NOT_FOUND:\n raise AuthException(\"Invalid Token or Authorization HTTP header\", http_code=HTTPStatus.UNAUTHORIZED)\n else:\n raise\n except AuthException:\n if self.config[\"global\"].get(\"test.user_not_authorized\"):\n return {\"id\": \"fake-token-id-for-test\",\n \"project_id\": self.config[\"global\"].get(\"test.project_not_authorized\", \"admin\"),\n \"username\": self.config[\"global\"][\"test.user_not_authorized\"], \"admin\": True}\n else:\n raise\n except Exception:\n self.logger.exception(\"Error during token validation using internal backend\")\n raise AuthException(\"Error during token validation using internal backend\",\n http_code=HTTPStatus.UNAUTHORIZED)", "def get_token(self):\n if not self.is_valid():\n logger.warn(\"TokenWall form data is not valid.\")\n return None\n \n tt = self.cleaned_data['token']\n logger.debug(\"Looking for token '%s'\"%tt)\n return Token.objects.get(value=tt)", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n return \"authorization_header_missing\"\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n return \"invalid_header\"\n elif len(parts) == 1:\n return \"invalid_header\"\n elif len(parts) > 2:\n return \"invalid_header\"\n\n token = parts[1]\n return token", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected'}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != 'bearer':\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header must start with Bearer'}, 401)\n\n if len(parts) < 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Token not found after Bearer'}, 401)\n\n if len(parts) > 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header is an invalid token structure'}, 401)\n\n return parts[1]", "def verify_auth_token(token):\n ser = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = ser.loads(token)\n except (BadSignature, SignatureExpired):\n return None\n return User.query.get(data['id'])", "def get_token(self):\n self.session.headers.pop(\"Authorization\", None) # delete old token if was\n\n data = json.dumps({\"password\": self.password, \"username\": self.username})\n answer = self.server_request(self._authTokenPath, data=data)\n\n try:\n self.token 
= json.loads(answer)[\"token\"]\n self.session.headers.update({\"Authorization\": \"Token \" + self.token})\n except KeyError as err:\n print_unexpected_json_error_key(err, answer, self._authTokenPath)\n exit(1)", "def verify_auth_token(cls, token):\n s = Serializer(current_app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except:\n return None\n user = User.query.get(data['id'])\n if user and user.session_token == token:\n return user\n return None", "def get(self):\n if current_user and not current_user.is_anonymous:\n user = current_user\n tok = Token(user, 3600)\n return tok\n return jsonify({404: 'User not found'})", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")", "def get_token(self):\n token = self._session.token\n return token", "def get_token(self, *args, **kwargs):\n if \"SHIB-ECP\" == self._auth_mode:\n return self._shib_get_token(*args, **kwargs)\n elif \"MAST-AUTH\" == self._auth_mode:\n return self._get_token(*args, **kwargs)\n else:\n raise Exception(\"Unknown MAST Auth mode %s\" % self._auth_mode)", "def get_token(headers):\n bearer = headers.get('Authorization')\n if bearer:\n try:\n token_type, token = bearer.rsplit(' ', 1)\n except ValueError:\n raise TokenError('Wrong bearer string: %s', bearer)\n\n if token_type != 'Bearer':\n raise TokenError('Wrong token type: %s, must be %s',\n token_type, 'Bearer')\n return token\n raise TokenError('No token is given in the Authorization header')", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n\n elif len(parts) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, 401)\n\n elif len(parts) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, 401)\n\n token = parts[1]\n return token", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token(self):\n response = self.client.post(\n url_for('auth.login'),\n data=json.dumps({'username': '[email protected]', 'password': 'denno'}),\n headers={'content_type': 'application/json'})\n return json.loads(response.data)['token']", "def UserToken(self) -> object:", "def get_token_auth_header(params):\n auth = get_token(params)\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Authorization header must start with Bearer\"}, 401)\n\n if len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Token not found\"}, 401)\n\n if len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\", \"description\": \"Authorization header must be Bearer token\"}, 401)\n\n token = parts[1]\n return token", "def get_token(self):\n message = {\n \"request\": \"access_token\",\n \"account\": 
self.account,\n \"min_valid_period\": self.validity,\n \"application_hint\": \"orpy\",\n }\n try:\n self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self._sock.connect(self.socket_path)\n self._sock.sendall(json.dumps(message).encode())\n\n data = \"\"\n while True:\n recv = self._sock.recv(16).decode()\n if recv:\n data += recv\n else:\n break\n except socket.error as err:\n raise exceptions.AuthExceptiob(\n err=\"Cannot communicate with the \" \"oidc-agent: %s\" % err\n )\n finally:\n self._sock.close()\n\n token = json.loads(data)\n if token.get(\"status\") == \"failure\":\n raise exceptions.AuthError(err=token.get(\"error\"))\n return token", "def get_token(username, password):\n\t\ttoken = cf.get_token(username, password)\n\t\treturn token", "def getUser():\n\n if 'token' in session:\n return \"Authenticated\"\n else:\n return \"Unauthenticated\"", "def get_auth_token():\n headers = {\n 'Content-Type': 'text/plain;charset=UTF-8', }\n data = '{ \\\n \"auth\": { \\\n \"identity\": { \\\n \"methods\": [ \\\n \"password\" \\\n ], \\\n \"password\": { \\\n \"user\": { \\\n \"name\": \"zheng_zhao\", \\\n \"password\": \"ZhaoZheng0426\", \\\n \"domain\": { \\\n \"name\": \"hwstaff_y00465251\" \\\n } \\\n } \\\n } \\\n }, \\\n \"scope\": { \\\n \"project\": { \\\n \"id\": \"454add6b26d04f53ae5c593551acf1ff\" \\\n } \\\n } \\\n } \\\n }'\n\n r = requests.post('https://iam.cn-north-1.myhuaweicloud.com/v3/auth/tokens',\n headers=headers, data=data)\n\n # print(r.status_code)\n # print(r.headers)\n token = r.headers.get('X-Subject-Token')\n\n return token", "def token(self):\n if not self._token:\n self._token = self.authenicate().token\n\n return self._token", "def GetAuthToken(email, password):\n return AuthToken(email, password).GetAuthToken()", "def get_token_auth_header():\n # Get authorization form request header\n auth = request.headers.get('Authorization', None)\n # Check if authorization header exists\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is MISSING!'\n }, abort(401))\n # If bearer token, then first part of string = 'bearer'\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\"'\n }, abort(401))\n # Authorization header string length must be 2\n elif len(parts) != 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be a BEARER token'\n }, abort(401))\n\n token = parts[1]\n return token", "def get_token():\n if os.path.exists(AUTH_TOKEN_PATH):\n with open(str(AUTH_TOKEN_PATH), 'r') as TokenObj:\n try:\n data = TokenObj.read()\n except (OSError, IOError) as e:\n echo(e)\n data = json.loads(data)\n token = data[\"token\"]\n return token\n else:\n echo(\"\\nYour token file doesn't exists.\")\n echo(\"\\nIt should be present at ~/.evalai/token.json\\n\")\n return None", "def get_token(self):\r\n token = {'id': self.catalog['access']['token']['id'],\r\n 'expires': self.catalog['access']['token']['expires'], }\r\n try:\r\n token['user_id'] = self.catalog['access']['user']['id']\r\n token['tenant_id'] = (\r\n self.catalog['access']['token']['tenant']['id'])\r\n except Exception:\r\n # just leave the tenant and user out if it doesn't exist\r\n pass\r\n return token", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = 
auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "def get(self, token):\n args = (token, )\n row = self.db_manager.execute_sql_and_fetchone(SQL_TOKEN_GET, args)\n if row:\n token_object = convert_db_row_to_dict(row, TOKEN_MODEL_FIELDS)\n else:\n token_object = {}\n return token_object", "def get_token(alias, reg_code, privKey):\n data = json.dumps({\n \"namespace\": alias,\n \"reg_code\": reg_code\n })\n url = endpoint('auth')\n r = requests.post(url,data=data) \n token_str = (r.__dict__['_content']).decode()\n r_token_obj = json.loads(token_str)\n token_cipher = ast.literal_eval( r_token_obj[\"token\"] )\n token_obj = dict()\n token_obj = {\n \"authToken\": decrypt_message( privKey, token_cipher),\n \"expiration_minutes\": r_token_obj[\"expiration_minutes\"],\n \"expiration\": str(datetime.datetime.now() + datetime.timedelta(minutes=r_token_obj[\"expiration_minutes\"]))\n }\n expiration = token_obj[\"expiration\"]\n expiration = parser.parse(expiration)\n if datetime.datetime.now() > expiration:\n print(\"Token has expired\")\n else:\n c = expiration - datetime.datetime.now()\n valid_minutes = str(divmod(c.total_seconds(), 60)[0])\n return token_obj[\"authToken\"]", "def get_current(self):\n auth_token = session.get(\"auth_token\")\n print(auth_token)\n if not auth_token:\n return None\n user = db.user.find_one({\"auth_token\":auth_token})\n\n return user", "def verify_auth_token(token):\n serializer = Serializer(SECRET_KEY)\n try:\n data = serializer.loads(token)\n except (SignatureExpired, BadSignature):\n return None\n else:\n user = User.get(User.id == data['id'])\n return user", "def retrieve_token():\n try:\n deserialized_message = json.loads(peek_app_token())\n\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n if expires_at and check_expired_time(expires_at):\n return deserialized_message.get('token')\n else: # Token expired, refresh it\n refresh_token()\n\n deserialized_message = peek_app_token()\n expires_at = deserialized_message.get('expires_at')\n # Token is good, return it\n try:\n assert(expires_at and check_expired_time(expires_at))\n return deserialized_message.get('token')\n except:\n raise # When all else fails\n\n except Exception as exc:\n log.error(f'Could not refresh token.\\n{exc}')\n traceback.print_exc(file=sys.stderr)\n\n return None", "def EstablishAuthToken(self, opener):\n url = 'https://www.pivotaltracker.com/services/v3/tokens/active'\n data = parse.urlencode((('username', self.username),\n ('password', self.password)))\n try:\n req = opener.open(url, data.encode())\n except error.HTTPError as e:\n if e.code == 404:\n raise NoTokensAvailableException(\n 'Did you create any? 
Check https://www.pivotaltracker.com/profile')\n else:\n raise\n\n res = req.read()\n\n dom = minidom.parseString(res)\n token = dom.getElementsByTagName('guid')[0].firstChild.data\n\n return token", "async def get_token(self, *args, **kwargs) -> Optional[OAuth2Token]:\n token_record = ...\n\n if token_record is not None:\n return OAuth2Token(\n access_token=token_record.access_token,\n refresh_token=token_record.refresh_token,\n scope=token_record.scope,\n issued_at=token_record.issued_at,\n expires_in=token_record.expires_in,\n client_id=token_record.client_id,\n token_type=token_record.token_type,\n revoked=token_record.revoked,\n )", "def get_user_by_token(token):\n collection = get_collection(\"user\")\n user_info = collection.find_one({\"token\": token})\n return user_info", "def authenticationToken(self):\n return self.authToken", "def token(self):\n return self[\"token\"]", "def verify_auth_token(token):\n\n s = Serializer(current_app.config['SECRET_KEY'])\n\n try:\n data = s.loads(token)\n except SignatureExpired:\n print \"EXP\", token\n return None\n except BadSignature:\n print \"BAD\", token\n return None\n\n user = User.query.get(data['id'])\n return user", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n elif auth.split()[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n elif len(auth.split()) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be include type and token.'\n }, 401)\n elif len(auth.split()) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be Bearer token.'\n }, 401)\n else:\n token = auth.split()[1]\n return token", "def get_token() -> str:\n try:\n bearer, authorization = request.headers['Authorization'].split()\n if 'bearer' not in bearer.lower():\n raise Forbidden('Invalid token. Please login!')\n return authorization\n\n except Exception:\n raise Forbidden('Token is required. 
Please login!')", "def get_auth_token(self, username, password):\n url = '/'.join([self.base_url, self.TOKEN_ENDPOINT])\n r = requests.get(url, auth=(username, password))\n if r.status_code == 200:\n return r.content\n return r", "def get_token():\n token = getpass.getpass('Paste in your RDR API token and press Enter:')\n return {'Authorization': 'token ' + token}", "def verify_auth_token(token):\n s = Serializer(app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except BadSignature:\n return None # invalid token\n user = User.query.get(data['email'])\n return user", "def load_token(self):\n token = None\n\n if config.outlook_token:\n token = self.token_constructor(config.outlook_token)\n\n return token", "def getToken(self):\n \n data = '''\n {\n \"auth\": \n {\n \"username\" : \"%s\",\n \"password\" : \"%s\"\n }\n }\n ''' % (self.username, self.password)\n \n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Host': 'api.appnexus.com'\n }\n r = requests.post(self.auth_url, data=data, \n headers=headers)\n ac_data = r.json()\n \n if ac_data['response']['status'] != 'OK':\n self.stream_logger.error('Error while retrieving access token')\n self.stream_logger.error('Status code {0}'\\\n .format(ac_data['response']['status']))\n return False\n\n return ac_data['response']['token']", "def verify_token(self, token: str) -> str:\n return decode(self.rd.hget(\"auth:by_token\", token))", "def get_token(self):\n\t\tself.client.post('/api/v1/auth/signup', data=json.dumps(self.signup_user), content_type='application/json')\n\t\tresponse = self.client.post('/api/v1/auth/login', data=json.dumps(self.login_user), content_type='application/json')\n\t\tresp = json.loads(response.data.decode())\n\t\treturn 'Bearer ' + resp['access_token']", "async def get_user_token(\n token: Optional[str] = None,\n x_token: Optional[str] = Header(None),\n authorization: Optional[str] = Header(None),\n sessiontoken: Optional[str] = Cookie(None),\n) -> Optional[str]:\n if token:\n return token\n if x_token:\n return x_token\n if authorization and authorization.startswith(\"Bearer \"):\n return authorization[7:]\n if sessiontoken:\n return sessiontoken\n return None", "def __get_token(self):\n r = requests.post(self.credentials.conf('endpoint') + '/tokens', json={\n 'auth': {\n 'passwordCredentials': {\n 'username': self.credentials.conf('username'),\n 'password': self.credentials.conf('password'),\n },\n 'tenantId': self.credentials.conf('tenant_id'),\n },\n })\n logger.debug('request:')\n logger.debug('%s', r.request.body)\n #print(r.status_code)\n if r.status_code != 200:\n logger.debug('%s', r.content)\n logger.debug('%s', r.json())\n raise RuntimeError('It failed to get token.')\n logger.debug('%s', r.content)\n j = r.json()\n logger.debug('%s', j)\n token = j['access']['token']['id']\n \n # Get DNS URL.\n \n dns_vers_url = None\n for svc in j['access']['serviceCatalog']:\n if svc['type'] == 'dns':\n for ep in svc['endpoints']:\n if ep['region'] == self.credentials.conf('region'):\n dns_vers_url = ep['publicURL']\n if not dns_vers_url:\n raise RuntimeError('It failed to get DNSv1 URL.')\n \n # Get DNSv1 URL.\n r = requests.get(dns_vers_url, headers={'Accept': 'application/json'})\n #print(r.status_code)\n if r.status_code != 300:\n logger.debug('%s', r.content)\n logger.debug('%s', r.json())\n raise RuntimeError('It failed to get DNS URLs.')\n logger.debug('%s', r.content)\n j = r.json()\n logger.debug('%s', j)\n \n url = None\n for val in j['versions']['values']:\n if val['id'] == 'v1':\n url 
= val['links'][0]['href']\n if not url:\n raise RuntimeError('No DNS v1 URL.')\n return (token, url)", "def get_token(self):\n auth_data = {\"auth\": {\"tenantName\": 'service',\n \"passwordCredentials\":{ \"username\": 'vsm',\n \"password\": self._password}}}\n\n auth_request = urllib2.Request(self._auth_url)\n auth_request.add_header(\"content-type\", \"application/json\")\n auth_request.add_header('Accept', 'application/json')\n auth_request.add_header('User-Agent', 'python-mikeyp')\n auth_request.add_data(json.dumps(auth_data))\n auth_response = urllib2.urlopen(auth_request)\n response_data = json.loads(auth_response.read())\n\n self._token = response_data['access']['token']['id']\n\n service_list = response_data['access']['serviceCatalog']\n for s in service_list:\n if s['type'] == 'vsm' and s['name'] == 'vsm':\n self._vsm_url = s['endpoints'][0]['publicURL']\n break\n\n url_id = self._vsm_url.split('/')[-1]\n return self._token + \"-\" + url_id", "def GetToken(self):\n if self.auth_token_:\n return self.auth_token_\n raise RuntimeError('ClientLoginAuthPolicy is not logged in.')", "def CheckEnrollmentToken(self):\n match = re.match('GoogleEnrollmentToken token=(\\\\S+)',\n self.headers.getheader('Authorization', ''))\n if match:\n return match.group(1)\n\n return None", "def _get_token(self): # pragma: no cover\n\n tokenCookie = None\n for cookie in self._session.cookies:\n if \"mast_token\" in cookie.name:\n tokenCookie = cookie\n break\n\n if not tokenCookie:\n warnings.warn(\"No auth token found.\", AuthenticationWarning)\n\n return tokenCookie", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken", "def get(self):\n # Login of authorized user stores in Flask g object\n user = User.query.filter_by(username=g.user.username).first()\n # Generate token\n token = user.generate_auth_token()\n # Send token in ASCII format\n return {'token': token.decode('ascii')}", "def getToken(self):\n tokens=self._CFG.CRED_TYPE.split(\":\")\n CredType=tokens[0].lower()\n if len(tokens) > 1 :\n CredArgs=self._CFG.CRED_TYPE[len(CredType)+1:]\n else :\n CredArgs = \"\"\n # acquire token, if required \n if CredType == \"pag\" :\n pass\n elif CredType == \"krb5_keytab\" :\n KRB5CCNAME=self._krb5DAO.getTicketbyKeytab(CredArgs, self._CFG.KRB5_PRINC,self._CFG.KRB5_REALM)\n self._pagDAO.obtainTokenFromTicket(KRB5CCNAME, self._CFG.KRB5_REALM, self._CFG.CELL_NAME)\n self._krb5DAO.destroyTicket(KRB5CCNAME)\n elif CredType == \"krb5_password\" :\n if CredArgs != \"\" :\n passwd=CredArgs\n else :\n passwd = 
getpass.getpass(\"Password for %s@%s: \" % (self._CFG.KRB5_PRINC,self._CFG.KRB5_REALM))\n KRB5CCNAME=self._krb5DAO.getTicketbyPassword(passwd, self._CFG.KRB5_PRINC,self._CFG.KRB5_REALM)\n self._pagDAO.obtainTokenFromTicket(KRB5CCNAME, self._CFG.KRB5_REALM, self._CFG.CELL_NAME)\n self._krb5DAO.destroyTicket(KRB5CCNAME)\n # get token-info from pag\n AFSID, Cellname = self._pagDAO.getTokeninPAG(cellname=self._CFG.CELL_NAME)\n Cellname=Cellname.lower()\n token=afs.model.Token.Token(AFSID, Cellname)\n return token", "def getToken(request):\n try:\n token = request.META['HTTP_AUTHORIZATION'].split()[1]\n except:\n token = \"\"\n return token", "def get_token(self):\n if time.time() > self.expiration:\n # need to re-authenticate and get a new token and catalog\n self._authenticate()\n \n return self.token, self.catalog" ]
[ "0.746558", "0.74365014", "0.7382705", "0.721436", "0.7206889", "0.7119792", "0.7098907", "0.69588864", "0.6947529", "0.690765", "0.6845077", "0.68132395", "0.68104905", "0.67953134", "0.67916936", "0.6760361", "0.6760361", "0.6753948", "0.67510355", "0.67459446", "0.67286247", "0.6725746", "0.6710572", "0.6671277", "0.6665821", "0.6663356", "0.665941", "0.6630983", "0.66208625", "0.6569407", "0.6569407", "0.65660965", "0.65621567", "0.65621316", "0.6558551", "0.65456206", "0.6536205", "0.653456", "0.6529211", "0.6529115", "0.65207815", "0.6514062", "0.6505765", "0.65020114", "0.64943594", "0.6492381", "0.6491904", "0.64735395", "0.64716387", "0.64707655", "0.6467919", "0.6459156", "0.64572895", "0.64572895", "0.645574", "0.6455716", "0.64548236", "0.6454165", "0.6451354", "0.6450701", "0.64260757", "0.64252275", "0.64121836", "0.640922", "0.6406682", "0.6399454", "0.63938904", "0.63847476", "0.6377413", "0.6376171", "0.637025", "0.63701296", "0.6366606", "0.6364289", "0.6349067", "0.6348628", "0.63479465", "0.6346964", "0.6334711", "0.6332469", "0.63313115", "0.6326389", "0.6322281", "0.6320033", "0.6318183", "0.6310464", "0.63079756", "0.6306746", "0.6279132", "0.6279105", "0.6273131", "0.6272035", "0.6268702", "0.6261051", "0.6261051", "0.62553334", "0.6252285", "0.6249826", "0.6240343", "0.6240189" ]
0.6357753
74
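Most of the header-based variants above reduce to the same first step: pull the bearer token out of the Authorization header before doing any lookup or validation. A minimal, framework-agnostic sketch of that step (the error type and messages are illustrative, not taken from any snippet above):

def get_bearer_token(headers):
    # Expect a header of the form "Authorization: Bearer <token>".
    auth = headers.get('Authorization')
    if not auth:
        raise ValueError('Authorization header is expected')
    parts = auth.split()
    if len(parts) != 2 or parts[0].lower() != 'bearer':
        raise ValueError('Authorization header must be of the form "Bearer <token>"')
    return parts[1]

Frameworks differ only in where headers comes from (request.headers in Flask, self.headers in an http.server handler); the extraction logic stays the same.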
Attach the Market Cap CustomFactor to the Pipeline. Returns: Pipeline (numpy.array), an array containing all data needed for the algorithm.
def make_pipeline():
    # Keep only assets whose latest market cap is above $1 billion.
    mkt_cap_screen = (morningstar.valuation.market_cap.latest > 1e9)

    return Pipeline(
        columns={
            # Expose the latest free cash flow figure for each asset that passes the screen.
            'Free Cash Flow': morningstar.cash_flow_statement.free_cash_flow.latest,
        },
        screen=mkt_cap_screen)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fill_pipeline():\n\n # m1_pca = PCA()\n m1_pca = PCA(svd_solver='randomized', whiten=True) # 与官网里子一致的后2个参数,否则分数很差\n # m1_pca.fit(X_train)\n\n m2_svc = SVC(kernel='rbf', class_weight='balanced')\n\n pipe = Pipeline(steps=[('pca', m1_pca),\n ('svc', m2_svc)])\n print('\\n===================原 estimator')\n pprint(pipe.named_steps)\n return pipe", "def make_pipeline():\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n symbol_filter = StaticSids([TRADING_SID])\n set_benchmark(TRADING_SID) \n # volume_filter = VolumeFilter(\n # inputs=[USEquityPricing.volume],\n # window_length=1,\n # mask=symbol_filter\n # )\n\n # is_setup = volume_filter & alpha_long_weekly & alpha_long_daily\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=symbol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=symbol_filter\n )\n weekly_classifier = WeeklyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n )\n daily_classifier = DailyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n\n )\n\n pipe = Pipeline(\n screen=symbol_filter, # & (daily_classifier > 0),\n columns={\n 'daily_classifier': daily_classifier,\n 'daily_high': USEquityPricing.high.latest,\n 'daily_low': USEquityPricing.low.latest,\n 'weekly_classifier': weekly_classifier,\n 'weekly_high': weekly_high,\n 'weekly_low': weekly_low\n }\n )\n return pipe", "def make_pipeline():\r\n\r\n # Custom universe containing only desired assets (stocks with flag data)\r\n universe = StaticSids(my_stocks)\r\n\r\n return Pipeline(\r\n columns={\r\n #'flag_type': algo_data_full.flag_type.latest,\r\n #'flag_price': algo_data_full.flag_price.latest,\r\n #'end_flag_date': algo_data_full.end_flag_date.latest,\r\n #'end_flag_price': algo_data_full.end_flag_price.latest,\r\n 'up_flags': flag_counts.up.latest,\r\n 'down_flags': flag_counts.down.latest,\r\n 'up_ratio': up_ratios_2.up_ratio.latest,\r\n 'close': USEquityPricing.close.latest,\r\n },\r\n screen=universe\r\n )", "def create_pipeline(clf):\n return Pipeline([('scaler', MinMaxScaler()), ('clf', clf)])", "def make_pipeline(context):\n \n # Base universe of top 500 US stocks.\n base_universe_filter = Q500US()\n\n # Stocks of only tech sector.\n tech_sector = Sector(mask=base_universe_filter)\n tech_universe_filter = base_universe_filter & tech_sector.eq(311)\n\n # Top 10 tech stocks with largest market cap.\n mkt_cap_filter = morningstar.valuation.market_cap.latest\n top_mkt_cap_tech_filter = mkt_cap_filter.top(context.NUM_SYMBOLS, mask=tech_universe_filter)\n\n # Bollinger band factor with Stdev factor 2.\n lower_band_factor, middle_factor, upper_band_factor = BollingerBands(window_length=22, k=2, mask=top_mkt_cap_tech_filter)\n\n # Percent difference between (price, lower_band) and (price, upper_band).\n price = USEquityPricing.close.latest\n buy_percent_factor = ((lower_band_factor - price)*100)/price\n sell_percent_factor = ((price - upper_band_factor)*100)/price\n\n # Mean reversion buy and sell filters.\n # Sell when price exceeds upper-band and buy when price is below lower-band.\n buy_filter = buy_percent_factor > 0\n sell_filter = sell_percent_factor > 0\n\n # Build and return the Pipeline.\n pipe_bbands = Pipeline(columns={'buy_percent': buy_percent_factor,\n 'lower_band': lower_band_factor,\n 'buy': buy_filter,\n 'price': price,\n 'sell': 
sell_filter,\n 'upper_band': upper_band_factor,\n 'sell_percent': sell_percent_factor}, screen=top_mkt_cap_tech_filter)\n \n return pipe_bbands", "def make_pipeline():\n universe = TradableStocksUS('Real Estate') | TradableStocksUS('Utilities') | \\\n TradableStocksUS('Consumer Staples') | TradableStocksUS('Technology') | \\\n TradableStocksUS('Financials') | TradableStocksUS('Energy') | \\\n TradableStocksUS('Materials') | TradableStocksUS('Health Care') | \\\n TradableStocksUS('Industrials') | TradableStocksUS('Consumer Discretionary') | \\\n TradableStocksUS('Communications')\n\n roic = shfd.slice(dimension='MRT', period_offset=0).ROIC.latest\n ebit = shfd.slice(dimension='MRQ', period_offset=0).EBIT.latest\n ev = shfd.slice(dimension='MRQ', period_offset=0).EV.latest\n volatility = AnnualizedVolatility(window_length=100)\n value = ebit / ev\n\n roic_rank = roic.rank(mask=universe)\n value_rank = value.rank(mask=universe)\n volatility_rank = volatility.rank(mask=universe, ascending=False)\n\n spy_ma100_price = SMA(inputs=[USEquityPricing.close], \n window_length=100)[algo.sid(\"FIBBG000BDTBL9\")]\n spy_price = USEquityPricing.close.latest[algo.sid(\"FIBBG000BDTBL9\")]\n\n momentum_score = MomentumScore()\n\n overall_rank = roic_rank + value_rank + volatility_rank\n\n # seven_month_returns = Returns(window_length=148, mask=universe,)\n # one_month_returns = Returns(window_length=30, mask=universe,)\n\n pipeline = Pipeline(\n columns={\n 'stock' : master.SecuritiesMaster.Symbol.latest,\n 'sid': master.SecuritiesMaster.Sid.latest,\n 'sector' : master.SecuritiesMaster.usstock_Sector.latest,\n 'average_dollar_volume': AverageDollarVolume(window_length=200),\n 'price': EquityPricing.close.latest,\n 'volume': EquityPricing.volume.latest,\n 'roic' : roic,\n 'value' : value,\n 'volatility': volatility,\n 'roic_rank' : roic_rank,\n 'value_rank' : value_rank,\n 'momentum': momentum_score,\n 'momentum_decile': momentum_score.deciles(),\n 'volatility_decile' : volatility.deciles(),\n 'overall_rank' : overall_rank,\n 'overall_rank_decile': overall_rank.deciles(),\n 'trend_filter': spy_price > spy_ma100_price,\n # 'returns' : one_month_returns - seven_month_returns\n }, \n screen = universe\n )\n\n return pipeline", "def run(self):\n pipeline = set_pipeline()\n pipeline.fit(self.X_train, self.y_train)\n return pipeline", "def pipeline(self):\n\n transformers = []\n\n custom = self.CustomFeature()\n #transformers.append(('custom', custom))\n n_features = int(self.n_features/2)\n\n #kbest = SelectKBest(score_func=chi2, k=n_features)\n #transformers.append(('kbest', kbest))\n\n # pca = PCA(n_components=n_features, svd_solver='randomized', whiten=True)\n # transformers.append(('pca', pca))\n\n if self.definer.problem_type == 'classification':\n extraTC = SelectFromModel(ExtraTreesClassifier(criterion='entropy'))\n else:\n extraTC = SelectFromModel(ExtraTreesRegressor())\n\n transformers.append(('extraTC', extraTC))\n\n #scaler = StandardScaler()\n #transformers.append(('scaler', scaler))\n #binarizer = Binarizer()\n return FeatureUnion(transformers)", "def similar_bonds_pipeline():\n pipeline = Pipeline(\n steps=[\n ('scaler', StandardScaler()),\n #('encoder', OneHotEncoder()),\n ('pca', PCA(n_components=3)),\n ('knn', KNN()),\n ]\n )\n return pipeline", "def build_own_pipeline() -> Pipeline:\n nn_pipeline = None\n\n nn_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf', MLPClassifier()) \n ])\n \n return nn_pipeline", "def create(pdef):\n from sklearn.pipeline 
import Pipeline\n return [Pipeline(p) for p in pdef]", "def get_cb_pipeline(train):\n from src.features import alchemy_feat, counting_feat, nltk_feat\n features = [\n ('sentiment', alchemy_feat.Sentiment()),\n ('sent_len', counting_feat.SentenceLength()),\n ('tfidf', counting_feat.BagOfTfIDF(train)),\n ('ner', alchemy_feat.NER()),\n ('pos', nltk_feat.POS())\n ]\n return get_pipeline(features)", "def make_pipeline():\r\n base_universe = Q1500US()\r\n sector = Sector() \r\n # screen is based off of returns\r\n returns = Returns(window_length = 2)\r\n # check if stock price has good strength, but not necessarily overbought\r\n rsi = RSI() \r\n price = USEquityPricing.close.latest\r\n # creating filter by specifying the type of returns desired\r\n top_return_stocks = returns.top(1,mask=base_universe, groupby=sector)\r\n pipe = Pipeline(\r\n columns = {\r\n 'rsi': rsi,\r\n 'price': price\r\n },\r\n # filter top return stocks, and stocks that are not being overbought\r\n # but are not too oversold either\r\n screen = base_universe & top_return_stocks & (20 < rsi < 80)\r\n # the above is equivalent to: choose stocks from the base universe that have had the top returns in their sectors and have a good RSI value\r\n )\r\n return pipe", "def make_full_pipeline(\n preprocess_pipe: ColumnTransformer, model: BaseEstimator\n) -> Pipeline:\n full_pipe = sklearn.pipeline.Pipeline(\n [(\"preprocess\", preprocess_pipe), (\"model\", model)]\n )\n return full_pipe", "def build_own_pipeline() -> Pipeline:\n clf = svm.LinearSVC(C=2, loss='hinge')\n vect = TfidfVectorizer(ngram_range=(1, 2))\n\n pipeline = None\n ##### Write code here #######\n pipeline = Pipeline([\n ('vect', vect),\n ('tfidf', TfidfTransformer()),\n ('clf', clf)\n ])\n ##### End of your work ######\n return pipeline", "def build_numerical_pipeline(self) -> Pipeline:\n pipeline = Pipeline([\n ('extract_data', FunctionTransformer(self.get_numerical_features)),\n ('impute', SimpleImputer(missing_values=np.nan)),\n ('standard_scaler', CustomStandardScaler())\n ])\n return pipeline", "def make_pipeline():\n \n # Base universe set to the QTradableStocksUS\n base_universe = QTradableStocksUS()#Q500US()\n base_universe = (base_universe & Q500US())\n base_universe = (base_universe & Fundamentals.market_cap.latest.top(150))\n \n # Factor of yesterday's close price.\n #yesterday_close = USEquityPricing.close.latest\n \n pipe = Pipeline(\n columns={\n #'close': yesterday_close,\n 'sector': Sector(),\n },\n screen=base_universe\n )\n return pipe", "def _get_pipeline(self, params_dict):\n p = Pipeline(steps=[('normalise', StandardScaler()),\n ('add_noise', NoiseAdder()),\n ('dim_reduce', PCA()),\n ('cluster', KMeans())])\n p.set_params(**params_dict)\n return p", "def get_pipeline(self):\n if hasattr(self, \"pipeline\"):\n return self.pipeline\n steps = [\n # before preprocessor, comes the feature extractor\n ('extractor', TurkishFeatureExtractor()),\n # first the pre-processor\n (\"preprocessor\", TurkishPreprocessor(self.stemmer_name_to_method[self.stemmer_method])),\n (\"vectorizer\", TurkishVectorizer(self.feature_name_to_class[self.feature])),\n # use pca\n # (\"pca\", TruncatedSVD(n_components=20, n_iter=10)),\n (\"adder\", TurkishFeatureAdder(n_components=20, n_iter=10)),\n (\"model\", self.model_name_to_class[self.model])\n ]\n self.pipeline = Pipeline(steps)\n return self.pipeline", "def make_pipeline(context):\n \n # Base universe set to the Q1500US\n base_universe = Q500US()\n \n #Get all industry codes\n 
industry=morningstar.asset_classification.morningstar_industry_code.latest\n #Get all sector codes\n sector = Sector()\n \n # Create filters (to be used as masks) of different industries/sectors \n # This is the mask that should exclude the most stocks. \n # Note that these may need to be even further filtered to exclude securities outside of a \n # similar range of volumes/size. For instance, the defense sector stock provides stocks as large as # LMT but also small defense companies. Although this shouldn't matter due to the second filter of \n # crosscorrelation, this may be unnecassary computational expense. \n pipe=Pipeline()\n #Below forms a \"sentiment screen\" that takes only stocks that have been rated a certain number of times and of those ratings there are at least 2.85 times as many bull scored messages as there are bear scored messages. \n pipe.add(st.bull_scored_messages .latest, 'bull_scored_messages')\n pipe.add(st.bear_scored_messages .latest, 'bear_scored_messages')\n sentimentScreen=(((st.bull_scored_messages.latest) > (context.Sentiment_multiplier*st.bear_scored_messages.latest)) & (st.bear_scored_messages.latest > 5))\n \n dFilt=sector.eq(310) #Indicates aerospace/defense sector\n dFilt2=industry.eq(31052107) #Indicates aerospace/defense industry\n tFilt=sector.eq(311) #Indicates consumer electronics sector\n tFilt2=industry.eq(31167138) #Indicates consumer electronics industry \n cFilt=sector.eq(101) #Chemical sector\n cFilt2=industry.eq(10103003)\n aFilt=sector.eq(102)\n aFilt2=industry.eq(10209017) #Auto manufacturing industry\n depFilt2=industry.eq(10217034) #Department store industry\n #dFilt2,tFilt2,cFilt2,aFilt2=True,True,True,True #Remove industry requirement\n defenseFilt= dFilt & dFilt2 #Combination of filters\n techFilt= tFilt & tFilt2\n chemFilt = cFilt & cFilt2 \n autoFilt = aFilt & aFilt2 \n tradable=base_universe & (defenseFilt | techFilt | chemFilt | autoFilt | depFilt2) & sentimentScreen\n \n \n pipe.set_screen(tradable)\n pipe.add(defenseFilt,'defenseFilt')\n pipe.add(techFilt,'techFilt')\n pipe.add(chemFilt,'chemFilt')\n pipe.add(autoFilt,'autoFilt')\n pipe.add(depFilt2,'depFilt')\n \n \n \n #TODO: May also want to return stock sentiment data and further filter tuple couples by only accepting couples with sentiment data in a similar range (further attributing to the validity of the calculated cross-correlation)\n \n return pipe", "def set_pipeline(self):\n feateng_steps = self.kwargs.get('feateng', ['runtime', 'country', 'language',\n 'genre', 'age', 'rated', 'released',\n 'writer', 'director', 'actors', 'production'])\n \n pipe_runtime_features = Pipeline([\n ('runtime', SimpleImputer(strategy='constant', fill_value=\"0\")),\n ('runtime_encoder', CleanRuntimeEncoder()),\n ('runtime_scaler', StandardScaler())])\n \n pipe_country_features = Pipeline([\n ('country', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('country_encoder', CleanCountryEncoder())])\n \n pipe_language_features = Pipeline([\n ('language', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('language_encoder', CleanLanguageEncoder())])\n \n pipe_genre_features = Pipeline([\n ('genre', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('genre_transformer', FunctionTransformer(np.reshape, kw_args={'newshape':-1})), \n ('genre_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_age_features = Pipeline([\n ('age', SimpleImputer(strategy='median')),\n ('age_enconder', CleanAgeEncoder())])\n \n 
pipe_rated_features = Pipeline([\n ('rated', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('rated_encoder', CleanRatedEncoder()),\n ('rated_ohe', OneHotEncoder(handle_unknown='ignore'))])\n \n pipe_released_features = Pipeline([\n ('released', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('released_encoder', CleanReleasedEncoder()),\n ('released_ohe', OneHotEncoder(handle_unknown='ignore'))])\n\n pipe_writer_features = Pipeline([\n ('writer', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('writer_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('writer_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_director_features = Pipeline([\n ('director', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('director_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('director_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_actors_features = Pipeline([\n ('actors', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('actors_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('actors_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n pipe_production_features = Pipeline([\n ('production', SimpleImputer(strategy='constant', fill_value='unknown')),\n ('production_transformer', FunctionTransformer(np.reshape, kw_args={'newshape': -1})), \n ('production_vectorizer', CountVectorizer(token_pattern='[a-zA-Z][a-z -]+', max_features=10))])\n \n # define default feature engineering blocks\n feateng_blocks = [\n ('runtime', pipe_runtime_features, ['Runtime']),\n ('country', pipe_country_features, ['Country']),\n ('genre', pipe_genre_features, ['Genre']),\n ('age', pipe_age_features, ['Year']),\n ('rated', pipe_rated_features, ['Rated']),\n ('released', pipe_released_features, ['Released']),\n ('writer', pipe_writer_features, ['Writer']),\n ('director', pipe_director_features, ['Director']),\n ('actors', pipe_actors_features, ['Actors']),\n ('language', pipe_language_features, ['Language']),\n ('production', pipe_production_features, ['Production'])]\n \n # filter out some blocks according to input parameters\n for block in feateng_blocks:\n if block[0] not in feateng_steps:\n feateng_blocks.remove(block)\n\n features_encoder = ColumnTransformer(feateng_blocks,\n n_jobs=None,\n remainder='drop')\n\n self.pipeline = Pipeline(steps=[\n ('features', features_encoder),\n ('rgs', self.get_estimator())])", "def build_pipeline():\n full_df = pd.read_csv(\"../data/healthcare-dataset-stroke-data.csv\",index_col = \"id\").drop(columns = [\"stroke\"],axis=1)\n #transform functions to make the pipeline work\n one_hot_encode_transformed = FunctionTransformer(one_hot_encode)\n impute_transformed = FunctionTransformer(impute)\n add_bodytype_transformed = FunctionTransformer(add_bodytype)\n add_diabetes_transformed = FunctionTransformer(add_diabetes)\n add_preexisting_transformed = FunctionTransformer(add_preexisting)\n add_missing_cols_transformed = FunctionTransformer(add_missing_cols,kw_args={\"total_tags\":get_all_tags(full_df)})\n pipeline = Pipeline([\n\n \n (\"add_bodytype\",add_bodytype_transformed),\n (\"add_diabetes\",add_diabetes_transformed),\n (\"add_preexisting\",add_preexisting_transformed),\n (\"impute\",impute_transformed),\n (\"one_hot_encode\",one_hot_encode_transformed),\n (\"add_missing_cols\",add_missing_cols_transformed),\n #use all 
available threads\n (\"over_under\" , SMOTEENN()),\n (\"pred\",XGBClassifier(nthread = -1,verbosity = 0,tree_method = 'gpu_hist',eval_metric = \"aucpr\",sampling_method = \"gradient_based\"))\n ])\n \n #set up parameters to test\n parameters = {\n\n 'pred__scale_pos_weight' : list(range(1,60,5)),\n 'over_under__sampling_strategy' : ['auto',0.1,0.2,0.3,0.4,0.5],\n \"pred__max_delta_step\": list(range(0,11))\n \n } \n \n grid = GridSearchCV(pipeline, param_grid=parameters,n_jobs = -1 ,scoring =\"average_precision\",verbose = 1)\n\n return grid", "def build_naive_bayes():\n nb_pipeline = None\n ##### Write code here\n nb_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', ComplementNB())\n ])\n\n ##### End of your work ######\n return nb_pipeline", "def create_pipeline_for_kfold(self, args):\n return ClassificationPipeline(args=args)", "def _pipeline(self, vectorizer, n_features, ngram_range, C):\n classifier = SVC(kernel=\"linear\", C=C, max_iter=1000000, shrinking=1, tol=0.0001)\n vectorizer.set_params(stop_words=None, max_features=self.max_features, ngram_range=ngram_range)\n \n checker_pipeline = Pipeline([\n ('vectorizer', vectorizer),\n ('reduce_dim', SelectKBest(chi2, k=n_features)),\n ('classify', classifier)])\n\n return checker_pipeline", "def init_bel():\n # Pipeline before CCA\n X_pre_processing = Pipeline(\n [\n (\"scaler\", StandardScaler(with_mean=False)),\n (\"pca\", PCA()),\n ]\n )\n Y_pre_processing = Pipeline(\n [\n (\"scaler\", StandardScaler(with_mean=False)),\n (\"pca\", PCA()),\n ]\n )\n\n # Canonical Correlation Analysis\n # Number of CCA components is chosen as the min number of PC\n n_pc_pred, n_pc_targ = (\n 50,\n 30,\n )\n cca = CCA(n_components=min(n_pc_targ, n_pc_pred), max_iter=500 * 20, tol=1e-6)\n\n # Pipeline after CCA\n X_post_processing = Pipeline(\n [(\"normalizer\", PowerTransformer(method=\"yeo-johnson\", standardize=True))]\n )\n Y_post_processing = Pipeline(\n [(\"normalizer\", PowerTransformer(method=\"yeo-johnson\", standardize=True))]\n )\n\n # Initiate BEL object\n bel_model = BEL(\n X_pre_processing=X_pre_processing,\n X_post_processing=X_post_processing,\n Y_pre_processing=Y_pre_processing,\n Y_post_processing=Y_post_processing,\n cca=cca,\n )\n\n # Set PC cut\n bel_model.X_n_pc = n_pc_pred\n bel_model.Y_n_pc = n_pc_targ\n\n return bel_model", "def initialize(context):\n pipe = Pipeline()\n attach_pipeline(pipe, 'ff_example')\n\n # common_stock = CommonStock()\n # # filter down to securities that are either common stock or SPY\n # pipe.set_screen(common_stock.eq(1))\n mkt_cap = MarketEquity()\n pipe.add(mkt_cap, 'market_cap')\n\n book_equity = BookEquity()\n # book equity over market equity\n be_me = book_equity/mkt_cap\n pipe.add(be_me, 'be_me')\n\n returns = Returns(window_length=2)\n pipe.add(returns, 'returns')\n \n dt = get_datetime().normalize()\n start_ = dt if dt > START_DATE else START_DATE\n context.result = result.loc[start_: , :]", "def build_svm_pipeline():\n svm_pipeline = None\n\n svm_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', SGDClassifier()),\n ])\n\n return svm_pipeline", "def preprocessing_pipeline(self):\n self.__multilabel_processing()\n self.__split_dataset()\n self.__save_datasets()", "def find_correct_pipeline(data_name):\n\n if data_name == 'GSE13355':\n pipeline = PL.make_pipeline(\n PP.Binarizer(threshold=0.7000000000000001),\n E.RandomForestClassifier(bootstrap=False, criterion=\"entropy\", max_features=0.35000000000000003,\n min_samples_leaf=3, min_samples_split=18, n_estimators=100)\n 
)\n elif data_name == 'GSE14905':\n pipeline = PL.make_pipeline(svm.LinearSVC(C=5.0, dual=True, loss=\"squared_hinge\", penalty=\"l2\", tol=0.001))\n elif data_name == 'GSE27887':\n pipeline = PL.make_pipeline(T.DecisionTreeClassifier(criterion=\"gini\", max_depth=4, min_samples_leaf=1, min_samples_split=10))\n elif data_name == 'GSE30999':\n pipeline = PL.make_pipeline(N.KNeighborsClassifier(n_neighbors=9, p=2, weights=\"distance\"))\n elif data_name == 'GSE32924':\n pipeline = PL.make_pipeline(E.GradientBoostingClassifier(learning_rate=1.0, max_depth=8, max_features=0.7500000000000001, min_samples_leaf=4,\n min_samples_split=3, n_estimators=100, subsample=0.7000000000000001))\n\n elif data_name == 'GSE34248':\n pipeline = PL.make_pipeline(E.RandomForestClassifier(bootstrap=True, criterion=\"entropy\", max_features=0.9000000000000001, min_samples_leaf=3,\n min_samples_split=6, n_estimators=100))\n elif data_name == 'GSE41662':\n pipeline = PL.make_pipeline(svm.LinearSVC(C=0.001, dual=True, loss=\"hinge\", penalty=\"l2\", tol=0.01))\n elif data_name == 'GSE78097':\n pipeline = PL.make_pipeline(\n E.RandomForestClassifier(bootstrap=False, criterion=\"gini\", max_features=1.0, min_samples_leaf=4,\n min_samples_split=10, n_estimators=100))\n elif data_name == 'GSE36842':\n raise NotImplementedError()\n else:\n raise NotImplementedError('No pipeline is created for this data set')\n\n return pipeline", "def setup_pipeline(self, estimator=None, biclass=True):\n if biclass:\n self.pipeline = Pipeline(estimator)\n else:\n self.pipeline = OneVsOneClassifier(Pipeline(estimator))", "def pipeline():\n\n test_pipeline = (Pipeline()\n .init_variable('current_loss')\n .init_model('model', C('model_class'),\n 'dynamic', C('model_config'))\n .to_array(dtype='float32')\n .train_model('model',\n inputs=B('images'),\n targets=B('labels'),\n outputs='loss',\n save_to=V('current_loss'))\n )\n return test_pipeline", "def set_pipeline(self):\n pipe_distance = make_pipeline(DistanceTransformer(), RobustScaler())\n pipe_time = make_pipeline(TimeFeaturesEncoder(time_column='pickup_datetime'), OneHotEncoder(handle_unknown='ignore'))\n dist_cols = ['pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude']\n time_cols = ['pickup_datetime']\n feat_eng_bloc = ColumnTransformer([('time', pipe_time, time_cols),\n ('distance', pipe_distance, dist_cols)]\n )\n self.pipeline = Pipeline(steps=[('feat_eng_bloc', feat_eng_bloc),\n ('regressor', RandomForestRegressor())])\n return self.pipeline", "def set_flow_array(self):\n flow = self.flow_data.flow\n pressure_drop = self.flow_data.pressure_drop\n popt, pcov = spopt.curve_fit(self.get_flow, pressure_drop,\n flow, p0=self.exh.flow_coeff) \n self.exh.flow_coeff = popt\n self.exh.flow_array = ( self.exh.flow_coeff *\n self.exh.pressure_drop**0.5 )", "def build_logistic_regr():\n logistic_pipeline = None\n ##### Write code here #######\n logistic_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', LogisticRegression())\n ])\n ##### End of your work ######\n return logistic_pipeline", "def __build_ml_pipeline(self, clf: MultiOutputClassifier) -> Pipeline:\r\n pipeline = Pipeline([\r\n ('features', FeatureUnion(\r\n [('text', Pipeline(\r\n [('text_field_extractor', \r\n basic_utils.TextFieldExtractor('message')), \r\n #('vect', CountVectorizer(tokenizer=clean_text)),\r\n #('tfidf', TfidfTransformer())\r\n ('tfidf', TfidfVectorizer(tokenizer=basic_utils.tokenize, \r\n min_df=.0025, max_df=0.25, \r\n ngram_range=(1,2)))\r\n ])),\r\n \r\n ('numerics', 
FeatureUnion(\r\n [('text_len', \r\n Pipeline([('text_len_extractor', \r\n basic_utils.NumericFieldExtractor('text_len')), \r\n ('text_len_scaler', StandardScaler())\r\n ])),\r\n ('punt_perc', \r\n Pipeline([('punt_perc_extractor', \r\n basic_utils.NumericFieldExtractor('punt_perc')), \r\n ('punt_perc_scaler', StandardScaler())\r\n ]))\r\n ])),\r\n ('starting_verb', basic_utils.PosFieldExtractor('starting_verb_flag'))\r\n ])),\r\n \r\n ('clf', clf)\r\n ])\r\n \r\n return pipeline", "def _create_pipeline(self) -> TfmIterator:\n # 1. Initialise TubRecord -> x, y transformations\n def get_x(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting x from record for training\"\"\"\n out_dict = self.model.x_transform(record, self.image_processor)\n # apply the normalisation here on the fly to go from uint8 -> float\n out_dict['img_in'] = normalize_image(out_dict['img_in'])\n return out_dict\n\n def get_y(record: TubRecord) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\" Extracting y from record for training \"\"\"\n y = self.model.y_transform(record)\n return y\n\n # 2. Build pipeline using the transformations\n pipeline = self.sequence.build_pipeline(x_transform=get_x,\n y_transform=get_y)\n return pipeline", "def make_pipeline(model):\n\n steps = [\n (\"imp\", SimpleImputer(strategy=\"most_frequent\")),\n (\"norm\", MinMaxScaler()),\n (\"reg\", model)\n ]\n pipeline = Pipeline(steps=steps)\n\n return pipeline", "def __init__(self, x, y, numWeights, netHeight, netWidth, PBC, minVal=[], maxVal=[], pcaVec=[], weiArray=[]):\r\n\r\n self.PBC = PBC\r\n self.pos = hx.coorToHex(x, y)\r\n self.weights = []\r\n\r\n self.netHeight = netHeight\r\n self.netWidth = netWidth\r\n\r\n if weiArray == [] and pcaVec == []:\r\n # select randomly in the space spanned by the data\r\n for i in range(numWeights):\r\n if round(np.random.random()) >= 0.5:\r\n self.weights.append(1)\r\n else:\r\n self.weights.append(0)\r\n\r\n # self.weights.append(np.random.random()*(maxVal[i]-minVal[i])+minVal[i])\r\n elif weiArray == [] and pcaVec != []:\r\n # select uniformly in the space spanned by the PCA vectors\r\n self.weights = (x - self.netWidth / 2) * 2.0 / self.netWidth * pcaVec[0] + (\r\n y - self.netHeight / 2) * 2.0 / self.netHeight * pcaVec[1]\r\n else:\r\n for i in range(numWeights):\r\n self.weights.append(weiArray[i])", "def preproc_pipeline(data):\n # Preprocess\n data = preprocess(data)\n\n # Optional --> run a technical analysis on it and add more features\n data = generate_ta(data)\n \n # Split\n train_set, validation_set, test_set = train_val_test_split(data)\n \n # Set up for Keras\n train_set = shape_for_keras(train_set)\n validation_set = shape_for_keras(validation_set)\n test_set = shape_for_keras(test_set)\n\n # We could save this to csv.\n return train_set, validation_set, test_set", "def set_pipeline(self):\n dist_pipe = Pipeline([\n ('dist_trans', DistanceTransformer()),\n ('stdscaler', StandardScaler())\n ])\n\n time_pipe = Pipeline([\n ('time_enc', TimeFeaturesEncoder('pickup_datetime')),\n ('ohe', OneHotEncoder(handle_unknown='ignore'))\n ])\n\n preproc_pipe = ColumnTransformer([\n ('distance', dist_pipe, [\"pickup_latitude\", \"pickup_longitude\", 'dropoff_latitude', 'dropoff_longitude']),\n ('time', time_pipe, ['pickup_datetime'])\n ], remainder=\"drop\")\n\n pipe = Pipeline([\n ('preproc', preproc_pipe),\n ('linear_model', LinearRegression())\n ])\n return pipe", "def getIdealSec(context, data): #This replaced before_trading_start(context, data)\n record(Leverage = 
\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t context.account.leverage,pos=len(context.portfolio.positions))\n context.output = pipeline_output('my_pipeline')\n #print('Pipeout: ')\n #print(context.output)\n \n # These are the securities that we are interested in trading each day.\n # Note: As it stands, the securities in this list are from two different industries (defense and\n # consumer electronics). Although more computationally expensive then dividing them out into their \n # two respective industries prior to cross correlating, leaving them in the same matrix/data set and \n # cross correlating them gives us a way to 'check' that the crosscorrelation is valid, since securities within the same industry should typically cross correlate to a higher degree than across industries. ***\n context.security_list = context.output.index \n context.defenseList = context.output[context.output['defenseFilt']].index.tolist()\n #print(context.defenseList)\n context.autoList = context.output[context.output['autoFilt']].index.tolist()\n #print(context.autoList)\n context.chemList = context.output[context.output['chemFilt']].index.tolist()\n #print(context.chemList)\n context.techList = context.output[context.output['techFilt']].index.tolist()\n #print(context.techList)\n context.depList = context.output[context.output['depFilt']].index.tolist()\n # Within each sector, calculate the mean (and max, since we may choose only to trade the maximally correlated securities regardless of industry) crosscorrelation between all combinations of stocks. \n #This will only run every trading day to prevent computational expense. In that \n #respect, performs identically to a pipeline add-on (but allows the use of \"history\") \n #Try block here incase pipe returns no valid securities. \n try:\n \tprice_history = np.transpose(data.history(context.security_list, fields=\"price\", bar_count=context.lookback,frequency=\"1m\"))\n \tprice_history=price_history.as_matrix()\n except:\n price_history=[[0],[0],[0]]\n #This returns three arrays, containing a filtered set of maximally cross correlated securities within the last time range (given by context.lookback), their associated (and filtered) time delays corresponding to their maximum correlation, and the degree of their correlation in the given time frame. Essentially, since tau has already been filtered for, the degree of their correlation should be used as a confidence feature to make predictions off of, and tau should be used to determine when to make purchases/sales. \n #hCorrVals,maxSecs,timeDelays,short_timeDelays=crossCorr(context.security_list,price_history,context)\n #The best securities to trade using this algorithm (each day) are listed in the below lists ***\n try:\n \thCorrVals,maxSecs,timeDelays,short_timeDelays=crossCorr(context.security_list,price_history,context) \n except: \n print('Crosscorr Failed')\n maxSecs,hCorrVals,timeDelays,short_timeDelays=[],[],[],[]\n #\"Globalize\" the returned information so that we can handle these commodities every minute. \n context.Securities=maxSecs\n context.CorrVals=hCorrVals\n context.timeDelays=short_timeDelays #************Used to be timeDelays, now however, we calculate a more recent tau\n context.actionList,context.timerList,context.tradeList,context.tradingNow=[0]*len(context.Securities),[0]*len(context.Securities),[0]*len(context.Securities),[0]*len(context.Securities) #list of zeros indicating that no stocks should currently be trading\n #(Note that all stocks should be sold at end of every tradinng day.) 
", "def beam_pipeline_args(self):\n return self._beam_pipeline_args", "def run(self):\n self.set_pipeline()\n self.pipeline.fit(self.X, self.y)", "def before_trading_start(context, data):\n context.output = pipeline_output('my_pipeline')", "def get_pipeline(features, to_matrix=True):\n feature_names = []\n for feature in features:\n feature_names += feature[1].FEATS\n if to_matrix:\n return Pipeline(features + [('transform', ToMatrix(features=feature_names)), ('norm', MinMaxScaler())])\n else:\n return Pipeline(features)", "def run(self):\n self.pipeline = self.set_pipeline()\n self.pipeline.fit(self.X,self.y)\n return self", "def get_pipeline(self, y, n_quantiles=None):\n\n if n_quantiles is None:\n n_quantiles = _n_samples(y)\n\n self.pipe = _make_pipeline(estimator=self._regressor,\n transform=self.pipeline_transform,\n n_targets=_n_targets(y),\n random_state=self.random_state,\n verbose=self.verbose,\n n_jobs=self.n_jobs,\n cv=self.cv,\n memory=self.pipeline_memory,\n n_quantiles=n_quantiles,\n chain_order=self.chain_order,\n n_estimators=self.n_regressors,\n target_index=self.target_index,\n boosting_loss=self.boosting_loss,\n regularization=self.line_search_regularization,\n line_search_options=self.line_search_options)", "def populate_data_channel_coders(stages, pipeline_context):\n # type: (Iterable[Stage], TransformContext) -> Iterable[Stage]\n for stage in stages:\n for transform in stage.transforms:\n if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,\n bundle_processor.DATA_OUTPUT_URN):\n if transform.spec.urn == bundle_processor.DATA_INPUT_URN:\n sdk_pcoll_id = only_element(transform.outputs.values())\n else:\n sdk_pcoll_id = only_element(transform.inputs.values())\n pipeline_context.add_data_channel_coder(sdk_pcoll_id)\n\n return stages", "def convert(self, data, cf_cap=None):\n\n cf_cap = default(cf_cap, tuple(stage.meta.n_caps-1 for stage in self.stages))\n assert len(cf_cap) == len(self.stages)\n\n assert np.size(data, -1) == self.stages[0].meta.n_diff\n base_shape = np.shape(data)[:-1]\n base_len = len(base_shape)\n\n result = []\n colors = ['g','c','b']\n\n for stage, cf_ii in zip(self.stages, cf_cap):\n meta = stage.meta\n cs_ii = list(range(0, cf_ii)) + list(range(cf_ii+1, meta.n_caps))\n p_map = pipe_map(meta.n_caps - 1, meta.n_refs,\n differential=meta.differential)\n\n thres = stage.thres[(np.newaxis,) * (base_len + 1) + (Ellipsis,)]\n code = np.diff(data, axis=-1, keepdims=True) if meta.differential else data\n code = np.sum(code[..., np.newaxis] >= thres, axis=-1)\n\n diff_idx = (1,) * base_len + (meta.n_diff,)\n diff_idx = np.reshape(list(range(meta.n_diff)), diff_idx)\n refs_idx = p_map[:, code, diff_idx]\n\n refs_idx = np.transpose(refs_idx, tuple(range(1, base_len + 1)) + (0, -1,))\n result.append(refs_idx)\n\n g = stage.caps[cs_ii, ...] 
/ stage.caps[[cf_ii], ...]\n g = g[(np.newaxis,) * base_len + (Ellipsis,)]\n\n cs_ii = np.reshape(cs_ii, (1,) * base_len + (len(cs_ii), 1,))\n refs = stage.refs[cs_ii, refs_idx, diff_idx[np.newaxis, ...]]\n\n data = data + np.sum(g*(data[..., np.newaxis, :] - refs), axis=-2)\n data = data * stage.eff + (1 - stage.eff) * stage.common_mode\n\n thres = self.tail[(np.newaxis,) * base_len + (Ellipsis,)]\n code = np.diff(data, axis=-1, keepdims=True) if meta.differential else data\n code = np.sum(code[..., np.newaxis] >= thres, axis=(-1, -2,))\n\n return tuple(result), code", "def pipeline_artifact(self):\n pass", "def pipeline(self):\n return self._pipeline", "def pipeline(self):\n return self._pipeline", "def _augment_pipeline_cfg(self):", "def test_fit(self, pipeline):\n pipeline.fit(X, Y)", "def fit(self, data: pd.DataFrame) -> None:\n self.pipeline = Pipeline([\n ('pipeline', FeatureUnion([\n ('categorical', self.build_categorical_pipeline()),\n ('numerical', self.build_numerical_pipeline())\n ]))\n ])\n self.pipeline.fit(data)", "def test_export_pipeline():\n tpot_obj = TPOTClassifier()\n pipeline = creator.Individual.\\\n from_string(\"KNeighborsClassifier(CombineDFs(GradientBoostingClassifier(input_matrix, 38.0, 0.87), SelectKBest(input_matrix, 5)), 18, 33)\", tpot_obj._pset)\n\n expected_code = \"\"\"import numpy as np\n\nfrom sklearn.ensemble import GradientBoostingClassifier, VotingClassifier\nfrom sklearn.feature_selection import SelectKBest, f_classif\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom sklearn.preprocessing import FunctionTransformer\n\n# NOTE: Make sure that the class is labeled 'class' in the data file\ntpot_data = np.recfromcsv('PATH/TO/DATA/FILE', delimiter='COLUMN_SEPARATOR', dtype=np.float64)\nfeatures = np.delete(tpot_data.view(np.float64).reshape(tpot_data.size, -1), tpot_data.dtype.names.index('class'), axis=1)\ntraining_features, testing_features, training_classes, testing_classes = \\\\\n train_test_split(features, tpot_data['class'], random_state=42)\n\nexported_pipeline = make_pipeline(\n make_union(\n make_union(VotingClassifier([('branch',\n GradientBoostingClassifier(learning_rate=1.0, max_features=1.0, n_estimators=500)\n )]), FunctionTransformer(lambda X: X)),\n SelectKBest(k=5, score_func=f_classif)\n ),\n KNeighborsClassifier(n_neighbors=5, weights=\"distance\")\n)\n\nexported_pipeline.fit(training_features, training_classes)\nresults = exported_pipeline.predict(testing_features)\n\"\"\"\n\n assert expected_code == export_pipeline(pipeline)", "def test_chain(self):\n self._test_chain(self.array_dense,\n ['min-max', 'pca', 'min-max', 'rbf', 'svm'],\n [{'feature_range': (-5, 5)}, {},\n {'feature_range': (0, 1)}, {}, {}],\n y=self.labels)", "def pipeline(self) -> Pipeline:\n if self._to_pipeline is None:\n raise AttributeError(\n \"pipeline not available because `to_pipeline` was not set on __init__.\"\n )\n return self._to_pipeline(self)", "def pipeline_logreg():\n\n\n '''\n Setting up a pipeline\n Pipeline 1 - SelectKBest and Logistic Regression (non-neg only)\n PRIMARY_MERCHANT_NAME\n Pipeline 1; 2020-04-29 11:02:06\n {'feature_selection__k': 5, 'reg__max_iter': 800}\n Overall score: 0.3696\n Best accuracy with parameters: 0.34202115158636903\n Pipeline 1; 2020-05-01 09:44:29\n {'feature_selection__k': 8, 'reg__max_iter': 800}\n Overall score: 0.5972\n Best accuracy with parameters: 0.605607476635514\n CITY\n Pipeline 1; 2020-05-04 
14:38:23 Full Set\n {'feature_selection__k': 8, 'reg__max_iter': 800}\n Overall score: 0.7953\n Best accuracy with parameters: 0.8155763239875389\n ----\n Pipeline 1; 2020-05-04 17:00:59 Sparse Set\n {'feature_selection__k': 5, 'reg__max_iter': 800}\n Overall score: 0.4706\n Best accuracy with parameters: 0.5158026283963557\n\n #SelectKBest picks features based on their f-value to find the features that can optimally predict the labels\n #F_CLASSIFIER;FOR CLASSIFICATION TASKS determines features based on the f-values between features & labels;\n #Chi2: for regression tasks; requires non-neg values\n #other functions: mutual_info_classif; chi2, f_regression; mutual_info_regression\n\n takes unscaled numerical so far and minmax scaled arguments\n #numerical and minmax scaled leads to the same results being picked\n f_classif for classification tasks\n chi2 for regression tasks\n '''\n\n #Create pipeline with feature selector and regressor\n #replace with gradient boosted at this point or regressor\n pipe = Pipeline([\n ('feature_selection', SelectKBest(score_func = chi2)),\n ('reg', LogisticRegression(random_state = 15))])\n\n #Create a parameter grid\n #parameter grids provide the values for the models to try\n #PARAMETERS NEED TO HAVE THE SAME LENGTH\n params = {\n 'feature_selection__k':[5, 6, 7, 8, 9],\n 'reg__max_iter':[800, 1000],\n 'reg__C':[10, 1, 0.1]\n }\n\n #Initialize the grid search object\n grid_search_lr = GridSearchCV(pipe, param_grid = params)\n\n #best combination of feature selector and the regressor\n #grid_search.best_params_\n #best score\n #grid_search.best_score_\n\n #Fit it to the data and print the best value combination\n print(f\"Pipeline logreg; {dt.today()}\")\n print(grid_search_lr.fit(X_train, y_train).best_params_)\n print(\"Overall score: %.4f\" %(grid_search_lr.score(X_test, y_test)))\n print(f\"Best accuracy with parameters: {grid_search_lr.best_score_}\")\n\n return grid_search_lr", "def build_naive_bayes():\n nb_pipeline = None\n \n nb_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', ComplementNB()),\n ])\n \n return nb_pipeline", "def model_pipeline(catnums):\n pipe = make_pipeline(\n Imputer(strategy='most_frequent'),\n OneHotEncoder(categorical_features=catnums, sparse=False),\n PolynomialFeatures(),\n Ridge(alpha=25)\n )\n return pipe", "def setup(self):\n ### Set Names\n # Name of the pipeline reduction step\n self.name='coadd'\n # Shortcut for pipeline reduction step and identifier for\n # saved file names.\n self.procname = 'coadd'\n # Set Logger for this pipe step\n self.log = logging.getLogger('pipe.step.%s' % self.name)\n ### Set Parameter list\n # Clear Parameter list\n self.paramlist = []\n # Append parameters\n self.paramlist.append(['kernel','square',\n 'Specifies the kernel used to determine spreading of input pixels onto output pixels \\\n - options are square, point, gaussian, smoothing, tophat'])\n self.paramlist.append(['pixfrac', 1.,\n 'The fraction of an output pixel(s) that an input pixel\\'s flux is confined to'])\n self.paramlist.append(['resolution', 1.,\n 'Pixel scale divisor for output image (higher gives more resolution, lower gives less)'])\n self.paramlist.append(['pad', 0,\n 'Extra padding outside maximum extent of inputs'])\n self.paramlist.append(['fillval', np.nan,\n 'Value for filling in the area(s) in the output where there is no input data'])\n self.paramlist.append(['drizzleweights','exptime',\n 'How each input image should be weighted when added to the output \\\n - options are exptime, expsq and 
uniform'])\n self.paramlist.append(['outangle',0.,\n 'Output angle of drizzled image (currently not functional)'])", "def preprocess(self, data_group):\n\n input_data = data_group.preprocessed_case\n\n # Split Channels\n if self.channels is None:\n channel_subset = np.copy(input_data)\n else:\n all_channels = set(range(input_data.shape[-1]))\n remaining_channels = list(all_channels.difference(set(self.channels)))\n reminaing_channel_subset = np.take(input_data, remaining_channels, axis=-1)\n channel_subset = np.take(input_data, self.channels, axis=-1)\n\n # Merge Target Channels\n if self.merge_method == 'maximum':\n channel_subset = np.max(channel_subset, axis=-1)[..., np.newaxis]\n\n # Join Channels\n if self.channels is None:\n output_data = channel_subset\n else:\n output_data = np.concatenate((reminaing_channel_subset, channel_subset), axis=-1)\n\n data_group.preprocessed_case = output_data\n self.output_data = output_data", "def low_rank_pca(cube, Y, cv, n_components=400, reg_rank=[6],\n fit_intercept=True, pen='rank'):\n if pen == 'rank':\n clf = LinearRegression(fit_intercept=True)\n pca_ = PCA(n_components=reg_rank[0])\n tuned_parameters = [{'pca__n_components': reg_rank}]\n clf = Pipeline([('pca', pca_), ('reg', clf)])\n elif pen == 'ridge':\n clf = RidgeCV(fit_intercept=fit_intercept)\n elif pen == 'lasso':\n clf = LassoLarsCV(fit_intercept=fit_intercept)\n elif pen == 'trees':\n clf = ExtraTreesRegressor(n_estimators=10, max_features='auto',\n random_state=0)\n elif pen == 'knn':\n clf = KNeighborsRegressor()\n else:\n clf = LinearRegression(fit_intercept=fit_intercept)\n pca = PCA(n_components=n_components)\n W = cube.T.reshape(n_clusters, n_subjects * n_ref).T\n w = pca.fit_transform(W)\n sse = np.zeros(Y.shape[1])\n for train, test in cv:\n Y0 = Y[train].mean(0)\n y = pca.transform(Y[train] - Y0)\n proj = []\n for x, y_train in zip(w.T, y.T):\n x_ = x.reshape(n_ref, n_subjects).T\n x_train, x_test = x_[train], x_[test]\n if pen in ['ridge', 'lasso'] or len(reg_rank) == 1:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n clf.fit(x_train, y_train)\n proj.append(clf.predict(x_test))\n else:\n pclf = GridSearchCV(clf, tuned_parameters, cv=5,\n scoring='mean_squared_error', n_jobs=1)\n pclf.fit(x_train, y_train)\n proj.append(pclf.predict(x_test))\n proj = np.array(proj).T\n Y_pred = pca.inverse_transform(proj) + Y0\n sse += np.mean((Y[test] - Y_pred) ** 2, 0)\n return sse", "def __init__(self, pipeline=PIPELINE, name=\"fake_estimator\"):\n super().__init__(pipeline=pipeline, name=name)", "def _create_pipeline(self) -> codepipeline.Pipeline:\n source_output = codepipeline.Artifact()\n build_output = codepipeline.Artifact()\n return codepipeline.Pipeline(\n self,\n 'Pipeline',\n stages=[\n self._create_source_stage('Source', source_output),\n # self._create_image_build_stage(\n # 'Build', source_output, build_output),\n # self._create_deploy_stage('Deploy', build_output)\n ]\n )", "def create_pipeline(self, train: LAMLDataset) -> LAMLTransformer:\n raise NotImplementedError", "def encode_dataset(batch_size,downscale_factor,dataset, pooling_function):\n \n n,l=np.shape(dataset)\n f=downscale_factor\n n_batches=n//batch_size\n batches=np.linspace(1,n_batches,n_batches, dtype=int) * batch_size\n\n gaf = GramianAngularField(image_size=1., method='summation')\n \n print('Encoding started...')\n for p in range(n_batches):\n if p==0:\n X_gaf = gaf.transform(dataset[0:batches[p],:])\n sample=block_reduce(X_gaf[0], block_size=(f, f), func=pooling_function)\n l_red = 
sample.shape[0]\n X_gaf_red = np.zeros((n,l_red,l_red))\n print('output 3D Matrix shape: ', np.shape(X_gaf_red))\n\n j=0\n for i in range(0,batches[p]):\n X_gaf_red[i] = block_reduce(X_gaf[j], block_size=(f, f) , func=pooling_function)\n j+=1\n\n else: \n X_gaf = gaf.transform(X[batches[p-1]:batches[p],:])\n\n j=0\n for i in range(batches[p-1],batches[p]):\n X_gaf_red[i] = block_reduce(X_gaf[j], block_size=(f, f) , func=pooling_function)\n j+=1\n \n print('Encoding successful!')\n print('#####################################')\n \n return X_gaf_red", "def pipeline_rfe():\n\n\n\n #cols = [c for c in bank_df if bank_df[c].dtype == 'int64' or 'float64']\n #X_train = bank_df[cols].drop(columns = ['primary_merchant_name'], axis = 1)\n #y_train = bank_df['primary_merchant_name']\n #X_test = bank_df[cols].drop(columns = ['primary_merchant_name'], axis = 1)\n #y_test = bank_df['primary_merchant_name']\n\n #build a logistic regression and use recursive feature elimination to exclude trivial features\n log_reg = LogisticRegression(C = 1.0, max_iter = 2000)\n # create the RFE model and select most striking attributes\n rfe = RFE(estimator = log_reg, n_features_to_select = 8, step = 1)\n rfe = rfe.fit(X_train, y_train)\n #selected attributes\n print('Selected features: %s' % list(X_train.columns[rfe.support_]))\n print(rfe.ranking_)\n #following df contains only significant features\n X_train_rfe = X_train[X_train.columns[rfe.support_]]\n X_test_rfe = X_test[X_test.columns[rfe.support_]]\n #log_reg_param = rfe.set_params(C = 0.01, max_iter = 200, tol = 0.001)\n return X_train_rfe, X_test_rfe", "def _apply_pca(self, X):\n newX = np.reshape(X, (-1, X.shape[2]))\n pca = sklearnPCA(n_components=self.num_components, whiten=True)\n newX = pca.fit_transform(newX)\n newX = np.reshape(newX, (X.shape[0], X.shape[1], self.num_components))\n return newX", "def build_logistic_regr():\n logistic_pipeline = None\n\n logistic_pipeline = Pipeline([\n ('vect', CountVectorizer()),\n ('clf', LogisticRegression()), \n ])\n \n return logistic_pipeline", "def pipeline(self):\n # gotta avoid circular imports by deferring\n from .pipeline import Pipeline\n return Pipeline().from_source(self._collection)", "def q4():\n\n # Selecionando só as colunas numéricas:\n num_features = countries.select_dtypes(exclude=['object'])\n\n # Lista de etapas do pipeline:\n pipe = Pipeline(steps=[\n (\"imputer\", SimpleImputer(strategy=\"median\")), \n ('scale', StandardScaler())])\n\n # Aplicando o pipeline somente nas variáveis dos tipos especificados:\n pipe.fit(num_features)\n\n # Aolicando o mesmo pipeline nos dados test_country:\n pipe_transform = pipe.transform([test_country[2:]])\n\n # Valor da variável Arable após o pipeline:\n answer = pipe_transform[:, num_features.columns.get_loc(\"Arable\")]\n return np.round(answer.item(), 3)", "def __iadd__(self, pipe_element):\n if isinstance(pipe_element, Preprocessing):\n self.preprocessing = pipe_element\n elif isinstance(pipe_element, CallbackElement):\n pipe_element.needs_y = True\n self.elements.append(pipe_element)\n else:\n if isinstance(pipe_element, PipelineElement) or issubclass(\n type(pipe_element), PhotonNative\n ):\n self.elements.append(pipe_element)\n else:\n raise TypeError(\"Element must be of type Pipeline Element\")\n return self", "def compositionalActflowDecodings(data, nov_actflow_data, prac_actflow_data, effects=False, featsel=True, ncvs=1, nproc=5):\n\n nSubjs = data.shape[2]\n stats = np.zeros((1,))\n \n ncond = data.shape[1]\n\n nsamples = nSubjs * ncond\n nfeatures = 
data.shape[0]\n\n # Label array for supervised learning\n labels = np.tile(range(ncond),nSubjs)\n subjarray = np.repeat(range(nSubjs),ncond)\n\n svm_mat = np.zeros((nsamples,nfeatures))\n nov_svm_mat = np.zeros((nsamples,nfeatures))\n prc_svm_mat = np.zeros((nsamples,nfeatures))\n samplecount = 0\n scount = 0\n for subj in range(nSubjs):\n origdata = data[:,:,scount]\n nov_data = nov_actflow_data[:,:,scount]\n prc_data = prac_actflow_data[:,:,scount]\n svm_mat[samplecount:(samplecount+ncond),:] = origdata.T\n nov_svm_mat[samplecount:(samplecount+ncond),:] = nov_data.T\n prc_svm_mat[samplecount:(samplecount+ncond),:] = prc_data.T\n\n scount += 1\n samplecount += ncond\n\n # Spatially demean matrix across features\n# samplemean = np.mean(svm_mat,axis=1)\n# samplemean.shape = (len(samplemean),1)\n# svm_mat = svm_mat - samplemean\n#\n# samplemean = np.mean(actflow_svm_mat,axis=1)\n# samplemean.shape = (len(samplemean),1)\n# actflow_svm_mat = actflow_svm_mat - samplemean\n\n scores, rmatch, rmismatch = compositionalActflowRandomSplitLOOBaselineCV(ncvs, svm_mat, nov_svm_mat, prc_svm_mat, labels, subjarray, featsel=featsel, nproc=nproc)\n# stats = np.mean(scores)\n stats = scores \n if effects: \n return stats, rmatch,rmismatch\n else:\n return stats", "def to_sklearn(self):\n import sklearn.pipeline as skp\n\n steps = []\n for step in self.steps:\n steps += [(step[0], step[1].to_sklearn())]\n return skp.Pipeline(steps)", "def __init__(self, **kwargs):\n super(VeryCleverBeamsplitter, self).__init__(**kwargs)\n self.shader_source = IL_SHADER_SOURCE\n self.centre = [0.5, 0.5]\n self.blazing_function = np.linspace(0,1,32)\n self.zernike_coefficients = np.zeros(12)", "def _fit_encoder(self, frame, prop, encoder_type=\"category\"):\n pass", "def _process(self, data: np.ndarray) -> np.ndarray:", "def _process(self, data: np.ndarray) -> np.ndarray:", "def pv_chain(self):\n index = self._ordered_input_names.index('pv_chain')\n return self._inputs[index]", "def transform(self, inputs: list, stage: str) -> datapack.DataPack:", "def pipeline_test_data(self):\n if self.linearity:\n Detector1Pipeline.call(self.ramp_file, save_results=True, output_dir=self.output_dir, output_use_model=True,\n steps={'ipc': {'skip': True},\n 'rscd': {'skip': True},\n 'lastframe': {'save_results': True,\n 'output_dir': self.output_dir},\n 'dark_current': {'save_results': True,\n 'output_dir': self.output_dir},\n #'linearity': {'skip': True},\n 'jump': {'save_results': True,\n 'output_dir': self.output_dir}})\n else:\n Detector1Pipeline.call(self.ramp_file, save_results=True, output_dir=self.output_dir, output_use_model=True,\n steps={'ipc': {'skip': True},\n 'rscd': {'skip': True},\n 'lastframe': {'save_results': True,\n 'output_dir': self.output_dir},\n 'dark_current': {'save_results': True,\n 'output_dir': self.output_dir},\n 'linearity': {'skip': True},\n 'jump': {'save_results': True,\n 'output_dir': self.output_dir}})\n\n self.pre_dark_file = os.path.join(self.output_dir, 'step_lastframe.fits')\n self.post_dark_file = os.path.join(self.output_dir, 'step_dark_current.fits')\n self.jump_file = os.path.join(self.output_dir, 'step_jump.fits')\n self.rate_file = os.path.join(self.output_dir, 'step_rate.fits')", "def boost(self):\n ch = self.gamma\n sh = self.gamma*self.beta\n return( np.array( [ [ch, -sh], [-sh, ch] ] ) )", "def classical_preprocessing(*args, **kwargs):\r\n qnode.construct(args, kwargs)\r\n return qml.math.stack(qnode.qtape.get_parameters())", "def compositionalActflowRandomSplitLOOBaselineCV(ncvs, svm_mat, 
nov_svm_mat, prc_svm_mat, labels, subjarray, featsel=True, nproc=5):\n \n ntasks = len(np.unique(labels))\n nsamples = svm_mat.shape[0]\n nsubjs = nsamples/ntasks\n\n subjects = np.unique(subjarray)\n indices = np.arange(nsamples)\n \n numsubjs_perfold = 1\n if nsubjs%numsubjs_perfold!=0: \n raise Exception(\"Error: Folds don't match number of subjects\")\n \n nfolds = nsubjs/numsubjs_perfold\n subj_array_folds = subjarray.copy()\n \n inputs = [] \n \n for fold in range(nfolds):\n #test_subjs = np.random.choice(subj_array_folds,numsubjs_perfold,replace=False)\n test_subjs = [subjects[fold]]\n train_subjs_all = np.delete(subjects,test_subjs)\n for cv in range(ncvs):\n # Randomly sample half of train set subjects for each cv (CV bootstrapping)\n# train_subjs = np.random.choice(train_subjs_all,\n# int(np.floor(len(train_subjs_all)*(4.0))),\n# replace=True)\n train_subjs = train_subjs_all\n\n train_ind = []\n for subj in train_subjs:\n train_ind.extend(np.where(subjarray==subj)[0])\n\n test_ind = []\n for subj in test_subjs:\n test_ind.extend(np.where(subjarray==subj)[0])\n \n train_ind = np.asarray(train_ind)\n test_ind = np.asarray(test_ind)\n\n trainset = prc_svm_mat[train_ind,:]\n testset = nov_svm_mat[test_ind,:]\n orig_training = svm_mat[train_ind,:]\n\n ## Normalize trainset and testset\n trainmean = np.mean(prc_svm_mat[train_ind,:],axis=0)\n trainmean.shape = (1,len(trainmean))\n trainstd = np.std(prc_svm_mat[train_ind,:],axis=0)\n trainstd.shape = (1,len(trainstd))\n #\n ## Normalize trainset and testset\n testmean = np.mean(nov_svm_mat[train_ind,:],axis=0)\n testmean.shape = (1,len(testmean))\n teststd = np.std(nov_svm_mat[train_ind,:],axis=0)\n teststd.shape = (1,len(teststd))\n\n trainset = np.divide((trainset - trainmean),trainstd)\n testset = np.divide((testset - testmean),teststd)\n\n ######## FEATURE SELECTION & REDUCTION\n ## Feature selection and downsampling\n trainlabels = labels[train_ind]\n testlabels = labels[test_ind]\n unique_labels = np.unique(labels)\n feat1_labs = np.where(trainlabels==0)[0]\n feat2_labs = np.where(trainlabels==1)[0]\n # Perform t-test\n t, p = stats.ttest_rel(orig_training[feat1_labs,:],orig_training[feat2_labs,:],axis=0)\n #t, p = stats.ttest_rel(trainset[feat1_labs,:],trainset[feat2_labs,:],axis=0)\n h0, qs = mc.fdrcorrection0(p)\n\n ### BEGIN REGULAR FEATURE SELECTION ###\n if featsel:\n thresh = 0.05\n feat_mask = np.where(qs < thresh)[0]\n feat_mask = np.intersect1d(feat_mask,np.where(np.isnan(trainset[0,:])==False)[0]) # make sure no bad values are included\n inputs.append((trainset[:,feat_mask],testset[:,feat_mask],labels[train_ind],labels[test_ind])) \n ### END REGULAR FEATURE SELECTION ###\n\n# ### BEGIN DIGIT REPRESENTATION FEATURE SELECTION ###\n# # Construct feature masks\n# feat1_mask = np.multiply(t<0,qs<0.05)\n# feat2_mask = np.multiply(t>0,qs<0.05)\n# #feat1_mask = t>0\n# #feat2_mask = t<0\n# \n# # Downsample training set into original vertices into 2 ROI signals\n# trainset_downsampled = np.zeros((trainset.shape[0],2))\n# trainset_downsampled[:,0] = np.nanmean(trainset[:,feat1_mask],axis=1)\n# trainset_downsampled[:,1] = np.nanmean(trainset[:,feat2_mask],axis=1)\n# # Downsample test set into original vertices\n# testset_downsampled = np.zeros((testset.shape[0],2))\n# testset_downsampled[:,0] = np.nanmean(testset[:,feat1_mask],axis=1)\n# testset_downsampled[:,1] = np.nanmean(testset[:,feat2_mask],axis=1)\n# if np.nansum(feat1_mask)==0 or np.nansum(feat2_mask)==0:\n# print 'not running feature selection'\n# 
inputs.append((trainset,testset,labels[train_ind],labels[test_ind]))\n# else:\n# inputs.append((trainset_downsampled,testset_downsampled,labels[train_ind],labels[test_ind]))\n# ### END DIGIT REPRESENTATION FEATURE SELECTION ###\n else:\n inputs.append((trainset,testset,labels[train_ind],labels[test_ind])) \n \n subj_array_folds = np.delete(subj_array_folds,test_subjs)\n \n pool = mp.Pool(processes=nproc)\n scores = pool.starmap_async(_decoding,inputs).get()\n pool.close()\n pool.join()\n\n acc = []\n r_match = []\n r_mismatch = []\n for score in scores:\n acc.extend(score[0])\n r_match.append(score[1])\n r_mismatch.append(score[2])\n \n return acc, r_match, r_mismatch", "def process(self, mat):", "def before_trading_start(context, data):\r\n context.output = pipeline_output('pipeline')\r\n\r\n # sort by earning yield\r\n context.output = context.output.sort(\r\n columns='Free Cash Flow', ascending=False)\r\n\r\n # get top 20 stocks as security list\r\n context.eligible_assets = context.output.iloc[:19]", "def add_processor(self, termprocessor):\n self.pipeline.append(termprocessor)", "def predict_collect(self, src, collector): # real signature unknown; restored from __doc__\n pass", "def _apply_encoder(self, frame, prop, encoder, encoder_type=\"category\"):\n pass", "def run_preprocessing(self, serie):\n pass", "def _fit(self):\n # Paramters of the steps\n param_grid = {\n \"converter__to_convert\": [True, False],\n \"pca__n_components\": [0.3, 0.5, 0.7, 0.9],\n \"regressor__estimator__max_depth\": list(range(1, 5)),\n }\n # Fit with pipeline\n steps = [\n (\"converter\", _RateConverter()),\n (\"scaler\", MinMaxScaler()),\n (\"pca\", PCA(random_state=0)),\n (\"regressor\", MultiOutputRegressor(LGBMRegressor(n_estimators=200, random_state=0))),\n ]\n tscv = TimeSeriesSplit(n_splits=5).split(self._X_train)\n pipeline = GridSearchCV(Pipeline(steps=steps), param_grid, n_jobs=-1, cv=tscv)\n pipeline.fit(self._X_train, self._Y_train)\n # Update regressor\n self._pipeline = pipeline\n # Update param\n self._param.update(**{k: type(v) for (k, v) in steps})", "def pre_process(self, dataset):\n\n # np.empty creates an empty array only. 
You have to replace this with your code.\n X = np.empty((0,0))\n y = np.empty((0))\n\n if dataset == 0:\n # Implement for the abalone dataset\n df = pd.DataFrame(columns=['sex', 'length', 'diameter', 'height', 'whole_weight', 'shucked_weight', 'viscera_weight', 'shell_weight', 'rings'])\n count = 0\n\n with open('Dataset.data') as file: # reading data from file\n data = file.read()\n\n data = data.split('\\n') # split data into different rows\n data = data[:-1] # last one is empty\n for row in data:\n row = row.split()\n df.loc[count] = row # add in dataframe\n count += 1\n\n df['M'] = np.where(df.sex=='M', 1,0) # genders are turned to a one hot encoding\n df['F'] = np.where(df.sex=='F', 1,0)\n df['I'] = np.where(df.sex=='I', 1,0)\n df = df.drop(['sex'], axis=1)\n df = df.dropna()\n\n df = df.sample(frac=1).reset_index(drop=True) # shuffle dataframe\n\n X = df.drop(['rings'], axis=1)\n X = X.values\n X = X.astype(float)\n y = df['rings'].values\n y = y.astype(float)\n\n elif dataset == 1:\n # Implement for the video game dataset\n df = pd.read_csv('VideoGameDataset - Video_Games_Sales_as_at_22_Dec_2016.csv') # read csv directly into a dataframe\n df1 = df[['Critic_Score', 'User_Score', 'Global_Sales']]\n df1 = df1.dropna()\n df1 = df1[df1.User_Score != 'tbd']\n\n df1 = df1.sample(frac=1).reset_index(drop=True) # shuffle rows\n\n X = df1.drop(['Global_Sales'], axis=1)\n X = X.values\n X = X.astype(float)\n y = df1['Global_Sales'].values\n y = y.astype(float)\n\n elif dataset == 2:\n # Implement for the banknote authentication dataset\n df = pd.DataFrame(columns=['variance', 'skewness', 'curtosis', 'entropy', 'class'])\n count = 0\n\n with open('data_banknote_authentication.txt') as file: # reading file \n data = file.read()\n data = data.split('\\n')\n data = data[:-1]\n for row in data:\n row = row.split(',')\n df.loc[count] = [float(elt) for elt in row[:-1]] + [int(row[-1])] # last column has class so it is int rest are float\n count += 1\n\n df = df.sample(frac=1).reset_index(drop=True) # shuffle dataset\n\n X = df.drop(['class'], axis=1)\n X = X.values\n y = df['class'].values\n y = y.astype(int)\n\n return X, y", "def preprocess(self, cfg_pipeline):\n return", "def _process(self, data: np.ndarray) -> np.ndarray:\n return data[..., 1] * self.scale", "def transform(self, data: np.ndarray) -> np.ndarray:\n for i in range(self.n_layers):\n new_data = np.nan_to_num(data)\n new_data = self.pca_list[i].transform(X=new_data)\n if i != self.n_layers - 1:\n new_data = self.power_list[i].inverse_transform(new_data)\n data = new_data\n return data", "def get_loading_pipeline(pipeline):\n loading_pipeline = []\n for transform in pipeline:\n is_loading = is_loading_function(transform)\n if is_loading is None: # MultiScaleFlipAug3D\n # extract its inner pipeline\n if isinstance(transform, dict):\n inner_pipeline = transform.get('transforms', [])\n else:\n inner_pipeline = transform.transforms.transforms\n loading_pipeline.extend(get_loading_pipeline(inner_pipeline))\n elif is_loading:\n loading_pipeline.append(transform)\n assert len(loading_pipeline) > 0, \\\n 'The data pipeline in your config file must include ' \\\n 'loading step.'\n return loading_pipeline", "def build_categorical_pipeline(self) -> Pipeline:\n pipeline = Pipeline([\n ('extract_data', FunctionTransformer(self.get_categorical_features)),\n ('impute', SimpleImputer(missing_values=np.nan, strategy='median')),\n ('ohe', OneHotEncoder(handle_unknown='ignore', sparse=False))\n ])\n return pipeline" ]
[ "0.6081324", "0.59062445", "0.5773329", "0.56232554", "0.5604073", "0.56040514", "0.56028223", "0.5570722", "0.5512372", "0.54467845", "0.544168", "0.54007536", "0.5379913", "0.5364416", "0.53506017", "0.5311731", "0.5311245", "0.52821445", "0.5277087", "0.5264683", "0.52084905", "0.5206279", "0.52017844", "0.52000225", "0.5142435", "0.5137535", "0.50947934", "0.5064849", "0.5040154", "0.5034462", "0.5025374", "0.50019145", "0.49902725", "0.49800217", "0.49767503", "0.49744302", "0.4965227", "0.49631718", "0.49224734", "0.4907021", "0.48961234", "0.488805", "0.48878318", "0.48807082", "0.487908", "0.4865678", "0.48576707", "0.48450908", "0.48361894", "0.48321185", "0.48284695", "0.48232058", "0.48232058", "0.48192316", "0.48182017", "0.47451094", "0.47275084", "0.471437", "0.4710549", "0.47054985", "0.47028232", "0.4689277", "0.46728057", "0.4656381", "0.46547124", "0.46420205", "0.4641988", "0.4634001", "0.4630328", "0.46288133", "0.46248728", "0.46180442", "0.4617433", "0.46121287", "0.46090958", "0.4603483", "0.46014702", "0.45990378", "0.45915994", "0.4579545", "0.4579545", "0.45740137", "0.45689625", "0.4567389", "0.45640126", "0.45606643", "0.45602417", "0.45525232", "0.45414355", "0.45372626", "0.45295337", "0.45277074", "0.4524207", "0.45230427", "0.45152363", "0.45106563", "0.45069408", "0.45067823", "0.45019665", "0.45001486" ]
0.55851984
7
Called every day before market open.
def before_trading_start(context, data): context.output = pipeline_output('pipeline') # sort by earning yield context.output = context.output.sort( columns='Free Cash Flow', ascending=False) # get top 20 stocks as security list context.eligible_assets = context.output.iloc[:19]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_market_info(self):\n pass", "def isMarketOpen(self):\n if not self.normalDay:\n return False\n now = datetime.now()\n if now.hour >= 9 and now.hour < 16:\n if now.hour == 9 and now.minute < 30:\n return False\n return True\n return False", "def marketOpen():\n tz = conf['GLOBAL']['timezone']\n today = datetime.today().astimezone(pytz.timezone(tz))\n today_fmt = today.strftime('%Y-%m-%d')\n tdHoursURL = conf['TD']['hoursURL']\n key = conf['TD']['key']\n params = {\n 'apikey': key,\n 'date': today_fmt\n }\n\n request = requests.get(\n url=tdHoursURL,\n params=params\n ).json()\n \n \n if request['equity']['EQ']['isOpen'] is True:\n return(True)\n else:\n return(False)", "def before_trading_start(context, data):", "def updateToday(tradingDay):\n if date.today() != tradingDay.today:\n tradingDay = TradingDay(tradingDay.contractDetails)\n\n if tradingDay.isMarketOpen():\n if not tradingDay.marketOpen:\n tradingDay.marketOpen = True\n console().info(\"The Market Has Opened\")\n else:\n if tradingDay.marketOpen:\n tradingDay.marketOpen = False\n console().info(\"The Market Has Closed\")\n return tradingDay", "def before_trading_start(context, data):\r\n # These are the securities that we are interested in trading each day.\r\n context.output = pipeline_output('my_pipeline')\r\n context.equities = context.output.index.tolist()\r\n log.info(\"Stocks today\") \r\n print(context.equities)", "def onMarketUpdate(self, data):\n pass", "def _open(self):\n \n # Set initial time\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + \\\n t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n self._last = timestamp\n \n # Nothing else to do... already open", "def sellAtMarketOpen(self):\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekdays = [\"Sat\", \"Sun\"]\n\n # CHECK IF MARKET OPEN AND NOT WEEKEND\n if tm == \"08:30\" and day not in weekdays:\n\n queue_orders = self.mongo.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id, \"Order_Type\" : \"SELL\"})\n\n for order in queue_orders:\n\n # CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(order[\"Order_ID\"])\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n trade_data = {\n \"Symbol\": order[\"Symbol\"],\n \"Side\": \"SELL\",\n \"Aggregation\": order[\"Aggregation\"],\n \"Strategy\": order[\"Strategy\"],\n \"Asset_Type\": order[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n # SELL MARKET ORDER\n self.placeOrder(trade_data, order, orderType=\"MARKET\")", "def on_market(self, oid, body):\n\t\tif body['freq'] != self.freq: return\n\n\t\tticks = body['ticks']\n\t\tself._update_data(ticks)\n\n\t\tif self.t >= self.warmup:\n\t\t\tself._calculate_signals()\n\n\t\t\t# publish generated signals\n\t\t\tequity = self.total_bp\n\t\t\tbp = copy(self.avaliable_bp) # current snap_shot of buying power\n\t\t\tfor S, pos in self.pos.items():\n\t\t\t\tfor order, lvl in pos.generate_orders(equity):\n\t\t\t\t\tused_bp = self.on_order(order, lvl, bp)\n\t\t\t\t\tbp -= used_bp\n\t\t\t\t\n\t\t\t# save old strategy performance history\n\t\t\tself._pbar.update(1)\n\t\t\n\t\t# if ticks.timestamp >= self.start_dt:\n\t\t\t# self.basic_publish('next', sender=self.id)\n\n\t\tif self.t >= self.warmup:\n\t\t\tself._save_positions()", "def check_market_status():\n today_ny = 
datetime.datetime.now(pytz.timezone('America/New_York'))\n ndq = mcal.get_calendar('NASDAQ')\n open_days = ndq.schedule(start_date=today_ny - pd.Timedelta('10 days'), end_date=today_ny)\n if today_ny.date() in open_days.index:\n return open_days\n else:\n return None", "def check_market_status():\n # today = datetime.datetime.now(pytz.timezone('America/New_York')).date()\n today_utc = pd.to_datetime('now').date()\n ndq = mcal.get_calendar('NASDAQ')\n open_days = ndq.schedule(start_date=today_utc - pd.Timedelta('10 days'), end_date=today_utc)\n if today_utc in open_days.index:\n return open_days\n else:\n return None", "def initialize(context):\n context.stocks = {symbol(\"TMF\"): 0.2, symbol(\"UJB\"): 0.2, symbol(\"TQQQ\"): 0.6}\n\n context.target_leverage = 1\n\n schedule_function(\n rebalance, date_rules.every_day(), time_rules.market_open(minutes=11)\n )", "def buy_stock (self, ticker, buy_date, sell_date, amount):\n\n if self.__buy_stock_init__(ticker, buy_date, sell_date, amount) == False:\n return\n\n if self.__get_hist__() == False:\n return\n\n self.__calc_no_shares_to_buy__()\n self.__update_buy_amount__() \n self.__save_buy__()", "def grabDaily(self):\n raise NotImplemented(\"method should be redefined in a subclass\")", "async def daily(self, ctx):\r\n # TODO: Asssess whether this can be cleaned up. \r\n # As it stands, very similar to inv()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n stock = self.iex.get_held_stocks(db, company.id)\r\n inventory = []\r\n for s in stock:\r\n close = await self.get_latest_close(ctx, db, s.symbol)\r\n inventory.append([s.symbol, s.quantity, s.purchase_price, close.close, s.quantity*close.close - s.quantity*s.purchase_price ]) \r\n inv_df = pd.DataFrame(inventory, columns=['Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n inv_df['sign'] = np.where(inv_df['Current Value']>=0, '+', '-')\r\n inv_df['%'] = abs(((inv_df['Close'] - inv_df['Purchase Price']) / inv_df['Purchase Price']) * 100)\r\n inv_df['%'] = inv_df['%'].round(1)\r\n inv_df = inv_df.sort_values(['Symbol'])\r\n inv_df = inv_df[['sign', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value']]\r\n aggregated = tabulate(inv_df.values.tolist(), headers=['Δ', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n await ctx.send(f'```diff\\n{aggregated}```')", "def _fill_market_order(self, order_event):\n if order_event.quantity == 0:\n return\n fill_time = self._get_fill_time(order_event.order_time, order_event.symbol)\n sym_data = self.curr_day_data[order_event.symbol]\n direction = self._get_order_direction(order_event)\n if direction == 1:\n fill_price = sym_data['level_1_price_sell'].asof(fill_time)\n self.create_fill_event(order_event, fill_price, fill_time)\n elif direction == -1:\n fill_price = sym_data['level_1_price_buy'].asof(fill_time)\n self.create_fill_event(order_event, fill_price, fill_time)", "def calculate_signals(self, event):\n if event.type == 'MARKET':\n for s in self.symbol_list:\n highs = self.bars.get_latest_bars_values(\n s, \"high\", N=self.long_window\n )\n lows = self.bars.get_latest_bars_values(\n s, \"low\", N=self.long_window\n )\n close = self.bars.get_latest_bar_value(s, 'close')\n bar_date = self.bars.get_latest_bar_datetime(s)\n bar_date = datetime.strptime(bar_date[:-4], \"%Y-%m-%dT%H:%M:%S.%f\")\n if highs is not None and len(highs) == self.long_window and \\\n lows is not None and len(lows) == self.long_window:\n\n # close all orders 
before the end of weekend, Friday 17:00 in this case\n # uncomment this chunk of code if not\n # if bar_date.weekday() == 4 and bar_date.hour is 17:\n # action = ActionEvent(s, 'CLOSE_ALL')\n # self.events.put(action)\n # return\n R_max = np.max(highs[-self.short_window:])\n R_min = np.min(lows[-self.short_window:])\n R = (R_max - R_min) * 10000\n R = round(R, 1)\n\n R2_max = np.max(highs[-self.long_window:])\n R2_min = np.min(lows[-self.long_window:])\n R2 = (R2_max - R2_min) * 10000\n R2 = round(R2, 1)\n\n real_date = bar_date+timedelta(hours=4)\n # print('<----- K 线时间 {} -----> (当前实际时间是 {} 的第一秒)'.format(bar_date, real_date))\n # print('过去 {} 个小时, 最高价是 {}, 最低价是 {}. 波动值 R2 是 {} 个 Pips.'.format( 4*self.long_window, R2_max, R2_min, R2))\n if R2 < self.c1 or R2 > self.c2:\n # print('当前 R2 波动值不满足限制条件: {} < R2 < {}'.format(self.c1, self.c2))\n # print('不交易,略过。\\n\\n')\n return\n\n # print('当前 R2 波动值满足限制条件: {} < R2 < {} \\n'.format(self.c1, self.c2))\n # print('过去 {} 个小时, 最高价是 {}, 最低价是 {}. 波动值 R 是 {} 个 Pips.'.format( 4*self.short_window, R_max, R_min, R))\n\n buy_under = round(self.k1 * R, 1)\n limit_price = round(close - buy_under/10000, 5)\n # print('当前价格是 {}. {} 倍的 R 是 {} 个 pips '.format(close,self.k1, buy_under))\n # print('开一个限价的买单 (Limit Buy Order) 在当前价格 {} 的 {} 个 pips 之下,即 {}.'.format(close, buy_under, limit_price))\n\n profit_target = round(self.k2 * R, 1)\n # print('目标盈利 ( profit_target ) 是 {} 倍的 R,即 {} 个 pips.'.format(self.k2, profit_target))\n profit_target = round(limit_price + profit_target / 10000, 5)\n # print('即, {}'.format(profit_target))\n # print('止损 (stop_loss) 为固定的 {} 个 pips.'.format(self.sl))\n stop_loss = round(limit_price - self.sl / 10000, 5)\n # print('即, {}'.format(stop_loss))\n signal_type = 'LONG'\n signal = SignalEvent(s, real_date, signal_type, 'LMT',\n limit_price, stop_loss, profit_target)\n self.events.put(signal)", "def everytime(self):\n return True", "def add_stock_info(day, stock_price):\n global stock_info\n // your code here", "def sell_all_holdings(self, date):\n for s in self.symbol_list:\n if self.current_positions[s] > 0:\n price = self.bars.get_latest_bar_value(s, 'True_close')\n sell_all_event = SignalEvent(1, s, date, 'EXIT', 1.0, price)\n self.events.put(sell_all_event)", "def opened_at(self, datetime: datetime) -> None:", "def perform_trading(self, event : event.EventMarket)-> event.EventFilled:\n pass", "def __handle_open_orders(self):\n portfolio = self.get_portfolio_object()\n # only take complete orders\n orders = [order for order in portfolio.orders if order.status == Status.confirmed]\n time_zone = TraderBase.get_timezone()\n now = datetime.datetime.now(time_zone)\n for order in orders:\n price = self.db_tool.session.query(Series)\\\n .filter(order.stock_id == Series.stock_id) \\\n .filter(Series.date.between(order.date, now)) \\\n .filter(order.price >= Series.pricehigh)\\\n .order_by(Series.date.asc()).first()\n if price:\n order.status = Status.completed\n order.date = price.date\n self.connect_related_order(order)\n else:\n diff = now - order.date.replace(tzinfo=time_zone)\n hours = diff.total_seconds() / 60\n if hours >= self.expire_in_hours:\n self.logger.info(\"Order is expired because limit {} for {} \"\n \"was not reached during the day\".\n format(order.price, order.stock_id))\n order.status = Status.expired\n portfolio.cash -= order.price_complete", "def run_daily_hygienist(self):\n self.remove_priorities_from_all_not_due_today()", "def on_expire(self):\n pass", "def before_trading_start(context, data):\n context.output = 
pipeline_output('my_pipeline')\n context.current_stock_list = context.output.index.tolist()\n #print(context.output['weekly_classifier'])\n context.daily_stat_history.append(context.output)\n if len(context.daily_stat_history) > 2: # only keep last two units\n context.daily_stat_history.pop(0)\n\n # print context.output['daily_classifier']\n sig_counts = context.output['daily_classifier'].value_counts()\n if 2.0 not in sig_counts.index:\n sig_counts[2.0] = 0.0\n if 4.0 not in sig_counts.index:\n sig_counts[4.0] = 0.0\n if 8.0 not in sig_counts.index:\n sig_counts[8.0] = 0.0\n if 10.0 not in sig_counts.index:\n sig_counts[10.0] = 0.0\n if 12.0 not in sig_counts.index:\n sig_counts[12.0] = 0.0\n if 16.0 not in sig_counts.index:\n sig_counts[16.0] = 0.0\n if 18.0 not in sig_counts.index:\n sig_counts[18.0] = 0.0\n if 20.0 not in sig_counts.index:\n sig_counts[20.0] = 0.0", "def do(self, market_data):\r\n self.data.history = self.data.history + market_data", "def give_raise(self):\r\n self.hourly_pay = 12.00", "def weekly():", "def isNormalTradingDay(self):\n days = self.contractDetails.tradingHours.split(\";\")\n dateString = self.today.strftime(\"%Y%m%d\")\n today = [x for x in days if x.split(\":\")[0] == dateString]\n if not today:\n console().error(\"Missing Contract Market Hours for Today.\")\n hours = today[0].split(\":\")[1]\n if hours == \"CLOSED\" or hours != config.NORMAL_TRADING_HOURS:\n return False\n return True", "def iam(self):\n print(\"I am company\", self.ticker)", "def new_day(self):\n self.previous_days.append(self.energy_debt)\n self.energy_debt = defaultdict(lambda: 0.0)\n\n #TODO: add the settelement mechanism here", "def menu_python_daily(self, event=None):\n self.link('http://www.pythonware.com/daily/')", "def every_day():\n logger.info('[ EVERY_DAY ] [ %s ]' % str(datetime.now().time()))", "def buy_to_open(self, symbol, date, price):\n\n # Figure out how much we are willing to spend\n cash_available = self.cash - self.trade_fee\n cash_to_spend = cash_available / self.free_position_slots\n \n # Calculate buy_price and number of shares. Fractional shares allowed.\n purchase_price = (1 + self.percent_slippage) * price\n shares = cash_to_spend / purchase_price\n\n # Spend the cash\n self.cash -= cash_to_spend + self.trade_fee\n assert self.cash >= 0, 'Spent cash you do not have.'\n self.portfolio_history.record_cash(date, self.cash) \n\n # Record the position\n positions_by_symbol = self.active_positions_by_symbol\n assert not symbol in positions_by_symbol, 'Symbol already in portfolio.' 
\n position = Position(symbol, date, purchase_price, shares)\n positions_by_symbol[symbol] = position", "def initialize(context): \n log.info(\"initialzing\")\n context.prime = False\n \n # Rebalance every day, 1 hour after market open.\n schedule_function(my_rebalance, date_rules.month_start(), time_rules.market_open(hours=1))\n \n # Record tracking variables at the end of each day.\n schedule_function(my_record_vars, date_rules.every_day(), time_rules.market_close())\n \n\n # Create our dynamic stock selector.\n attach_pipeline(make_pipeline(), 'my_pipeline')", "def before_run(self):\n self.trade_data = []\n return self", "def on_tick(self, tick: TickData):\n\n # TODO 如何拿到另一个合约\n \n\n # 更新近月,远月\n tickDate = tick.datetime.strftime('%Y-%m-%d')\n if self.current_date != tickDate:\n self.current_date = tickDate\n # 选择01、02\n future_contract = jq.get_future_contracts(self.underlying_symbol, self.current_date)\n new_code_01 = future_contract[0]\n new_code_02 = future_contract[1]\n if self.config[\"code_01\"] != new_code_01:\n print(\"new code 01: \" + new_code_01 + \", old code 01: \" + self.config[\"code_01\"] + \", current date: \" + self.current_date)\n self.config[\"code_01\"] = new_code_01\n # 交割日\n self.config[\"de_date\"] = self.get_CCFX_end_date(self.config[\"code_01\"])\n print(\"交割日: \" + self.config[\"de_date\"].strftime(\"%Y/%m/%d, %H:%M:%S\") + \", current date: \" + self.current_date)\n if self.config[\"code_02\"] != new_code_02:\n print(\"new code 02: \" + new_code_02 + \", old code 02: \" + self.config[\"code_02\"] + \", current date: \" + self.current_date)\n self.config[\"code_02\"] = new_code_02\n \n # 下面的计算会在 on_bar 里完成\n # 计算信号\n # if (tick.datetime.second == 0):\n # self.spread_cal()\n \n # 交易时间限制 交割日\n if tick.datetime == self.config[\"de_date\"]:\n de_sign = tick.datetime.time() < self.config[\"close_time\"]\n else:\n de_sign = 1\n\n \n # 获取最新的 tick 数据\n # tick_data_01 = jq.get_current_tick(self.config[\"code_01\"])\n # tick_data_02 = jq.get_current_tick(self.config[\"code_02\"])\n\n # JQ data structure\n # future_tick_fields = ['datetime', 'current', 'high', 'low', 'volume', 'money', 'position', 'a1_p', 'a1_v', 'b1_p', 'b1_v']\n\n # tick数据存在时读取数据,不足时跳过\n if (type(tick_data_01).__name__ == 'Tick') & (type(tick_data_02).__name__ == 'Tick'):\n a_01 = tick_data_01.a1_p\n b_01 = tick_data_01.b1_p\n a_02 = tick_data_02.a1_p\n b_02 = tick_data_02.b1_p\n else:\n return 0\n \n spread_delta_1 = a_01 - b_02\n spread_delta_2 = b_01 - a_02\n\n \n len_short = len(context.portfolio.short_positions)\n len_long = len(context.portfolio.long_positions)\n \n # 开仓\n if (len_short == 0) and (len_long == 0) & (de_sign):\n # 向下突破布林线+判别因子通过,做多\n if (spread_delta_1 < self.config[\"lower\"]) & (self.config[\"ito\"] < self.config[\"e\"]):\n order(self.config[\"code_01\"], 1, side='long')\n order(self.config[\"code_02\"], 1, side='short')\n elif (spread_delta_2 > self.config[\"upper\"]) & (self.config[\"ito\"] < self.config[\"e\"]):\n order(self.config[\"code_01\"], 1, side='short')\n order(self.config[\"code_02\"], 1, side='long')\n # 平仓\n elif (len_short > 0) and (len_long > 0):\n long_code = list(context.portfolio.long_positions.keys())[0]\n if de_sign:\n if (spread_delta_2 > self.config[\"ma\"]) & (long_code == self.config[\"code_01\"]):\n order_target(self.config[\"code_01\"], 0, side='long')\n order_target(self.config[\"code_02\"], 0, side='short')\n elif (spread_delta_1 < self.config[\"ma\"]) & (long_code == self.config[\"code_02\"]):\n order_target(self.config[\"code_01\"], 0, 
side='short')\n order_target(self.config[\"code_02\"], 0, side='long')\n else:\n # 交割日强制平仓\n order_target(long_code, 0, side='long')\n order_target(list(context.portfolio.short_positions.keys())[0], 0, side='short')\n\n self.bg.update_tick(tick)", "async def dailytomorrow(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send_help(ctx.command)", "def on_date(self, date):\n print 'This is an empty on_date(date={}) function.\\nThe user must override this.'.format(date)\n return self", "def on_before_close(self):\n pass", "def OnRtnDepthMarketData(self, data: dict) -> None:\n current_date = data[\"TradingDay\"]\n current_time = data[\"UpdateTime\"]\n dt = datetime.strptime(\n f'{current_date}-{current_time}', \"%Y%m%d-%H:%M:%S\"\n )\n # dt = CHINA_TZ.localize(dt)\n\n tick = TickData(\n symbol=data[\"SecurityID\"],\n exchange=EXCHANGE_TORA2VT[bytes.decode(data[\"ExchangeID\"])],\n datetime=dt,\n name=data[\"SecurityName\"],\n volume=0,\n open_interest=data[\"OpenInterest\"],\n last_price=data[\"LastPrice\"],\n last_volume=data[\"Volume\"],\n limit_up=data[\"UpperLimitPrice\"],\n limit_down=data[\"LowerLimitPrice\"],\n open_price=data[\"OpenPrice\"],\n high_price=data[\"HighestPrice\"],\n low_price=data[\"LowestPrice\"],\n pre_close=data[\"PreClosePrice\"],\n bid_price_1=data[\"BidPrice1\"],\n ask_price_1=data[\"AskPrice1\"],\n bid_volume_1=data[\"BidVolume1\"],\n ask_volume_1=data[\"AskVolume1\"],\n gateway_name=self.gateway_name\n )\n\n if data[\"BidVolume2\"] or data[\"AskVolume2\"]:\n tick.bid_price_2 = data[\"BidPrice2\"]\n tick.bid_price_3 = data[\"BidPrice3\"]\n tick.bid_price_4 = data[\"BidPrice4\"]\n tick.bid_price_5 = data[\"BidPrice5\"]\n\n tick.ask_price_2 = data[\"AskPrice2\"]\n tick.ask_price_3 = data[\"AskPrice3\"]\n tick.ask_price_4 = data[\"AskPrice4\"]\n tick.ask_price_5 = data[\"AskPrice5\"]\n\n tick.bid_volume_2 = data[\"BidVolume2\"]\n tick.bid_volume_3 = data[\"BidVolume3\"]\n tick.bid_volume_4 = data[\"BidVolume4\"]\n tick.bid_volume_5 = data[\"BidVolume5\"]\n\n tick.ask_volume_2 = data[\"AskVolume2\"]\n tick.ask_volume_3 = data[\"AskVolume3\"]\n tick.ask_volume_4 = data[\"AskVolume4\"]\n tick.ask_volume_5 = data[\"AskVolume5\"]\n\n self.gateway.on_tick(tick)", "def start(self):\n\t\tself.load_market_data = True\n\t\twhile(self.load_market_data):#TODO lock\n\t\t\tif(len(self.registered_symbol_list) > 0):\n\t\t\t\tOMSLogger.info(\"Start loading market data for registered symbols...\")\n\t\t\t\tself.__reload_market_data__(self.market_data_list, self.registered_symbol_list)\n\t\t\t\tOMSLogger.info(\"Successfully loaded market data for registered symbols!\")\n\t\t\ttime.sleep(10)\t\t\t\n\n\t\t# print self.market_data_list", "def test_event_historical(self):\n url = os.path.join(\n settings.POSTGREST_BASE_URL,\n 'rpc/flood_event_historical_forecast_list_f')\n events = requests.post(url, data={\n 'forecast_date_range_start': self.forecast_date_range_start,\n 'forecast_date_range_end': self.forecast_date_range_end\n }).json()\n self.assertTrue(len(events) != 0)", "def isFresh(self, timestamp):\n pass;", "def auto_update_stock(self, ctx):\n woo_instance_id = ctx.get('woo_instance_id', False)\n instance = self.woo_instance_id.browse(woo_instance_id)\n if not instance:\n return True\n self.update_stock(instance, instance.last_inventory_update_time)\n return True", "async def daily(self, ctx: commands.Context):\n self.check_if_exist(ctx.guild)\n\n if ctx.invoked_subcommand == None:\n await ctx.reply(\"Options: `channel`, `timezone`, `ping`\")", "def _onPremade(self, 
event):\n self.openPremade()", "def OnRtnDepthMarketData(self, data: dict) -> None:\n current_date = data[\"TradingDay\"]\n current_time = data[\"UpdateTime\"]\n dt = datetime.strptime(\n f'{current_date}-{current_time}', \"%Y%m%d-%H:%M:%S\"\n )\n dt = CHINA_TZ.localize(dt)\n\n tick = TickData(\n symbol=data[\"SecurityID\"],\n exchange=EXCHANGE_TORA2VT[bytes.decode(data[\"ExchangeID\"])],\n datetime=dt,\n name=data[\"SecurityName\"],\n volume=0,\n open_interest=data[\"OpenInterest\"],\n last_price=data[\"LastPrice\"],\n last_volume=data[\"Volume\"],\n limit_up=data[\"UpperLimitPrice\"],\n limit_down=data[\"LowerLimitPrice\"],\n open_price=data[\"OpenPrice\"],\n high_price=data[\"HighestPrice\"],\n low_price=data[\"LowestPrice\"],\n pre_close=data[\"PreClosePrice\"],\n bid_price_1=data[\"BidPrice1\"],\n ask_price_1=data[\"AskPrice1\"],\n bid_volume_1=data[\"BidVolume1\"],\n ask_volume_1=data[\"AskVolume1\"],\n gateway_name=self.gateway_name\n )\n\n if data[\"BidVolume2\"] or data[\"AskVolume2\"]:\n tick.bid_price_2 = data[\"BidPrice2\"]\n tick.bid_price_3 = data[\"BidPrice3\"]\n tick.bid_price_4 = data[\"BidPrice4\"]\n tick.bid_price_5 = data[\"BidPrice5\"]\n\n tick.ask_price_2 = data[\"AskPrice2\"]\n tick.ask_price_3 = data[\"AskPrice3\"]\n tick.ask_price_4 = data[\"AskPrice4\"]\n tick.ask_price_5 = data[\"AskPrice5\"]\n\n tick.bid_volume_2 = data[\"BidVolume2\"]\n tick.bid_volume_3 = data[\"BidVolume3\"]\n tick.bid_volume_4 = data[\"BidVolume4\"]\n tick.bid_volume_5 = data[\"BidVolume5\"]\n\n tick.ask_volume_2 = data[\"AskVolume2\"]\n tick.ask_volume_3 = data[\"AskVolume3\"]\n tick.ask_volume_4 = data[\"AskVolume4\"]\n tick.ask_volume_5 = data[\"AskVolume5\"]\n\n self.gateway.on_tick(tick)", "def __periodic_maintenance__(self):\n pass", "def _refresh_tickers(self):\n if self._tickers is None or (time.time() - self._tickers_age) > self.tickers_update_interval:\n res = self.get('/v1/tickers')\n self._tickers = {m['id']: m for m in res['markets']}\n self._tickers.update({m['id_hr']: m for m in res['markets']})\n self._tickers_age = time.time()", "def visitBefore(self, date):\n raise NotImplementedError()", "def before_tick(self, time):\n pass", "async def daily(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send_help(ctx.command)", "def _on_op_private_ticker(self, msg):\r\n msg = msg[\"ticker\"]\r\n if msg[\"sell\"][\"currency\"] != self.curr_quote:\r\n return\r\n if msg[\"item\"] != self.curr_base:\r\n return\r\n bid = int(msg[\"buy\"][\"value_int\"])\r\n ask = int(msg[\"sell\"][\"value_int\"])\r\n\r\n self.debug(\" tick: %s %s\" % (\r\n self.quote2str(bid),\r\n self.quote2str(ask)\r\n ))\r\n self.signal_ticker(self, (bid, ask))", "async def stocks(self, ctx):\n\t\tpass", "def reqData(self):\r\n #self.reqGlobalCancel()\r\n #self.add_historical(\"Stock('TSLA', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('IBM', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('MSFT', 'SMART', 'USD')\")\r\n self.add_historical(\"Stock('FB', 'SMART', 'USD')\")", "def test_in_date_ordered_first_in_fifo(self):\n # Populate all `in_date` fields, with quant2 being the oldest\n now = datetime.now()\n oldest_time = now - timedelta(days=5)\n self.quant1.in_date = now\n self.quant2.write({\"in_date\": oldest_time, \"package_id\": self.pack2.id})\n self.quant3.in_date = now - timedelta(days=3)\n\n # Assert we are as expected\n self.assertEqual(self.quant2.package_id, self.pack2)\n for q in self.quant1 | self.quant3:\n self.assertFalse(q.package_id)\n\n # Reserve quantity - one apple\n 
reserved_quants = self.Quant._update_reserved_quantity(\n self.apple, self.test_stock_location_01, 1\n )\n reserved_quant = reserved_quants[0][0]\n\n self.assertEqual(reserved_quant, self.quant2)\n self.assertEqual(reserved_quant.in_date, oldest_time)\n self.assertEqual(self.quant2.package_id, self.pack2)", "def logDayDetails(self):\n console().info(\"Today is {}.\".format(self.today.strftime(DATE_FMT)))\n hours = self.contractDetails.tradingHours.split(\";\")[0].split(\":\")[1]\n console().info(\"Today's Trading Hours Are: {}\".format(hours))\n if self.normalDay:\n console().info(\"Today is a Valid Day for Trading\")\n else:\n console().info(\"Today is not a Valid Trading Day. Sleeping Until Tomorrow\")", "def OnEnterEpisode(self):\n pass", "def check_if_up_to_date():\n last_daily = get_latest_dl_date()\n last_trading_day = get_last_open_trading_day()", "def collect_data_date(self, date=None):\n if date is None:\n date = self.date\n # TODO make it so it doenst re-collect all data and just adds historical's data\n self.collect_all_stock_data()", "def onOpen(self):", "def test_handle_weather_message_calls_tomorrow(self):\n pass", "def before_trading_start(context, data):\r\n context.output = algo.pipeline_output('pipeline')\r\n\r\n # These are the securities that we are interested in trading each day.\r\n context.security_list = context.output.index\r\n \r\n # Loop through all assets in pipeline.\r\n for asset, row in context.output.iterrows():\r\n context.price[asset] = row.close\r\n \"\"\"\r\n # Skip entries with no flags.\r\n if row.flag_type != 'UP' and row.flag_type != 'DOWN':\r\n continue\r\n \r\n log.info('%s flag for %s. Price level = %f' % (row.flag_type, asset, context.price[asset]))\r\n \r\n # Count flags for asset in context.flags\r\n if asset in context.flags:\r\n context.flags[asset][row.flag_type] += 1\r\n else:\r\n if row.flag_type == 'UP':\r\n context.flags[asset] = {'UP': 1, 'DOWN': 0}\r\n \r\n elif row.flag_type == 'DOWN':\r\n context.flags[asset] = {'UP': 0, 'DOWN': 1}\r\n \"\"\" \r\n \r\n context.up_ratios[asset] = row.up_ratio\r\n \r\n if math.isnan(row.up_flags):\r\n continue\r\n \r\n context.flags[asset] = {'UP': row.up_flags, 'DOWN': row.down_flags}\r\n \r\n # In 2020, activate overweighting\r\n if not context.overweighting:\r\n today = get_datetime('US/Eastern')\r\n if today.year == 2020:\r\n context.overweighting = True", "def kickerOn(self):\n self.sKicker.set(.3 if not config.isPracticeBot else 0)\n if not self.lastKicker:\n self.datalogger.event(\"Fire!\")\n self.lastKicker = True", "def friewallOn():\n pass", "def prices(symbol):\n to = date.today().strftime(\"%Y%m%d\")\n c = db.cursor()\n c.execute(\"SELECT DATE_ADD(max(date), INTERVAL 1 DAY) FROM quote where symbol = %s\",\n (symbol))\n (_from, ) = c.fetchone()\n if _from == date.today():\n print \"Skipping %s\" % symbol\n return\n print \"Downloading %s\" % symbol\n if _from is None: \n _from = start_date\n else:\n _from = _from.strftime(\"%Y%m%d\")\n prices = stockquote.get_historical_prices(symbol, _from, to)\n headers = prices[0]\n try:\n close = get_idx(headers, 'Close')\n date_ = get_idx(headers, 'Date')\n open = get_idx(headers, 'Open')\n high = get_idx(headers, 'High')\n low = get_idx(headers, 'Low')\n quotes = prices[1:]\n for l in quotes:\n #print \"%s %s\" % (l[date_], l[close])\n try:\n insert(symbol, l[date_], l[close], l[high], l[low], l[open])\n except Exception, e:\n print \"Could not insert %s:%s\" % (symbol, e)\n print \"Inserted %s new quotes for %s\" % (len(quotes), symbol)\n except 
Exception, e:\n print \"Could not download %s\" % symbol\n print e", "def ticker(self, symbol, **kwargs):\n pass", "def caculate_signals(self, event):\n\t\tif event.type == 'MARKET':\n\t\t\tfor s in self.symbol_list:\n\t\t\t\tbars = self.bars.get_latest_bars(s, N=1)\n\t\t\t\tif bars is not None and bars != []:\n\t\t\t\t\tif self.bought[s] == False:\n\t\t\t\t\t\t# (Symbol, Datetime, Type = LONG, SHORT or EXIT)\n\t\t\t\t\t\tsignal = SignalEvent(bars[0][0], bars[0][1], 'LONG')\n\t\t\t\t\t\tself.events.put(signal)\n\t\t\t\t\t\tself.bought[s] = False", "def before_trading_start(context, data):\n pipe_bbands = algo.pipeline_output('pipe_bbands') \n\n # Find list of symbols to buy/sell.\n context.buy = pipe_bbands[pipe_bbands['buy']].index.tolist()\n context.sell = pipe_bbands[pipe_bbands['sell']].index.tolist()", "def initialize(context):\n # Rebalance every day, 1 hour after market open.\n algo.schedule_function(\n rebalance,\n algo.date_rules.month_start(),\n algo.time_rules.market_open(hours=1),\n )\n\n algo.schedule_function(\n bonds,\n algo.date_rules.month_start(days_offset=1),\n algo.time_rules.market_open(hours=1),\n )\n\n algo.set_benchmark(algo.sid(\"FIBBG000BDTBL9\"))\n\n # Create a pipeline to select stocks each day.\n algo.attach_pipeline(make_pipeline(), 'pipeline')\n \n # algo.set_min_leverage(0, datetime.timedelta(30))\n # algo.set_max_leverage(1.2)\n\n context.trend_filter = False", "def initialize(context):\n # Rebalance every day, 1 hour after market open.\n set_slippage(slippage.FixedSlippage(spread=0.00))\n set_commission(commission.PerShare(cost=0.0, min_trade_cost=0.0))\n context.lookback = 60\n context.leverage = 0.02\n context.day = 1\n #context.ETFs = []\n context.market = [symbol('SPY')]\n context.bo = 1.25\n context.so = 1.25\n context.bc = 0.75\n context.sc = 0.5\n context.stocks = []\n context.initialized = False\n context.holding_book_shares = None\n context.order_hist = {}\n \n context.xlb = symbol('XLB') #sid(19654) #Materials 101\n context.xly = symbol('XLY') #sid(19662) #Consumer Discretionary 102\n context.xlf = symbol('XLF') #sid(19656) #Financials 103\n context.xlre = symbol('IYR') #sid() #Real estate 104\n context.xlp = symbol('XLP') #sid(19659) #Consumer Staples 205\n context.xlv = symbol('XLV') #sid(19661) #Health Care 206\n context.xlu = symbol('XLU') #sid(19660) #Utilities 207\n context.xtl = symbol('IYZ') #sid() #Communication Services 308\n context.xle = symbol('XLE') #sid(19655) #Energy 309\n context.xli = symbol('XLI') #sid(19657) #Industrials 310\n context.xlk = symbol('XLK') #sid(19658) #Technology 311\n \n context.ETF_lookup = {context.xlb:101, 101:context.xlb,\n context.xly:102, 102:context.xly,\n context.xlf:103, 103:context.xlf,\n context.xlre:104, 104:context.xlre,\n context.xlp:205, 205: context.xlp,\n context.xlv:206, 206: context.xlv,\n context.xlu:207, 207:context.xlu,\n context.xtl:308, 308:context.xtl,\n context.xle:309, 309:context.xle,\n context.xli:310, 310:context.xli,\n context.xlk:311, 311:context.xlk}\n\n context.ETFs = [context.xlb,\n context.xly,\n context.xlf,\n context.xlre,\n context.xlp,\n context.xlv,\n context.xlu,\n context.xtl,\n context.xle,\n context.xli,\n context.xlk\n ]", "def trigger(self, trade) -> bool:\n pass", "def do_upt(self, arg):\n self.do_timesheet('update today')", "def wakeup(self, currentTime):\n # Parent class handles discovery of exchange times and market_open wakeup call.\n super().wakeup(currentTime)\n\n if not self.mkt_open or not self.mkt_close:\n # No logging if market is closed\n return\n\n 
self.measureFundamental()\n self.setWakeup(currentTime + self.getWakeFrequency())", "def update_historical_data():\n print('updating historical data')\n for sp in SupplyPoint.objects.filter(supplypointwarehouserecord__isnull=True).exclude(type__code=SupplyPointCodes.ZONE):\n update_historical_data_for_supply_point(sp)", "def on_tick(self, tick: TickData):\n if tick and tick.bid_price_1 > 0:\n self.tick = tick", "def test_stock_exchange():\n # create fake company profiles and list\n stock_exchange = session10.create_stock_exchange(num_of_listed_comp = 100)\n\n # Stock market details\n day_open,day_high,day_low,day_close = session10.stock_exchange_details(stock_exchange)\n\n assert day_low<=day_high, \"Implementation of Stock Exchange is not correct\"\n assert day_close<=day_high, \"Implementation of Stock Exchange is not correct\"", "def update_data(self):\n data, meta_data = ts.get_daily(symbol=self.stock_ticker, outputsize='full')\n self.data = data\n self.meta_data = meta_data", "def insert_day():\n analytics.insert_day(6)", "def before_trading_start(context, data):\n context.output = algo.pipeline_output('pipeline')\n\n # These are the securities that we are interested in trading each day.\n context.security_list = context.output.index", "def calculate_signals(self, event: MarketEvent):\n for symbol, bars in event.symbol_data.items():\n if not self.bought[symbol]:\n signal = SignalEvent(bars[-1].symbol, bars[-1].time, 'LONG')\n self.events.add_event(signal)\n self.bought[symbol] = True", "def tick(self):\n \n # next historical order to be sent\n mktorder = self.hist_orders[self.mkt_idx+1]\n # if I have queued orders\n if self.my_queue:\n # if my order reaches the market before the next historical order\n if self.my_queue[0].timestamp < mktorder[self.col_idx['timestamp']]:\n my_order = self.my_queue.popleft()\n self._send_to_market(my_order, is_mine=True)\n self.mkt_time = my_order[self.col_idx['timestamp']]\n return\n \n # otherwise sent next historical order\n self._send_historical_order(mktorder)", "def is_there_new_filling(stock_ticker: str):\n return {f\"Function is not implemented yet\"}", "def updateAllHistorical():\n now = datetime.datetime.fromtimestamp(getTime())\n fiveDaysAgo = datetime.datetime.fromtimestamp(\n getTime() - daysToSeconds(5)\n )\n for stockName in db.STOCK_MAP.keys():\n try:\n historicalData = getHistoricalData(stockName, fiveDaysAgo)\n with open(\n \"static/data/\" + stockName.lower() + \".csv\",\n \"a\"\n ) as f:\n try:\n f.write(\",\".join(\n str(d) for d in historicalDictToList(\n historicalData[\"history_list\"][0]\n )\n ) + \"\\n\")\n except KeyError:\n pass\n except IOError as e:\n print \"I/O error({0}): {1}\".format(e.errno, e.strerror)\n db.UPDATING_HISTORICAL = False", "def one_hour_ticker(*args):\n markets = fetch_markets()\n map(populate_one_hour_data, markets)\n return", "async def on_trade_expire(self, trade: \"steam.TradeOffer\") -> None:", "def startService(self):\n super(_SiteScheduler, self).startService()\n self._transientSchedule(self.now(), self.now())", "def low_stock_date(self):\n return self._low_stock_date", "def test_fill_data_with_close_in_strikes(self):\n date = pd.to_datetime('2009-03-31')\n print 'testing date: %s' % date.strftime('%Y-%m-%d')\n self.full_iv.get_data()\n self.full_iv.df_stock = self.full_iv.df_stock[date:date]\n df_iv = self.full_iv.calc_iv()\n\n print df_iv\n self.assertTrue(len(df_iv))", "def test_fill_quote_history(self):\n ticker = \"ibm\"\n name = \"IBM\"\n data = {'name': name, 'ticker': ticker}\n request = 
self.client.post('/stocks/addstock/', data, follow=True, secure=True)\n stock_id = request.content\n data = DailyStockQuote.objects.filter(stock_id=stock_id)\n stock_data = Stock.objects.filter(id=stock_id)\n self.assertGreater(len(data), 0)\n self.assertEqual(len(stock_data), 1)", "def initialize(context):\n set_benchmark(sid(21519)) # I can't remember what stock this was supposed to be, but it's likely I just used SPY as the benchmark\n attach_pipeline(make_pipeline_buy(), 'pipeline_buy')\n schedule_function(rebalance, date_rules.month_start(), time_rules.market_open(minutes=60))", "def simulate_future_prices(self, market_names, fixing_dates, observation_date, path_count, calibration_params):", "def ticker_wrapper(ticker):", "def __init__(self):\n self._update_scheduled = False", "def on_tick(self, _):\n now = datetime.datetime.now()\n is_weekday = (now.weekday() <= 5)\n is_workhour = (now.hour >= 7 and now.hour <= 16)\n is_top_of_the_hour = (now.minute >= 25 and now.minute <= 29)\n is_bottom_of_the_hour = (now.minute >= 55 and now.minute <= 59)\n is_break = is_top_of_the_hour or is_bottom_of_the_hour\n if is_weekday and is_workhour and not self.pause:\n if is_break:\n if self.app.title == 'work':\n rumps.notification(\"Break\", \"Time to take a break\", \"ok\")\n self.app.title = 'break'\n else:\n if self.app.title == 'break':\n rumps.notification(\"Work\", \"Time to work\", \"\")\n self.app.title = 'work'", "def precheck(self):\n # making sure it's a time for pull, otherwise just sleep\n if datetime.now() < self.startTime + timedelta(hours=int(self.newsFrequency)):\n logging.info(\"Didn't reach time to wakeup yet, going to sleep\")\n self.sleep()", "def setMarketCallBack(self, markets):\n self.ws.setMarketCallBack(markets)" ]
[ "0.6588593", "0.65427184", "0.6336546", "0.62522703", "0.61856025", "0.61591846", "0.6053704", "0.60283375", "0.60003495", "0.5933112", "0.5917485", "0.590598", "0.5869488", "0.5780416", "0.57541144", "0.57364595", "0.5636754", "0.561605", "0.5577664", "0.55399215", "0.5519666", "0.5486484", "0.5456792", "0.54452103", "0.5437262", "0.54327524", "0.54066086", "0.5398537", "0.539245", "0.5388288", "0.5382791", "0.5369419", "0.53574157", "0.5357074", "0.5353266", "0.5346379", "0.53405464", "0.53252155", "0.5313732", "0.53076845", "0.52850205", "0.5284972", "0.52840734", "0.5272899", "0.5267843", "0.52664703", "0.525782", "0.5237978", "0.52246517", "0.52232873", "0.52151686", "0.5213497", "0.52127916", "0.5209739", "0.52046245", "0.5202006", "0.5201573", "0.51930577", "0.51872545", "0.518337", "0.51751137", "0.5172775", "0.5171862", "0.5156565", "0.51554954", "0.5154561", "0.51384693", "0.5137703", "0.5129181", "0.5122717", "0.51201725", "0.51100266", "0.5109887", "0.51096165", "0.5109226", "0.51066333", "0.510616", "0.50993645", "0.5083873", "0.5083287", "0.5076491", "0.5069806", "0.5065792", "0.50478375", "0.50401574", "0.5030315", "0.50295", "0.5015912", "0.5006381", "0.49980357", "0.4995511", "0.49952313", "0.49922618", "0.49805102", "0.4980239", "0.49793988", "0.4978176", "0.49756485", "0.49733654", "0.49558282" ]
0.496805
99
This function places an order for "context.index" in the amount required to neutralize the beta exposure of the portfolio. Note that additional leverage in the account is taken on; however, net market exposure is reduced.
def hedge_portfolio(context, data):
    factors = get_alphas_and_betas(context, data)
    beta_exposure = 0.0
    count = 0
    for asset in context.portfolio.positions:
        if asset in factors and asset != context.index:
            if not np.isnan(factors[asset].beta):
                beta_exposure += factors[asset].beta
                count += 1
    beta_hedge = -1.0 * beta_exposure / count
    dollar_amount = context.portfolio.portfolio_value * beta_hedge
    record(beta_hedge=beta_hedge)
    if not np.isnan(dollar_amount):
        order_target_value(context.index, dollar_amount)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_alphas_and_betas(context, data):\r\n all_assets = context.portfolio.positions.keys()\r\n if context.index not in all_assets:\r\n all_assets.append(context.index)\r\n prices = data.history(all_assets, 'price', context.lookback, '1d')\r\n returns = prices.pct_change()[1:]\r\n # index_returns = returns[context.index]\r\n factors = {}\r\n for asset in context.portfolio.positions:\r\n try:\r\n y = returns[asset]\r\n factors[asset] = linreg(returns[context.index], y)\r\n except:\r\n log.warn(\"[Failed Beta Calculation] asset = %s\" % asset.symbol)\r\n return pd.DataFrame(factors, index=['alpha', 'beta'])", "def place_order(env, inventory_stock):\n yield env.timeout(LEAD_TIME)\n #amount = inventory_stock.capacity - inventory_stock.level\n amount = EOQ\n print('Inventory refilled by {1} products at {0} '.format(env.now, amount))\n print('Inventory Level = {}'.format(inventory_stock.capacity))\n order_arrival_time.append(env.now)\n order_amount.append(amount)\n yield inventory_stock.put(amount)", "def rebalance(context, data):\n logger.debug('rebalancing on: %s', algo.get_datetime())\n\n context.trend_filter = False\n\n # new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio (before filtering) - equity: %s', equity)\n\n # print(new_portfolio)\n\n # new_portfolio = new_portfolio[new_portfolio['overall_rank'].notna() & new_portfolio['momentum'] > 40][:20]\n \n # new_portfolio = new_portfolio[(new_portfolio['momentum_decile'] > 8)][:20]\n\n new_portfolio = new_portfolio.nlargest(20, ['overall_rank', 'momentum']) #<- $600K PL in 10 years\n\n # new_portfolio = new_portfolio.nlargest(20, ['momentum', 'overall_rank']) #<- 1M PL in 10 years\n\n if logger.level is logging.DEBUG:\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio - (after filtering) equity: %s', equity)\n \n\n # print(len(new_portfolio.index))\n\n # volatility driven weights\n # new_portfolio['inverse_volatility'] = new_portfolio['volatility'].apply(lambda x: 1 / x)\n # inv_vola_sum = new_portfolio['inverse_volatility'].sum()\n # new_portfolio['target_weight'] = new_portfolio['inverse_volatility'].apply(lambda x: x / inv_vola_sum)\n\n # portfolio size driven weights\n # num_equities = len(new_portfolio.index)\n # new_portfolio['target_weight'] = 1 / num_equities\\\n\n # logger.info('len existing portfolio: %s', len(context.portfolio.positions))\n\n if logger.level is logging.DEBUG:\n for equity, values in context.portfolio.positions.items():\n logger.debug('context.portfolio.positions - equity: %s, amount: %s, cost_basis: %s, sold_on: %s, sold_at_price: %s', equity, values.amount, values.cost_basis, values.last_sale_date, values.last_sale_price)\n\n \n order_target(algo.sid('FIBBG000NTFYM5'), 0)\n logger.debug('selling all bonds')\n\n for equity in context.portfolio.positions:\n if equity is algo.sid('FIBBG000NTFYM5'): \n continue\n if equity not in set(new_portfolio.index.tolist()):\n # logger.info('selling %s', equity)\n order_target_percent(equity, 0)\n\n stock_weights = 1.0 / max(len(context.portfolio.positions), len(new_portfolio.index))\n\n logger.debug('len existing portfolio (afer ejection): %s', len(context.portfolio.positions))\n logger.debug('len new portfolio: %s', len(new_portfolio.index))\n logger.debug('stock_weights: %s', 
stock_weights)\n\n # print(context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5')))\n\n # spy = context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5'))\n\n # if (spy is not None) and (spy.amount > 0):\n # order_target_percent(algo.sid('FIBBG000NTFYM5'), 0)\n\n for equity, row in new_portfolio.iterrows():\n if row.trend_filter is True:\n # logger.info('buying %s', equity)\n context.trend_filter = True\n order_target_percent(equity, stock_weights)\n else:\n context.trend_filter = False\n \n logger.debug('cash: %s', context.portfolio.cash)\n logger.debug('portfolio_value: %s', context.portfolio.portfolio_value)\n logger.debug('num_positions: %s', len(context.portfolio.positions))\n logger.debug('positions: %s', context.portfolio.positions)", "def update_order_index(self, index=None):\n if index is None:\n index = getattr(self, \"current_order_index\", 0)\n\n session = self.parent.session\n self.current_order_index = index\n self.current_order \\\n = session.input_spectra[self.current_order_index].copy()\n\n # Apply any RV correction.\n try:\n v = session.metadata[\"rv\"][\"rv_applied\"]\n except (AttributeError, KeyError):\n v = 0\n\n self.current_order._dispersion *= (1 - v/c)\n\n # Update the view if the input settings don't match the settings used\n # to normalize the current order.\n self.check_for_different_input_settings()\n\n return None", "def beta(self, index):\n index_change = index.close.pct_change()\n beta = self.pct_change.cov(index_change) / index_change.var()\n return beta", "def initialize(context):\n # Rebalance every day, 1 hour after market open.\n set_slippage(slippage.FixedSlippage(spread=0.00))\n set_commission(commission.PerShare(cost=0.0, min_trade_cost=0.0))\n context.lookback = 60\n context.leverage = 0.02\n context.day = 1\n #context.ETFs = []\n context.market = [symbol('SPY')]\n context.bo = 1.25\n context.so = 1.25\n context.bc = 0.75\n context.sc = 0.5\n context.stocks = []\n context.initialized = False\n context.holding_book_shares = None\n context.order_hist = {}\n \n context.xlb = symbol('XLB') #sid(19654) #Materials 101\n context.xly = symbol('XLY') #sid(19662) #Consumer Discretionary 102\n context.xlf = symbol('XLF') #sid(19656) #Financials 103\n context.xlre = symbol('IYR') #sid() #Real estate 104\n context.xlp = symbol('XLP') #sid(19659) #Consumer Staples 205\n context.xlv = symbol('XLV') #sid(19661) #Health Care 206\n context.xlu = symbol('XLU') #sid(19660) #Utilities 207\n context.xtl = symbol('IYZ') #sid() #Communication Services 308\n context.xle = symbol('XLE') #sid(19655) #Energy 309\n context.xli = symbol('XLI') #sid(19657) #Industrials 310\n context.xlk = symbol('XLK') #sid(19658) #Technology 311\n \n context.ETF_lookup = {context.xlb:101, 101:context.xlb,\n context.xly:102, 102:context.xly,\n context.xlf:103, 103:context.xlf,\n context.xlre:104, 104:context.xlre,\n context.xlp:205, 205: context.xlp,\n context.xlv:206, 206: context.xlv,\n context.xlu:207, 207:context.xlu,\n context.xtl:308, 308:context.xtl,\n context.xle:309, 309:context.xle,\n context.xli:310, 310:context.xli,\n context.xlk:311, 311:context.xlk}\n\n context.ETFs = [context.xlb,\n context.xly,\n context.xlf,\n context.xlre,\n context.xlp,\n context.xlv,\n context.xlu,\n context.xtl,\n context.xle,\n context.xli,\n context.xlk\n ]", "def performance_vs_index(self, index='SPY', dateIni='Ini', dateFin='Fin'):\n if dateFin == 'Fin':\n dateFin = self.data.index[-1]\n if dateIni == 'Ini':\n dateIni = self.data.index[0]\n portfolioGains = round(self.data.loc[self.data.index[-1], 
'Profit/Loss%'], 2)\n else:\n pData = self.data.loc[dateIni:dateFin]\n pData.loc[:,'Profit/Loss'] = pData['Gains'].cumsum()\n pData.loc[:,'Profit/Loss%'] = pData['Profit/Loss'] / pData['Invested'] * 100\n portfolioGains = round(pData.loc[pData.index[-1], 'Profit/Loss%'], 2)\n indexData = yf.Ticker(index).history(start=dateIni, end=dateFin)\n indexData['Var%'] = (indexData.Close - indexData.Close[0]) / indexData.Close[0] * 100\n indexGains = round(indexData.loc[indexData.index[-1], 'Var%'], 2)\n return portfolioGains, indexGains, portfolioGains - indexGains", "def CalcEffectiveInventory(self):\r\n return (self.currentStock - self.currentOrders)", "def index():\n user_stocks_list = db.execute(\"SELECT stock FROM transactions WHERE id = :current_id\", current_id=session[\"user_id\"])\n user_stocks = []\n for stock in user_stocks_list:\n if stock['stock'] not in user_stocks:\n user_stocks.append(stock['stock'])\n\n stock_portfolio = []\n\n for possible_stock in user_stocks:\n bought_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='B')\n bought_shares = 0\n bought_shares = bought_shares_list[0][\"SUM(units)\"]\n sold_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='S')\n sold_shares = 0\n sold_shares = sold_shares_list[0][\"SUM(units)\"]\n if sold_shares == None:\n sold_shares = 0\n\n available_shares = 0\n if bought_shares != None and (bought_shares - sold_shares) > 0:\n available_shares = bought_shares - sold_shares\n current_price = int(lookup(possible_stock)[\"price\"])\n market_value = current_price * available_shares\n dict_stock = {}\n dict_stock['name_stock'] = possible_stock\n dict_stock['shares_quantity'] = available_shares\n dict_stock['current_price'] = current_price\n dict_stock['market_value'] = market_value\n stock_portfolio.append(dict_stock)\n else:\n pass\n\n available_money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = usd(available_money_list[0]['cash'])\n\n username_list = db.execute(\"SELECT username FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n username = username_list[0][\"username\"]\n\n sum_market_values = 0\n for collection in stock_portfolio:\n sum_market_values += int(collection['market_value'])\n\n total_value = usd(available_money_list[0]['cash'] + sum_market_values)\n\n return render_template(\"index.html\", stock_portfolio=stock_portfolio, user_stocks=user_stocks, money=available_money, name=username, total_value=total_value)", "def gbce_index(self):\n stocks_vwsp = [Stock.get_instance().get_stock_by_symbol(tr.symbol).vwsp for tr in Trade.get_instance()]\n try:\n return (reduce(operator.mul, stocks_vwsp, 1)) ** (1.0/len(stocks_vwsp))\n except ZeroDivisionError:\n return 0.0", "def place_orders(context, data):\r\n log.info(\"*********Monthly flags: %s\" % context.flags)\r\n \r\n context.sell = []\r\n context.buy = []\r\n \r\n # Go through flags to determine buy/sell signals\r\n for asset, flags in context.flags.items():\r\n # If up > down and multiple blue flags, add to buy\r\n if flags['UP'] > flags['DOWN'] and flags['UP'] > 1:\r\n context.buy.append(asset)\r\n \r\n # If down > up and multiple down flags, add to sell\r\n elif flags['DOWN'] > flags['UP'] and flags['DOWN'] > 1:\r\n 
context.sell.append(asset)\r\n \r\n # If both SPY and QQQ are buys, rebalance weightings and check components\r\n if sid(8554) in context.buy and sid(19920) in context.buy:\r\n rebalance_weightings(context)\r\n \r\n # Reset down sequence\r\n context.first_down_sequence = set()\r\n \r\n # Reset SPY and QQQ to max weightings\r\n context.target_weights[sid(8554)] = context.max_weights[sid(8554)]\r\n context.target_weights[sid(19920)] = context.max_weights[sid(19920)]\r\n \r\n # Convert weights to number of shares \r\n context.target_shares[sid(8554)] = round(context.target_weights[sid(8554)] * context.portfolio.portfolio_value / context.price[sid(8554)])\r\n context.target_shares[sid(19920)] = round(context.target_weights[sid(19920)] * context.portfolio.portfolio_value / context.price[sid(19920)])\r\n \r\n # If not overweighting:\r\n if not context.overweighting:\r\n context.buy.remove(sid(8554))\r\n context.buy.remove(sid(19920))\r\n \r\n # Check components\r\n for asset, ratio in context.up_ratios.items():\r\n # If UP ratio > 1, add to buy\r\n if asset != sid(8554) and asset != sid(19920) and ratio > 1:\r\n context.buy.append(asset)\r\n \r\n # If SPY is a sell, check UP ratios for components\r\n if sid(8554) in context.sell:\r\n for asset, ratio in context.up_ratios.items():\r\n # If UP ratio < 1, add to sell\r\n if asset != sid(8554) and asset != sid(19920) and ratio < 1:\r\n context.sell.append(asset)\r\n \r\n \r\n \r\n # First month at end August 2017: set all other assets to max weighting, except take UP ratio of JKL to be <1 so sell 20% of weighting\r\n if context.first_iteration:\r\n log.info('First iteration')\r\n \r\n # Initialise weightings\r\n rebalance_weightings(context)\r\n context.first_iteration = False\r\n \r\n for asset, weight in context.max_weights.items(): \r\n # JKL\r\n if asset == sid(26451):\r\n context.sell.append(asset)\r\n\r\n context.target_weights[asset] = weight\r\n \r\n # Convert weights to number of shares \r\n context.target_shares[asset] = round(context.target_weights[asset] * context.portfolio.portfolio_value / context.price[asset])\r\n \r\n buy_overweight = []\r\n remaining_cash = context.portfolio.cash\r\n \r\n # Buy components first (before considering overweighting QQQ/SPY)\r\n for asset in sorted(context.buy, reverse=True):\r\n \r\n # This is an up sequence so no subsequent down sequence\r\n if asset in context.first_down_sequence:\r\n context.first_down_sequence.remove(asset) \r\n \r\n # Buy 50% of weighting\r\n log.info('UP flags for %s: Buy 50 percent' % asset)\r\n extra_weight = 0.5 * context.max_weights[asset]\r\n \r\n # Do not exceed max shares by weighting, UNLESS taking from cash from components (overweighting)\r\n if context.target_weights[asset] == context.max_weights[asset] or (context.target_weights[asset] > context.max_weights[asset] and context.overweighting):\r\n buy_overweight.append(asset)\r\n \r\n elif context.target_weights[asset] + extra_weight > context.max_weights[asset]:\r\n context.target_weights[asset] = context.max_weights[asset]\r\n \r\n else:\r\n context.target_weights[asset] += extra_weight\r\n \r\n # Convert weights to number of shares\r\n old_shares = context.target_shares[asset]\r\n context.target_shares[asset] = round(context.target_weights[asset] * context.portfolio.portfolio_value / context.price[asset])\r\n remaining_cash -= (context.target_shares[asset] - old_shares) * context.price[asset]\r\n \r\n for asset in buy_overweight:\r\n if remaining_cash > 0:\r\n # If first overweight or 2 assets to be overweighted, take 
50% of available cash\r\n if context.target_weights[asset] > context.max_weights[asset] or len(buy_overweight) > 1:\r\n log.info('Taking half of cash of value: %f' % (remaining_cash * 0.5))\r\n context.target_weights[asset] += 0.5 * remaining_cash / context.portfolio.portfolio_value\r\n \r\n # If second overweight, take all remaining cash\r\n else:\r\n log.info('Taking remaining of cash of value: %f' % (remaining_cash))\r\n context.target_weights[asset] += remaining_cash / context.portfolio.portfolio_value\r\n \r\n else:\r\n # If no cash, ignore\r\n log.info('UP flags for %s: No change' % asset)\r\n continue\r\n \r\n \r\n # For assets in sell list\r\n for asset in context.sell:\r\n \r\n # If asset already has 0 holdings, ignore\r\n if context.target_weights[asset] == 0:\r\n log.info('DOWN flags for %s: No change' % asset)\r\n continue\r\n \r\n # If first multiple down flags, sell 20% of UP weight\r\n elif asset not in context.first_down_sequence:\r\n log.info('First DOWN flags for %s: Sell 20 percent' % asset)\r\n context.target_weights[asset] -= 0.2 * context.max_weights[asset]\r\n context.first_down_sequence.add(asset)\r\n \r\n # If this is a subsequent down flag sequence, sell 40% of UP weight\r\n else:\r\n log.info('DOWN flags for %s: Sell 40 percent' % asset)\r\n context.target_weights[asset] -= 0.4 * context.max_weights[asset]\r\n \r\n # Ensure no short position\r\n if context.target_weights[asset] < 0:\r\n context.target_weights[asset] = 0\r\n \r\n # Convert weights to number of shares \r\n context.target_shares[asset] = round(context.target_weights[asset] * context.portfolio.portfolio_value / context.price[asset])\r\n \r\n print(context.target_weights)", "def example_reward(self,stock=None, action=None, current_date = None, products=None, orders=None, procurements=None):\n\n out = 0\n for key in stock:\n out += stock[key]\n return out * -1", "def test_CalculateStockItemOrders(self):\n symbol = \"XXXX\"\n\n # Create ActiveStockItem\n activeStockItem = ActiveStockItem(symbol=symbol)\n quantity = 2\n buyStepSize = 1\n activeStockItem.SellStepSize = 2\n activeStockItem.SellStepType = SellDeltaType.FIXED\n activeStockItem.StartPrice = 20.55\n activeStockItem.QuantityMultiplier = 1\n activeStockItem.MaxActiveBuy = 2\n priceCoordinates:List[PriceCoordinate] = []\n priceCoordinates.append(PriceCoordinate(startPrice=0,quantity=quantity, \n buyDeltaType=BuyDeltaType.FIXED, fixedBuyDelta=buyStepSize))\n activeStockItem.PriceCoordinates = priceCoordinates\n\n # Create PortfolioPosition\n portfolioPosition = PortfolioPosition(symbol=symbol)\n portfolioPosition.Quantity = 9\n \n expectedLimitOrders:List[OrderInfo] = [\n OrderInfo(Settings.NewOrderId, symbol, 22.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 21.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 20.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 19.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 18.55, 1, True, True),\n OrderInfo(Settings.NewOrderId, symbol, 16.55, 1, True, False),\n OrderInfo(Settings.NewOrderId, symbol, 15.55, 2, False, False)\n ]\n\n possibleLimitOrders:List[OrderInfo] = self.manageOrdersHelpers.GeneratePossibleLimitOrders(activeStockItem, portfolioPosition.Quantity)\n\n self.assertSequenceEqual(expectedLimitOrders, possibleLimitOrders)\n\n placeOrders, cancelOrders = self.moneyMaker.CalculateStockItemOrders(activeStockItem, [], portfolioPosition)\n\n print(placeOrders)\n\n print(cancelOrders)\n\n for activeStockItem in ActiveStockItems:\n 
print(activeStockItem.Symbol)", "def Rollback(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def blank_future_eta(request):\n today = datetime.datetime.today()\n today = today.date()\n\n orders = OrderDetail.objects.filter(eta__gt=today)\n for order in orders:\n order.eta = None\n order.save()\n\n return HttpResponse('ok', mimetype='text/plain')", "def prepare_order(self, index, order_status):\n if(self.running_qty > 0 and index > 0):\n quantity = self.running_qty\n price = self.get_price_offset3(index)\n elif(self.running_qty < 0 and index < 0):\n quantity = abs(self.running_qty)\n price = self.get_price_offset3(index)\n else:\n quantity = self.ORDER_START_SIZE // 4\n price = self.get_price_offset2(index)\n if (price == None):\n return None\n else:\n return {'price': price, 'orderQty': quantity, 'side': \"Buy\" if index < 0 else \"Sell\"}", "def my_rebalance(context, data):\n freq_month = 3\n context.counter += 1\n if context.counter == freq_month:\n for stock, weight in context.weights.iteritems():\n context.counter = 0\n if data.can_trade(stock):\n order_target_percent(stock, weight)", "def rebalance(context, data):\n\n cancel_all_orders(context, data)\n sell_stocks_not_in_portfolio(context, data)\n\n LOG.info(\"rebalancing\")\n LOG.info(context.stocks)\n totals = calculate_totals(context, data)\n LOG.info(\"totals calculated: %s\" % totals)\n for stock, info in totals.items():\n order(stock, info[\"total\"])", "def bkg_subtract(self, analyte, bkg, ind=None):\n\n if 'bkgsub' not in self.data.keys():\n self.data['bkgsub'] = {}\n\n self.data['bkgsub'][analyte] = self.focus[analyte] - bkg\n\n if ind is not None:\n self.data['bkgsub'][analyte][ind] = np.nan\n\n return", "def index():\n\n #select user's portfolio\n rows = db.execute(\"SELECT * FROM portfolio WHERE userid=:id\", id=session[\"user_id\"])\n\n #set temporary holding place for cash to zero\n tcash = 0\n\n #update the stock information in user's portfolio\n for row in rows:\n stock = row[\"stock\"]\n number = row[\"number\"]\n quote = lookup(stock)\n total = float(number) * float(quote[\"price\"])\n tcash += total\n db.execute(\"UPDATE portfolio SET price=:price, total=:total WHERE userid=:id AND stock=:stock AND number=:number\", price=usd(quote[\"price\"]), total=total, id=session[\"user_id\"], stock=stock, number=number)\n\n #select user's cash and updated portfolio\n updated_cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n tcash += updated_cash[0][\"cash\"]\n updated_stock = db.execute(\"SELECT stock, SUM(number) AS number, price, SUM(total) AS stock_total FROM portfolio WHERE userid=:id GROUP BY stock HAVING SUM(number) > 0\", id=session[\"user_id\"])\n\n return render_template(\"index.html\", stocks=updated_stock, cash=usd(updated_cash[0][\"cash\"]), all_total=usd(tcash))", "def __sell(self, order, portfolio):\n amount = order.price * order.volume\n portfolio.remove_stock(order.symbol, order.volume)\n portfolio.add_cash(amount)\n return True", "def my_rebalance(context,data):\n log.info(\"rebalancing...\")\n context.output = pipeline_output('my_pipeline')\n log.info(\"retrieved pipeline output...\")\n \n # These are the securities that we are interested in trading each day.\n context.security_list = context.output.index\n \n if context.prime == False:\n order_target_percent(symbol('SPY'),1) #hold SPY as a default \n context.prime = True\n \n weight= 1.0/len(context.security_list)\n \n for stock in context.security_list:\n log.info(\"Buying %s\" % 
(stock.symbol))\n order_target_percent(stock, weight)\n \n #: Exit any positions we might have\n for stock in context.portfolio.positions:\n if data.can_trade(stock) and stock not in context.security_list:\n log.info(\"Exiting our positions on %s\" % (stock.symbol))\n order_target_percent(stock, 0)", "def backtest_portfolio(self):\n self.rank=dict()\n self.accuracy=dict()\n portfolio = dict()\n \n for algo in self.algos:\n portfolio[algo]=pd.DataFrame(index=self.positions.index)\n self.pos_diff=dict()\n self.pos_diff[algo] = self.positions[algo].diff()\n \n portfolio[algo]['price_diff'] = self.bars['Close']-self.bars['Open']\n #portfolio['price_diff'][0:5] = 0.0\n portfolio[algo]['profit'] = self.positions[algo] * portfolio[algo]['price_diff']\n portfolio[algo]['total'] = self.initial_capital + portfolio[algo]['profit'].cumsum()\n portfolio[algo]['returns'] = portfolio[algo]['total'].pct_change()\n d=np.array(portfolio[algo]['profit']).copy()\n d[d>0]=1\n d[d<0]=0\n d[np.array(self.positions[algo])==0]=1\n for i in np.arange(1,len(d)+1):\n c=float(sum(d[0:i]))/(i)\n d[i-1]=c\n portfolio[algo]['accuracy']=d\n self.rank[algo]=float(portfolio[algo]['total'][-1] - portfolio[algo]['total'][0])\n self.returns=portfolio\n c=np.array(self.returns[algo]['profit'])\n c[c>0]=1\n c[c<0]=0\n c[np.array(self.positions[algo])==0]=1\n accuracy=round(float(c.sum())/len(c),2)*self.rank[algo]\n self.accuracy[algo]=accuracy\n #self.ranking= sorted(self.rank.items(), key=operator.itemgetter(1), reverse=True)\n self.ranking= sorted(self.accuracy.items(), key=operator.itemgetter(1))\n self.ready=True\n return (portfolio, self.rank, self.ranking)", "def on_order(self, order: OrderData):\n self.position_calculator.update_position(order)\n\n self.current_pos = self.position_calculator.pos\n self.avg_price = self.position_calculator.avg_price\n\n if order.status == Status.ALLTRADED and order.vt_orderid in (self.long_orders + self.short_orders):\n\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n if order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n self.last_filled_order = order\n\n for ids in (self.long_orders + self.short_orders + self.profit_orders):\n self.cancel_order(ids)\n\n if abs(self.position_calculator.pos) < self.fixed_size:\n return\n\n step = self.get_step()\n\n # tick 存在且仓位数量还没有达到设置的最大值.\n if self.tick and abs(self.position_calculator.pos) < self.max_pos_size * self.fixed_size:\n buy_price = order.price - step * self.grid_step\n sell_price = order.price + step * self.grid_step\n\n buy_price = min(self.tick.bid_price_1 * (1 - 0.0001), buy_price)\n sell_price = max(self.tick.ask_price_1 * (1 + 0.0001), sell_price)\n\n long_ids = self.buy(buy_price, self.fixed_size)\n short_ids = self.sell(sell_price, self.fixed_size)\n\n self.long_orders.extend(long_ids)\n self.short_orders.extend(short_ids)\n\n if order.status == Status.ALLTRADED and order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n if abs(self.position_calculator.pos) < self.fixed_size:\n self.cancel_all()\n\n if not order.is_active():\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.stop_orders:\n self.stop_orders.remove(order.vt_orderid)\n\n self.put_event()", "def 
portfolio_performance(returns,weights):\r\n print('Calculating Portfolio Performance')\r\n # returns=target_asset_port_data_attributes['component_returns']\r\n # weights =target_asset_port_data_attributes['effective_weights']\r\n\r\n component_returns= returns\r\n compnent_weights = pd.DataFrame(data=np.nan,index= component_returns.index,columns=component_returns.columns)\r\n compnent_weights.loc[weights.index,:] = weights\r\n\r\n portfolio_dates = component_returns.index\r\n components = component_returns.columns\r\n\r\n # pre-allocate\r\n BoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n EoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n PnL_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n portfolio_BoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio BoP'])\r\n portfolio_EoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio EoP'])\r\n portfolio_PnL = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio PnL'])\r\n \r\n portfolio_index = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Index'])\r\n previous_index_value = np.int64(1)\r\n\r\n pre_date = portfolio_dates[0]\r\n # set BoP to start weights\r\n for date,row in component_returns.iterrows():\r\n # print(date)\r\n # 1st date\r\n if date == portfolio_dates[0]:\r\n BoP_df.loc[date] = compnent_weights.iloc[0,:]\r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n\r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n # after first date\r\n else:\r\n BoP_df.loc[date] = EoP_df.loc[pre_date]\r\n # weights override\r\n if date in compnent_weights.index:\r\n none_NaN_index = ~compnent_weights.loc[date].isnull()\r\n if not compnent_weights.loc[date][none_NaN_index].empty:\r\n tmp_sum = BoP_df.loc[date].sum()\r\n BoP_df.loc[date][none_NaN_index.values] = (compnent_weights.loc[date][none_NaN_index.values].values)*tmp_sum\r\n\r\n \r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n \r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n\r\n portfolio_returns = portfolio_index.pct_change(1) \r\n portfolio_returns.columns = ['Returns']\r\n\r\n portfolio_index\r\n perf = portfolio_index.calc_stats()\r\n \r\n output = pd.Series(data = [perf,PnL_df,portfolio_index,portfolio_BoP,portfolio_EoP,BoP_df], index=['Portfolio Perf','Component PnL','portfolio_index','portfolio_BoP','portfolio_EoP','BoP_df'])\r\n return output", "def test_interest_vs_stockprice(self):\n stock_prices = np.array([[5, 10, 20, 40]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)", "def 
removeFixedEffect(self, index=None):\n if self._n_terms==0:\n pass\n if index is None or index==(self._n_terms-1):\n\n self._n_terms-=1\n F = self._F.pop() #= self.F[:-1]\n A = self._A.pop() #= self.A[:-1]\n self._A_identity.pop() #= self.A_identity[:-1]\n REML_term = self._REML_term.pop()# = self.REML_term[:-1]\n self._B.pop()# = self.B[:-1]\n self._n_fixed_effs-=F.shape[1]*A.shape[0]\n if REML_term:\n self._n_fixed_effs_REML-=F.shape[1]*A.shape[0]\n\n pass\n elif index >= self.n_terms:\n raise Exception(\"index exceeds max index of terms\")\n else:\n raise NotImplementedError(\"currently only last term can be removed\")\n pass\n self._rebuild_indicator()\n self.clear_cache('Fstar','Astar','Xstar','Xhat',\n 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',\n 'LRLdiag_Xhat_tens','Areml_grad',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')", "def cool_balance(index):\n t = index[0]\n return (\n pulp.lpSum([component_output[i, t] for i in index_cool_out])\n - pulp.lpSum([component_input[i, t] for i in index_cool_in])\n + pulp.lpSum([storage_disch[i, t] for i in heat_storage_names])\n - pulp.lpSum([storage_ch[i, t] for i in heat_storage_names])\n + cool_unserve[t]\n - cool_dump[t]\n == forecast[\"cool_load\"][t]\n )", "def order(self, index=None):\n bfsize = card(self.basefield)\n\n if not self.ord:\n if self.ch in (2, 3):\n if bfsize == self.ch == 2:\n self.ord = self._order_2()\n elif bfsize == self.ch == 3:\n self.ord = self._order_3()\n else:\n error_message = \"no E/F_{%d} order\" % bfsize\n raise NotImplementedError(error_message)\n else:\n self.ord = self._trace_to_order(self.trace())\n\n # final result\n if index:\n # for subfield curve\n basetrace = self._order_to_trace(self.ord)\n trace, oldtrace = basetrace, 2\n for i in range(2, index + 1):\n trace, oldtrace = basetrace*trace - bfsize*oldtrace, trace\n return bfsize ** index + 1 - trace\n\n return self.ord", "def _order_cancel(self, bo):\n log.info(\"bo_blotter: order_cancel bracket order bo#%s\" % bo.ticket) \n cancelled = bo.cancel()\n return(cancelled)", "def on_order(self, order: OrderData):\n\n if order.vt_orderid not in (self.short_orders + self.long_orders):\n return\n\n self.pos_calculator.update_position(order)\n\n self.current_pos = self.pos_calculator.pos\n self.avg_price = self.pos_calculator.avg_price\n\n if order.status == Status.ALLTRADED:\n\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n self.trade_count += 1\n\n short_price = order.price + self.step_price\n if short_price <= self.high_price:\n orders = self.short(short_price, self.order_volume)\n self.short_orders.extend(orders)\n\n if len(self.long_orders) < self.max_open_orders:\n long_price = order.price - self.step_price * self.max_open_orders\n if long_price >= self.low_price:\n orders = self.buy(long_price, self.order_volume)\n self.long_orders.extend(orders)\n\n if order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n self.trade_count += 1\n long_price = order.price - self.step_price\n if long_price >= self.low_price:\n orders = self.buy(long_price, self.order_volume)\n self.long_orders.extend(orders)\n\n if len(self.short_orders) < self.max_open_orders:\n short_price = order.price + self.step_price * self.max_open_orders\n if short_price <= self.high_price:\n orders = self.short(short_price, self.order_volume)\n self.short_orders.extend(orders)\n\n if not order.is_active():\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n elif 
order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n self.put_event()", "def limit_on_close_order(liability, price):\n return locals()", "def order(self, stock, amount):\n self.orders[stock] = amount", "def order_report():", "def market_on_close_order(liability):\n return locals()", "def before_trading_start(context, data):\r\n context.output = pipeline_output('pipeline')\r\n\r\n # sort by earning yield\r\n context.output = context.output.sort(\r\n columns='Free Cash Flow', ascending=False)\r\n\r\n # get top 20 stocks as security list\r\n context.eligible_assets = context.output.iloc[:19]", "def size_order(self, portfolio, initial_order):\n return initial_order", "def cancelOrder(self, order_number):\n pass", "def index():\n def getListOfCompanies(username, symbolOrPriceOrNumber):\n if symbolOrPriceOrNumber == \"symbol\" or symbolOrPriceOrNumber == \"price\" or symbolOrPriceOrNumber == \"number\":\n rows = db.execute(\"SELECT {0} FROM portfolio WHERE username=:username\".format(symbolOrPriceOrNumber), username=username)\n if symbolOrPriceOrNumber == \"symbol\" and len(rows) >= 1:\n namesList = []\n for row in rows:\n namesList.append(lookup(row[symbolOrPriceOrNumber])[\"name\"])\n return namesList\n elif symbolOrPriceOrNumber == \"price\" and len(rows) >= 1:\n pricseList = []\n for row in rows:\n pricseList.append(row[symbolOrPriceOrNumber])\n return pricseList\n elif symbolOrPriceOrNumber == \"number\" and len(rows) >= 1:\n numbersList = []\n for row in rows:\n numbersList.append(row[symbolOrPriceOrNumber])\n return numbersList\n else:\n return None\n else:\n return None\n\n def getTotalValueHolding(username):\n priceRow = db.execute(\"SELECT price FROM portfolio WHERE username=:username\", username=username)\n numberRow = db.execute(\"SELECT number FROM portfolio WHERE username=:username\", username=username)\n\n if len(priceRow) >= 1 and len(numberRow) >= 1 and len(priceRow) == len(numberRow):\n totalList = []\n for i in range(len(priceRow)):\n totalList.append(float(priceRow[i][\"price\"]) * float(numberRow[i][\"number\"]))\n\n return totalList\n\n username = db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"]\n companiesNames = getListOfCompanies(username, \"symbol\")\n numberOfShares = getListOfCompanies(username, \"number\")\n prices = getListOfCompanies(username, \"price\")\n totalValueHolding = getTotalValueHolding(username)\n\n currentCashBalance = db.execute(\"SELECT cash FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"cash\"]\n total = 0\n if totalValueHolding:\n for totalValue in totalValueHolding:\n total = total + totalValue\n\n cashAndStocksTotalValue = float(currentCashBalance) + total\n\n return render_template(\"index.html\", username=username, companiesNames=companiesNames, numberOfShares=numberOfShares,\n prices=prices, totalValueHolding=totalValueHolding, currentCashBalance=currentCashBalance, cashAndStocksTotalValue=cashAndStocksTotalValue)", "def backtest(self, strategy): \n \n # load data\n data = self.import_data(currency_ids = np.unique(self.signals.currency_id),\n start_date = self.start_date,\n end_date = self.end_date)\n \n # add cash and columns for currencies\n data[\"capital\"] = 0\n data[\"actions\"] = \"\" # should become a dictW \n data[\"description\"] = \"\"\n data[\"capital\"].iloc[0] = self.initial_capital\n data[\"worth_usd\"] = 0\n \n for currency_id in np.unique(self.signals.currency_id):\n data[currency_id + '_position'] = 0\n \n # a sell or 
buy can influence subsequent positions, so calculate iteratively\n for observation in range(1, len(data.index)):\n \n date = data.index[observation]\n \n # investment this period is zero\n investment_capital_period = 0\n \n # amount of currency_ids initially same as last period\n for currency_id in np.unique(self.signals.currency_id):\n data[currency_id + '_position'].iloc[observation] = data[currency_id + '_position'].iloc[observation-1] \n \n # at each point, compute size of each position (cash and currencies), and record actions\n if(data.index[observation] in self.signals.index):\n \n action_df = pd.DataFrame(columns=list([\"Currency\",\"NominalAmount\", \"CapitalAmount\"]))\n \n # could be multiple actions\n for index, action in self.signals.loc[date].iterrows(): \n currency_id = action['currency_id']\n signal = action['signal']\n \n # Buy\n if signal == 1:\n \n # buy for 10% currency_id\n investment_capital = data[\"capital\"].iloc[observation-1] * 0.10 \n\n # estimate how many coins\n investment_nominal = round(investment_capital / data[currency_id].iloc[observation])\n \n # calculate exact capital needed\n investment_capital_exact = investment_nominal * data[currency_id].iloc[observation]\n investment_capital_period = investment_capital_period + investment_capital_exact \n \n # change the amount of currency hold\n data[currency_id + '_position'].iloc[observation] = data[currency_id + '_position'].iloc[observation-1] + investment_nominal\n \n # report action by appending a Series to the (empty) dataframe\n action_df = action_df.append(pd.Series({\"Currency\": currency_id, \n \"NominalAmount\": investment_nominal, \n \"CapitalAmount\": investment_capital_exact}),ignore_index=True)\n \n # report description\n data[\"description\"].iloc[observation] = (data[\"actions\"].iloc[observation] + \"\\n Buy \" + \n str(investment_nominal) + \" \" + str(currency_id) + \n \" for \" + str(investment_capital_exact))\n \n # Sell\n if signal == -1:\n \n # sell currency_id for 10% of total capital\n investment_capital = data[\"capital\"].iloc[observation-1] * 0.10 \n \n # estimate how many coins\n investment_nominal = round(investment_capital / data[currency_id].iloc[observation])\n \n # calculate exact capital needed\n investment_capital_exact = investment_nominal * data[currency_id].iloc[observation]\n investment_capital_period = investment_capital_period - investment_capital_exact\n \n # change the amount of currency hold\n data[currency_id + '_position'].iloc[observation] = data[currency_id + '_position'].iloc[observation-1] - investment_nominal\n \n # report action\n action_df = action_df.append(pd.Series({\"Currency\": currency_id, \n \"NominalAmount\": investment_nominal, \n \"CapitalAmount\": investment_capital_exact}),ignore_index=True)\n \n # report description\n data[\"description\"].iloc[observation] = data[\"actions\"].iloc[observation] + \"Sell \" + str(investment_nominal) + \" \" + str(currency_id) + \" for \" + str(investment_capital_exact)\n \n # report actions\n data[\"actions\"].iloc[observation] = action_df.to_json()\n \n # calculate resulting cash capital\n data[\"capital\"].iloc[observation] = data[\"capital\"].iloc[observation-1] - investment_capital_period\n \n # calculate worth by capital (usd) and each currency * price\n data[\"worth_usd\"].iloc[observation] = data[\"capital\"].iloc[observation] \n return data", "def default_actual_order_price(context):\n current_type = context.current_parameters.get('current_type')\n default_price = 0\n actual_order_price = 0\n if 
current_type == 'order':\n project_id = context.current_parameters.get('project_id', None)\n if project_id:\n project = Project.get(project_id)\n default_price = project.price if project else default_price\n actual_order_price = context.current_parameters.get('price', default_price)\n return actual_order_price", "def backtest(cache):\n if not cache:\n history = getBars(MARKET, TF)\n else:\n cachefile = \"cache/{}-{}.csv\".format(MARKET, TF)\n try:\n history = pd.read_csv(\n cachefile, index_col=\"datetime\", parse_dates=True)\n except:\n history = getBars(MARKET, TF)\n history.to_csv(cachefile)\n history['ma'] = history['close'].rolling(MEAN).mean()\n weAreLong = False\n PRICE_DIPPED = False\n\n entry = 0.0\n exit = None\n pl = 0.0\n history['pandl'] = 0.0\n trades = 1\n\n ### BEGIN STRATEGY DEFINITION ###\n count = 1\n\n for i in history.index:\n candle_close_rate = history['ma'][i]\n ma = history['close'][i]\n if count > MEAN:\n # playing revert to mean (RTM)\n if not weAreLong and buySignaled(candle_close_rate, ma, PRICE_DIPPED):\n entry = candle_close_rate\n exit = candle_close_rate * (1.0 + (EXIT_PERCENT / 100.0))\n stop = candle_close_rate * (1.0 - (STOP_PERC / 100.0))\n weAreLong = True\n history['pandl'][i] = pl\n elif weAreLong and history['high'][i] >= exit:\n weAreLong = False\n pl += (((exit - entry) / entry) * 100.0 ) - (2.0 * FEE)\n history['pandl'][i] = pl\n trades += 1\n elif weAreLong and candle_close_rate <= stop:\n weAreLong = False\n pl += (((candle_close_rate - entry) / entry) * 100.0 ) - (2.0 * FEE)\n history['pandl'][i] = pl\n trades += 1\n else:\n if weAreLong:\n # fpl = ((history['close'][i] * (1.0 - FEE)) - (entry * (1.0 + FEE)))\n # fpl = ( (history['close'][i] * (100.0 - FEE)) - (entry * (100.0 + FEE)) ) / 100.0\n fpl = ((candle_close_rate - entry) / entry) * 100.0\n history['pandl'][i] = pl + fpl\n else:\n fpl = 0.0\n history['pandl'][i] = pl + fpl\n count += 1\n if candle_close_rate <= ma:\n PRICE_DIPPED = True\n else:\n PRICE_DIPPED = False\n ### END STRATEGY DEFINITION ###\n\n days = (len(history) * TF) / (60 * 24)\n sharpe_ratio = getSharpe(list(history['pandl']), days)\n fig, ax = plt.subplots(1)\n plt.plot(history['pandl'])\n fig.autofmt_xdate()\n plt.ylabel('cumulative %')\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n # place a text box in upper left in axes coords\n textstr = \"\"\"{}\n Days: {}\n Trades: {}\n Settings:\n TF = {}\n MEAN = {}\n BREAKOUT = {}\n RTM = {}\n RTM_PERCENT = {}\n BO_PERCENT = {}\n EXIT_PERCENT = {},\n STOP = {}\n Sharpe = {}\n \"\"\".format(MARKET,\n days,\n trades,\n TF,\n MEAN,\n BREAKOUT,\n RTM,\n RTM_PERCENT,\n BO_PERCENT,\n EXIT_PERCENT,\n STOP_PERC,\n sharpe_ratio)\n ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,\n verticalalignment='top', bbox=props)\n plt.title(\"BACKTEST {}\".format(MARKET))\n plt.savefig(BACKTESTFILE)\n print(\"{},{},{},{},{},{},{},{},{},{},{},{}\".format(days,\n trades,\n MARKET,\n TF,\n MEAN,\n BREAKOUT,\n RTM,\n RTM_PERCENT,\n BO_PERCENT,\n EXIT_PERCENT,\n STOP_PERC,\n sharpe_ratio))\n # plt.show()", "def test_sweat_index():\n pressure = np.array([1008., 1000., 947., 925., 921., 896., 891., 889., 866.,\n 858., 850., 835., 820., 803., 733., 730., 700., 645.,\n 579., 500., 494., 466., 455., 441., 433., 410., 409.,\n 402., 400., 390., 388., 384., 381., 349., 330., 320.,\n 306., 300., 278., 273., 250., 243., 208., 200., 196.,\n 190., 179., 159., 151., 150., 139.]) * units.hPa\n temperature = np.array([27.4, 26.4, 22.9, 21.4, 21.2, 20.7, 20.6, 21.2, 19.4,\n 19.1, 
18.8, 17.8, 17.4, 16.3, 11.4, 11.2, 10.2, 6.1,\n 0.6, -4.9, -5.5, -8.5, -9.9, -11.7, -12.3, -13.7, -13.8,\n -14.9, -14.9, -16.1, -16.1, -16.9, -17.3, -21.7, -24.5, -26.1,\n -28.3, -29.5, -33.1, -34.2, -39.3, -41., -50.2, -52.5, -53.5,\n -55.2, -58.6, -65.2, -68.1, -68.5, -72.5]) * units.degC\n dewpoint = np.array([24.9, 24.6, 22., 20.9, 20.7, 14.8, 13.6, 12.2, 16.8,\n 16.6, 16.5, 15.9, 13.6, 13.2, 11.3, 11.2, 8.6, 4.5,\n -0.8, -8.1, -9.5, -12.7, -12.7, -12.8, -13.1, -24.7, -24.4,\n -21.9, -24.9, -36.1, -31.1, -26.9, -27.4, -33., -36.5, -47.1,\n -31.4, -33.5, -40.1, -40.8, -44.1, -45.6, -54., -56.1, -56.9,\n -58.6, -61.9, -68.4, -71.2, -71.6, -77.2]) * units.degC\n speed = np.array([0., 3., 10., 12., 12., 14., 14., 14., 12.,\n 12., 12., 12., 11., 11., 12., 12., 10., 10.,\n 8., 5., 4., 1., 0., 3., 5., 10., 10.,\n 11., 11., 13., 14., 14., 15., 23., 23., 24.,\n 24., 24., 26., 27., 28., 30., 25., 24., 26.,\n 28., 33., 29., 32., 26., 26.]) * units.knot\n direction = np.array([0., 170., 200., 205., 204., 200., 197., 195., 180.,\n 175., 175., 178., 181., 185., 160., 160., 165., 165.,\n 203., 255., 268., 333., 0., 25., 40., 83., 85.,\n 89., 90., 100., 103., 107., 110., 90., 88., 87.,\n 86., 85., 85., 85., 60., 55., 60., 50., 46.,\n 40., 45., 35., 50., 50., 50.]) * units.degree\n\n sweat = sweat_index(pressure, temperature, dewpoint, speed, direction)\n assert_almost_equal(sweat, 227., 2)", "def _order_close(self, bo, volume=None):\n log.info(\"bo_blotter: order_close bracket order bo#%s with vol=%s\" % (bo.ticket, volume))\n ticket = bo.close(volume)\n return(ticket)", "def update_weights(self, alpha, ind):\n inside = -alpha * self.labels * self.predictions[ind, :]\n new_weights = self.weights * np.exp(inside)\n self.weights = new_weights / np.sum(new_weights)", "def remove_extra_index_from_context_actions(context_action_dict):\n keys_to_keep = {'initial_value', 'replacement_value'}\n for question in context_action_dict:\n for obj_dct in context_action_dict[question]:\n total_keys = set(obj_dct.keys())\n keys_to_remove = total_keys - keys_to_keep\n for key in keys_to_remove:\n obj_dct.pop(key)\n return context_action_dict", "def test_total_totals_index():\n pressure = np.array([1008., 1000., 947., 925., 921., 896., 891., 889., 866.,\n 858., 850., 835., 820., 803., 733., 730., 700., 645.,\n 579., 500., 494., 466., 455., 441., 433., 410., 409.,\n 402., 400., 390., 388., 384., 381., 349., 330., 320.,\n 306., 300., 278., 273., 250., 243., 208., 200., 196.,\n 190., 179., 159., 151., 150., 139.]) * units.hPa\n temperature = np.array([27.4, 26.4, 22.9, 21.4, 21.2, 20.7, 20.6, 21.2, 19.4,\n 19.1, 18.8, 17.8, 17.4, 16.3, 11.4, 11.2, 10.2, 6.1,\n 0.6, -4.9, -5.5, -8.5, -9.9, -11.7, -12.3, -13.7, -13.8,\n -14.9, -14.9, -16.1, -16.1, -16.9, -17.3, -21.7, -24.5, -26.1,\n -28.3, -29.5, -33.1, -34.2, -39.3, -41., -50.2, -52.5, -53.5,\n -55.2, -58.6, -65.2, -68.1, -68.5, -72.5]) * units.degC\n dewpoint = np.array([24.9, 24.6, 22., 20.9, 20.7, 14.8, 13.6, 12.2, 16.8,\n 16.6, 16.5, 15.9, 13.6, 13.2, 11.3, 11.2, 8.6, 4.5,\n -0.8, -8.1, -9.5, -12.7, -12.7, -12.8, -13.1, -24.7, -24.4,\n -21.9, -24.9, -36.1, -31.1, -26.9, -27.4, -33., -36.5, -47.1,\n -31.4, -33.5, -40.1, -40.8, -44.1, -45.6, -54., -56.1, -56.9,\n -58.6, -61.9, -68.4, -71.2, -71.6, -77.2]) * units.degC\n\n tt = total_totals_index(pressure, temperature, dewpoint)\n assert_almost_equal(tt, 45.10 * units.delta_degC, 2)", "def PlaceOrder(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not 
implemented!')\n raise NotImplementedError('Method not implemented!')", "def update_weights_negative(self):\n eta = self.config.eta\n self.w_xh -= eta * (self.x.T @ self.h)\n self.w_th -= eta * (self.t.T @ self.h)\n self.w_ho -= eta * (self.h.T @ self.o) \n self.w_hz -= eta * (self.h.T @ self.z)", "def test_ordered_amount(self):\n self.app = self.make_app(argv = ['report', 'project_status', self.examples[\"project\"], '--debug', '-o', \"{'P001_101_index3':10, 'P001_102':20}\"],extensions=['scilifelab.pm.ext.ext_couchdb'])\n handler.register(DeliveryReportController)\n self._run_app()\n data = ast.literal_eval(self.app._output_data['debug'].getvalue())\n ordered = {x[0]:x[4] for x in data['table']}\n self.assertEqual(ordered[\"P001_101_index3\"], 10)\n self.assertEqual(ordered[\"P001_102\"], 20)", "def sell():\n if request.method == \"GET\":\n portf = db.execute(\"SELECT * FROM portfolio WHERE id=:id\", id = session[\"user_id\"])\n return render_template(\"sell.html\",portfolio = portf)\n else:\n\n quote = lookup(request.form.get('stocklist'))\n print(str(quote))\n # Remove the stock frm user's portfolio\n # taking no of shares provided by user in form\n shares = int(request.form.get(\"no_of_shares\"))\n\n # Taking the price of that share\n\n price = db.execute(\"SELECT price FROM portfolio WHERE symbol=:symbol AND id=:id\", symbol = quote[\"symbol\"], id = session[\"user_id\"])\n\n # totla_price\n total_remove_price = shares * quote[\"price\"]\n # Now updating\n print(total_remove_price)\n # Taking total no of shares from portfolio\n share = db.execute(\"SELECT shares FROM portfolio WHERE id=:id AND symbol=:symbol\",symbol = quote[\"symbol\"],\n id = session[\"user_id\"])\n total = db.execute(\"SELECT total FROM portfolio WHERE id=:id AND symbol=:symbol\",symbol = quote[\"symbol\"],\n id = session[\"user_id\"])\n\n # if share provided by user in form is less than or equal to total shares owned then only transaction will processed\n print(share[0][\"shares\"])\n print(shares)\n if (shares < share[0][\"shares\"]):\n # Remove stock and price and no of stocks stocks = stocks - n\n real_total = total[0][\"total\"].split(\"$\")\n\n new_total1 = real_total[1][2:]\n new_total2 = real_total[1][:1]\n yup_final = new_total1 + new_total2\n print(yup_final)\n db.execute(\"UPDATE portfolio set total=:total, shares=:shares WHERE id=:id\", total = float(yup_final) - total_remove_price\n , shares = int(share[0][\"shares\"]) - shares , id=session[\"user_id\"])\n # current selling price = price * stocks and add this to user's cash\n elif (shares == share[0][\"shares\"]):\n db.execute(\"DELETE FROM portfolio WHERE id=:id AND symbol=:symbol\", id = session[\"user_id\"], symbol = quote['symbol'])\n else:\n return apology(\"Unable to process request\", 404)\n return redirect(\"/\")", "def test_low_stockprice_high_interest(self):\n stock_prices = np.array([[5, 4, 4, 2],\n [5, 3, 3, 3],\n [5, 4, 2, 2],\n [5, 3, 3, 1]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)", "def index():\n # Selects stock that user actually has\n stockuserhas = db.execute(\n \"SELECT symbol, shares FROM portfolio WHERE userid = :userid GROUP BY symbol HAVING SUM(shares) > 0\", userid=session[\"user_id\"])\n # Finds the amount of money user has to spend on stocks\n amount = db.execute(\"SELECT cash FROM users WHERE id = :userid\", 
userid=session[\"user_id\"])\n # The virst value in the array is the amount of money user can spend\n money = amount[0][\"cash\"]\n # If the user does not have any stocks, return index using with just money as input\n if not stockuserhas:\n return render_template(\"index.html\", money=money, completetotal=money)\n\n # Selects summarative information for each symbol\n stocks = db.execute(\n \"SELECT SUM(total), symbol, SUM(shares), name FROM portfolio WHERE userid = :userid GROUP BY symbol\", userid=session[\"user_id\"])\n # For each symbol, add the current price of the stock to the end of the dictionary\n for stock in stocks:\n # Looks up current price of stock based on symbol\n stockinfo = lookup(stock[\"symbol\"])\n # Finds current value of stock\n currentprice = float(stockinfo[\"price\"])\n # Adds the price to the dictionary\n stock.update({\"price\": currentprice})\n\n # The total value of stocks user owns\n totalstockvalue = db.execute(\"SELECT SUM(total) FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n # Total amount a user owns is the cash they have plus the sum of the stocks\n completetotal = float(money + float(totalstockvalue[0]['SUM(total)']))\n # Return index.html with all of the information put together above\n return render_template(\"index.html\", completetotal=completetotal, money=money, stocks=stocks)", "def apply_discount(self, product):\n pass", "def rebalance(self, date):\n eod_values = self.df.shift(1).loc[date, 'values'].mul(1 + self.tc.instrument_returns.loc[date, 'daily'])\n eod_portfolio_value = sum(eod_values.values)\n\n previous_values = self.df.loc[date, 'values'].copy()\n position_value = self.target_weights.mul(eod_portfolio_value)\n trading_cost = abs(eod_values.div(eod_portfolio_value) - self.target_weights) * eod_portfolio_value * \\\n self.tc.commission\n current_values = position_value - trading_cost\n self.df.loc[date, 'values'] = current_values.values\n future_values = self.tc.instrument_returns.loc[date:, 'cumulative'].div(\n self.tc.instrument_returns.loc[date, 'cumulative']).mul(current_values, axis=1)\n self.df.loc[date:, 'values'] = future_values.values\n trade = pd.Series(current_values - previous_values)\n # Once we have calculated the end-of-day value of the portfolio, we set the allocation by looking at the\n # dollars invested in each ETF\n self.df.loc[date:, 'allocations'] = future_values.div(future_values.sum(axis=1), axis=0).values\n\n return trade", "def _move_cancelled_order(self, bo):\n return(self._move_order_from_to(bo, 'trades', 'cancelled'))", "def adjusted_rand_index(self):\n return self.pairwise.kappa()", "def onCancelOrder(self, item):\n self.frame.mode.cancelMarketOrder(self.lstOrders.getMultiSelectedItems(), self.mySystemDict['id'])", "def order(self, order):\n\n #print(\"Evaluating order: \", order)\n if self.security != order.secid:\n raise (\"Cannot place order for security \"\n \"%s on book[%s]\" % (order.security, self.security))\n\n levels = self.bid\n if order.side == Side.SELL:\n levels = self.offer\n\n new_level = OrderBookLevel(price=order.price, qty=order.qty, order_count=1)\n start_index = levels.bisect_right(new_level)\n levels.insert(start_index, new_level)\n OrderBookUtils.compact(levels, start_index)\n\n # Trim list\n if order.side == Side.SELL:\n # Delete from end of list - highest offers\n size = len(self.offer)\n if size > MAX_DEPTH:\n for _ in itertools.repeat(None, size - MAX_DEPTH):\n del self.offer[-1]\n else:\n # Delete from start of list - lowest bids\n size = len(self.bid)\n if 
size > MAX_DEPTH:\n for _ in itertools.repeat(None, size - MAX_DEPTH):\n del self.bid[0]\n\n return self.match(order.side)", "def prevalence_index(self):\n return _div(abs(self.TP - self.TN), self.grand_total)", "def update_order():", "def update_order():", "def before_trading_start(context, data):\r\n context.output = algo.pipeline_output('pipeline')\r\n\r\n # These are the securities that we are interested in trading each day.\r\n context.security_list = context.output.index\r\n \r\n # Loop through all assets in pipeline.\r\n for asset, row in context.output.iterrows():\r\n context.price[asset] = row.close\r\n \"\"\"\r\n # Skip entries with no flags.\r\n if row.flag_type != 'UP' and row.flag_type != 'DOWN':\r\n continue\r\n \r\n log.info('%s flag for %s. Price level = %f' % (row.flag_type, asset, context.price[asset]))\r\n \r\n # Count flags for asset in context.flags\r\n if asset in context.flags:\r\n context.flags[asset][row.flag_type] += 1\r\n else:\r\n if row.flag_type == 'UP':\r\n context.flags[asset] = {'UP': 1, 'DOWN': 0}\r\n \r\n elif row.flag_type == 'DOWN':\r\n context.flags[asset] = {'UP': 0, 'DOWN': 1}\r\n \"\"\" \r\n \r\n context.up_ratios[asset] = row.up_ratio\r\n \r\n if math.isnan(row.up_flags):\r\n continue\r\n \r\n context.flags[asset] = {'UP': row.up_flags, 'DOWN': row.down_flags}\r\n \r\n # In 2020, activate overweighting\r\n if not context.overweighting:\r\n today = get_datetime('US/Eastern')\r\n if today.year == 2020:\r\n context.overweighting = True", "def discounted_return(self, t, gamma):\n\n def discounted_reward(undiscounted, index):\n return undiscounted * np.power(gamma, index)\n\n if t < -len(self._experiences) or t > len(self._experiences):\n raise ValueError('Index t = {} is out of bounds'.format(t))\n\n return sum(\n discounted_reward(experience.reward, i)\n for i, experience in enumerate(self._experiences[t:]))", "def closed_positions(self, closed_pos):\n liquidated_short_capital = 0\n liquidated_long_capital = 0\n leveraged_capital = 0\n fees_paid = 0\n\n for pos in closed_pos:\n if pos.order_type == Consts.SHORT:\n liquidated_short_capital += pos.get_current_liquid_capital()\n else:\n liquidated_long_capital += pos.get_current_liquid_capital()\n\n # Calculates the amount of fees for the current closed position.\n fees_paid += pos.fees_paid\n\n # Calculates the amount of leveraged capital for current closed position.\n leveraged_capital += pos.leverage_capital\n\n # Update if trailing has been activate\n if pos.trailing_activated > 0:\n self.hit_trail_positions += pos.trailing_activated\n\n # Update hit/miss/expired/stopped counters\n if pos.hit_profit_target:\n self.hit_positions += 1\n elif pos.expired:\n self.expired_positions_counter += 1\n elif pos.stopped:\n self.stopped_positions_counter += 1\n else:\n self.miss_positions += 1\n\n # Update portfolio statistics according to the closed position\n self.fees_paid += fees_paid\n self.liquid_capital += liquidated_long_capital + liquidated_short_capital\n self.leverage_capital -= leveraged_capital", "def expenses_to_outside(self) -> Decimal:\n return Decimal(\n sum(\n [\n t.amount\n for t in self.transactions_all\n if t.amount < 0 and not t.other_party.is_user_owner\n ]\n )\n )", "def create_order(df_stock, df_signal, moneyness=('OTM', 'ITM'),\n cycle=0, strike=0, expire=(False, True)):\n symbol = df_stock.ix[df_stock.index.values[0]]['symbol']\n\n tb_closes = {\n stock.date.strftime('%Y-%m-%d'): np.float(stock.close) for stock in\n Stock.objects.filter(Q(symbol=symbol) & Q(source='thinkback'))\n }\n\n 
holding = df_signal['holding'].apply(\n lambda x: int(x / np.timedelta64(1, 'D'))\n ).astype(np.int).min()\n\n data = list()\n dates0, options0 = get_options_by_cycle_strike(\n symbol=symbol,\n name='CALL',\n dates0=df_signal['date0'],\n dte=holding,\n moneyness=moneyness,\n cycle=cycle,\n strike=strike\n )\n\n for date0, (index, signal) in zip(dates0, df_signal.iterrows()):\n date1 = signal['date1']\n\n if date0:\n option0 = options0.get(date=date0)\n\n option1 = None\n if option0 and option0.bid > 0:\n date1, option1 = get_option_by_contract_date(option0.contract, date1)\n\n if option0 and option1:\n stock0 = tb_closes[option0.date.strftime('%Y-%m-%d')]\n close0 = stock0 - np.float(option0.bid)\n\n ask1 = 0\n if int(expire):\n ask1 = np.float(\n tb_closes[option1.date.strftime('%Y-%m-%d')]\n - np.float(option0.contract.strike)\n )\n ask1 = ask1 if ask1 > 0 else 0.0\n\n date1 = option1.date\n stock1 = tb_closes[option1.date.strftime('%Y-%m-%d')]\n close1 = stock1 - np.float(ask1)\n else:\n date1 = option1.date\n stock1 = tb_closes[option1.date.strftime('%Y-%m-%d')]\n close1 = stock1 - np.float(option1.ask)\n\n data.append({\n 'date0': option0.date,\n 'date1': date1,\n 'signal0': 'BUY',\n 'signal1': 'SELL',\n 'stock0': stock0,\n 'stock1': stock1,\n 'option0': option0.bid,\n 'option1': ask1 if expire else option1.ask,\n 'close0': np.round(close0, 2), # buy using ask\n 'close1': np.round(close1, 2), # sell using bid\n 'option_code': option0.contract.option_code,\n 'strike': np.float(option0.contract.strike),\n 'dte0': np.int(option0.dte),\n 'dte1': np.int(option1.dte),\n 'intrinsic0': np.float(option0.intrinsic),\n 'intrinsic1': np.float(option1.intrinsic)\n })\n\n df = DataFrame()\n if len(data):\n df = DataFrame(data, columns=[\n 'date0', 'date1', 'signal0', 'signal1',\n 'stock0', 'stock1', 'option0', 'option1', 'close0', 'close1',\n 'option_code', 'strike', 'dte0', 'dte1',\n 'intrinsic0', 'intrinsic1'\n ])\n\n df['holding'] = df['date1'] - df['date0']\n df['pct_chg'] = np.round((df['close1'] - df['close0']) / df['close0'], 2)\n\n f = lambda x: np.round(x['pct_chg'] * -1 if x['signal0'] == 'SELL' else x['pct_chg'], 2)\n df['pct_chg'] = df.apply(f, axis=1)\n\n df['sqm0'] = 100\n df['sqm1'] = -100\n df['oqm0'] = -1\n df['oqm1'] = 1\n\n return df", "def portfolio_table(self):\n idx = set(name.split('-')[0].split('.')[0] for name, etf in self.etfs.items() if not etf.sold())\n table = pd.DataFrame({'Invested': 0, 'Shares':0, 'Share Price':0, 'Present Value':0, 'P/L':0, 'P/L%':0},index=idx)\n for name, etf in self.etfs.items():\n if not etf.sold():\n table.loc[name.split('-')[0].split('.')[0], 'Invested'] += etf.initial_investment()\n table.loc[name.split('-')[0].split('.')[0], 'Shares'] += etf.n_shares\n table.loc[name.split('-')[0].split('.')[0], 'Share Price'] = etf.stock_price()\n table.loc[name.split('-')[0].split('.')[0], 'Present Value'] += etf.present_value()\n table.loc[name.split('-')[0].split('.')[0], 'P/L'] += etf.profit_loss()\n table.insert(1, 'PMA', round(table['Invested'] / table['Shares'], 2))\n table.insert(3, 'Initial Weight', round(table['Invested'] / table['Invested'].sum() * 100, 2))\n table.insert(4, 'Present Weight', round(table['Present Value'] / table['Present Value'].sum() * 100, 2))\n table['P/L%'] = round(table['P/L'] / table['Invested'] * 100, 2)\n table['P/L'] = round(table['P/L'], 2)\n table['Present Value'] = round(table['Present Value'], 2)\n return table.sort_values('Invested', 0, ascending=False)", "def att_neg_reward(state, election_results, electoral_votes, 
attack_list):\n return -538/51", "def reset(self, context):\n self.context = context\n self.min_order_size = 1e-4\n self.max_order_size = 0\n self.max_position_held = 0\n return self", "async def daily(self, ctx):\r\n # TODO: Asssess whether this can be cleaned up. \r\n # As it stands, very similar to inv()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n stock = self.iex.get_held_stocks(db, company.id)\r\n inventory = []\r\n for s in stock:\r\n close = await self.get_latest_close(ctx, db, s.symbol)\r\n inventory.append([s.symbol, s.quantity, s.purchase_price, close.close, s.quantity*close.close - s.quantity*s.purchase_price ]) \r\n inv_df = pd.DataFrame(inventory, columns=['Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n inv_df['sign'] = np.where(inv_df['Current Value']>=0, '+', '-')\r\n inv_df['%'] = abs(((inv_df['Close'] - inv_df['Purchase Price']) / inv_df['Purchase Price']) * 100)\r\n inv_df['%'] = inv_df['%'].round(1)\r\n inv_df = inv_df.sort_values(['Symbol'])\r\n inv_df = inv_df[['sign', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value']]\r\n aggregated = tabulate(inv_df.values.tolist(), headers=['Δ', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n await ctx.send(f'```diff\\n{aggregated}```')", "def current_portfolio_weights(self) -> 'pd.Series[float]':\n position_values = pd.Series({\n asset: (\n position.last_sale_price *\n position.amount *\n asset.price_multiplier\n )\n for asset, position in self.positions.items()\n }, dtype=\"float64\")\n return position_values / self.portfolio_value", "def get_beta(self,df,tick,ind):\n cov = get_cov(df,tick,ind)\n var = df[ind].var()\n beta = cov / var\n return beta", "def getIdealSec(context, data): #This replaced before_trading_start(context, data)\n record(Leverage = \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t context.account.leverage,pos=len(context.portfolio.positions))\n context.output = pipeline_output('my_pipeline')\n #print('Pipeout: ')\n #print(context.output)\n \n # These are the securities that we are interested in trading each day.\n # Note: As it stands, the securities in this list are from two different industries (defense and\n # consumer electronics). Although more computationally expensive then dividing them out into their \n # two respective industries prior to cross correlating, leaving them in the same matrix/data set and \n # cross correlating them gives us a way to 'check' that the crosscorrelation is valid, since securities within the same industry should typically cross correlate to a higher degree than across industries. ***\n context.security_list = context.output.index \n context.defenseList = context.output[context.output['defenseFilt']].index.tolist()\n #print(context.defenseList)\n context.autoList = context.output[context.output['autoFilt']].index.tolist()\n #print(context.autoList)\n context.chemList = context.output[context.output['chemFilt']].index.tolist()\n #print(context.chemList)\n context.techList = context.output[context.output['techFilt']].index.tolist()\n #print(context.techList)\n context.depList = context.output[context.output['depFilt']].index.tolist()\n # Within each sector, calculate the mean (and max, since we may choose only to trade the maximally correlated securities regardless of industry) crosscorrelation between all combinations of stocks. \n #This will only run every trading day to prevent computational expense. 
In that \n #respect, performs identically to a pipeline add-on (but allows the use of \"history\") \n #Try block here incase pipe returns no valid securities. \n try:\n \tprice_history = np.transpose(data.history(context.security_list, fields=\"price\", bar_count=context.lookback,frequency=\"1m\"))\n \tprice_history=price_history.as_matrix()\n except:\n price_history=[[0],[0],[0]]\n #This returns three arrays, containing a filtered set of maximally cross correlated securities within the last time range (given by context.lookback), their associated (and filtered) time delays corresponding to their maximum correlation, and the degree of their correlation in the given time frame. Essentially, since tau has already been filtered for, the degree of their correlation should be used as a confidence feature to make predictions off of, and tau should be used to determine when to make purchases/sales. \n #hCorrVals,maxSecs,timeDelays,short_timeDelays=crossCorr(context.security_list,price_history,context)\n #The best securities to trade using this algorithm (each day) are listed in the below lists ***\n try:\n \thCorrVals,maxSecs,timeDelays,short_timeDelays=crossCorr(context.security_list,price_history,context) \n except: \n print('Crosscorr Failed')\n maxSecs,hCorrVals,timeDelays,short_timeDelays=[],[],[],[]\n #\"Globalize\" the returned information so that we can handle these commodities every minute. \n context.Securities=maxSecs\n context.CorrVals=hCorrVals\n context.timeDelays=short_timeDelays #************Used to be timeDelays, now however, we calculate a more recent tau\n context.actionList,context.timerList,context.tradeList,context.tradingNow=[0]*len(context.Securities),[0]*len(context.Securities),[0]*len(context.Securities),[0]*len(context.Securities) #list of zeros indicating that no stocks should currently be trading\n #(Note that all stocks should be sold at end of every tradinng day.) ", "def index():\n # Establish userID.\n userID = session[\"user_id\"]\n # Isolate all results from portfolio table for the current user.\n portfolio = db.execute(\"SELECT * FROM portfolio WHERE id=:userID\", userID=session[\"user_id\"])\n # Cash for current user (first row, cash column)\n cash = db.execute(\"SELECT cash FROM users WHERE id=:userID\", userID=userID)[0][\"cash\"]\n # Empty list to store stock data as iterating through rows.\n stockData = []\n # Set total for combined stoc value to 0.\n totalAllStocks = 0\n\n # Iterate over rows from portfolio and allocate a row for each stock that has more than 0 owned.\n for row in portfolio:\n if row[\"numOwned\"] != 0:\n stockData.append(row)\n\n # Iterate over rows in stock data and provide value for each column. 
Other values for use in html are already in list from previous loop.\n # Had to play around with usd, once in usd is a str rather than float so usd always has to be post calculations.\n for row in stockData:\n stock = lookup(row[\"symbol\"])\n row[\"name\"] = stock[\"name\"]\n row[\"currentPrice\"] = usd(stock[\"price\"])\n row[\"total\"] = usd(row[\"numOwned\"] * stock[\"price\"])\n totalAllStocks += row[\"numOwned\"] * stock[\"price\"]\n # Grand Total is combined stock values and cash value.\n grandTotal = totalAllStocks + cash\n # Return index.html input sources.\n return render_template(\"index.html\", stockData=stockData, cash=usd(cash), totalAllStocks = usd(totalAllStocks), grandTotal=usd(grandTotal))", "def update_price_model(self, good, order_type, is_successful, clearing_price=0):\n\n SIGNIFICANT = 0.25 # 25% more or less is \"significant\"\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1 # 10% of ideal inventory = \"LOW\"\n HIGH_INVENTORY = 2.0 # 200% of ideal inventory = \"HIGH\"\n MIN_PRICE = 0.01 # lowest allowed price of a Good\n\n if is_successful:\n # add this trade to the observed trading range\n self.observed_trading_range[good].append(clearing_price)\n\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05 # the degree which the Pop should bid outside the belief\n\n # how different the public mean price is from the price belief\n delta_to_mean = mean - public_mean_price\n\n if is_successful:\n if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:\n # this Pop overpaid, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n # this Pop underpaid!, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # increase the belief's certainty\n belief.low += wobble * mean\n belief.high -= wobble * mean\n\n else:\n # shift towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # check for inventory special cases\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n\n # if we're buying and inventory is too low\n # meaning we're desperate to buy\n if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:\n wobble *= 2\n\n # if we're selling and inventory is too high\n # meaning we're desperate to sell\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n # all other cases\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n\n # TODO: figure out why this is sometimes 0\n if sells + buys > 0:\n\n supply_vs_demand = (sells - buys) / (sells + buys)\n\n if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:\n # too much supply? lower bid lower to sell faster\n # too much demand? 
raise price to buy faster\n\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n\n # shift the price belief to the new price mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n\n # decrease belief's certainty since we've just changed it (we could be wrong)\n belief.low -= wobble * mean\n belief.high += wobble * mean\n\n # make sure the price belief doesn't decrease below the minimum\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE", "def index():\n#Get the current data of the stock.\n\n #SUM all similar stock values from Portfolio.\n ports = db.execute(\"SELECT *, SUM(quantity) as sharetotal FROM portfolio WHERE id = :id GROUP BY symbol\", id=session[\"user_id\"])\n\n #Get the remaining cash of the user from the users table.\n get_cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session['user_id'])\n\n #Convert the get_cash dict to float so it can be displayed to index.html\n remaining_cash = get_cash[0]['cash']\n\n #SUM the stocks' total value plus the remaining cash.\n get_grand_total = db.execute(\"SELECT *, SUM(total) as grand_total FROM portfolio where id = :id\", id=session[\"user_id\"])\n grand_total_fl = get_grand_total[0]['grand_total']\n\n\n\n #Hold value is the sum of the shares * price of each shares in the portfolios PLUS the remaining cash.\n if grand_total_fl != None:\n hold_value = grand_total_fl + remaining_cash\n #Update hte current hold value of the user\n db.execute(\"UPDATE users SET hold_value = :hold_value WHERE id = :id\", id=session[\"user_id\"], hold_value=hold_value)\n else:\n hold_value = remaining_cash\n\n\n #Query for the symbol in the database for the specific user.\n rows = db.execute(\"SELECT symbol, stock_price FROM portfolio WHERE id = :id GROUP by symbol\", id=session[\"user_id\"])\n\n #Initiate a list for all the open prices of stocks of a certain user.\n price_open = []\n num_stocks = []\n symbol_list = []\n avg_open_list = []\n profit_loss_list = []\n price_today_list = []\n\n\n for i in range(len(rows)):\n print(rows[i]['symbol'])\n symbol = rows[i]['symbol']\n open_price = rows[i]['stock_price']\n print(rows[i]['stock_price'])\n stock = lookup(rows[i]['symbol'])\n price_today = stock['price']\n\n #Insert data into the price_open list\n price_open.insert(i, open_price)\n\n #Count the number of stocks in posession\n share_total = ports[i]['sharetotal']\n\n #Insert data into the num_stocks list\n num_stocks.insert(i, share_total)\n\n #Insert data into the symbol_list list\n symbol_list.insert(i, symbol)\n\n #Insert data into the price_today_list\n price_today_list.insert(i, price_today)\n\n #Compute for the average open price of all stocks of a certain user.\n total_price = ports[i]['total']\n avg_open = total_price/share_total\n avg_open_list.insert(i, avg_open)\n\n profit_loss = ((price_today - avg_open)/avg_open)*100\n\n profit_loss_list.insert(i, (profit_loss))\n\n\n db.execute(\"UPDATE portfolio SET price_today = :price_today, profit_loss = :profit_loss, avg_open = :avg_open WHERE symbol = :symbol AND id = :id\", price_today=price_today, symbol=symbol,profit_loss=profit_loss, avg_open=avg_open, id=session[\"user_id\"])\n\n\n print(\"The symbols are:\", symbol_list)\n print(\"The quantity are: \", num_stocks)\n print(\"The open prices are: \", price_open)\n print(\"The average open prices are: \", avg_open_list)\n print(\"The prices today are: \", price_today_list)\n print(\"The profit and loss are: \", 
profit_loss_list)\n\n return render_template(\"index.html\", ports=ports, remaining_cash = remaining_cash, hold_value=hold_value,)", "def test_withdraw_amount_view_with_negative_amount(self):\n self.account.current_balance = 100000\n self.account.save()\n\n client.force_authenticate(user=self.account.user, token=self.token)\n url = reverse('customer_withdraw')\n request = client.post(url, {'amount': -100}, format='json')\n self.assertEqual(400, request.status_code)", "def skip(self):\n if self._energy < self._be_cost:\n return\n\n self._energy = self._energy - self._be_cost\n self._env.simulate()", "def BacktestStrategy2(start_cond_dict, df, stock_exchange, invt_dict):\n total_days=df.shape[0]\n today_invt_dict=invt_dict\n invt_daily_list=[] # invt after today's transaction\n net_wealth_list=[]\n recent_max=0 # recent max = 전고점 가격\n for i in range(total_days):\n if i==0: # 첫날은 일단 풀매수\n recent_max=stock_exchange.GetDayHighestPrice(i)\n today_invt_dict=stock_exchange.FullBuyStocks(today_invt_dict, i)\n else: # 다른날은 전부 전략대로 수행\n recent_max=max(recent_max, stock_exchange.GetDayHighestPrice(i-1)) # 전고점 갱신 확인\n # 만약 어제 종가가 전고점*threshold 미만이라면: 풀매도 \n if (stock_exchange.GetDayClosePrice(i-1) < \n (start_cond_dict['sell_threshold_percent']/100)*recent_max):\n today_invt_dict=stock_exchange.FullSellStocks(today_invt_dict, i)\n # 매도조건을 만족 안 시킨 상황에서 n개월 모멘텀이 (+)면: 풀매수 -- n개월이 안지났으면 스킵\n elif (i > start_cond_dict['buy_momentum_days'] and \n stock_exchange.GetDayHighestPrice(i-start_cond_dict['buy_momentum_days']) <\n stock_exchange.GetDayOpenPrice(i)):\n today_invt_dict=stock_exchange.FullBuyStocks(today_invt_dict, i)\n # 나머지 상황에선 포지션 홀드\n else:\n pass\n invt_daily_list.append(today_invt_dict)\n #print(today_invt_dict) # for debug :)\n net_wealth_list.append(stock_exchange.EstimateNetWealth(today_invt_dict, i))\n \n PrintResult(\"Experimental Strategy\", net_wealth_list)\n plt.plot(net_wealth_list)\n plt.title(\"Experimental Strategy\")\n plt.ylabel('Net Worth in USD') # Cash + Stock worth\n plt.show()", "def before_trading_start(context, data):\n factors = pipeline_output('ff_example')\n\n # get the data we're going to use\n returns = factors['returns']\n mkt_cap = factors.sort_values(['market_cap'], ascending=True)\n be_me = factors.sort_values(['be_me'], ascending=True)\n\n # to compose the six portfolios, split our universe into portions\n half = int(len(mkt_cap)*0.5)\n small_caps = mkt_cap[:half]\n big_caps = mkt_cap[half:]\n \n thirty = int(len(be_me)*0.3)\n seventy = int(len(be_me)*0.7)\n growth = be_me[:thirty]\n neutral = be_me[thirty:seventy]\n value = be_me[seventy:]\n\n # now use the portions to construct the portfolios.\n # note: these portfolios are just lists (indices) of equities\n small_value = small_caps.index.intersection(value.index)\n small_neutral = small_caps.index.intersection(neutral.index)\n small_growth = small_caps.index.intersection(growth.index)\n \n big_value = big_caps.index.intersection(value.index)\n big_neutral = big_caps.index.intersection(neutral.index)\n big_growth = big_caps.index.intersection(growth.index)\n\n # take the mean to get the portfolio return, assuming uniform\n # allocation to its constituent equities.\n sv = returns[small_value].mean()\n sn = returns[small_neutral].mean()\n sg = returns[small_growth].mean()\n \n bv = returns[big_value].mean()\n bn = returns[big_neutral].mean()\n bg = returns[big_growth].mean()\n\n # computing SMB\n context.smb = (sv + sn + sg)/3 - (bv + bn + bg)/3\n\n # computing HML\n context.hml = (sv + bv)/2 - (sg + bg)/2", "def 
generate_orders(self, good):\n surplus = self.inventory.surplus(good)\n if surplus >= 1: # sell inventory\n # the original only old one item here\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n # print('{} sells {} {}'.format(self.pop_job.title, sell_amount, good.name))\n self.market.sell(order)\n else: # buy more\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n\n if shortage > 0:\n if shortage <= free_space:\n # enough space for ideal order\n limit = shortage\n else:\n # not enough space for ideal order\n limit = math.floor(free_space / shortage)\n\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n # print('{} buys {} {}'.format(self.pop_job.title, limit, good.name))\n self.market.buy(order)\n # else:\n # print(\"{} has no shortage of {} (has shortage: {})\".format(self.pop_job.title, good.title, shortage))", "def discard_index_from_word(self,word,index):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n\r\n if word in self.word_dict:\r\n\r\n self.word_dict[word].discard(str(index))\r\n\r\n\r\n #with database\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname,word,str(index),)\r\n db_cursor.execute(\"DELETE FROM word_to_indexes \"\r\n +\"WHERE notebook=? AND word=? \"\r\n +\"AND note_index=?;\",\r\n value_tuple)\r\n\r\n db_cursor.execute(\"SELECT * FROM word_to_indexes\"\r\n +\" WHERE notebook=? and word=?;\",\r\n value_tuple[0:2])\r\n if db_cursor.fetchone():\r\n db_cursor.execute(\"DELETE FROM all_words\"\r\n +\" WHERE notebook=?\"\r\n +\" AND word=?;\",\r\n value_tuple[0:2])", "def exit(self, profit=0, loss=0, trail_offset=0):\n self.exit_order = {'profit': profit, 'loss': loss, 'trail_offset': trail_offset}", "def generate_naive_order(self, signal):\n order = None\n \n symbol = signal.symbol\n direction = signal.signal_type\n strength = signal.strength\n \n mkt_quantity = 100\n cur_quantity = self.current_positions[symbol]\n order_type = 'MKT'\n \n if direction == 'LONG' and cur_quantity == 0:\n order = OrderEvent(symbol, order_type, mkt_quantity, 'BUY')\n if direction == 'SHORT' and cur_quantity == 0:\n order = OrderEvent(symbol, order_type, abs(cur_quantity), 'SELL')\n \n if direction == 'EXIT' and cur_quantity > 0:\n order = OrderEvent(symbol, order_type, abs(cur_quantity), 'SELL')\n if direction == 'EXIT' and cur_quantity < 0:\n order = OrderEvent(symbol, order_type, abs(cur_quantity), 'BUY')\n\n # print(symbol, order_type)\n # print(mkt_quantity, cur_quantity)\n return order", "def _calculate_order(self, world: World) -> float:\n raise NotImplementedError()", "def _abandon(self, option, positions, exDate):\n settleDate = _getSettleDate(option.instrument, exDate)\n for pos in positions:\n posTrades, orgTrades = (pos[0], pos[1])\n multiCurr = len(posTrades) > 1\n for trade in posTrades:\n if not trade.Quantity():\n continue\n\n link = _getLinkedTrade(trade, orgTrades, multiCurr)\n abandoned = _createClosingTrade(trade, link, exDate,\n settleDate)\n self.add_trade(abandoned)", "def index():\n\n rows = db.execute(\"SELECT * FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n users = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n cash = users[0][\"cash\"]\n total = 0\n\n for row in rows:\n symbol = row[\"symbol\"]\n shares = row[\"shares\"]\n stock = lookup(symbol)\n price_t = float(stock[\"price\"]) * shares\n db.execute(\"UPDATE portfolio SET price=:price WHERE id=:id AND symbol=:symbol\",\n price=float(stock[\"price\"]), 
id=session[\"user_id\"], symbol=row[\"symbol\"])\n total += price_t\n\n TOTAL = total + cash\n return render_template(\"index.html\", rows=rows, cash=usd(cash), TOTAL=usd(TOTAL))", "def _correct_back_adjusted_prices(self, price_df):\n final_adj_close = price_df.iloc[-1]['Adj Close']\n if final_adj_close > 0.0:\n final_close = price_df.iloc[-1]['Close']\n if not np.allclose(final_close, final_adj_close):\n adj_factor = final_close / final_adj_close\n price_df['Adj Close'] *= adj_factor", "def index():\n stocks = db.execute(\"SELECT Symbol, Company, SUM(NumberOfShares) AS Shares, UnitPrice, SUM(TotalPrice) AS TotalPrice FROM \"\n \"portfolio WHERE UserID = :userid GROUP BY Symbol\", userid=session.get(\"user_id\"))\n\n symbol = db.execute(\"SELECT Symbol FROM portfolio WHERE UserID = :userid\", userid=session.get(\"user_id\"))\n\n cash = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session.get(\"user_id\"))\n\n balance = cash[0][\"cash\"]\n grandTotal = 0\n for stock in stocks:\n grandTotal = grandTotal + stock[\"TotalPrice\"]\n\n grandTotal = grandTotal + balance\n\n return render_template(\"index.html\", stockList=stocks, cash=balance, totalAssets=grandTotal, currentUser=session.get(\"user_id\"))", "def ShipOrder(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def onCancelOrder(self, item):\n self.frame.mode.cancelIndustryOrder(self.lstOrders.getMultiSelectedItems(), self.mySystemDict['id'])", "def generate_naive_order(self, signal):\n order = None\n\n symbol = signal.symbol\n datetime = signal.datetime\n direction = signal.signal_type\n strength = signal.strength\n price = signal.price\n\n lmt_quantity = 100\n cur_quantity = self.current_positions[symbol]\n order_type = 'LMT'\n\n if direction == 'LONG':\n order = OrderEvent(symbol, datetime, order_type,\n lmt_quantity, price, 'BUY')\n if direction == 'EXIT':\n if cur_quantity < 100:\n print 'Current quantity: %s is smaller than 100.' 
\\\n % cur_quantity\n raise KeyError\n else:\n order = OrderEvent(symbol, datetime, order_type,\n cur_quantity, price, 'SELL')\n\n return order", "def order(self, request):\n is_auth = request.session.get(\"is_auth\", False)\n if not is_auth:\n return HttpResponseRedirect('/crisis')\n\n uid = request.session['uid']\n context = RUNNING_INFO.get(uid, {})\n context.update({\"is_auth\": is_auth,\n \"is_daily\": request.session.get(\"is_daily\", False),\n \"is_leader\": request.session.get(\"is_leader\", False),\n \"entity_list\": ENTITY,\n \"parts_list\": PARTS,\n \"detail_info_list\": DETAIL_INFO})\n\n if \"priority\" not in context:\n priority = {}\n for item in ARMY:\n priority.update({item: 1})\n context.update({\"priority\": priority})\n\n if context.get(\"is_run\", False):\n context.update({\"left_time\": self.utils.get_remaining_time(uid),\n \"order\": self.utils.get_current_unit_order(uid)})\n \"\"\" Context Example\n context = {\"username\": self.utils.get_user_name(uid),\n \"is_run\": False,\n \"is_auth\": is_auth,\n \"resource\": {\"money\": 100, \"food\": 200, \"fuel\": 300},\n \"entity\": {\"armor_composite\": 1, \"armor_plate\": 2, \"control_block\": 3,\n \"gun_receiver\": 4, \"kevlar_fiber\": 5, \"laser_aimer\": 6,\n \"powder_charge\": 7, \"rare_item\": 8, \"tnt_charge\": 9},\n \"parts\": {\"artillery_armor\": 1, \"artillery_chassis\": 2, \"artillery_shell\": 3, \"detonator\": 4,\n \"gunner_armor\": 5, \"gunner_gun\": 6, \"jeep_armor\": 7, \"jeep_gun\": 8, \"sniper_armor\": 9,\n \"sniper_gun\": 10, \"soldier_gun\": 11, \"tank_chassis\": 12, \"thrower_armor\": 13,\n \"thrower_gun\": 14, \"wave_emitter\": 15},\n 'order': {'soldier': 1, 'thrower': 4, 'artillery': 8, 'gunner': 2, 'base_artillery': 7, 'jeep': 6, 'artillery_emp': 9, 'base_tank': 5, 'artillery_cassete': 0, 'sniper': 3}\n }\n \"\"\"\n\n if request.method == \"POST\":\n if \"start\" in request.POST:\n order, priority = {}, {}\n data = dict(request.POST)\n\n for item in ARMY:\n try:\n count = int(data.get(item, [''])[0])\n except:\n count = 0\n try:\n prior = int(data.get(\"%s_priority\" % item, [''])[0])\n except:\n prior = 1\n order.update({item: count})\n priority.update({item: prior})\n\n context.update({\"is_run\": True,\n \"order\": order,\n \"priority\": priority,\n \"left_time\": self.utils.get_remaining_time(uid)})\n\n RUNNING_INFO.update({uid: context})\n self.utils.start_gather(uid, context)\n elif \"stop\" in request.POST:\n uid = request.session['uid']\n context = RUNNING_INFO.get(uid, {})\n context.update({\"is_run\": False, \"left_time\": \"00:00:00\"})\n RUNNING_INFO.update({uid: context})\n self.utils.stop_gather(uid)\n\n return render_to_response(\"crisis/order.html\",\n context,\n context_instance=RequestContext(request))", "def calculate(index):\n postingl1 = [len(x[0]) for x in index[0].values()]\n print(\"Number of terms in index i1 : \" + str(len(postingl1)) + \"\\t\")\n print(\"Maximum Length of Postings List : \" + str(max(postingl1)) + \"\\t\")\n print(\"Minimum Length of Postings List : \" + str(min(postingl1)) + \"\\t\")\n print(\"Average Length of Postings List : \" + str(sum(postingl1) / float(len(postingl1))) + \"\\t\")\n print(\"Size of the file that stores the index i1 : \" + \\\n str(os.stat('invertedindex1.txt').st_size) + \" bytes\\n\")\n\n postingl2 = [len(x[0]) for x in index[1].values()]\n print(\"Number of terms in index i2 : \" + str(len(postingl2)) + \"\\t\")\n print(\"Maximum Length of Postings List : \" + str(max(postingl2)) + \"\\t\")\n print(\"Minimum Length of Postings List : 
\" + str(min(postingl2)) + \"\\t\")\n print(\"Average Length of Postings List : \" + str(sum(postingl2) / float(len(postingl2))) + \"\\t\")\n print(\"Size of the file that stores the index i2 : \" + \\\n str(os.stat('invertedindex2.txt').st_size) + \" bytes\\n\")\n\n postingl3 = [len(x[0]) for x in index[2].values()]\n print(\"Number of terms in index i3 : \" + str(len(postingl3)) + \"\\t\")\n print(\"Maximum Length of Postings List : \" + str(max(postingl3)) + \"\\t\")\n print(\"Minimum Length of Postings List : \" + str(min(postingl3)) + \"\\t\")\n print(\"Average Length of Postings List : \" + str(sum(postingl3) / float(len(postingl3))) + \"\\t\")\n print(\"Size of the file that stores the index i3 : \" + \\\n str(os.stat('invertedindex3.txt').st_size) + \" bytes\\n\")\n\n postingl4 = [len(x[0]) for x in index[3].values()]\n print(\"Number of terms in index i4 : \" + str(len(postingl4)) + \"\\t\")\n print(\"Maximum Length of Postings List : \" + str(max(postingl4)) + \"\\t\")\n print(\"Minimum Length of Postings List : \" + str(min(postingl4)) + \"\\t\")\n print(\"Average Length of Postings List : \" + str(sum(postingl4) / float(len(postingl4))) + \"\\t\")\n print(\"Size of the file that stores the index i4 : \" + \\\n str(os.stat('invertedindex4.txt').st_size) + \" bytes\\n\")", "def bar(expression, index, return_dict):\n return_dict[index] = factor(expression)\n print(index)", "def apply_tax(order_obj):\n tax_rule = taxes.get()\n all_credits = order_obj.credits\n other_credit = filter(lambda x: x[\"coll_name\"] != taxes.TaxRule.coll_name(), all_credits)\n\n if tax_rule is not None:\n order_obj.credits = other_credit + [{\n \"obj_id\": tax_rule._id,\n \"coll_name\": taxes.TaxRule.coll_name(),\n \"amount\": taxes.amount(tax_rule, order_obj),\n }]\n else:\n order_obj.credits = other_credit", "def index():\n inventory = db.execute(\"SELECT symbol,quantity FROM inventory WHERE userid = :uid\", uid=session[\"user_id\"])\n cash = float(db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])[0][\"cash\"])\n total = cash\n for i in inventory:\n stock = lookup(i[\"symbol\"])\n i[\"price\"] = stock[\"price\"]\n i[\"name\"] = stock[\"name\"]\n i[\"total\"] = usd(stock[\"price\"] * i[\"quantity\"])\n total += stock[\"price\"] * i[\"quantity\"]\n return render_template(\"index.html\", context={\"inventory\":inventory,\"total\":usd(total),\"cash\":usd(cash)})", "def ConcludeTransaction(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def getWeightValue(self, index):\r\n\t\treturn None" ]
[ "0.5514604", "0.5256099", "0.5154788", "0.50738245", "0.50134844", "0.5007411", "0.4994111", "0.49798325", "0.4962537", "0.4952547", "0.4933376", "0.49214765", "0.49075228", "0.49000627", "0.4870517", "0.48602587", "0.4830687", "0.48236924", "0.47850198", "0.47702926", "0.47414806", "0.46885774", "0.46875164", "0.4687351", "0.4671185", "0.464341", "0.4642237", "0.46415833", "0.46382055", "0.46232736", "0.46226308", "0.4613512", "0.46129355", "0.46068817", "0.45978346", "0.45925376", "0.45834267", "0.4564257", "0.45582235", "0.45512596", "0.4536819", "0.4525354", "0.45207182", "0.45027122", "0.45020428", "0.44983563", "0.4497533", "0.44940844", "0.44934687", "0.44868323", "0.4464893", "0.44642344", "0.4463309", "0.4458694", "0.44558635", "0.4441947", "0.44406405", "0.44395667", "0.44393253", "0.4431351", "0.44299054", "0.44299054", "0.4429901", "0.4424179", "0.44238552", "0.4423058", "0.4419122", "0.44151023", "0.44084972", "0.440371", "0.43989542", "0.43985638", "0.43956566", "0.43879184", "0.438495", "0.43848127", "0.43823165", "0.43807346", "0.43790522", "0.43772352", "0.43734217", "0.43718308", "0.43692833", "0.43632567", "0.4357587", "0.43569538", "0.43563986", "0.435345", "0.43487567", "0.43485385", "0.43482867", "0.43467882", "0.43378913", "0.43330932", "0.43323684", "0.43308267", "0.43293402", "0.4328958", "0.4328879", "0.43266353" ]
0.55305976
0
returns a dataframe of 'alpha' and 'beta' exposures for each asset in the current universe.
def get_alphas_and_betas(context, data):
    all_assets = context.portfolio.positions.keys()
    if context.index not in all_assets:
        all_assets.append(context.index)
    prices = data.history(all_assets, 'price', context.lookback, '1d')
    returns = prices.pct_change()[1:]
    # index_returns = returns[context.index]
    factors = {}
    for asset in context.portfolio.positions:
        try:
            y = returns[asset]
            factors[asset] = linreg(returns[context.index], y)
        except:
            log.warn("[Failed Beta Calculation] asset = %s" % asset.symbol)
    return pd.DataFrame(factors, index=['alpha', 'beta'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_alphas(portfolio_returns,risk_free,market_returns,betas):\r\n \r\n R = portfolio_returns\r\n Rf = risk_free\r\n Beta = betas\r\n Rm = market_returns\r\n alpha = R - Rf - (Beta*(Rm-Rf))\r\n \r\n return alpha", "def transparency(\n et: pd.DataFrame, alpha_by: Hashable, alpha_bounds: Optional[Tuple] = None\n) -> pd.Series:\n if alpha_by is not None:\n ref_data = et[alpha_by]\n if isinstance(alpha_bounds, tuple):\n ref_data = pd.Series(alpha_bounds)\n return encodings.data_transparency(et[alpha_by], ref_data)\n return pd.Series([0.1] * len(et), name=\"alpha\")", "def create_beta_posteriors(df):\n goods = df.num_matured - df.fpd\n df['alpha_p'] = df.alpha + df.fpd\n df['beta_p'] = df.beta + goods\n return df", "def generate_features(self):\n bars = self.portfolio.data_handler.bars.ix[:, -15:, :]\n prices = bars[\"adj_price_close\"]\n weights = np.array([1.0, -1.])\n feats = pd.DataFrame(index=bars.minor_axis)\n ts = prices.dot(weights)\n feats[\"z-score\"] = (ts.ix[-1] - ts.mean()) / ts.std()\n return feats", "def factor_exposure(self):\n exp_hs_all = pd.DataFrame([])\n exp_zz_all = pd.DataFrame([])\n for i in range(len(self.weekly_date)):\n date = self.weekly_date.iloc[i,0]\n factor = get_barra_factor_from_sql(date)\n factor['secID'] = factor.index.tolist()\n stocklist = factor.index.tolist()\n \n hs300 = get_index_composition(date,'000300.SH')\n zz500 = get_index_composition(date,'000905.SH')\n hs300['secID'] = hs300.index.tolist()\n zz500['secID'] = zz500.index.tolist()\n \n stocklist_hs300 = list(set(hs300.index.tolist()).intersection(set(stocklist)))\n stocklist_zz500 = list(set(zz500.index.tolist()).intersection(set(stocklist)))\n stocklist_hs300.sort()\n stocklist_zz500.sort()\n \n factor_hs = extract_part_from_all(stocklist_hs300,factor,'secID')\n factor_zz = extract_part_from_all(stocklist_zz500,factor,'secID')\n hs_weight = extract_part_from_all(stocklist_hs300,hs300,'secID')\n zz_weight = extract_part_from_all(stocklist_zz500,zz500,'secID')\n del factor_hs['secID'],factor_zz['secID'],hs_weight['secID'],zz_weight['secID']\n \n \n exp_hs = pd.DataFrame(np.dot(hs_weight.T,factor_hs))\n exp_zz = pd.DataFrame(np.dot(zz_weight.T,factor_zz))\n \n \n exp_hs_all = pd.concat([exp_hs_all,exp_hs], axis = 0)\n exp_zz_all = pd.concat([exp_zz_all,exp_zz], axis = 0) \n print(i)\n exp_hs_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_zz_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_hs_all.index = self.weekly_date.iloc[:,0]\n exp_zz_all.index = self.weekly_date.iloc[:,0]\n return exp_hs_all,exp_zz_all", "def _calculate_data_quantiles(\n self, df: pd.DataFrame, alpha: List[float], legacy_interface=False\n ) -> pd.DataFrame:\n var_names = self._get_varnames(\n default=\"Quantiles\", legacy_interface=legacy_interface\n )\n var_name = var_names[0]\n\n index = pd.MultiIndex.from_product([var_names, alpha])\n pred_quantiles = pd.DataFrame(columns=index)\n for a in alpha:\n quant_a = df.groupby(level=-1, as_index=True).quantile(a)\n pred_quantiles[[(var_name, a)]] = quant_a\n\n return pred_quantiles", "def get_full_df(self):\n\n galaxies = []\n for i, gal_name in enumerate(self.filenames):\n g_df = self.galaxies[gal_name].all_particle_properties(\n ).to_pandas()\n g_df['name'] = self.names[i]\n g_df['snap'] = self.snaps[i]\n galaxies.append(g_df)\n return pd.concat(galaxies)", "def create_beta_priors(df):\n df['alpha'] = np.minimum(np.maximum((1 - df.expected) * np.power(df.expected, 2) / 
df.variance - df.expected, 0.1), 15)\n df['beta'] = df.alpha / df.expected - df.alpha\n return df", "def get_assets_data_frames(assets: list, asset_function: list, country: str, start_date: str, end_date: str) -> list:\r\n\r\n data_frames = []\r\n\r\n for asset in assets:\r\n\r\n data_frame = asset_function(asset,\r\n country=country,\r\n from_date=start_date,\r\n to_date=end_date)\r\n\r\n data_frames.append(data_frame)\r\n\r\n return data_frames", "def beta_and_alpha(self):\n # make scatter plot\n sp_temp = self.daily_returns(self.sp.rename(columns={'Adj Close': '^GSPC'}))\n symbol_temp = self.daily_returns(self.daily.rename(columns={'Adj Close': self.symbol}))\n joined = sp_temp.merge(symbol_temp, on='Date')\n\n # beta and alpha\n beta, alpha = np.polyfit(joined[\"^GSPC\"], joined[self.symbol], 1)\n beta = round(beta, 3)\n alpha = round(alpha, 5)\n if alpha > 0:\n self.buys += 1\n self.debug += '\\nAlpha > 0: buys + {}'.format(alpha)\n else:\n self.debug += '\\nAlpha < 0: {}'.format(alpha)\n\n # assuming favorable market conditions. else, it would be sells + 1.\n if beta > 1:\n self.buys += 1\n self.debug += '\\nBeta > 1: buys + {}'.format(beta)\n else:\n self.debug += '\\nBeta < 1: {}'.format(beta)\n\n # finish plotting scatter\n if self.will_plot:\n ax = joined.plot(title=self.symbol + ' vs The Market', kind = 'scatter', x='^GSPC', y=self.symbol)\n ax.set_xlabel(\"S&P 500\")\n plt.plot(joined[\"^GSPC\"], beta * joined['^GSPC'] + alpha, '-', color='r', label='Correlation')\n\n # plot expected beta (slope) of 1 and alpha (y- int.) of zero\n plt.plot(joined[\"^GSPC\"], 1 * joined['^GSPC'] + 0, '-', color='gray', label='Beta of 1')\n plt.plot(joined[\"^GSPC\"], 0 * joined['^GSPC'] + 0, '-', color='gray', label='Alpha of 0')\n plt.legend(loc='best')", "def to_abivars(self):\n abivars = dict(\n bs_calctype=1,\n bs_loband=self.bs_loband,\n #nband=self.nband,\n mbpt_sciss=self.mbpt_sciss,\n ecuteps=self.ecuteps,\n bs_algorithm=self._ALGO2VAR[self.algo],\n bs_coulomb_term=21,\n mdf_epsinf=self.mdf_epsinf,\n bs_exchange_term=1 if self.with_lf else 0,\n inclvkb=self.inclvkb,\n zcut=self.zcut,\n bs_freq_mesh=self.bs_freq_mesh,\n bs_coupling=self._EXC_TYPES[self.exc_type],\n optdriver=self.optdriver,\n )\n\n if self.use_haydock:\n # FIXME\n abivars.update(\n bs_haydock_niter=100, # No. 
of iterations for Haydock\n bs_hayd_term=0, # No terminator\n bs_haydock_tol=[0.05, 0], # Stopping criteria\n )\n\n elif self.use_direct_diago:\n raise NotImplementedError(\"\")\n\n elif self.use_cg:\n raise NotImplementedError(\"\")\n\n else:\n raise ValueError(\"Unknown algorithm for EXC: %s\" % self.algo)\n\n # Add extra kwargs\n abivars.update(self.kwargs)\n\n return abivars", "def alpha(requestContext, seriesList, alpha):\n for series in seriesList:\n series.options['alpha'] = alpha\n return seriesList", "def get_portfolio_prices(stocks: list, funds: list, etfs: list, start_date: str, end_date=today) -> pd.DataFrame:\r\n data_frames_stocks = get_assets_data_frames(\r\n stocks, inv.get_stock_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_funds = get_assets_data_frames(\r\n funds, inv.get_fund_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_etfs = get_assets_data_frames(\r\n etfs, inv.get_etf_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n\r\n data_frames = [*data_frames_stocks, *data_frames_funds, *data_frames_etfs]\r\n\r\n assets = [*stocks, *funds, *etfs]\r\n\r\n portfolio_prices = build_multi_index_data_frame(\r\n data_frames, assets, ['Close', 'Open', 'High', 'Low'])\r\n\r\n return portfolio_prices", "def hedge_portfolio(context, data):\r\n factors = get_alphas_and_betas(context, data)\r\n beta_exposure = 0.0\r\n count = 0\r\n for asset in context.portfolio.positions:\r\n if asset in factors and asset != context.index:\r\n if not np.isnan(factors[asset].beta):\r\n beta_exposure += factors[asset].beta\r\n count += 1\r\n beta_hedge = -1.0 * beta_exposure / count\r\n dollar_amount = context.portfolio.portfolio_value * beta_hedge\r\n record(beta_hedge=beta_hedge)\r\n if not np.isnan(dollar_amount):\r\n order_target_value(context.index, dollar_amount)", "def predict(self, alpha=0.05):\n assert 0 < alpha < 1\n predictions = self.predictions(model=self.model)\n if len(self.models) > 0:\n self.draws = np.vstack([\n self.predictions(model=mod) for mod in self.models\n ])\n # TODO: Make this work for n > 1 outcomes -- adjust axis\n return pd.DataFrame({\n 'mean': predictions[0],\n 'lower': np.quantile(self.draws, q=alpha/2, axis=0),\n 'upper': np.quantile(self.draws, q=1-alpha/2, axis=0)\n })\n else:\n return pd.DataFrame({\n 'mean': predictions[0]\n })", "def get_assets(self):\n findstr = r'W\\.iframeInit\\({\"assets\":(\\[.*\\])'\n try:\n page = str(requests.get(self.srcpage).content, 'utf-8')\n asset_search = re.search(findstr, page)\n if asset_search:\n assets = asset_search.group(1)\n try:\n assets = json.loads(assets)\n except ValueError:\n print(\"Error loading JSON string\")\n self.assets = pd.DataFrame(assets)\n return self.assets\n else:\n raise AssetNotFoundError\n except:\n print(\"Failed to get asset information from page.\\nCheck video ID.\")", "def get_assets(symbols: List[str], search_limit: int = 100) -> Tuple:\n # 1) Get the available assets up to the specified limit\n assets = get_available_assets(search_limit)\n\n # 2) Filter out the wanted assets\n try:\n filtered_assets = filter_by_symbol(assets, symbols)\n except Exception as e:\n raise e # We may be wanting to do something about that\n\n # 3) For every selected asset, return its dataframe\n to_return = []\n for asset in filtered_assets:\n time.sleep(5)\n series = get_series(asset['id'], 'd1') # With d1 as interval, we select daily prices\n to_return.append(Asset(asset['symbol'], series))\n return tuple(to_return)", "def 
vectorized_alpha(asset, strategies):\n up = asset['forward_returns'][asset['forward_returns'] > 0]\n down = asset['forward_returns'][asset['forward_returns'] < 0]\n bh_alpha = np.sum(up) / np.abs(np.sum(down))\n\n strat_returns = asset['forward_returns'][:, np.newaxis].T * strategies\n up = strat_returns * (strat_returns[:, ] > 0)\n down = strat_returns * (strat_returns[:, ] < 0)\n strat_alpha = np.sum(up, axis=1) / np.abs(np.sum(down, axis=1))\n\n _alpha = (strat_alpha / bh_alpha) - 1\n return _alpha", "def backtest_portfolio(self):\n self.rank=dict()\n self.accuracy=dict()\n portfolio = dict()\n \n for algo in self.algos:\n portfolio[algo]=pd.DataFrame(index=self.positions.index)\n self.pos_diff=dict()\n self.pos_diff[algo] = self.positions[algo].diff()\n \n portfolio[algo]['price_diff'] = self.bars['Close']-self.bars['Open']\n #portfolio['price_diff'][0:5] = 0.0\n portfolio[algo]['profit'] = self.positions[algo] * portfolio[algo]['price_diff']\n portfolio[algo]['total'] = self.initial_capital + portfolio[algo]['profit'].cumsum()\n portfolio[algo]['returns'] = portfolio[algo]['total'].pct_change()\n d=np.array(portfolio[algo]['profit']).copy()\n d[d>0]=1\n d[d<0]=0\n d[np.array(self.positions[algo])==0]=1\n for i in np.arange(1,len(d)+1):\n c=float(sum(d[0:i]))/(i)\n d[i-1]=c\n portfolio[algo]['accuracy']=d\n self.rank[algo]=float(portfolio[algo]['total'][-1] - portfolio[algo]['total'][0])\n self.returns=portfolio\n c=np.array(self.returns[algo]['profit'])\n c[c>0]=1\n c[c<0]=0\n c[np.array(self.positions[algo])==0]=1\n accuracy=round(float(c.sum())/len(c),2)*self.rank[algo]\n self.accuracy[algo]=accuracy\n #self.ranking= sorted(self.rank.items(), key=operator.itemgetter(1), reverse=True)\n self.ranking= sorted(self.accuracy.items(), key=operator.itemgetter(1))\n self.ready=True\n return (portfolio, self.rank, self.ranking)", "def __evalAlphas(self):\n #breit wheeler\n self.__alphaObjBW = alpha(self.getMomenta('bw'),self.__config)\n self.__alphaBW = [self.__alphaObjBW(index) for index in [1,2,3]]\n #compton\n self.__alphaObjC = alpha(self.getMomenta('c'),self.__config)\n self.__alphaC = [self.__alphaObjC(index) for index in [1,2,3]]\n #breit wheeler exchange\n self.__alphaObjBWx = alpha(self.getMomenta('bwx'),self.__config)\n self.__alphaBWx = [self.__alphaObjBWx(index) for index in [1,2,3]]\n #compton exchange\n self.__alphaObjCx = alpha(self.getMomenta('cx'),self.__config)\n self.__alphaCx = [self.__alphaObjCx(index) for index in [1,2,3]]\n self.__allAlphas = [self.__alphaBW,self.__alphaC,self.__alphaBWx,self.__alphaCx]", "def iter_beta_sheets(self):\n return iter(self.beta_sheet_list)", "def weighted_returns(self):\n r = self.asset_returns.fillna(0.0)\n return pd.DataFrame({a: r[a]*self.weights[a].dropna().shift(1).fillna(0.0) for a in self.assets})", "def _alpha_stats(self, trace):\n mean = np.mean(trace['alpha'])\n sd = np.std(trace['alpha'], ddof=1)\n zscore = mean / sd\n return mean, sd, zscore", "def load_multiple_assets(exchange_ids, assets, timeframe, start, end=None):\n df = pd.DataFrame()\n for ex_id in exchange_ids:\n for asset in assets:\n fpath = get_ohlcv_fpath(asset, ex_id, timeframe)\n if os.path.exists(fpath):\n data = load_asset(fpath, start, end)\n for col in data.columns:\n df[col] = data[col]\n else:\n print(\"Fpath does not exist: {:s}\".format(str(fpath)))\n # TODO: Is this okay? How to fill in missing values? 
How to handle them?\n # df.dropna(inplace=True)\n df['utc'] = [epoch_to_utc(t) for t in df.index]\n return df", "def _calculate_alpha(self, feats):\n \n init_alphas = torch.Tensor(1, self.tagset_size).fill_(-10000.)\n init_alphas[0][self.tag_to_ix[START_TAG]] = 0.\n\n forward_var = autograd.Variable(init_alphas)\n\n for feat in feats:\n alphas_t = [] # The forward variables at this timestep\n for next_tag in range(self.tagset_size):\n emit_score = feat[next_tag].view(\n 1, -1).expand(1, self.tagset_size)\n trans_score = self.transition[next_tag].view(1, -1)\n next_tag_var = forward_var + trans_score + emit_score\n alphas_t.append(log_sum_exp(next_tag_var))\n forward_var = torch.cat(alphas_t).view(1, -1)\n terminal_var = forward_var + self.transition[self.tag_to_ix[STOP_TAG]]\n alpha = log_sum_exp(terminal_var)\n return alpha", "def getInputData():\n\n # Get current allocations.\n current_alloc_dict = DataIO.getCurrentData('data/current_allocations.csv')\n\n # Get tickers and expense ratios.\n ticker_list, expense_ratio_dict = DataIO.getTickerList(\n 'data/tickers_expenses.csv')\n\n # Get raw data.\n raw_data = DataIO.getRawData(ticker_list)\n\n # Create all stock objects.\n stock_dict = {}\n for ticker in raw_data.keys():\n stock_dict[ticker] = Stock(\n raw_data[ticker], ticker, expense_ratio_dict[ticker])\n\n if not len(stock_dict.keys()):\n raise ValueError('No keys found.')\n\n # Create stock database.\n stock_db = StockDatabase(stock_dict)\n\n # Create current portfolio.\n current_portfolio = Portfolio(\n stock_db, percent_allocations_dict=current_alloc_dict)\n\n return current_portfolio, stock_db", "def ArXivEprints(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('arxiv_eprints', default)\n return [HEP.ArXivObject(i) for i in tmp]", "def _get_alpha_beta(self):\n alpha = tf.nn.softplus(self.alpha_prime)\n beta = -alpha + tf.nn.softplus(self.beta_prime)\n return alpha, beta", "def extract_exp_betas(molecule_etree):\n BETA_XPATH = 'hunterdb:ExperimentalProperties/hunterdb:Property[@hunterdb:name=\"beta_expt\"]'\n return molecule_etree.xpath(BETA_XPATH, namespaces=HUNTER_DB_NAMESPACE_DICT)", "def project(self, alpha):\n ax = alpha[0]\n ay = alpha[1]\n az = alpha[2]\n anorm = ax ** 2.0 + ay ** 2.0 + az ** 2.0\n i = anorm > 1.0\n\n anorm_i = anorm[i] ** 0.5 # Square root is taken here. 
Faster.\n ax[i] = np.divide(ax[i], anorm_i)\n ay[i] = np.divide(ay[i], anorm_i)\n az[i] = np.divide(az[i], anorm_i)\n\n return [ax, ay, az]", "def getSymbols(self):\n return self.alpha.getSymbols()", "def eta_grid( self ):\n return self._Vals", "def alpha_beta(returns, factor_returns):\n\n ret_index = returns.index\n beta, alpha = sp.sp.stats.linregress(factor_returns.loc[ret_index].values,\n returns.values)[:2]\n\n return alpha * APPROX_BDAYS_PER_YEAR, beta", "def iex_equities(symbols):\n # strict this in memory so that we can reiterate over it\n symbols = tuple(symbols)\n\n def ingest(environ,\n asset_db_writer,\n minute_bar_writer, # ignored\n daily_bar_writer,\n adjustment_writer,\n calendar,\n start_session, # ignored\n end_session, # ignored\n cache,\n show_progress,\n output_dir):\n\n metadata = pd.DataFrame(np.empty(len(symbols), dtype=[\n ('start_date', 'datetime64[ns]'),\n ('end_date', 'datetime64[ns]'),\n ('auto_close_date', 'datetime64[ns]'),\n ('symbol', 'object'),\n ]))\n\n today = datetime.today()\n start = datetime(today.year-5,today.month,today.day)\n \n def _pricing_iter():\n sid = 0\n with maybe_show_progress(\n symbols,\n show_progress,\n label='Downloading IEX pricing data: ') as it, \\\n requests.Session() as session:\n for symbol in it:\n path = _cachpath(symbol, 'ohlcv')\n try:\n df = cache[path]\n except KeyError:\n df = cache[path] = get_historical_data(symbol, start=start, end=None, output_format='pandas').sort_index()\n df.index = pd.to_datetime(df.index)\n # the start date is the date of the first trade and\n # the end date is the date of the last trade\n start_date = df.index[0]\n end_date = df.index[-1]\n # The auto_close date is the day after the last trade.\n ac_date = end_date + pd.Timedelta(days=1)\n metadata.iloc[sid] = start_date, end_date, ac_date, symbol\n\n df.rename(\n columns={\n 'Open': 'open',\n 'High': 'high',\n 'Low': 'low',\n 'Close': 'close',\n 'Volume': 'volume',\n },\n inplace=True,\n )\n yield sid, df\n sid += 1\n\n daily_bar_writer.write(_pricing_iter(), show_progress=True)\n\n metadata['exchange'] = \"NYSE\"\n \n symbol_map = pd.Series(metadata.symbol.index, metadata.symbol)\n asset_db_writer.write(equities=metadata)\n\n adjustment_writer.write()\n\n return ingest", "def iter_beta_sheets(self):\n if self.default_model:\n return self.default_model.iter_beta_sheets()\n return iter(list())", "def alpha_rf(port_returns, risk_free_rate, market_returns, b):\n\n # the portfolio Alpha is given by the below equation, as stated by the Capital Asset Pricing Model\n alpha = np.mean(port_returns) - risk_free_rate + b*(np.mean(market_returns) - risk_free_rate)\n\n return alpha", "def alpha_rf(port_returns, risk_free_rate, market_returns, b):\n\n # the portfolio Alpha is given by the below equation, as stated by the Capital Asset Pricing Model\n alpha = np.mean(port_returns) - risk_free_rate + b*(np.mean(market_returns) - risk_free_rate)\n\n return alpha", "def to_abivars(self):\n abivars = {\n \"ecuteps\" : self.ecuteps,\n \"ecutwfn\" : self.ecutwfn,\n \"inclvkb\" : self.inclvkb,\n \"gwpara\" : self.gwpara,\n \"awtr\" : self.awtr,\n \"symchi\" : self.symchi,\n \"nband\" : self.nband,\n #\"gwcalctyp\": self.gwcalctyp,\n #\"fftgw\" : self.fftgw,\n \"optdriver\" : self.optdriver,\n }\n\n # Variables for the Hilber transform.\n if self.use_hilbert:\n abivars.update(self.hilbert.to_abivars())\n\n return abivars", "def aga_expression_entropies(adata):\n from scipy.stats import entropy\n groups_order, groups_masks = utils.select_groups(adata, 
smp='aga_groups')\n entropies = []\n for mask in groups_masks:\n X_mask = adata.X[mask]\n x_median = np.median(X_mask, axis=0)\n x_probs = (x_median - np.min(x_median)) / (np.max(x_median) - np.min(x_median))\n entropies.append(entropy(x_probs))\n return entropies", "def get_all_labs():\n return Lab.query.all()", "async def get_active_exchange_markets(cls) -> pd.DataFrame:\n async with aiohttp.ClientSession() as client:\n\n trading_pairs_response = await client.get(ASSET_PAIRS_URL)\n trading_pairs_response: aiohttp.ClientResponse = trading_pairs_response\n\n if trading_pairs_response.status != 200:\n raise IOError(f\"Error fetching Kraken trading pairs. \"\n f\"HTTP status is {trading_pairs_response.status}.\")\n\n trading_pairs_data: Dict[str, Any] = await trading_pairs_response.json()\n trading_pairs_data[\"result\"] = {\n pair: details for pair, details in trading_pairs_data[\"result\"].items() if \".\" not in pair}\n\n wsname_dict: Dict[str, str] = {pair: details[\"wsname\"]\n for pair, details in trading_pairs_data[\"result\"].items()}\n trading_pairs: Dict[str, Any] = {pair: {\"baseAsset\": wsname_dict[pair].split(\"/\")[0],\n \"quoteAsset\": wsname_dict[pair].split(\"/\")[1],\n \"wsname\": wsname_dict[pair]}\n for pair in trading_pairs_data[\"result\"]}\n\n trading_pairs_str: str = ','.join(trading_pairs.keys())\n\n market_response = await client.get(f\"{TICKER_URL}?pair={trading_pairs_str}\")\n market_response: aiohttp.ClientResponse = market_response\n\n if market_response.status != 200:\n raise IOError(f\"Error fetching Kraken markets information. \"\n f\"HTTP status is {market_response.status}.\")\n\n market_data = await market_response.json()\n\n market_data: List[Dict[str, Any]] = [{\"pair\": pair, **market_data[\"result\"][pair], **trading_pairs[pair]}\n for pair in market_data[\"result\"]\n if pair in trading_pairs]\n\n # Build the data frame.\n all_markets: pd.DataFrame = pd.DataFrame.from_records(data=market_data, index=\"pair\")\n all_markets[\"lastPrice\"] = all_markets.c.map(lambda x: x[0]).astype(\"float\")\n all_markets.loc[:, \"volume\"] = all_markets.v.map(lambda x: x[1]).astype(\"float\")\n\n price_dict: Dict[str, float] = await cls.get_prices_from_df(all_markets)\n\n usd_volume: List[float] = [\n (\n baseVolume * price_dict[baseAsset] if baseAsset in price_dict else -1\n )\n for baseAsset, baseVolume in zip(all_markets.baseAsset,\n all_markets.volume)]\n all_markets.loc[:, \"USDVolume\"] = usd_volume\n\n return all_markets.sort_values(\"USDVolume\", ascending=False)", "def ema_fix():\n df_ones, df_decay = ema_test_data()\n return df_ones, df_decay", "def assets(self):\n from briefy.leica.models.asset import Asset\n query = Asset.query().filter(\n Asset.c.job_id.in_([a.id for a in self.assignments if a.state == 'approved']),\n )\n return query", "def history(self, assets, field='price', window=30, frequency='1d'):\n if hasattr(assets, '__iter__'):\n results = None\n columns = ['date']\n for symbol in assets:\n columns.append(symbol)\n if frequency == '1d':\n rows = self._get_history_daily(symbol, field, window)\n elif frequency == '1m':\n columns[0] = 'minute'\n rows = self._get_history_min(symbol, window)\n elif frequency == '30m':\n columns[0] = '30min'\n rows = self._get_history_30min(symbol, window)\n if results is None:\n results = map(list, rows)\n else:\n map(lambda x, y: x.append(y[1]), results, rows)\n if len(results) > window:\n results = results[-window:]\n df = pd.DataFrame(map(lambda x: x[1:], results), index=map(lambda x: x[0], results), 
columns=columns[1:])\n return df\n else:\n symbol = str(assets)\n if frequency == '1d':\n rows = self._get_history_daily(symbol, field, window)\n elif frequency == '1m':\n rows = self._get_history_min(symbol, window)\n elif frequency == '30m':\n rows = self._get_history_30min(symbol, window)\n if len(rows) > window:\n rows = rows[-window:]\n series = pd.Series(map(lambda x: x[1], rows), index=map(lambda x: x[0], rows))\n return series", "def get_df_emb(model, dataset, dataset_name='PIR'):\n\n df = pd.DataFrame()\n embeddings = []\n\n index = 0\n for batch_index in dataset:\n images, relevant, image_name = batch_index\n # _, embs = model(images) # classification\n embs = model(images) # ssl\n for emb, r, img_name in zip(embs, relevant, image_name):\n embeddings.append(list(emb.numpy()))\n df.loc[index, 'relevant'] = r.numpy().decode()\n img_name = img_name.numpy().decode()\n df.loc[index, 'path_name'] = img_name\n if dataset_name == 'PIR':\n df.loc[index, 'image_name'] = img_name\n elif dataset_name == 'copydays10k' or dataset_name == 'copydays10k-strong':\n df.loc[\n index,\n 'image_name'] = img_name if '_' not in img_name else img_name.split(\n '_')[1]\n else:\n raise NotImplementedError(\n f'Evaluation for dataset {dataset_name} not implemented')\n index += 1\n embeddings = np.array(embeddings)\n return df, embeddings", "def assets(self, short_name=False, quantity=False):\n table = Table(\n 3 + short_name + quantity,\n headers=(['Account']\n + (['Name'] if short_name else [])\n + (['Quantity'] if quantity else [])\n + ['Asset', 'Value']),\n coltypes=(['str']\n + (['str'] if short_name else [])\n + (['float'] if quantity else [])\n + ['str', 'dollars']))\n for account in self.accounts():\n for asset in account.assets():\n row = ([account.name()]\n + ([f'{asset.short_name()}'] if short_name else [])\n + [asset.name(), asset.adjusted_value()])\n if quantity:\n row.insert(1 + short_name, asset.shares()\n if hasattr(asset, 'shares') else None)\n table.add_row(row)\n return table", "def get_daily_historic_data(self, ticker, start_date, end_date):\n av_url = self._construct_alpha_vantage_symbol_call(ticker)\n\n try:\n av_data_js = requests.get(av_url)\n data = json.loads(av_data_js.text)['Time Series (Daily)']\n except Exception as e:\n print(\n \"Could not download AlphaVantage data for %s ticker \"\n \"(%s)...stopping.\" % (ticker, e)\n )\n return pd.DataFrame(columns=COLUMNS).set_index('Date')\n else:\n prices = []\n for date_str in sorted(data.keys()):\n date = dt.strptime(date_str, '%Y-%m-%d')\n if date < start_date or date > end_date:\n continue\n\n bar = data[date_str]\n prices.append(\n (\n date, \n float(bar['1. open']),\n float(bar['2. high']),\n float(bar['3. low']),\n float(bar['4. close']),\n int(bar['6. volume']),\n float(bar['5. 
adjusted close'])\n )\n )\n price_df = pd.DataFrame(prices, columns=COLUMNS).set_index('Date').sort_index()\n self._correct_back_adjusted_prices(price_df)\n return price_df", "def litBetaAlpha(inc,wave,m,d):\n psi = blazeYaw(inc,wave,m,d)\n beta1 = cos(inc)*cos(psi)\n alpha1 = cos(inc)*sin(psi)-m*wave/d\n return beta1,alpha1", "def alpha_composite(front, back):\n front = np.asarray(front)\n back = np.asarray(back)\n result = np.empty(front.shape, dtype='float')\n alpha = np.index_exp[:, :, 3:]\n rgb = np.index_exp[:, :, :3]\n falpha = front[alpha] / 255.0\n balpha = back[alpha] / 255.0\n result[alpha] = falpha + balpha * (1 - falpha)\n old_setting = np.seterr(invalid='ignore')\n result[rgb] = (front[rgb] * falpha + back[rgb] * balpha * (1 - falpha)) / result[alpha]\n np.seterr(**old_setting)\n result[alpha] *= 255\n np.clip(result, 0, 255)\n # astype('uint8') maps np.nan and np.inf to 0\n result = result.astype('uint8')\n result = Image.fromarray(result, 'RGBA')\n return result", "def rho02alpha(self, rho0, Rs, gamma_inner, gamma_outer):\n gx = self._g(1.0, gamma_inner, gamma_outer)\n alpha_Rs = rho0 * (4. * Rs ** 2 * gx / 1.0 ** 2)\n return alpha_Rs", "def getAlpha(self,mode='full'):\n if mode=='full':\n return self.__allAlphas\n else:\n return self.__allAlphas[modeDict[mode][2]]", "async def get_active_exchange_markets(cls) -> pd.DataFrame:\n async with aiohttp.ClientSession() as client:\n async with client.get(\"https://api.radarrelay.com/v2/markets?include=ticker,stats\") as response:\n response: aiohttp.ClientResponse = response\n if response.status != 200:\n raise IOError(f\"Error fetching active Radar Relay markets. HTTP status is {response.status}.\")\n data = await response.json()\n all_markets: pd.DataFrame = pd.DataFrame.from_records(data=data, index=\"id\")\n fetch_markets: pd.DataFrame = all_markets[\n lambda df: [FETCH_MARKET_SYMBOL_PATTERN.search(i) is not None for i in df.index]\n ]\n\n weth_dai_price: float = float(fetch_markets.loc[\"WETH-DAI\"][\"ticker\"][\"price\"])\n dai_volume: List[float] = []\n for row in fetch_markets.itertuples():\n product_name: str = row.Index\n base_volume: float = float(row.stats[\"volume24Hour\"])\n if product_name.endswith(\"WETH\"):\n dai_volume.append(weth_dai_price * base_volume)\n else:\n dai_volume.append(base_volume)\n fetch_markets.loc[:, \"DAIVolume\"] = dai_volume\n\n return fetch_markets.sort_values(\"DAIVolume\", ascending=False)", "def calc_alpha_beta(self, dow_jones):\n \n x = array(dow_jones.get_returns())\n y = array(self._returns)\n \n A = vstack([x, ones(len(x))]).T\n \n m, c = linalg.lstsq(A, y)[0]\n \n# pylab.plot(x, y, 'o', label='Daily returns', markersize=3)\n# pylab.plot(x, m*x + c, 'r', label='Fitted line')\n# pylab.ylabel(self._name)\n# pylab.xlabel(dow_jones.get_name())\n# pylab.legend()\n# pylab.show()\n \n self._beta = m\n self._alpha = c", "def all_experiments():\n elo_explain_experiments()\n alpha_beta_experiments()\n mtcs_experiments()", "def portfolio_analytics(port_returns, market_returns):\n\n # add the intercept to the model\n x2 = sm.add_constant(market_returns)\n\n # train the model\n estimator = sm.OLS(port_returns, x2)\n model = estimator.fit()\n\n # get portfolio analytics\n alpha, beta = model.params\n r_squared = model.rsquared\n regression = model.predict()\n\n return alpha, beta, r_squared, regression", "def portfolio_analytics(port_returns, market_returns):\n\n # add the intercept to the model\n x2 = sm.add_constant(market_returns)\n\n # train the model\n estimator = sm.OLS(port_returns, x2)\n 
model = estimator.fit()\n\n # get portfolio analytics\n alpha, beta = model.params\n r_squared = model.rsquared\n regression = model.predict()\n\n return alpha, beta, r_squared, regression", "def create_equity_curve_dataframe(self):\n curve = pd.DataFrame(self.all_holdings)\n curve.set_index('datetime', inplace=True)\n\n curve_symbols = curve[self.symbol_list]\n lists = ['buy_times', 'sell_times', 'hold', 'total_times',\n 'cash', 'commission', 'total']\n curve_lists = curve[lists]\n curve = pd.concat([curve_symbols, curve_lists], axis=1)\n\n curve['returns'] = curve['total'].pct_change()\n curve.loc[curve.index[0], 'returns'] = 0.0\n curve['equity_curve'] = (1.0 + curve['returns']).cumprod()\n self.equity_curve = curve\n\n positions = pd.DataFrame(self.all_positions)\n positions.set_index('datetime', inplace=True)\n self.positions = positions\n\n prices = pd.DataFrame(self.all_prices)\n prices.set_index('datetime', inplace=True)\n self.prices = prices", "def test_compute_alphas(self):\n\t\tdetails = self.watcher.analyze(layers=[self.second_layer], pool=False, randomize=False, plot=False, mp_fit=False, pl_package=WW_POWERLAW)\n\t\t#d = self.watcher.get_details(results=results)\n\t\ta = details.alpha.to_numpy()\n\t\tself.assertAlmostEqual(a[0],1.74859, places=3)\n\t\tself.assertAlmostEqual(a[1],1.66595, places=3)\n\t\tself.assertAlmostEqual(a[3],1.43459, places=3)", "def get_dataframe(self,tickers = None,variable = \"close\",normalization = True):\n if tickers is not None:\n companies = self[tickers]\n else:\n companies = self.data \n\n data = pd.concat([company.data[[variable]].rename(columns = {variable:company.ticker}) for company in companies],axis = 1)\n \n if normalization:\n data /= data.max(axis = 0)\n \n return data", "def get_experiment_data(experiment_names):\n\n snapshots_query = db_utils.query(\n Experiment.git_hash,\\\n Trial.experiment, Trial.fuzzer, Trial.benchmark,\\\n Trial.time_started, Trial.time_ended,\\\n Snapshot.trial_id, Snapshot.time, Snapshot.edges_covered)\\\n .select_from(Experiment)\\\n .join(Trial)\\\n .join(Snapshot)\\\n .filter(Experiment.name.in_(experiment_names))\\\n .filter(Trial.preempted.is_(False))\n\n return pd.read_sql_query(snapshots_query.statement, db_utils.engine)", "def getPatternsInDataFrame(self):\n\n dataFrame = {}\n data = []\n for a, b in self.finalPatterns.items():\n data.append([a, b])\n dataFrame = pd.DataFrame(data, columns=['Patterns', 'Support'])\n return dataFrame", "def get_beta_sheet(self):\n return self.beta_sheet", "def get(self) -> Iterable[Artifact]:\n # TODO(b/125037186): We should support dynamic query against a Channel\n # instead of a static Artifact collection.\n return self._artifacts", "def add_assets(self, assets=None):\n if assets is None:\n assets = PipelineHelper.getSelectedMayaAssets()\n\n assets = [\n asset\n for asset in assets\n if (\n asset.get_maya_commit().component.stage ==\n zefir.STAGES.FX_SIMULATION\n )\n ]\n\n for asset in assets:\n name = asset.name\n if name in self._model.assets:\n continue\n\n asset_data = AssetData()\n asset_data.asset = asset\n\n effects_node = str(asset.get_effects_node())\n if (\n mc.objExists(effects_node) and\n mc.listRelatives(effects_node) is not None\n ):\n asset_data.commit_to_fx_cache = True\n else:\n asset_data.can_commit_to_fx_cache = False\n asset_data.commit_to_fx_cache = False\n asset_data.generate_alembic_from_geos = False\n asset_data.use_local_space_for_alembic = False\n asset_data.commit_to_alembic_anim = True\n\n self._model.assets[name] = asset_data\n\n 
self._view.build_items(sorted(self._model.assets.keys()))", "def get_assay_solutions(self):\n vocabs = []\n assay = self.get_assay()\n if not assay:\n return vocabs\n for solution_type_name in assay.needed_solutions:\n type_batches = find(Type=solution_type_name,\n expires={'query': datetime.today().date(),\n 'range': 'min'},\n sort_on='expires')\n\n tmp = []\n for batch in type_batches:\n tmp.append([batch.id,\n batch.Title,\n batch.expires.strftime('%Y-%m-%d')])\n vocabs.append([solution_type_name, tmp])\n return vocabs", "def add_all_features(df):\n df.reset_index(drop=True, inplace=True)\n df = target_indicators(df)\n df = momentum_indicators(df)\n df = trend_indicators(df)\n df = volatility_indicators(df)\n df = volume_indicators(df)\n df = special_indicators(df)\n return df", "def assets(self):\n return self._assets.values()", "def pandas(self):\n names,prior,posterior = [],[],[]\n for iname,name in enumerate(self.posterior_parameter.row_names):\n names.append(name)\n posterior.append(np.sqrt(float(\n self.posterior_parameter[iname, iname]. x)))\n iprior = self.parcov.row_names.index(name)\n prior.append(np.sqrt(float(self.parcov[iprior, iprior].x)))\n for pred_name, pred_var in self.posterior_prediction.items():\n names.append(pred_name)\n posterior.append(np.sqrt(pred_var))\n prior.append(self.prior_prediction[pred_name])\n return pd.DataFrame({\"posterior\": posterior, \"prior\": prior},\n index=names)", "def initAlpha(self):\n alpha = [ s.zeros(self.K,) for m in range(self.M) ]\n for m in range(self.M):\n tmp = bernoulli.rvs(p=0.5, size=self.K)\n tmp[tmp==1] = 1.\n tmp[tmp==0] = 1E5\n alpha[m] = tmp\n return alpha", "def get_artists_alpha(session_):\n # artists = session_.query(Artist).order_by(Artist.name.desc()).paginate()\n artists = session_.query(Artist).order_by(Artist.name.asc()).all()\n return artists", "def investments_table(self):\n table = pd.DataFrame(index=[etf.buy_date for etf in self.etfs.values()])\n table['Ticker'] = [name.split('-')[0].split('.')[0] for name in self.etfs.keys()]\n table['Buying Price (€)'] = [etf.buy_price for etf in self.etfs.values()]\n table['Number of Shares'] = [etf.n_shares for etf in self.etfs.values()]\n table['Commissions (€)'] = [etf.total_commissions() for etf in self.etfs.values()]\n table['Invested (€)'] = [etf.initial_investment() for etf in self.etfs.values()]\n table['Share Price (€)'] = [etf.stock_price() for etf in self.etfs.values()]\n table['Value (€)'] = [etf.present_value() for etf in self.etfs.values()]\n table['P/L (€)'] = [etf.profit_loss() for etf in self.etfs.values()]\n table['P/L (%)'] = [etf.profit_loss(pct=True) for etf in self.etfs.values()]\n return table", "def to_abivars(self):\n abivars = dict(\n gwcalctyp=self.gwcalctyp,\n ecuteps=self.ecuteps,\n ecutsigx=self.ecutsigx,\n symsigma=self.symsigma,\n gw_qprange=self.gw_qprange,\n gwpara=self.gwpara,\n optdriver=self.optdriver,\n nband=self.nband\n #\"ecutwfn\" : self.ecutwfn,\n #\"kptgw\" : self.kptgw,\n #\"nkptgw\" : self.nkptgw,\n #\"bdgw\" : self.bdgw,\n )\n\n # FIXME: problem with the spin\n #assert len(self.bdgw) == self.nkptgw\n\n # ppmodel variables\n if self.use_ppmodel:\n abivars.update(self.ppmodel.to_abivars())\n\n return abivars", "def ohe_inverse_LR(normalized_alphas):\n\n normalized_alphas = np.abs(normalized_alphas)\n\n # Regular expression to pick attributes names.\n # Since in our case attributes names are the genomic positions (i.e. 
an integer number), we use the regex below\n import re\n pattern = \"^\\d+\"\n\n # Auxiliary dictionary to create one pd.DataFrame for each sample, summing the shap values for each attribute.\n # Later, these dataframes will be appended together, resulting in the final df.\n dic={}\n\n for index, alpha in normalized_alphas.iteritems():\n # print(index)\n attr = re.match(pattern, index).group()\n if attr not in dic.keys():\n dic[attr] = (0.5 * alpha)\n else:\n dic[attr] += (0.5 * alpha)\n\n shap_original = pd.Series(dic)\n\n return shap_original", "def get_supported_instruments():\n manuf_instruments = {\n agilent.name: agilent.instruments,\n horiba.name: horiba.instruments,\n tecan.name: tecan.instruments,\n MIT.name: MIT.instruments,\n }\n # instruments = [Aqualog, Fluorolog, Cary]\n df = pd.DataFrame()\n for manuf, instruments in manuf_instruments.items():\n for i in instruments:\n for j in i.supported_models:\n d = {\n \"manufacturer\": manuf,\n \"name\": i.name,\n \"supported_models\": j,\n \"object\": i,\n }\n df = df.append(d, ignore_index=True)\n\n df.set_index([\"manufacturer\", \"supported_models\"], inplace=True)\n df_display = df.drop(columns=[\"object\"])\n return df_display, df", "def alpha_composed(self, list_of_alpha_value, dart):\r\n current = dart\r\n for i in list_of_alpha_value:\r\n current = self.alphas[i][current]\r\n return current", "def get_appliance_by_asset_donut_chart(self):\n appliances = self.get_appliances().order_by('asset')\n assets = Asset.objects.filter(\n pk__in=[a['asset'] for a in appliances.values('asset').distinct()]\n )\n data = {\n 'series': [],\n 'labels': [asset.name for asset in assets],\n }\n for asset in assets:\n _appliances = appliances.filter(asset=asset)\n _total = 0\n for appliance in _appliances:\n _total += float(\n round(\n appliance.total or 0, settings.DEFAULT_DECIMAL_PLACES\n )\n )\n data['series'].append(_total)\n return data", "def get_stock_price_df(info, symbols):\n\n df_l = []\n\n for num, i in enumerate(info):\n df = pd.DataFrame.from_dict(i, orient='index')\n df['Symbol'] = symbols[num]\n df_l.append(df)\n\n df_full = pd.concat(df_l)\n df_full = df_full.rename(columns={'1. open': 'Open',\n '2. high': 'High',\n '3. low': 'Low',\n '4. close': 'Close',\n '5. 
volume': 'Volume'})\n\n return df_full", "def simulated(self):\n # Join #\n df = self.age_indicators.left_join(self.bef_ft, on='forest_type')\n # Select only some columns #\n columns_of_interest = ['ave_age', 'time_step', 'area', 'biomass', 'bef_tot', 'density']\n columns_of_interest += list(self.parent.classifiers.columns)\n # Drop the other columns #\n df = df[columns_of_interest].copy()\n # Divide biomass by the expansion factor #\n df['merch_c_ha'] = df['biomass'] / df['bef_tot']\n df['merch_vol_ha'] = df['merch_c_ha'] / df['density']\n # Return #\n return df", "def to_abivars(self):", "def api_asset_get():\n names = request.args.getlist(\"name\")\n\n result = []\n for name in names:\n asset = app.bank.get(name)\n if asset:\n result.append(asset)\n\n return jsonify(sorted(result)), 200", "def _get_surfaces(idf):\n surfaces = idf.getsurfaces() + idf.getshadingsurfaces() + idf.getsubsurfaces()\n return surfaces", "def portfolio_table(self):\n idx = set(name.split('-')[0].split('.')[0] for name, etf in self.etfs.items() if not etf.sold())\n table = pd.DataFrame({'Invested': 0, 'Shares':0, 'Share Price':0, 'Present Value':0, 'P/L':0, 'P/L%':0},index=idx)\n for name, etf in self.etfs.items():\n if not etf.sold():\n table.loc[name.split('-')[0].split('.')[0], 'Invested'] += etf.initial_investment()\n table.loc[name.split('-')[0].split('.')[0], 'Shares'] += etf.n_shares\n table.loc[name.split('-')[0].split('.')[0], 'Share Price'] = etf.stock_price()\n table.loc[name.split('-')[0].split('.')[0], 'Present Value'] += etf.present_value()\n table.loc[name.split('-')[0].split('.')[0], 'P/L'] += etf.profit_loss()\n table.insert(1, 'PMA', round(table['Invested'] / table['Shares'], 2))\n table.insert(3, 'Initial Weight', round(table['Invested'] / table['Invested'].sum() * 100, 2))\n table.insert(4, 'Present Weight', round(table['Present Value'] / table['Present Value'].sum() * 100, 2))\n table['P/L%'] = round(table['P/L'] / table['Invested'] * 100, 2)\n table['P/L'] = round(table['P/L'], 2)\n table['Present Value'] = round(table['Present Value'], 2)\n return table.sort_values('Invested', 0, ascending=False)", "def get_assets(self):\n self.logger.debug(\"Fetching assets.\")\n return self._api_query(\"assets\")[\"assets\"]", "def features_for(report):\n\n features = []\n dedupe_vulns = {}\n for pkg_id, pkg in report[\"packages\"].items():\n pkg_env = report[\"environments\"][pkg_id][0]\n pkg_vulns = []\n # Quay doesn't care about vulnerabilities reported from different\n # repos so dedupe them. 
Key = package_name + package_version + vuln_name.\n for vuln_id in report[\"package_vulnerabilities\"].get(pkg_id, []):\n vuln_key = (\n pkg[\"name\"]\n + \"_\"\n + pkg[\"version\"]\n + \"_\"\n + report[\"vulnerabilities\"][vuln_id].get(\"name\", \"\")\n )\n if not dedupe_vulns.get(vuln_key, False):\n pkg_vulns.append(report[\"vulnerabilities\"][vuln_id])\n dedupe_vulns[vuln_key] = True\n\n enrichments = (\n {\n key: sorted(val, key=lambda x: x[\"baseScore\"], reverse=True)[0]\n for key, val in list(report[\"enrichments\"].values())[0][0].items()\n }\n if report.get(\"enrichments\", {})\n else {}\n )\n\n features.append(\n Feature(\n pkg[\"name\"],\n \"\",\n \"\",\n pkg_env[\"introduced_in\"],\n pkg[\"version\"],\n [\n Vulnerability(\n fetch_vuln_severity(vuln, enrichments),\n vuln[\"updater\"],\n vuln[\"links\"],\n maybe_urlencoded(\n vuln[\"fixed_in_version\"] if vuln[\"fixed_in_version\"] != \"0\" else \"\"\n ),\n vuln[\"description\"],\n vuln[\"name\"],\n Metadata(\n vuln[\"updater\"],\n vuln.get(\"repository\", {}).get(\"name\"),\n vuln.get(\"repository\", {}).get(\"uri\"),\n vuln.get(\"distribution\", {}).get(\"name\"),\n vuln.get(\"distribution\", {}).get(\"version\"),\n NVD(\n CVSSv3(\n enrichments.get(vuln[\"id\"], {}).get(\"vectorString\", \"\"),\n enrichments.get(vuln[\"id\"], {}).get(\"baseScore\", \"\"),\n )\n ),\n ),\n )\n for vuln in pkg_vulns\n ],\n )\n )\n\n return features", "def asset_history(self, asset: str, data_order: str='asc', nb_of_results: int=100, pandas: bool=False) -> Union[pd.DataFrame, dict]:\n \n assets_history_url = bf_assets_url + asset + bf_asset_history_url\n\n response, count_api_calls = query_on_several_pages(self.network, self.api_key, data_order, nb_of_results, assets_history_url, self.proxies)\n \n #print('[INFO] Function asset_history, {} API calls.'.format(count_api_calls))\n \n return pd.DataFrame.from_dict(response) if pandas else response", "def instruments_with_meta_data(self):\n if len(self._instruments_with_meta_data) > 0:\n return self._instruments_with_meta_data\n else:\n self._borsdata_api = BorsdataAPI(constants.API_KEY)\n # fetching data from api\n countries = self._borsdata_api.get_countries()\n branches = self._borsdata_api.get_branches()\n sectors = self._borsdata_api.get_sectors()\n markets = self._borsdata_api.get_markets()\n instruments = self._borsdata_api.get_instruments()\n # instrument type dict for conversion (https://github.com/Borsdata-Sweden/API/wiki/Instruments)\n instrument_type_dict = {0: 'Aktie', 1: 'Pref', 2: 'Index', 3: 'Stocks2', 4: 'SectorIndex', 5: 'BranschIndex'}\n # creating an empty dataframe\n instrument_df = pd.DataFrame()\n # loop through the whole dataframe (table) i.e. 
row-wise-iteration.\n for index, instrument in instruments.iterrows():\n name = instrument['name']\n ins_id = instrument['insId']\n ticker = instrument['ticker']\n isin = instrument['isin']\n # locating meta-data in various ways\n # dictionary-lookup\n instrument_type = instrument_type_dict[instrument['instrument']]\n # .loc locates the rows where the criteria (inside the brackets, []) is fulfilled\n # located rows (should be only one) get the column 'name' and return its value-array\n # take the first value in that array ([0], should be only one value)\n market = markets.loc[markets['id'] == instrument['marketId']]['name'].values[0]\n country = countries.loc[countries['id'] == instrument['countryId']]['name'].values[0]\n sector = 'N/A'\n branch = 'N/A'\n # index-typed instruments does not have a sector or branch\n if market.lower() != 'index':\n sector = sectors.loc[sectors['id'] == instrument['sectorId']]['name'].values[0]\n branch = branches.loc[branches['id'] == instrument['branchId']]['name'].values[0]\n # appending current data to dataframe, i.e. adding a row to the table.\n instrument_df = instrument_df.append({'name': name, 'ins_id': ins_id, 'ticker': ticker, 'isin': isin, 'instrument_type': instrument_type,\n 'market': market, 'country': country, 'sector': sector, 'branch': branch}, ignore_index=True)\n # create directory if it do not exist\n if not os.path.exists(constants.EXPORT_PATH):\n os.makedirs(constants.EXPORT_PATH)\n # to csv\n instrument_df.to_csv(constants.EXPORT_PATH + 'instrument_with_meta_data.csv')\n # creating excel-document\n excel_writer = pd.ExcelWriter(constants.EXPORT_PATH + 'instrument_with_meta_data.xlsx')\n # adding one sheet\n instrument_df.to_excel(excel_writer, 'instruments_with_meta_data')\n # saving the document\n excel_writer.save()\n self._instruments_with_meta_data = instrument_df\n return instrument_df", "def gamma_pdf(a,b):\n df = DataFrame(columns=['Day','Gamma_Values'])\n for day in range(181):\n df = df.append({'Day': int(day), 'Gamma_Values': float(gamma.pdf(day,a,0,b))}, ignore_index=True)\n return df", "def buildExposureTable(exposures, fields, instruments):\n name = []\n ra = []\n dec= []\n field= []\n inst = []\n airmass = []\n mjd = []\n exptime = []\n epoch = []\n apcorr = []\n index = 0\n for k,e in exposures.items():\n name.append(e.name)\n ra.append(getDegree(e.coords.ra))\n dec.append(getDegree(e.coords.dec))\n field.append(fields[e.field].index)\n if e.instrument in specialInstruments:\n inst.append(specialInstruments[e.instrument])\n else:\n inst.append(instruments[e.instrument].index)\n e.index = index\n index += 1\n\n airmass.append(e.airmass)\n mjd.append(e.mjd)\n exptime.append(e.exptime)\n epoch.append(e.epoch)\n apcorr.append(e.apcorr)\n hdu = pf.BinTableHDU.from_columns(\\\n pf.ColDefs( [pf.Column(name='NAME',format=py_to_fits(name),array=name),\n pf.Column(name='RA',format=py_to_fits(ra),array=ra),\n pf.Column(name='DEC',format=py_to_fits(dec),array=dec),\n pf.Column(name='FIELDNUMBER',format=py_to_fits(field),array=field),\n pf.Column(name='INSTRUMENTNUMBER',format=py_to_fits(inst),\\\n array=inst),\n pf.Column(name=\"MJD\",format=py_to_fits(mjd),array=mjd),\n pf.Column(name=\"AIRMASS\",format=py_to_fits(airmass),array=airmass),\n pf.Column(name=\"EXPTIME\",format=py_to_fits(exptime),array=exptime),\n pf.Column(name=\"EPOCH\",format=py_to_fits(epoch),array=epoch),\n pf.Column(name=\"APCORR\",format=py_to_fits(apcorr),array=apcorr)] ),\n name = 'Exposures')\n # hdu.header['EXTNAME'] = 'Exposures'\n return hdu", "def 
create_equity_curve_dataframe(self):\n # returns the cumulative product for percent change over every timestamp in the index\n curve = pd.DataFrame(self.all_holdings)\n curve.set_index('datetime', inplace=True)\n curve['returns'] = curve['total'].pct_change()\n #curve['equity_curve'] = (1.0+curve['returns']).cumprod()\n curve['equity_curve'] = curve['returns']\n curve['equity_curve'] += 1\n curve['equity_curve'] = curve['equity_curve'].cumprod()\n self.equity_curve = curve", "def examplars(self):\n return np.array(\n [\n examplar_idx for class_examplars in self._examplars.values()\n for examplar_idx in class_examplars\n ]\n )", "def asset_list(self, **kwargs):\n headers, items = self._get('/asset', kwargs)\n return AssetList(\n headers,\n [Asset.fromdict(item_dict, api=self) for item_dict in items],\n kwargs=kwargs,\n api=self)", "def _filter_universe_from_data_for_prediction(self, data, current_timestamp, universe):\n current_date = current_timestamp.date()\n assets = []\n for idx, row in universe.iterrows():\n if row.start_date <= current_date <= row.end_date:\n assets = row.assets\n break\n\n filtered = {}\n for feature, df in data.items():\n filtered[feature] = df.drop(df.columns.difference(assets), axis=1)\n\n return filtered", "def get_alpha_beta(data, market, risk_free=0, scale=1, dspl=False):\n logger = logging.getLogger(__name__)\n if data.ndim!=1:\n raise ValueError(\"invest.calculation.get_alpha_beta only takes pandas Series\")\n df = get_returns(data, style='log', fillna=False).rename(\"data\").to_frame()\n df['market'] = get_returns(market, style='log', fillna=False)\n df['risk_free'] = risk_free / 100\n # A complicated way to get risk-free rate:\n # df['risk_free'] = df.interest * 0.01 * (df.date-df.date.shift(1)).dt.days / 260\n df.dropna(axis=0, how='any', inplace=True)\n y = (df.data * scale - df.risk_free).values\n x = (df.market * scale - df.risk_free).values\n from machine_learning.Msklearn import LinearRegression\n lm = LinearRegression(intercept=True)\n lm.fit(x, y)\n if dspl:\n lm.summary()\n alpha, beta = lm.beta\n return alpha, beta", "def _get_alpha_beta(self, a, b):\n beta = a / b\n alpha = a * beta\n return alpha, beta", "def create_equity_curve_dataframe(self):\n # returns the cumulative product for percent change over every timestamp in the index\n curve = pd.DataFrame(self.all_holdings)\n curve.set_index('datetime', inplace=True)\n curve['returns'] = curve['total'].pct_change()\n #curve['equity_curve'] = (1.0+curve['returns']).cumprod()\n curve['equity_curve'] = curve['returns']\n curve['equity_curve'] += 1\n curve['equity_curve'] = curve['equity_curve'].cumprod()\n self.equity_curve = curve\n print(curve)", "def asset_get():\n search_assets = request.args.getlist(\"name\")\n find_assets = []\n for asset_name in search_assets:\n if asset_name in app.bank:\n find_assets.append(app.bank[asset_name].to_list())\n find_assets = sorted(find_assets, key=lambda s: s[0])\n return jsonify(find_assets)", "def get_exog_df(explanatory_df, explanatory_variables):\n\n # create an exogenous DataFrame beginning with the intercept column\n number_of_observations = explanatory_df.shape[0]\n intercept_data = np.ones((number_of_observations, 1))\n intercept_column = ['Intercept']\n exog_df = pd.DataFrame(data=intercept_data, columns=intercept_column, index=explanatory_df.index)\n\n # add all explanatory variables\n for variable_name in explanatory_variables:\n variable_transform, raw_variable = find_raw_variable(variable_name)\n transform_function = 
TRANSFORM_FUNCTIONS[variable_transform]\n exog_df[variable_name] = transform_function(explanatory_df[raw_variable])\n\n ordered_exog_variables = intercept_column + explanatory_variables\n\n return exog_df[ordered_exog_variables]", "def all_data(self):\n return pd.concat([self.historic_data, self.dayahead_data])", "def alpha(asset, strategy):\n up = asset['forward_returns'][asset['forward_returns'] > 0]\n down = asset['forward_returns'][asset['forward_returns'] < 0]\n bh_alpha = np.sum(up) / np.abs(np.sum(down))\n\n strat_returns = asset['forward_returns'][strategy]\n up = strat_returns[strat_returns > 0]\n down = strat_returns[strat_returns < 0]\n strat_alpha = np.sum(up) / np.abs(np.sum(down))\n\n _alpha = (strat_alpha / bh_alpha) - 1\n return _alpha", "def homeGrid():\n alphaTarget = 0\n betaTarget = 180\n hasApogee = True\n seed = 0\n rg = gridInit(seed)\n\n for r in rg.robotDict.values():\n r.setAlphaBeta(alphaTarget, betaTarget)\n return rg" ]
[ "0.571466", "0.5539759", "0.5527638", "0.5383794", "0.53507555", "0.5238355", "0.51728773", "0.5134495", "0.5117016", "0.50879073", "0.50690675", "0.5064041", "0.5010054", "0.49698728", "0.49637634", "0.49520984", "0.49412426", "0.4937754", "0.4913756", "0.49073732", "0.4898681", "0.4861123", "0.4858231", "0.48548", "0.4847356", "0.48459482", "0.48316336", "0.48278168", "0.48230937", "0.48187822", "0.47998878", "0.47959015", "0.47785667", "0.47751695", "0.47554934", "0.4753114", "0.4753114", "0.47307998", "0.47253188", "0.47212163", "0.47196117", "0.4712441", "0.4707711", "0.46956968", "0.46943116", "0.46852404", "0.4675936", "0.46656972", "0.4665103", "0.46479014", "0.46455756", "0.46424317", "0.4639351", "0.46366787", "0.46300498", "0.46300498", "0.4629771", "0.46245775", "0.4621123", "0.4619148", "0.4615256", "0.46151456", "0.46133673", "0.4605584", "0.46043643", "0.46033487", "0.45995945", "0.4592331", "0.4590025", "0.45876497", "0.4587375", "0.45777258", "0.45731258", "0.457197", "0.4564037", "0.45626307", "0.4559938", "0.45581117", "0.45553705", "0.45416108", "0.4538158", "0.45341834", "0.45301703", "0.45297378", "0.45264363", "0.45230883", "0.4520314", "0.45149115", "0.45098242", "0.44964942", "0.44964504", "0.449483", "0.4494267", "0.4492331", "0.44850603", "0.44824633", "0.44800285", "0.44770682", "0.44669262", "0.4464707" ]
0.7166975
0
Removes characters listed in self.custom_chars
def _remove_custom_chars(self, text: str) -> str:
    patterns = "|".join([x for x in self.custom_chars])
    return re.sub(patterns, "", str(text), flags=re.IGNORECASE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def _remove_special_chars(self, text: str) -> str:\n pattern = re.compile(self.special_chars_pattern)\n text = re.sub(pattern, \" \", text)\n return text", "def strip_other_charcter():\n pass", "def remove_special_characters(string_list):", "def remove_special_char(self,text):\n modified_text = re.sub(',|;|#,$','',text)\n return modified_text", "def removeNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else '' for i in text])", "def _remove_special_chars(self, doc: str):\n processed_tweet = re.sub('[\\.,!#¡\\?¿%:;´\"@”“&()\\|]', '', doc)\n return processed_tweet", "def remove_special(s):\n return ansi_escape_chars.sub('', s)", "def clean_text(self, text):\n return \"\".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))", "def remove_punct(self,text):", "def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")", "def _remove_unknown_characters(self, text):\n exist = []\n missing_chars = set([])\n for each_char in text:\n if each_char not in self.char_2_imgs:\n if each_char == '・':\n exist.append(each_char)\n else:\n missing_chars.add(each_char)\n else:\n exist.append(each_char)\n\n return ''.join(exist), missing_chars", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def remove_special_chars(text):\n \n text = re.sub(' +', ' ', re.sub('[^A-Za-z ]+', ' ', text).strip())\n return text", "def remove_special_characters_from_text(text) -> str:\n return re.sub(r'[^\\w\\s]', '', text.strip())", "def other_chars(self):\n return [sign for sign in re.findall(r'[^\\w\\s]', self.text)]", "def remove_special_chars(self, text_list):\n return [self._remove_special_chars(text) for text in text_list]", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def remove_extra_characters(self, text):\n if text:\n parsed_text = text\n parsed_text = parsed_text.replace(\"[\", \"\")\n parsed_text = parsed_text.replace(\"]\", \"\")\n parsed_text = parsed_text.replace(\"{\", \"\")\n parsed_text = parsed_text.replace(\"}\", \"\")\n parsed_text = parsed_text.replace(\"|\", \" \")\n parsed_text = parsed_text.replace(\"-\", \"\")\n parsed_text = parsed_text.replace(\"&nbsp;\", \"\")\n parsed_text = parsed_text.replace(\":'\", \"\")\n parsed_text = parsed_text.replace(\"'\", \"\")\n parsed_text = parsed_text.replace(\"#\", \"\")\n parsed_text = parsed_text.replace(\"':\", \"\")\n parsed_text = parsed_text.replace(\"=\", \"\")\n parsed_text = parsed_text.replace(\"*\", \"\")\n parsed_text = parsed_text.replace(\"/\", \"\")\n parsed_text = parsed_text.replace(\"<--\", \"\")\n parsed_text = parsed_text.replace(\"-->\", \"\")\n parsed_text = parsed_text.replace(\"<!--\", \"\")\n parsed_text = parsed_text.replace(\">\", \"\")\n parsed_text = parsed_text.replace(\"<\", \"\")\n\n parsed_text = parsed_text.replace('__NOTOC__', '')\n\n return parsed_text", "def remove_special_characters(text, remove_digits=False):\n pattern = r'[^a-zA-z0-9\\s]' if not remove_digits else r'[^a-zA-z\\s]'\n text = re.sub(pattern, 
'', text)\n return text", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def _remove_diacritics(self, text: str) -> str:\n nfkd_form = unicodedata.normalize(\"NFKD\", text)\n return \"\".join([char for char in nfkd_form if not unicodedata.combining(char)])", "def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def remove_non_ascii(self, words):\n\t\tnew_words = []\n\t\tfor word in words:\n\t\t\tnew_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n\t\t\tnew_words.append(new_word)\n\t\treturn new_words", "def CLEAN(text):\n return _control_char_re.sub('', text)", "def _clean_term(self, term):\n return filter(lambda char: char in allowed_chars, term)", "def filter_characters(self, allow_chars=string.printable, drop_chars=None):\n\n if allow_chars is not None:\n if not isinstance(allow_chars, set):\n allow_chars = set(allow_chars)\n\n drop_chars = ''.join(self.unique_characters - allow_chars)\n else:\n if isinstance(drop_chars, (set, list, tuple)):\n drop_chars = ''.join(drop_chars)\n\n if not isinstance(drop_chars, str):\n raise ValueError('`drop_chars` must be a sequence, set or string if `allow_chars` is not given')\n\n return self.replace_characters(str.maketrans(drop_chars, drop_chars, drop_chars))", "def remove_special_chars(text):\n schars = ''.join([a for a in string.punctuation if a not in \".,?\"])\n\n text = re.sub('[%s]' % re.escape(schars), '', text)\n return text", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def remove_special_chars(s):\n stripped = re.sub('[^\\w\\s]', ' ', s)\n stripped = re.sub('_', ' ', stripped)\n\n # Make all whitespaces only one space\n stripped = re.sub('\\s+', ' ', stripped)\n\n stripped = stripped.strip()\n\n return stripped", "def _replace_non_alnum(self):\n no_punct = [x if x.isalnum() else ' ' for x in self._phrase.lower()]\n return ''.join(no_punct) # Convert an array of char to string", "def removeNonAscii(self, words):\n\t\tnewWords = []\n\t\tfor word in words:\n\t\t\tif isinstance(word, unicode):\n\t\t\t\tnewWord = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore')\n\t\t\telse:\n\t\t\t\tnewWord = word\n\t\t\tnewWords.append(newWord)\n\t\treturn newWords", "def remove_characters(tokens):\n pattern = re.compile('[{}]'.format(re.escape(string.punctuation)))\n 
no_char_tokens = filter(None, [pattern.sub('', token) for token in tokens])\n return no_char_tokens", "def remove_non_alpha(self,text):\n \n removelist=\"-\\.\\/\\?\\@\"\n re_alpha_numeric1=r\"[^0-9a-zA-Z\"+removelist+\" ]\"\n clean_text=re.sub(re_alpha_numeric1,'',text)\n clean_text=clean_text.replace('/',' ')\n clean_text=re.sub(' +', ' ', clean_text)\n return clean_text", "def removeChars(inStr, chars):\n newStr = inStr\n for char in chars:\n newStr = newStr.replace(char, \"\")\n return newStr", "def remove(string, list_of_unwanted_car, replacement_char=\"_\"):\n new_string = string\n for unwanted_char in list_of_unwanted_car:\n new_string = new_string.replace(unwanted_char, replacement_char)\n return new_string", "def remove_control_chars(json_string):\n return re.sub('[\\x00-\\x1f]', '',json_string)", "def _remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_chars_from_string(string: str, chars: str) -> str:\n translate_dict = dict((c, \"\") for c in chars)\n return Str.get_string_from_translate_dict(string, translate_dict)", "def _strip_invalid_characters(self: object) -> None:\n for current_invalid_character in Episode._invalid_characters:\n self.episode_broadcast = self.episode_broadcast.replace(current_invalid_character, \" \").strip()\n self.episode_inspectors = self.episode_inspectors.replace(current_invalid_character, \" \").strip()\n self.episode_name = self.episode_name.replace(current_invalid_character, \" \").strip()\n self.episode_sequence = self.episode_sequence.replace(current_invalid_character, \"-\").strip()", "def removeExtraChars(inStr, char):\n for i in range(5):\n inStr = inStr.replace(char+char, char)\n return inStr", "def remove_non_ascii(words):\n #Revisar esta funcion porque no filtra nada...\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n removed_nonascii = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n removed_nonascii.append(new_word)\n return removed_nonascii", "def remove_string_special_characters(s):\n stripped = re.sub('[^\\w\\s]', '', s)\n stripped = re.sub('_', '', stripped)\n stripped = re.sub('\\s+', ' ', stripped)\n stripped = stripped.strip()\n\n return stripped", "def _remove_special_chars(sentence, replace_with=\"\"):\n sentence = sentence.replace('\\n', replace_with).replace('\\t', replace_with)\n return sentence", "def remove_chars(old_str, chars):\n new_string = old_str\n for char in chars:\n new_string = new_string.replace(char, '')\n \n return new_string", "def remove_non_ascii(text):\n return re.sub(r'[^\\x00-\\x7F]', ' ', text)", "def strip(self, str_text):\n punct_chars = [' ', '.', ',', '!', '?', '&', '\"', \"'\", '-', ':']\n str_text = [i for i in str_text if i not in punct_chars]\n str_text = ''.join(str_text)\n return str_text", "def remove_emoji_punc(text):\n \n allchars = [str for str in text]\n emoji_list = [c for c in allchars if c in emoji.UNICODE_EMOJI]\n clean_text = ' '.join([str for str in text.split() if not any(i in str for i in emoji_list)])\n\n \n s1 = clean_text.replace(u'’', u\"\").replace(\"'\",\"\")\n s1 = re.sub(r'[^a-z0-9 ]+', ' ', s1)\n \n return \" \".join(s1.split())", "def 
remove_special_characters(text):\n soup = BeautifulSoup(text, \"html.parser\")\n review = soup.get_text()\n review = r\"[^a-zA-z0-9\\s]\"\n review = re.sub(review, \"\", text)\n return review.lower()", "def remove_non_ascii(words):\r\n new_words = []\r\n for word in words:\r\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\r\n new_words.append(new_word)\r\n return new_words", "def removeUnicode(text):\n text = re.sub(r'(\\\\u[0-9A-Fa-f]+)',r'', text) \n text = re.sub(r'[^\\x00-\\x7f]',r'',text)\n return text", "def replaceNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else ' ' for i in text])", "def string_cleanup(s, garbage=\":,-()&\"):\n s_new = ''\n for x in s:\n if x not in garbage:\n s_new += x\n\n return s_new", "def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data", "def remove_ats(self):\n\t\tfor key in self.keys():\n\t\t\tif key[:1] == '@':\n\t\t\t\ttry: del self[key]\n\t\t\t\texcept: pass", "def filter_invalid_characters(self, string):\n valid_chars = \"abcdefghijklmnopqrstuvwxyz0123456789-.\"\n newstring = \"\"\n for char in string:\n use_char = char\n if char not in valid_chars:\n use_char = '-'\n newstring = newstring + use_char\n\n return newstring", "def _run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue\n output.append(char)\n return \"\".join(output)", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def remove_specials(sentence):\n sentence = sentence.replace('-', ' ')\n sentence = re.sub(r'[^\\w\\s]', '', sentence)\n return sentence", "def remove_diacritics(self, text):\n text = re.sub(self._arabic_diacritics, '', text)\n return text", "def _run_strip_accents(self, text):\n text = unicodedata.normalize('NFD', text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == 'Mn':\n continue\n output.append(char)\n return ''.join(output)", "def _run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue # pragma: no cover\n output.append(char)\n return 
\"\".join(output)", "def _strip_text(text):\n text = re.sub(r'[ ,?:]|%s', \"\", text.lower())\n for chr in \"-%\":\n new_text = text.replace(chr, \"\")\n if new_text:\n text = new_text\n return text.lower()", "def clean_word(word):\n return \"\".join([c for c in word.lower() if ord(c) < 128])", "def __remove_accolade_chars(self, string):\n if string.endswith('*+'):\n string = string[:-2]\n elif string.endswith('*') or string.endswith('+'):\n string = string[:-1]\n\n return string", "def non_letter_removal(text):\n return re.sub('[^a-zA-Z]', ' ', text)", "def other_chars(self):\n return re.findall(r'[,.!?_\\':;/#%*\\=@\"]', self.text)", "def _clean(self, texts, no_punc=False):\n result = ''\n sw = self._sw_no_punc_dict if no_punc else self._sw_dict\n for t in texts:\n if t not in sw:\n result += t\n return result", "def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt", "def clean_text(text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or is_control(char):\n continue\n if is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def keep_chr(char):\n return (unicodedata.category(char).startswith('P') and\n (char != \"#\" and char != \"@\" and char != \"&\"))", "def _removeWhitespaces(self, s):\n return s.translate({ord(c): None for c in string.whitespace})", "def remove_nonalpha(text):\n text = ''.join(c for c in text if c.isalpha() or c == ' ')\n return re.sub(\" +\", \" \", text)", "def handle_special_symbols(text: str\n ) -> str:\n valid_special_symbols = {' ', '_'}\n\n def criteria(c: str\n ) -> str:\n return c if c.isalnum() or c in valid_special_symbols else ' '\n\n return ''.join(criteria(c) for c in list(text))", "def remove_punctuation(self):\n new_words = []\n # new_words = \"\"\n for word in self.words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_words.append(new_word)\n # new_words += f\"{new_word} \"\n self.words = new_words\n return self", "def remove_inner_word_characters(text):\n return RegexFilters.replace_inner_word_characters(text, \"\")", "def replace_special_chars(self, word):\n try:\n if (self.lang==\"tr\"):\n word = re.sub(u\"\\^db\", u\"+db\", word)\n word = re.sub(u\"\\^\", u\"¬\", word)\n word = re.sub(u\"\\$\", u\"£\", word)\n except UnicodeDecodeError:\n word = ''\n return word", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def remove_accented_chars(text):\n text = unidecode.unidecode(text)\n return text", "def ungapped(self):\n s = self.sequence\n for sGapChar in GAP_CHARACTERS:\n s = s.replace(sGapChar, '')\n return s", "def replace_any_non_letter_or_number_character(text):\n text = 
text.strip()\n text = re.sub('[^A-Za-z0-9 ]+', '', text)\n return text", "def Clean(s):\n for c in BAD_CHARACTERS:\n s = s.replace(c, '_')\n return s", "def lstrip(self, chars=None):\n return asarray(lstrip(self, chars))", "def clean_user_input(self, user_input):\n legal_chars = re.compile(r'^[a-z0-9]$')\n return filter(lambda c: re.match(legal_chars, c), user_input.lower())", "def clean_txt(txt):\n r = txt.encode(\"utf-8\", errors=\"backslashreplace\").decode('utf-8').replace(\"\\\\u0144\", \"\")\n return r", "def wipe_bad_chars(filename):\n return multi_replace(filename, {'(': '', ' ': '_', ')': '', '/': '_'})", "def _removeDiacritics(self, text):\n norm_txt = unicodedata.normalize('NFD', text)\n shaved = ''.join(c for c in norm_txt if not unicodedata.combining(c))\n # remove accents and other diacritics, replace spaces with \"_\" because identifiers can't have spaces\n no_spaces = unicodedata.normalize(\n 'NFC', shaved).lower().replace(\" \", \"_\")\n final_text = no_spaces\n # only allow [a-z], [0-9] and _\n p = re.compile('[a-z0-9_]+')\n for i in range(0, len(no_spaces)):\n if not (p.match(no_spaces[i])):\n final_text = final_text[:i] + '_' + final_text[i+1:]\n # i the first char is not a-z then replaceit (all identifiers must start with a letter)\n p2 = re.compile('[a-z]+')\n if not p2.match(final_text[0]):\n final_text = 'a' + final_text[1:]\n return final_text", "def remove_tokens(self, text):\r\n\r\n return text.replace(self.PAD_TK, \"\").replace(self.UNK_TK, \"\")" ]
[ "0.7372764", "0.7265707", "0.7070031", "0.6986273", "0.69196767", "0.68504214", "0.67211777", "0.66895443", "0.6674223", "0.6622369", "0.662198", "0.66078997", "0.65977836", "0.6588692", "0.65562934", "0.65343094", "0.6528675", "0.6523658", "0.6516981", "0.650481", "0.6504477", "0.64807856", "0.6475873", "0.6475873", "0.64515775", "0.64435786", "0.64316255", "0.6400398", "0.63946515", "0.6393171", "0.6376289", "0.63743037", "0.6349041", "0.6343783", "0.62895685", "0.62817", "0.62568426", "0.6232844", "0.62318265", "0.6225875", "0.6192261", "0.618889", "0.6186559", "0.61814326", "0.6166165", "0.6161223", "0.6158244", "0.61440593", "0.6130172", "0.6117439", "0.6116828", "0.611404", "0.61134416", "0.61007977", "0.6087062", "0.60815924", "0.6067308", "0.6056491", "0.6055025", "0.6046087", "0.6038899", "0.6031631", "0.6031202", "0.6026663", "0.6026663", "0.6026663", "0.6026663", "0.6026663", "0.6026663", "0.6019984", "0.6008303", "0.5983571", "0.5981852", "0.5979459", "0.5969495", "0.59402907", "0.58887726", "0.5886121", "0.58850145", "0.58817947", "0.58780533", "0.58769727", "0.58718246", "0.58673006", "0.5867244", "0.5867192", "0.5863789", "0.585837", "0.5856952", "0.5856952", "0.58568907", "0.5855753", "0.5850488", "0.58488005", "0.58360666", "0.583008", "0.58197385", "0.5814463", "0.5813273", "0.58016837" ]
0.8835553
0
Removes strings starting with http
def _remove_urls(self, text: str) -> str:
    pattern = r"http\S+"
    return re.sub(pattern, " ", str(text))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_URL(sample):\n return re.sub(r\"http\\S+\", \"\", sample)", "def remove_urls(self, text):\n return re.sub(r'http.?://[^\\s]+[\\s]?', '', text)", "def remove_url(txt):\n\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt).split())", "def remove_url(text):\n return re.sub(r'http\\S+', ' ', text)", "def remove_url(tweet):\n return re.sub(r\"http\\S+\", \"URL\", tweet)", "def remove_url(txt):\n\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt).split())", "def remove_urls(text):\n text = re.sub('(?P<url>https?://[^\\s]+)', '', text)\n return text", "def remove_mask(self, string):\n caracter = (r'[.\\/-]')\n if string[0:4] != 'http':\n match = re.search(caracter, string)\n if match:\n string = re.sub(caracter, '', string)\n\n return string", "def remove_urls(text):\n pass", "def url_fix_common_typos(url):\n if url.startswith(\"http//\"):\n url = \"http://\" + url[6:]\n elif url.startswith(\"https//\"):\n url = \"https://\" + url[7:]\n return url", "def remocion_de_urls(self, texto):\n \n texto = re.sub(r'http\\S+', '', texto)\n return texto", "def remove_url(text):\r\n url = re.sub('https?://[A-Za-z0-9./]+', '', text)\r\n return url", "def clean(self, sub):\n sub = re.sub(r'^RT[\\s]+', '', sub)\n sub = re.sub(r'https?:\\/\\/.*[\\r\\n]*', '', sub)\n sub = re.sub(r'#', '', sub)\n sub = re.sub(r'@[A-Za-z0–9]+', '', sub) \n\n return sub", "def removeURL(text):\n text = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))','',text)\n text = re.sub(r'#([^\\s]+)', r'\\1', text)\n return text", "def remove_url(txt):\n print(txt['fields']['tweet'])\n return \" \".join(re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", txt['fields']['tweet']).split())", "def remove_url(sample):\n sample[\"full_text\"] = re.sub(r\"http\\S+\", \"\", sample[\"full_text\"])\n return sample", "def _remove_urls(text: str) -> str:\n pattern = r'(http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/)?[a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(\\/.*)?'\n\n return re.sub(pattern, '', text, flags=re.MULTILINE)", "def clean_urls(self, tweet):\n self.urls = re.findall(self.regexpForURLs, tweet)\n\n for url in self.urls:\n tweet = tweet.replace(url, '')\n\n tweet = self.clean_unnecessary_whitespaces(tweet)\n return tweet", "def url_at_remove(text):\n text = re.sub(r'#\\w+|@\\w+',' ',text)\n # Remove url:\n return(re.sub(r'\\bhttps?:\\/\\/.*[\\r\\n]*', ' ', text, flags=re.MULTILINE))", "def _remove_urls(self, doc: str):\n processed_tweet = re.sub('(https?:)?\\/\\/[\\w\\.\\/-]+', '', doc)\n return processed_tweet", "def remove_urls(self, doc):\n doc = re.sub(\n r'(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)'\n r'(?:[^\\s()<>]+|\\(([^\\s()<>]+|'\n r'(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|'\n r'[^\\s`!()\\[\\]{};:\\'\".,<>]))',\n '',\n doc)\n return ' '.join(doc.split())", "def remove_urls(self, tweet_text):\n\n url_free_tweet_text = \" \".join(\n re.sub(r\"http\\S+\", \"\", tweet_text).split())\n\n return url_free_tweet_text", "def removeurl(wordlist):\n newlist=[]\n for w in wordlist:\n phrases=str(w[0]).split()\n for phrase in phrases:\n if(phrase.startswith('http') is True):\n phrase=\"\"\n newlist.append((phrases,w[1])) \n return newlist", "def clean_content(content):\n content = content.strip()\n valid_words = content.split()\n valid_words = [word for word in valid_words if not word_is_url(word)]\n return \" \".join(valid_words)", "def _fix_url(url):\n\n if not url.startswith('http'):\n url = 
'http://' + url\n\n return url", "def url_removal(text):\n return re.sub(r'''(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]\\\n {2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]\\\n +|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’]))''', '', text)", "def reformat_weburl(s):\n s = (s or '').strip()\n\n if s and '.' in s and 'notavailable' not in s:\n match = re.match(r'^http(s)?://', s)\n if not match:\n s = 'http://' + s\n\n return s", "def cleanUri(uri):\n if not uri.startswith(\"/\") and not uri.startswith('http'):\n uri = \"/\" + uri\n\n if 'http://' in uri or 'https://' in uri:\n uri = uri.split('://')[0] + '://' + \\\n uri.split('://')[1].replace(\"//\", \"/\")\n else:\n uri = uri.replace(\"//\", \"/\")\n\n if uri.endswith(\"/\"):\n uri = uri[:-1]\n\n return uri", "def normalize_url(url):\n if not url.startswith((\"git+\", \"hg+\")):\n return url\n return url[4:]", "def clean_url(url):\n for noisy_url in noisy_urls:\n url = str(url).replace(noisy_url,\"\").lower()\n return url", "def obfuscate_url(url: str) -> str:\n return re.sub(r\"\\/\\/.*:.*@\", \"//***:***@\", url)", "def cleanmatomo_url(self):\n self.matomo_url = re.sub(r\"/\\/$/\", \"\", self.matomo_url) # Cuts \"/\"\n\n if re.match(r\"^http://\", self.matomo_url): # replace it to \"https://\"\n self.matomo_url = re.sub(\"^http://\", \"\", self.matomo_url)\n self.matomo_url = self.protocol + self.matomo_url\n elif not bool(re.match(\"^https://\", self.matomo_url)): # check for \"https://\" and set it\n self.matomo_url = self.protocol + self.matomo_url", "def uncanonicalize(self, url):\n pass", "def _clean_url(self, url):\n return \"\".join(url.split(\"?\")[:1])", "def sanitize_url(url, require_scheme = False):\r\n if not url or ' ' in url:\r\n return\r\n\r\n url = url.strip()\r\n if url.lower() == 'self':\r\n return url\r\n\r\n u = urlparse(url)\r\n # first pass: make sure a scheme has been specified\r\n if not require_scheme and not u.scheme:\r\n url = 'http://' + url\r\n u = urlparse(url)\r\n\r\n if (u.scheme and u.scheme in valid_schemes\r\n and u.hostname and len(u.hostname) < 255\r\n and '%' not in u.netloc):\r\n return url", "def clean(sent):\n p1 = re.compile('\\W')\n p2 = re.compile('\\s+')\n sent = re.sub(r\"http\\S+\", \"\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n sent = re.sub(r'[A-Za-z0-9]', r'', sent)\n sent = re.sub(p1, ' ', sent)\n sent = re.sub(p2, ' ', sent)\n return sent", "def clean_link(self, url: str) -> str:\n return self.CLEAN_REGEX.sub(lambda match: f\"%{ord(match.group(0)):02x}\", url)", "def clean_tweet(tweet): \n #Remove URL\n tweet = re.sub('\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', ' ', tweet) \n #Remove user\n tweet = re.sub('@[^\\s]+','',tweet)\n #Replace #word with word\n tweet = re.sub(r'#([^\\s]+)', ' ', tweet) \n return tweet", "def remove_urls(lista_tweets):\n\n novos_tweets = []\n\n for tweet in lista_tweets:\n texto = re.sub(r\"http\\S+\", \"\", tweet[\"text\"])\n novos_tweets.append(texto)\n\n return novos_tweets", "def cleanUrl(url):\n\turl_clean = url.replace(' ','%20')\n\t\"\"\" add /index.html where necessary \"\"\"\n\tif (url[-1:]=='/'):\n\t\turl_clean += 'index.html'\n\telif (url[-5:].find('.') == -1):\n\t\t url_clean += '/index.html'\n\treturn url_clean", "def is_http(line):\n return line.startswith('http://') or line.startswith('https://')", "def strip(url):\r\n split = list(urlsplit(url))\r\n split[4]=''\r\n return urlunsplit(split)", "def 
clean_tweet(self, tweet):\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w +://\\S +)\", \" \", tweet).split())", "def fix_website(raw_website):\n if url_is_good(raw_website):\n return raw_website\n else:\n return \"http://\" + raw_website", "def sanitize_link(link, url):\n if link.startswith('//'):\n link = f'http:{link}'\n elif link.startswith('/'):\n parsed_url = urlparse(url)\n link = f'http://{parsed_url.hostname}{link}'\n return link", "def clean_url(url):\n o = urlsplit(url)\n return \"{scheme}://{netloc}{path}\".format(\n scheme=o[0], netloc=o[1], path=o[2],\n )", "def getFilteredUrl(self, url):\n url = url.split('#')[0]\n if url.startswith('/wiki'):\n return ('https://en.wikipedia.org' + url)\n if 'en.wikipedia.org/wiki/' not in url:\n return ('https://en.wikipedia.org/wiki' + url)\n return url", "def clean_tweet(tweet):\n\n pattern = r'http\\S+|pic.\\S+|@[a-zA-Z0-9_]+|#[a-zA-Z0-9_]+|[‘’“”’–—…]|\\xa0'\n return re.sub(pattern, '', tweet)", "def remove_www(hostname: str) -> str:\n if hostname.startswith(\"www.\"):\n return hostname[4:]\n return hostname", "def clean_text(text):\n cleanedup = text.lower()\n return re.sub(\"(@[A-Za-z0-9]+)|(#[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", cleanedup)", "def replace_urls(text):\n text = re.sub('(?P<url>https?://[^\\s]+)', 'URL', text)\n return text", "def fixURLS():\n url_re = re.compile(r'http t co \\S+')\n tweets = Tweet.objects.all()\n for tweet in tweets:\n tweet.text = url_re.sub(' ', tweet.text)\n tweet.text = ' '.join(tweet.text.split())\n tweet.save()", "def deprotocolise(url):\n return PROTORE.sub('', url)", "def normalize_url(url):\n # print(url)\n if not url.startswith('http://') and not url.startswith('https://'):\n return 'https://{}/{}'.format(zone_name, url.replace('//', '/'))\n return url", "def clean_tweet(self, tweet):\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())", "def clean_url(url):\n scheme, netloc, path, query, fragment = url_parse.urlsplit(url)\n path = url_parse.quote(path)\n url = url_parse.urlunsplit((scheme, netloc, path, query, fragment))\n return url", "def clean_tweet(tweet):\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+://\\S+)\", \" \", tweet).split())", "def host_cleanup(host):\n if not host.startswith('https://'):\n host = 'https://' + host # Add schema\n host = strip_end(host, '/')\n host = strip_end(host, '/api/v1')\n host = strip_end(host, '/')\n return host", "def test_remove_extra_slash():\n # TODO: Should we actually do this?\n # TODO: See https://webmasters.stackexchange.com/questions/8354/what-does-the-double-slash-mean-in-urls/8381#8381\n assert (normalize_url(\"http://www.example.com/foo//bar.html\") ==\n \"http://www.example.com/foo/bar.html\")\n assert(normalize_url(\"http://example.com///abc\") ==\n \"http://example.com/abc\")", "def sanitize_url(url: str, protocol: str = 'https://') -> str:\n sanitized = url[0:-1] if url[-1] == '/' else url\n with_protocol = sanitized if sanitized.startswith('http') else f'{protocol}{sanitized}'\n return with_protocol", "def clean_tweet(tweet):\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())", "def find_https(x):\n i = 0\n start = None\n end = None\n for i in range(0,len(x)):\n if i < (len(x) - 4):\n string = x[i:i+4]\n if string == \"http\":\n start = i\n if start != None and x[i] == '\"':\n end = i\n return x[start:end]", "def replace_urls_token(text):\n\n text = 
re.sub(r\"^https?://.*[\\r\\n]*\", \"<url/>\", text, re.M | re.I)\n return re.sub(r\"http\\S+(\\s)*(\\w+\\.\\w+)*\", \"<url/>\", text, re.M | re.I)", "def clean_url(url):\r\n s = url\r\n url = url.encode('utf8')\r\n url = ''.join([urllib.quote(c) if ord(c) >= 127 else c for c in url])\r\n return url", "def format_url(url):\n if not (url.startswith(\"//\") or url.startswith(\"http\")):\n url = \"http://\" + url\n return url", "def _sanitize_url_prefix(url_prefix: Optional[str]) -> str:\n if not url_prefix:\n return ''\n\n while url_prefix.startswith('//'):\n url_prefix = url_prefix[1:]\n while url_prefix.endswith('/'):\n url_prefix = url_prefix[:-1]\n\n if url_prefix == '':\n return ''\n\n if url_prefix.startswith('/') \\\n or url_prefix.startswith('http://') \\\n or url_prefix.startswith('https://'):\n return url_prefix\n\n return '/' + url_prefix", "def clean_link(self, link):\n link = link.strip(\"[]\")\n if \"|\" in link: \n link = link.split(\"|\",1)[0]\n link = link.strip() #remove trailing white space\n return link", "def test_strip_leading_trailing_whitespace():\n assert normalize_url(\" http://example.com \") == \"http://example.com/\"\n assert normalize_url(\"http://example.com/a \") == \"http://example.com/a\"\n assert normalize_url(\" http://example.com/\") == \"http://example.com/\"", "def test_lower_case():\n assert normalize_url(\"HTTP://examPle.cOm/\") == \"http://example.com/\"\n assert normalize_url(\"http://example.com/A\") == \"http://example.com/A\"", "def scrub_url(self, url):\n return self.__url_scrubber(url)", "def sanitize_url(urlstring):\n\n # A blog's url is the best unique identifier for the data store\n # (some Twitter handles have more than one blog), but certain\n # punctuation in a string throws an error in Firebase when\n # you attempt to use that string as a key.\n return annoying_punctuation.sub('', urlstring)", "def pruneURL(url):\n match = URL_GROUPER.match(url)\n if match is None:\n return url\n else:\n url_parts = match.groupdict()\n protocol = url_parts['protocol']\n if protocol is None:\n protocol = ''\n tail = url_parts['tail']\n if tail is None:\n tail = ''\n return \"%s://%s\" % (protocol, tail)", "def fix_url(cls, url: str):\r\n ...", "def normalize_url(self, url):\n pass", "def clean_url(url: str) -> str:\n r = urlparse(url)\n parts = list(r)\n # Add a / to the end of the path if it isn't there\n if not parts[2].endswith(\"/\"):\n parts[2] += \"/\"\n return urlunparse(parts)", "def make_clean_url(url):\n return urlparse.urldefrag(url)[0]", "def test_remove_empty_port():\n assert (normalize_url(\"http://www.example.com:/\") ==\n \"http://www.example.com/\")", "def clean_blog_url(raw_url):\n # Example urls that need handling:\n # http://jessicaanner.tumblr.com/post/113520547711/animated-versions-here-main-view-webm-gif\n # http://havesomemoore.tumblr.com/\n # http://pwnypony.com/\n # (?:https?://)([^#/'\"]+)\n stripped_url = raw_url.strip(\"\\r\\n\\t \")\n logging.debug(\"stripped_url: \"+repr(stripped_url))\n blog_url_regex = \"\"\"(?:https?://)?([^#/'\"]+)\"\"\"\n blog_url_search = re.search(blog_url_regex, stripped_url, re.IGNORECASE)\n if blog_url_search:\n blog_url = blog_url_search.group(1)\n return blog_url\n else:\n logging.error(\"Can't parse list item! 
Skipping it.\")\n logging.error(\"clean_blog_url()\"+\" \"+\"raw_url\"+\": \"+repr(raw_url))\n return \"\"", "def clean_url(url):\n return url[:url.find('?')]", "def sanitize_url(self, url):\r\n if not self.markdown.safeMode:\r\n # Return immediately bipassing parsing.\r\n return url\r\n \r\n try:\r\n scheme, netloc, path, params, query, fragment = url = urlparse(url)\r\n except ValueError:\r\n # Bad url - so bad it couldn't be parsed.\r\n return ''\r\n \r\n locless_schemes = ['', 'mailto', 'news']\r\n if netloc == '' and scheme not in locless_schemes:\r\n # This fails regardless of anything else. \r\n # Return immediately to save additional proccessing\r\n return ''\r\n\r\n for part in url[2:]:\r\n if \":\" in part:\r\n # Not a safe url\r\n return ''\r\n\r\n # Url passes all tests. Return url as-is.\r\n return urlunparse(url)", "def clean_tweet(tweet):\n word_out, hashtags = [], []\n for word in tweet.split():\n if word[0] == '#':\n hashtags.append(word)\n elif ((len(word) != 0) and (word[0] != '@')) and (\n len(word) < 4 or ((len(word) > - 4) and (word[:4] != 'http'))):\n word_out.append(word)\n return word_out, hashtags", "def clean_url_path(markup):\n\n soup = BeautifulSoup(markup, \"html.parser\")\n elements = soup.find_all('a')\n\n for url in elements:\n url_href = url.get('href')\n if url.string:\n url_string = url.string.replace('\\n', '').replace(' ', '')\n\n # Only clean links where the URL matches the string, without custom text inside.\n if url_string == url_href:\n url_parse = urllib.parse.urlparse(url_href)\n path = '{0}{1}'.format(url_parse.netloc.replace(\"www.\", \"\"), url_parse.path)\n url.string.replace_with(path)\n return soup.prettify(soup.original_encoding)", "def remove_links(str):\n stripped_str = re.sub(\"\\[.*\\]\",\"\", str)\n str_list = filter(None, stripped_str.split(\" \"))\n built_string = \" \".join(str_list)\n return built_string", "def __clean_url(links_titles):\n clean_urls = []\n for url, title, flag in links_titles:\n duplicates_words = []\n unique_words = []\n for word in str(url).rstrip('/').split('/'):\n if word not in unique_words:\n unique_words.append(word)\n else:\n if word not in duplicates_words:\n duplicates_words.append(word)\n url = str(url).replace(word+'/', '', 1)\n clean_urls.append((url, title, flag))\n return clean_urls", "def normalize_for_url(text: str) -> str:\n\n # German is our main language, so we are extra considerate about it\n # (unidecode turns ü into u)\n text = text.replace(\"ü\", \"ue\")\n text = text.replace(\"ä\", \"ae\")\n text = text.replace(\"ö\", \"oe\")\n clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())\n clean = _double_dash.sub('-', clean)\n clean = clean.strip('-')\n\n return clean", "def transform_url_without_encode(result):\n import re\n result = re.sub('//', '/', result)\n result = re.sub('/', '//', result, count=1)\n return result", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return 
re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def preprocess_url(self, request_url: str):\n if re.fullmatch('.*/+', request_url) is None:\n return request_url\n else:\n return re.compile(request_url.rstrip('/') + '/+')", "def clean_text(self, text: str) -> str:\n url_regex = r\"https?:\\/\\/(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,4}\\b([-a-zA-Z0-9@:%_\\+.~#?&//=]*)\"\n\n text = text.strip(\" _\\t\\n\")\n text = text.split(\"____\")[0] # To remove footnotes\n text = text.strip(\" _\\t\\n\")\n text = re.sub(url_regex, \"<url>\", text) # To remove URLs\n text = re.sub(r\"&gt;.*(?!(\\n+))$\", \"\",\n text) # To remove quotes at last.\n text = re.sub(r\"&gt;(.*)\\n\", \"<startq> \\g<1> <endq>\",\n text) # To add start quote, end quote tags\n text = re.sub(r\"\\n\", \" \", text)\n text = text.rstrip(\" _\\n\\t\")\n text = re.sub(r\"\\n\", \" \", text)\n text = re.sub(r\"\\r\", \" \", text)\n text = text.lower()\n if self.mask_dms:\n text = self.mask_disc_markers(text)\n return text", "def strip_mxp(self, string):\n string = self.mxp_sub.sub(r\"\\2\", string)\n string = self.mxp_url_sub.sub(r\"\\1\", string) # replace with url verbatim\n return string", "def testLeadingSpaces(self):\n self.assertEqual([\"http://tomtom.foobar.org/\"], grab(' http://tomtom.foobar.org/', self.needScheme))\n self.assertEqual([\"http://www.foobi.org/saatoimia\"], grab(' http://www.foobi.org/saatoimia', self.needScheme))", "def test_remove_default_port():\n assert (normalize_url(\"http://www.example.com:80/bar.html\") ==\n \"http://www.example.com/bar.html\")\n assert (normalize_url(\"HTTPS://example.com:443/abc/\") ==\n \"https://example.com/abc\")", "def test_unreserved_percentencoding():\n assert (normalize_url(\"http://www.example.com/%7Eusername/\") ==\n \"http://www.example.com/~username\")\n assert (normalize_url('http://example.com/foo%23bar') ==\n 'http://example.com/foo%23bar')\n assert (normalize_url('http://example.com/foo%2fbar') ==\n 'http://example.com/foo%2Fbar')\n assert (normalize_url('http://example.com/foo%3fbar') ==\n 'http://example.com/foo%3Fbar')", "def url_prepare(url):\n if 'http://' in url or 'https://' in url:\n return url\n try:\n if requests.get('https://' + url):\n return 'https://' + url\n except Exception as ex:\n pprint(ex)\n return 'http://' + url", "def shorten_in_text(text):\n replacements = {} #URL -> is.gd URL\n #Only check for urls that start with \"http://\" for now\n for m in re.finditer(\"http://[^ \\n\\r]*\", text):\n try:\n replacements[m.group()] = shorten_with_is_gd(m.group())\n except:\n replacements[m.group()] = m.group()\n for url,replacement in replacements.items():\n text = text.replace(url, replacement)\n return text" ]
[ "0.78006214", "0.7698079", "0.7578181", "0.74963003", "0.7458265", "0.74112725", "0.73307616", "0.72641194", "0.72031736", "0.7188069", "0.71281874", "0.70712876", "0.7058085", "0.7049562", "0.7030817", "0.69787", "0.6963308", "0.69407505", "0.6912276", "0.68773633", "0.6809274", "0.67975545", "0.67813057", "0.67711765", "0.6759833", "0.6676425", "0.66684043", "0.6622973", "0.659795", "0.65748554", "0.65704674", "0.6510631", "0.6421727", "0.64042056", "0.63885087", "0.6387464", "0.6382279", "0.6350454", "0.63487786", "0.6346099", "0.632983", "0.6329648", "0.6328466", "0.6307168", "0.6304052", "0.6278636", "0.62671137", "0.6266537", "0.62399745", "0.6236387", "0.6229533", "0.6221153", "0.6197753", "0.61906135", "0.6171886", "0.61651903", "0.614838", "0.61178267", "0.61162513", "0.609842", "0.60726994", "0.6041662", "0.60364395", "0.6026255", "0.60121095", "0.6000427", "0.5996423", "0.59898895", "0.59513026", "0.5948021", "0.59466714", "0.59408754", "0.5936783", "0.5902852", "0.58978355", "0.5859634", "0.58551013", "0.584862", "0.5828845", "0.5797125", "0.57920194", "0.57866055", "0.57621986", "0.57610387", "0.5730565", "0.5715456", "0.57141876", "0.57141876", "0.57141876", "0.57141876", "0.57141876", "0.57141876", "0.57141876", "0.57088435", "0.5705577", "0.5705394", "0.57036996", "0.5700321", "0.5699824", "0.5691158" ]
0.780052
1
Removes html tags and other related elements
def _remove_html_tags(self, text: str) -> str:
    pattern = r"""
        (?x)                                            # Turn on free-spacing
        <[^>]+>                                         # Remove <html> tags
        | &([a-z0-9]+|\#[0-9]{1,6}|\#x[0-9a-f]{1,6});   # Remove &nbsp;
        """
    return re.sub(pattern, " ", str(text))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_html_tags_fun(self):\n cleaner = re.compile('<.*?>')\n cleaned_text = re.sub(cleaner, '', self.doc)\n cleaned_text = re.sub('[\\n\\t]', '', cleaned_text)\n self.doc = cleaned_text", "def remove_html_tags(self,text):\n #https://medium.com/@jorlugaqui/how-to-strip-html-tags-from-a-string-in-python-7cb81a2bbf44\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "def remove_Tags(self,text):\n cleaned_text = re.sub('<[^<]+?>', '', text)", "def remove_html_tags(text):\n print('VOU REMOVER AS TAGS DA STRING')\n clean = re.compile('<.*?>')\n print('',re.sub(clean, '', text))\n return re.sub(clean, '', text)", "def clean(self):\n # Calls handle_starttag, handle_endtag, and handle_data\n self.feed()\n\n # Clean up any parent tags left open\n if self.current_parent_element['tag'] != '':\n self.cleaned_html += '</{}>'.format(self.current_parent_element['tag'])\n\n # Remove empty <p> added after lists\n self.cleaned_html = re.sub(r'(</[u|o]l>)<p></p>', r'\\g<1>', self.cleaned_html)\n\n self._remove_pre_formatting()\n\n return self.cleaned_html", "def remove_html(txt):\r\n TAG_RE = re.compile(r'<[^>]+>')\r\n return TAG_RE.sub(\"\", txt).strip()", "def remove_html_tags(text):\r\n clean = re.compile('<.*?>')\r\n return re.sub(clean, '', text)", "def remove_html(text):\n return re.sub(r'<.*?>', r'', text)", "def remove_html_tags(text):\n import re\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "def remove_html_tags(text):\n import re\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "def clean_unneeded_html_tags(html):\n log.debug(\"Removing unnecessary optional HTML tags.\")\n for tag_to_remove in (\"\"\"</area> </base> <body> </body> </br> </col>\n </colgroup> </dd> </dt> <head> </head> </hr> <html> </html> </img>\n </input> </li> </link> </meta> </option> </param> \n </td> </tfoot> </th> </thead> </tr> </basefont> </isindex> </param>\n \"\"\".split()):\n html = html.replace(tag_to_remove, '')\n return html # May look silly but Emmet does this and is wrong.", "def remove_html_tags(text):\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "def remove_html_tags(data):\n p = re.compile(r'<.*?>')\n return p.sub('', data)", "def strip_tags(text):\n # Remove header tags\n p = re.compile(\"<\\?.+?\\?>\") \n text = re.sub(p, \"\", text)\n\n # Remove <HOO>, <p> and <s> tags\n text = text.replace(\"<p>\",\"\")\n text = text.replace(\"</p>\",\"\")\n text = text.replace(\"<s>\",\"\")\n text = text.replace(\"</s>\",\"\")\n text = text.replace(\"<HOO>\",\"\")\n text = text.replace(\"</HOO>\",\"\")\n\n return text", "def clean_html(text):\n cleanr = re.compile(\"<.*?>\")\n clean_text = re.sub(cleanr, \"\", text)\n return clean_text", "def remove_html_tags(text: str) -> str:\n return re.sub('<.*?>', '', text).strip()", "def strip_html_tags(text):\n if text is np.nan:\n return text\n regex = re.compile(r\"<.*?>\")\n return re.sub(regex, \"\", text)", "def RemoveHTMLTags(self, data):\n return self.UnescapeHTMLEntities(lxml.html.fromstring(data).text_content())", "def remove_html_tags(text):\n tag_pattern = re.compile(r'<[^>]+>')\n return tag_pattern.sub('', text)", "def strip_html(unclean, tags=[]):\n # We make this noop for non-string, non-collection inputs so this function can be used with higher-order\n # functions, such as rapply (recursively applies a function to collections)\n if not isinstance(unclean, basestring) and not is_iterable(unclean) and unclean is not None:\n return unclean\n return bleach.clean(unclean, strip=True, tags=tags, 
attributes=[], styles=[])", "def remove_html_tags(text):\n import re\n clean = re.compile('<.*?>|\\\\n')\n return re.sub(clean, '', text)", "def remove_html_tags(self, text, tags):\n\t\tcheck_if_any_type(text, [str, str])\n\n\t\tfor tag in tags:\n\t\t\tcheck_if_any_type(tag, [str, str])\n\t\t\ttext = re.compile('<\\/?%s\\/?>' % tag, re.U).sub('', text)\n\t\treturn text", "def remove_html_tags(text: str) -> str:\n clean = re.compile('<.*?>')\n return re.sub(clean, '', str(text))", "def remove_all_tags(html):\n # remove all <...>\n reobj = re.compile(r\"<[^>]*>\", re.IGNORECASE|re.DOTALL)\n return reobj.sub(\" \", html)", "def remove_html( html):\n return html2txt(html)", "def strip_html_tags(text):\r\n soup = BeautifulSoup(text, 'lxml')\r\n stripped_text = soup.get_text(separator=\" \")\r\n return stripped_text", "def remove_html_tags(text):\n clean = re.compile('<.*?>|&ndash; ')\n return re.sub(clean, '', text)", "def remove_html_tags(text):\n clean = re.compile('<.*?>|&ndash; ')\n return re.sub(clean, '', text)", "def strip_html(unclean):\n # We make this noop for non-string, non-collection inputs so this function can be used with higher-order\n # functions, such as rapply (recursively applies a function to collections)\n if not isinstance(unclean, basestring) and not is_iterable(unclean) and unclean is not None:\n return unclean\n return bleach.clean(unclean, strip=True, tags=[], attributes=[], styles=[])", "def strip_tags_from_html(html):\n\n tag_re = re_compile(r'(<!--.*?-->|<[^>]*>)')\n return tag_re.sub('', html)", "def strip_html_tags(text):\n soup = BeautifulSoup(text, \"html.parser\")\n stripped_text = soup.get_text(separator=\" \")\n return stripped_text", "def strip_html_tags(text):\n soup = BeautifulSoup(text, \"html.parser\")\n stripped_text = soup.get_text(separator=\" \")\n return stripped_text", "def clean_html(input):\n p = HTMLParser(tree=treebuilders.getTreeBuilder(\"dom\"))\n dom_tree = p.parseFragment(input)\n walker = treewalkers.getTreeWalker(\"dom\")\n stream = walker(dom_tree)\n\n s = HTMLSerializer(omit_optional_tags=False)\n return \"\".join(s.serialize(stream))", "def stripHTMLTags (html):\r\n import re\r\n text = html\r\n \r\n # apply rules in given order!\r\n rules = [\r\n { r'>\\s+' : u'>'}, # remove spaces after a tag opens or closes\r\n { r'\\s+' : u' '}, # replace consecutive spaces\r\n { r'\\s*<br\\s*/?>\\s*' : u'\\n'}, # newline after a <br>\r\n #{ r'</(div)\\s*>\\s*' : u'\\n'}, # newline after </p> and </div> and <h1/>...\r\n #{ r'</(p|h\\d)\\s*>\\s*' : u'\\n\\n'}, # newline after </p> and </div> and <h1/>...\r\n { r'<head>.*<\\s*(/head|body)[^>]*>' : u'' }, # remove <head> to </head>\r\n { r'<a\\s+href=\"([^\"]+)\"[^>]*>.*</a>' : u'' }, # show links instead of texts\r\n { r'[ \\t]*<[^<]*?/?>' : u'' }, # remove remaining tags\r\n { r'^\\s+' : u'' } # remove spaces at the beginning\r\n ]\r\n \r\n for rule in rules:\r\n for (k,v) in rule.items():\r\n regex = re.compile (k)\r\n text = regex.sub (v, text)\r\n \r\n # replace special strings\r\n special = {\r\n '&nbsp;' : ' ', '&amp;' : '&', '&quot;' : '\"',\r\n '&lt;' : '<', '&gt;' : '>'\r\n }\r\n \r\n for (k,v) in special.items():\r\n text = text.replace (k, v)\r\n \r\n return text", "def stripHTMLTags (html):\n text = html\n \n # apply rules in given order!\n rules = [\n { r'>\\s+' : u'>'}, # remove spaces after a tag opens or closes\n { r'\\s+' : u' '}, # replace consecutive spaces\n { r'\\s*<br\\s*/?>\\s*' : u'\\n'}, # newline after a <br>\n { r'</(div)\\s*>\\s*' : u'\\n'}, # newline after </p> and 
</div> and <h1/>...\n { r'</(p|h\\d)\\s*>\\s*' : u'\\n\\n'}, # newline after </p> and </div> and <h1/>...\n { r'<head>.*<\\s*(/head|body)[^>]*>' : u'' }, # remove <head> to </head>\n { r'<a\\s+href=\"([^\"]+)\"[^>]*>.*</a>' : r'\\1' }, # show links instead of texts\n { r'[ \\t]*<[^<]*?/?>' : u'' }, # remove remaining tags\n { r'^\\s+' : u'' } # remove spaces at the beginning\n ]\n \n for rule in rules:\n for (k,v) in rule.items():\n regex = re.compile (k)\n text = regex.sub (v, text)\n \n # replace special strings\n special = {\n '&nbsp;' : ' ', '&amp;' : '&', '&quot;' : '\"',\n '&lt;' : '<', '&gt;' : '>'\n }\n \n for (k,v) in special.items():\n text = text.replace (k, v)\n\n filtered = filter(lambda x: not re.match(r'^\\s*$', x), text) \n finaltext = re.sub(u'分享:','', filtered)\n return finaltext", "def remove_html_tags(text):\n import re\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text).rstrip('...')", "def remove_tags(text):\n\n global cleanr\n global cleann\n global cleans\n try:\n text = BeautifulSoup(text)\n for table in text.findAll(\"table\"):\n table.extract()\n text = text.text\n text = re.sub(cleanr, '', text)\n text = re.sub(cleann, '', text)\n text = re.sub(cleans, ' ', text)\n\n except Exception as e:\n pass\n\n return text", "def remove_html(self):\n text_func = self._remove_html\n args = ()\n kwargs = {}\n DataSet._apply_to_texts(text_func, self._meta, args, kwargs)\n return None", "def clean_html(soup):\n html = str(soup.findAll('p', text=True)).strip()\n tags = re.compile('<.*?>')\n clean_2 = re.sub(tags, '', html)\n line_removed = clean_2.replace('\\n', ' ').replace('\\r', '').replace('’', ' ')\n return re.sub(r\"[-()\\\"#”/@“—;:<>{}'`+=~|!?,]\", \"\", line_removed).strip()", "def remove_tags(raw):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, ' ', raw)\n return cleantext", "def remove_tags(text):\n # Remove HTML tags\n soup = BeautifulSoup(text, \"html.parser\")\n [s.extract() for s in soup(['iframe', 'script'])]\n stripped_text = soup.get_text()\n stripped_text = re.sub(r'[\\r|\\n|\\r\\n]+', '\\n', stripped_text)\n \n \n text = unicodedata.normalize('NFKD', stripped_text).encode('ascii', 'ignore').decode('utf-8', 'ignore') # Remove Accented characters\n text = re.sub(r'[^\\x00-\\x7F]+','', text) # Remove Non-Ascii characters\n text = re.sub(\"[a-z0-9\\.\\-+_]+@[a-z0-9\\.\\-+_]+\\.[a-z]+\", '', text) # Remove Emails\n text = re.sub(r\"http\\S+\", \"\", text) # Remove URLs\n return text", "def removeHtmlTags(self, text):\n sb = []\n text = self.removeHtmlComments(text)\n bits = text.split(u'<')\n sb.append(bits.pop(0))\n tagstack = []\n tablestack = tagstack\n for x in bits:\n m = _tagPattern.match(x)\n if not m:\n continue\n slash, t, params, brace, rest = m.groups()\n t = t.lower()\n badtag = False\n if t in _htmlelements:\n # Check our stack\n if slash:\n # Closing a tag...\n if t in _htmlsingleonly or len(tagstack) == 0:\n badtag = True\n else:\n ot = tagstack.pop()\n if ot != t:\n if ot in _htmlsingleallowed:\n # Pop all elements with an optional close tag\n # and see if we find a match below them\n optstack = []\n optstack.append(ot)\n while True:\n if len(tagstack) == 0:\n break\n ot = tagstack.pop()\n if ot == t or ot not in _htmlsingleallowed:\n break\n optstack.append(ot)\n if t != ot:\n # No match. 
Push the optinal elements back again\n badtag = True\n tagstack += reversed(optstack)\n else:\n tagstack.append(ot)\n # <li> can be nested in <ul> or <ol>, skip those cases:\n if ot not in _htmllist and t in _listtags:\n badtag = True\n elif t == u'table':\n if len(tablestack) == 0:\n bagtag = True\n else:\n tagstack = tablestack.pop()\n newparams = u''\n else:\n # Keep track for later\n if t in _tabletags and u'table' not in tagstack:\n badtag = True\n elif t in tagstack and t not in _htmlnest:\n badtag = True\n # Is it a self-closed htmlpair? (bug 5487)\n elif brace == u'/>' and t in _htmlpairs:\n badTag = True\n elif t in _htmlsingleonly:\n # Hack to force empty tag for uncloseable elements\n brace = u'/>'\n elif t in _htmlsingle:\n # Hack to not close $htmlsingle tags\n brace = None\n else:\n if t == u'table':\n tablestack.append(tagstack)\n tagstack = []\n tagstack.append(t)\n newparams = self.fixTagAttributes(params, t)\n if not badtag:\n rest = rest.replace(u'>', u'&gt;')\n if brace == u'/>':\n close = u' /'\n else:\n close = u''\n sb.append(u'<')\n sb.append(slash)\n sb.append(t)\n sb.append(newparams)\n sb.append(close)\n sb.append(u'>')\n sb.append(rest)\n continue\n sb.append(u'&lt;')\n sb.append(x.replace(u'>', u'&gt;'))\n\n # Close off any remaining tags\n while tagstack:\n t = tagstack.pop()\n sb.append(u'</')\n sb.append(t)\n sb.append(u'>\\n')\n if t == u'table':\n if not tablestack:\n break\n tagstack = tablestack.pop()\n\n return u''.join(sb)", "def remove_html(x: str) -> str:\n regex = r\"<.+?>\"\n return re.sub(regex, \"\", x)", "def remove_html_tags(html_text: str) -> str:\n document = fromstring(html_text)\n text = document.text_content()\n return text.strip()", "def stripHtml(html):\n\t# kinda works\n\tres = html.replace(\"&lt;\", \"<\")\n\tres = res.replace(\"&gt;\", \">\")\n\tres = re.sub(r'<[^>]+>', '', res)\n\treturn res", "def _remove_tags(self, text):\n try:\n result = \"\".join(xml.etree.ElementTree.fromstring(text).itertext()).replace(\n \"\\n\\n\", \"\\n\"\n )\n except: # pylint: disable=bare-except\n result = text\n return result", "def clear_text(body):\n soup = BeautifulSoup(body, features=\"html.parser\")\n for a in soup.findAll('a'):\n # print(a)\n # del a['href']\n a.replaceWithChildren()\n\n # for code in soup.findAll('code'):\n # # print(a)\n # # del a['href']\n # print(\"888888888888888888\")\n # print(code)\n # print(\"888888888888888888\")\n # #code.replaceWithChildren()\n #\n # del code\n\n return str(soup)", "def strip_html(inputString):\r\n return BeautifulSoup(inputString, \"html.parser\").text", "def RemoveHTMLTags(data):\n\n p = re.compile(r'<[^<]*?>')\n return p.sub('', data)", "def remove_tags(text):\n tree = html.fromstring(text)\n return tree.xpath(\"//text()\")", "def strip_tags(self, html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()", "def remove_html_widgets(self):\n ### Currently unsure how to support html-widgets so as a simple solution\n ### I am just removing them\n for i in self.book.xpath(\"//div[contains(@class, 'html-widget')]\"):\n print(\"Unsupported widget removed\")\n i.clear()", "def strip_html(text):\n soup = BeautifulSoup(text, \"html.parser\")\n return soup.get_text()", "def strip_tags(html):\n if html is None:\n html = ''\n s = MLStripper()\n s.feed(html)\n return s.get_data()", "def remove_unwanted_tags(soup: bs4.BeautifulSoup):\n for tag in soup.find_all(['script', 'style']):\n tag.decompose()", "def strip_tags(src):\n res = ''.join(BeautifulSoup(src).findAll(text=True))\n res = re.sub(r\"\\s+\", \" \", 
res).strip()\n return res", "def remove_html_tags(text):\n if type(text) is pd.core.series.Series or type(text) is str:\n text = text.replace(\"'\", \" \").replace('\"', \" \")\n clean = re.compile('<.*?>')\n return re.sub(clean, ' ', text)\n return text", "def cleaned_html(self):\n cleaner = Cleaner()\n cleaner.scripts = True\n cleaner.javascript = True\n cleaner.comments = True\n cleaner.style = True\n self.dom = cleaner.clean_html(self.dom)\n assert self.dom, 'The html needs to be parsed to get the cleaned html'\n return lxml.html.tostring(self.dom)", "def strip_html(text: str, **serializer_kwargs: bool):\n cleaner = get_cleaner(**serializer_kwargs)\n text = cleaner.clean(text)\n return text", "def cleanup(self):\n for element in self.root.iter():\n element.tag = element.tag.partition('}')[-1]", "def removeMarkup(self, text):\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)", "def clean_html(self):\n self.cleaned_html = self.html.strip()\n for begin_splitter in self.begin_splitters:\n self.cleaned_html = self.cleaned_html.split(begin_splitter)[-1]\n for end_splitter in self.end_splitters:\n self.cleaned_html = self.cleaned_html.split(end_splitter)[0]\n self.cleaned_html = self.cleaned_html.strip()\n return self.cleaned_html", "def clean_html(html):\n html = re.sub(r\"(?s)<!--(.*?)-->[\\n]?\", \"\\\\1\", html)\n html = re.sub(r\"<!--\", \"\", html)\n if html == '':\n return ''\n s = MLStripper()\n s.feed(html)\n return s.get_data().strip()", "def replace_with_text(self):\r\n self.parser.stripTags(self.get_top_node(), 'b', 'strong', 'i', 'br', 'sup')", "def striphtml(content):\n\tif not isinstance(content, basestring):\n\t\treturn u''\n\tcontent = re_script.sub(u'',content)\n\tdoc = html.fragment_fromstring(content, create_parent=True)\n\tclean.clean_html(doc)\n\treturn unicode(re_nl.sub(u'', doc.text_content()))", "def remove_all_empty_tags(soup):\n return remove_empty_tags(soup, soup.name, recursive=True)", "def strip_logfile_html(text):\n out_text = \"\"\n buff = \"\"\n start_tag = \"\"\n end_tag = \"\"\n context = \"none\"\n for i in range(len(text)):\n c = text[i]\n # print \"c = \"+str(c)+\" context = \"+str(context)\n if c == \"<\":\n if context == \"none\":\n # Possible start of a tag, depending on\n # next character\n context = \"putative_tag\"\n buff = c\n else:\n # Everything up to this needs to\n # be dumped directly to output\n out_text = out_text + escape_xml_characters(buff)\n elif context == \"putative_tag\":\n buff = buff + c\n if c.isalpha():\n context = \"start_tag\"\n elif c == \"/\":\n context = \"end_tag\"\n elif c == \"!\":\n context = \"comment_tag\"\n else:\n # Not a tag so dump it\n context = \"none\"\n out_text = out_text + escape_xml_characters(buff)\n elif context == \"start_tag\" or context == \"end_tag\" or context == \"comment_tag\":\n buff = buff + c\n if c == \">\":\n if context == \"start_tag\":\n # End of a start tag\n # Process it and see if we can\n # salvage something\n salvage_text = salvage_tag_data(buff)\n if salvage_text != \"\":\n out_text = out_text + escape_xml_characters(salvage_text)\n # Reset the buffer\n context = \"none\"\n buff = \"\"\n elif context == \"end_tag\":\n # End of an end tag\n # Throw this away (for now)\n context = \"none\"\n buff = \"\"\n elif context == \"comment_tag\":\n # End of a comment\n # Throw this away (for now)\n context = \"none\"\n buff = \"\"\n else:\n # Nothing special about this\n # Add to the output\n out_text = out_text + escape_xml_characters(c)\n # Finished - append the remaining 
buffer\n out_text = out_text + escape_xml_characters(buff)\n return remove_blank_lines(out_text)", "def scrubHTML( html ):\n parser = StrippingParser()\n parser.feed( html )\n parser.close()\n return parser.result", "def clean_xml_tags(text):\n tag_re = re.compile(r'<[^>]+>')\n text = tag_re.sub('', text)\n return text", "def strip_tags(text, valid_tags={}):\n try:\n text = HTMLParser().unescape(text)\n soup = BeautifulSoup(text)\n for comment in soup.findAll(text=lambda text: isinstance(text, Comment)):\n comment.extract()\n for tag in soup.findAll(True):\n if tag.name in valid_tags:\n valid_attrs = valid_tags[tag.name]\n # tag.attrs = [(attr, val.replace('javascript:', ''))\n # for attr, val in tag.attrs if attr in valid_attrs]\n\n attrs = {}\n for attr, val in tag.attrs.items():\n if attr in valid_attrs:\n attrs[attr] = val.replace('javascript:', '')\n\n tag.attrs = attrs\n\n else:\n tag.hidden = True\n return soup.renderContents().decode('utf8')\n except Exception as ex:\n return str(ex)", "def remove_tags_and_contents(html, *tags):\n # remove *tags and the stuff between them. tag is \"table\"\n for tag in tags:\n start, end = _make_tag_patterns(tag)\n reobj = re.compile(r\"%s.*?%s\" % (start, end), re.IGNORECASE|re.DOTALL)\n html = reobj.sub(\" \", html)\n return html", "def strip_markup(text):\n html_tag_regex = re.compile(\n r'<'\n r'[(--)\\?\\!\\%\\/]?'\n r'[a-zA-Z0-9#\\\"\\=\\s\\.\\;\\:\\%\\&?!,\\+\\*\\-_\\/]+'\n r'\\/?>',\n re.MULTILINE | re.UNICODE\n )\n if text:\n text = re.sub(html_tag_regex, ' ', text)\n return text", "def strip_html(html_str):\n return bleach.clean(html_str, tags=[], attributes={},\n styles=[], strip=True)", "def RemoveHTMLTags(text, \\\n separator=''):\n clean = re.compile('<.*?>')\n return re.sub(clean, separator, text)", "def clean_html(html_file):\n clean_file = (\n html_file.replace(\"<section><section><image>\", \"\")\n .replace(\"</image></section></section>\", \"\")\n .replace(\"</image><image></section>\", \"\")\n )\n return clean_file", "def removeTags(self, words):\n\t\treturn re.sub(r'<.*?>', '', words)", "def remove_empty_tags(soup, tag_name, recursive=False):\n new_soup = BeautifulSoup(str(soup))\n for tag in new_soup.findAll(tag_name, recursive=recursive):\n if not tag.text:\n tag.extract()\n return new_soup", "def strip_tags(self, *a, **kw):\n return strip_tags(self._widget_cget(text_s), *a, **kw)", "def _strip_tags(value):\r\n return re.sub(r'<[^>]*?>', ' ', force_unicode(value))", "def _strip_excerpt(self, raw_html):\n clean_regex = re.compile(\"<.*?>\")\n clean_text = re.sub(clean_regex, \"\", raw_html)\n return html.unescape(clean_text).replace(\"\\n\", \"\")", "def bs_preprocess(html):\n pat = re.compile('(^[\\s]+)|([\\s]+$)', re.MULTILINE)\n html = re.sub(pat, '', html) # remove leading and trailing whitespaces\n html = re.sub('\\n', ' ', html) # convert newlines to spaces\n # this preserves newline delimiters\n html = re.sub('[\\s]+<', '<', html) # remove whitespaces before opening tags\n html = re.sub('>[\\s]+', '>', html) # remove whitespaces after closing tags\n return html", "def remove_html_tags(text):\r\n global count\r\n clean = re.compile('<.*?>')\r\n var = re.sub(clean, '', str(item))\r\n # FOR ADDING NUMBERS ON SAME LINE\r\n if count < 2:\r\n print(var,end=' ')\r\n count+=1\r\n if count > 1:\r\n count = 0\r\n print(\"\\n\")\r\n if var.isnumeric()==False:\r\n college_names.append(var)", "def strip_tags(value):\n if value:\n return re.sub(r'<[^>]*?>', '', value)\n return \"\"", "def strip_tags(value):\n if value:\n return 
re.sub(r'<[^>]*?>', '', value)\n return \"\"", "def strip_tags(value):\n return re.sub(r'<[^>]*?>', '', value)", "def sanitize_html(input):\n p = HTMLParser(tokenizer=HTMLSanitizer, tree=treebuilders.getTreeBuilder(\"dom\"))\n dom_tree = p.parseFragment(input)\n walker = treewalkers.getTreeWalker(\"dom\")\n stream = walker(dom_tree)\n\n s = HTMLSerializer(omit_optional_tags=False)\n return \"\".join(s.serialize(stream))", "def test_drop_html():\n cleaner = TextCleaner()\n assert cleaner.transform([[\"<table>test</table>\"]])[\"corpus\"][0] == \"test\"\n assert not cleaner.drops[\"html\"].dropna().empty", "def clean_html(message):\n all_lines = []\n started_html = False\n finished_with_html_tag = False\n html_part = []\n for idx, line in enumerate(message.split(\"\\n\")):\n if re.search(r\"<.*?html.*?>\", line):\n started_html = True\n html_part.append(line)\n else:\n if started_html:\n html_part.append(line)\n else:\n all_lines.append(line)\n if \"</html>\" in line:\n finished_with_html_tag = True\n if finished_with_html_tag:\n all_lines.append(clean_text_from_html_tags(\"\\n\".join(html_part)))\n html_part = []\n finished_with_html_tag = False\n started_html = False\n if len(html_part) > 0:\n all_lines.extend(html_part)\n return delete_empty_lines(\"\\n\".join(all_lines))", "def remove_tags(self, rules):\n for rule in rules:\n [s.extract() for s in self.soup.find_all(**rule)]", "def clean_tag(data):\n # TODO: make this a method of Tag?\n return escape_html(data).replace('\"', '&quot;').replace(\"'\", '&#39')", "def clean_text_from_html_tags(message):\n regex_style_tag = re.compile('<style.*?>[\\\\s\\\\S]*?</style>')\n message = re.sub(regex_style_tag, \" \", message)\n regex_script_tag = re.compile('<script.*?>[\\\\s\\\\S]*?</script>')\n message = re.sub(regex_script_tag, \" \", message)\n regex_html_tags = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n message = re.sub(regex_html_tags, \" \", message)\n return message", "def remove_tag(self, rules):\n for rule in rules:\n [s.extract() for s in self.soup.find_all(limit=1, **rule)]", "def _remove_tags(xml):\n chars = list(xml)\n\n i = 0\n while i < len(chars):\n if chars[i] == '<':\n while chars[i] != '>':\n chars.pop(i) # pop everything between brackets\n chars.pop(i) # pops the right-angle bracket, too\n else:\n i += 1\n\n return ''.join(chars)", "def _npgStripExtra(self, htmlStr):\n lines = htmlStr.splitlines()\n start, end = (0, 0)\n for i, line in enumerate(lines):\n if '<article>' in line and start != 0:\n start = i\n if '</article>' in line and end != 0:\n end = i\n\n if start != 0 and end != 0 and end > start and end - start > 10 and end < len(lines):\n logging.log(5, 'stripping some extra html')\n return ''.join(lines[start:end + 1])\n else:\n return htmlStr", "def clean_content(self) -> str:", "def remove_html_tags_from_text(html_data, add_detectors=True, attached_tags=list, site_tags=list,\n exclude_site_tags=False, exclude_assignment=False):\n try:\n html_data = html.unescape(html_data)\n if add_detectors:\n html_data = __set_has_codeblock(html_data)\n html_data = __set_has_link(html_data)\n if html_data is None:\n return None\n stripper = HTMLStripper()\n stripper.feed(html_data)\n stripped_html = stripper.get_data()\n # remove newlines from string (since all posts starts/ends with <p>)\n stripped_html = ' '.join(stripped_html.split())\n if add_detectors:\n stripped_html = __set_has_hexadecimal(stripped_html)\n stripped_html = __set_has_numeric(stripped_html)\n # due to external tags also overwriting others, 
this has been omitted\n stripped_html = __set_has_tag(stripped_html, attached_tags, site_tags, exclude_site_tags)\n homework_list = constants.HOMEWORK_SYNONMS_LIST\n homework_list.sort(key=len, reverse=True)\n replacement_text = constants.QUESTION_HAS_HOMEWORK_KEY\n stripped_html = __set_has_homework_or_assignment(stripped_html, replacement_text, homework_list)\n if not exclude_assignment:\n assignment_list = constants.ASSIGNMENT_LIST\n replacement_text = constants.QUESTION_HAS_ASSIGNMENT_KEY\n stripped_html = __set_has_homework_or_assignment(stripped_html, replacement_text, assignment_list)\n return stripped_html\n except TypeError as error:\n # print html_data\n print(\"Error occurred in text_processor.remove_html_tags_from_text\", error)\n return None", "def remove_special_tags(text):\n clean = re.compile('{.*?}')\n return re.sub(clean, '', text)", "def normalize_html(html):\n # Replace many whitespace characters with a single space in some elements\n # kind of like a browser does.\n soup = BeautifulSoup(html, 'lxml')\n for e in soup.select(':not(script,pre,code,style)'):\n for part in e:\n if isinstance(part, NavigableString):\n crunched = NavigableString(re.sub(r'\\s+', ' ', part))\n if crunched != part:\n part.replace_with(crunched)\n # Asciidoctor adds a \"content\" wrapper. It doesn't really change the layout\n # so we're ok with it.\n for e in soup.select('#content'):\n e.unwrap()\n # Docbook adds <span class=\"emphasis\"> around <em> tags. We don't need them\n # and it isn't worth making Asciidoctor make them.\n for e in soup.select('.emphasis'):\n e.unwrap()\n # Asciidoctor adds a \"ulist\" class to all unordered lists which doesn't\n # hurt anything so we can ignore it.\n for e in soup.select('.itemizedlist.ulist'):\n e['class'].remove('ulist')\n # Docbook adds type=\"disc\" to ul which is the default and isn't needed.\n for e in soup.select('ul'):\n if 'type' in e.attrs and e['type'] == 'disc':\n del e['type']\n # Asciidoctor adds a \"olist\" class to all ordered lists which doesn't\n # hurt anything so we can ignore it.\n for e in soup.select('.orderedlist.olist'):\n e['class'].remove('olist')\n # Docbook adds type=\"1\" to ol which is the default and isn't needed.\n for e in soup.select('ol'):\n if 'type' in e.attrs and e['type'] == '1':\n del e['type']\n # Docbook emits images with the 'inlinemediaobject' class and Asciidoctor\n # has the 'image' class. We've updated our styles to make both work.\n for e in soup.select('.inlinemediaobject'):\n e['class'].remove('inlinemediaobject')\n e['class'].append('image')\n # Docbook links with `<a class=\"link\"` when linking from one page of a book\n # to another. Asciidoctor emits `<a class=\"link\"`. Both look fine.\n for e in soup.select('a.xref'):\n if '.html#' in e['href']:\n e['class'].remove('xref')\n e['class'].append('link')\n # Format the html with indentation so we can *see* things\n html = soup.prettify()\n # docbook spits out the long-form charset and asciidoctor spits out the\n # short form but they are equivalent\n html = html.replace(\n '<meta content=\"text/html; charset=utf-8\" http-equiv=\"Content-Type\"/>',\n '<meta charset=\"utf-8\"/>')\n return html", "def clean_spam(doc):\n for tag in doc.find_all([\"div\",\"ol\", \"dl\", \"ul\", \"table\", \"section\"]):\n if no_block_children(tag) and is_ad_block(tag):\n tag.extract()", "def unhtmlify(html):\n return unescape(re.sub(r'<.*?>', '', html))" ]
[ "0.84983486", "0.80671585", "0.7806236", "0.7542184", "0.7538982", "0.75369763", "0.75203156", "0.74862987", "0.74467105", "0.74467105", "0.7445784", "0.7424373", "0.73804176", "0.7364171", "0.7357503", "0.7353617", "0.7350502", "0.7333802", "0.7327136", "0.73163265", "0.73162127", "0.7308752", "0.72898585", "0.7285142", "0.727812", "0.7271588", "0.72190624", "0.72190624", "0.7195318", "0.71868676", "0.7174927", "0.7174927", "0.71698797", "0.71592957", "0.71573544", "0.7155435", "0.7153108", "0.71376306", "0.7134396", "0.7124157", "0.71208626", "0.71028227", "0.70937985", "0.70904034", "0.7077325", "0.7037875", "0.7037452", "0.70318353", "0.7021814", "0.7013478", "0.7011239", "0.6932151", "0.69204706", "0.6920122", "0.6914998", "0.6897866", "0.6854668", "0.68496287", "0.68411314", "0.68161273", "0.67801785", "0.6758677", "0.6745472", "0.6737187", "0.67301154", "0.67250335", "0.6687672", "0.6684275", "0.6682817", "0.66693527", "0.6667736", "0.66659725", "0.6654932", "0.66098326", "0.660831", "0.6604308", "0.659734", "0.6577418", "0.65432256", "0.64789027", "0.6471035", "0.64621687", "0.64439094", "0.64439094", "0.64349747", "0.640291", "0.6397612", "0.6397265", "0.63883436", "0.6387703", "0.6383526", "0.63735116", "0.6353344", "0.6346715", "0.6332508", "0.6320506", "0.6310105", "0.62877685", "0.62387884", "0.62375754" ]
0.74180526
12
Replaces accents with plain alphabets
def _remove_diacritics(self, text: str) -> str: nfkd_form = unicodedata.normalize("NFKD", text) return "".join([char for char in nfkd_form if not unicodedata.combining(char)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _replace_accented(text: str) -> str:\n return unidecode.unidecode(text)", "def remove_accented_chars(text):\n text = unidecode.unidecode(text)\n return text", "def replace_accented(input_str):\n nkfd_form = unicodedata.normalize('NFKD', input_str)\n return u\"\".join([c for c in nkfd_form if not unicodedata.combining(c)])", "def remove_accented_chars(text):\n text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n return text", "def normalize_alphabet(sentence):\n marks = (\n ('á', 'a'), ('â', 'a'), ('ã', 'a'), ('à', 'a'),\n ('Á', 'A'), ('Â', 'A'), ('Ã', 'A'), ('À', 'A'),\n ('é', 'e'), ('ê', 'e'),\n ('É', 'E'), ('Ê', 'E'),\n ('í', 'i'),\n ('Í', 'I'),\n ('ó', 'o'), ('ô', 'o'), ('õ', 'o'),\n ('Ó', 'O'), ('Ô', 'O'), ('Õ', 'O'),\n ('ú', 'u'),\n ('Ú', 'U'),\n ('ç', 'c'),\n ('Ç', 'C'),\n )\n for mark in marks:\n sentence = re.sub(mark[0], mark[1], sentence)\n sentence = sentence.lower()\n sentence = re.sub(r'[?|\\.|!|:|,|;]', '', sentence)\n sentence = re.sub(r'^\\w+\\t+[^\\w]', '', sentence) # Drop tags (?!?)\n return str(sentence)", "def _removeDiacritics(self, text):\n norm_txt = unicodedata.normalize('NFD', text)\n shaved = ''.join(c for c in norm_txt if not unicodedata.combining(c))\n # remove accents and other diacritics, replace spaces with \"_\" because identifiers can't have spaces\n no_spaces = unicodedata.normalize(\n 'NFC', shaved).lower().replace(\" \", \"_\")\n final_text = no_spaces\n # only allow [a-z], [0-9] and _\n p = re.compile('[a-z0-9_]+')\n for i in range(0, len(no_spaces)):\n if not (p.match(no_spaces[i])):\n final_text = final_text[:i] + '_' + final_text[i+1:]\n # i the first char is not a-z then replaceit (all identifiers must start with a letter)\n p2 = re.compile('[a-z]+')\n if not p2.match(final_text[0]):\n final_text = 'a' + final_text[1:]\n return final_text", "def standardize(text):\n # FIXME regex restricts us to only ascii\n # FIXME move regex compilation outside\n p = re.compile('[^a-zA-Z]')\n retval = p.sub('', text)\n retval = retval.lower()\n return retval", "def remove_accents(raw_text):\n\n raw_text = re.sub(u\"[àáâãäå]\", 'a', raw_text)\n raw_text = re.sub(u\"[èéêë]\", 'e', raw_text)\n raw_text = re.sub(u\"[ìíîï]\", 'i', raw_text)\n raw_text = re.sub(u\"[òóôõö]\", 'o', raw_text)\n raw_text = re.sub(u\"[ùúûü]\", 'u', raw_text)\n raw_text = re.sub(u\"[ýÿ]\", 'y', raw_text)\n raw_text = re.sub(u\"[ß]\", 'ss', raw_text)\n raw_text = re.sub(u\"[ñ]\", 'n', raw_text)\n return raw_text", "def make_alphabetic(text):\n text = re.sub(r'[^A-Za-z\\s]', '', text)\n return text.lower()", "def normalize_arabic_alphabet(self, text):\n text = re.sub(\"[إأآا]\", \"ا\", text)\n text = re.sub(\"ى\", \"ي\", text)\n text = re.sub(\"ؤ\", \"ء\", text)\n text = re.sub(\"ئ\", \"ء\", text)\n text = re.sub(\"ة\", \"ه\", text)\n text = re.sub(\"گ\", \"ك\", text)\n return text", "def _transliterate_text(self, _text):\n return _text.upper()", "def asciify(text: str) -> str:\n return \"\".join(\n filter(\n lambda x: x in list(string.ascii_letters) or x.isspace(), \n unidecode.unidecode(text).lower()\n )\n )", "def caesar_encode(self, text, key):\n result_list = []\n for char in text:\n if char.isalpha():\n if char.islower():\n offset = ASCII_LOWER_OFFSET\n else:\n offset = ASCII_UPPER_OFFSET\n char = chr((ord(char) - offset + key) % ALPHABET_SIZE + offset)\n result_list.append(char)\n return ''.join(result_list)", "def desaccentueMessage(message):\r\n\tm = message.upper().replace(\"É\", \"E\").replace(\"À\", \"A\").replace(\"Æ\", 
\"AE\").replace(\"Ç\", \"C\").replace(\"È\", \"E\")\r\n\tm = m.replace(\"Œ\", \"OE\").replace(\"Ù\", \"U\").replace(\"Î\", \"I\").replace(\"Ï\", \"I\").replace(\"Ê\", \"E\").replace(\"Ë\", \"E\")\r\n\tm = m.replace(\"Ö\", \"O\").replace(\"Ô\", \"O\").replace(\"Â\", \"A\").replace(\"Ä\", \"A\")\r\n\treturn m", "def remove_accents(input_str):\n\n\tnfkd_form = unicodedata.normalize('NFKD', input_str)\n\treturn u\"\".join([c for c in nfkd_form if not unicodedata.combining(c)])", "def replace_greek_uni(s):\n for greek_uni, greek_spelled_out in greek_alphabet.items():\n s = s.replace(greek_spelled_out, greek_uni)\n return s", "def remove_accents(string):\n return unicodedata.normalize('NFKD', string)", "def normalizeUnicode(text):\n return ''.join(normalizeLetter(c) for c in text)", "def strip_accents(text):\n text = unicodedata.normalize(\"NFD\", text)\n text = text.encode(\"ascii\", \"ignore\")\n text = text.decode(\"utf8\")\n return text", "def normalize_text(self, text):\n text = self.normalize_arabic_alphabet(text)\n text = self.remove_diacritics(text)\n\n return text", "def remove_acc(sentence):\n newsent = []\n for word in sentence:\n if re.search(r'[áéíóúàèìòùäëïöü]', word):\n newsent.append(remove_accents(word))\n else:\n newsent.append(word)\n return newsent", "def get_str_no_accent_up(str_to_format):\n accents = {u'a': [u'à', u'ã', u'á', u'â', u'\\xc2'],\n u'c': [u'ç', u'\\xe7'],\n u'e': [u'é', u'è', u'ê', u'ë', u'É', u'\\xca', u'\\xc8', u'\\xe8', u'\\xe9', u'\\xc9'],\n u'i': [u'î', u'ï', u'\\xcf', u'\\xce'],\n u'o': [u'ô', u'ö'],\n u'u': [u'ù', u'ü', u'û'],\n u' ': [u'\\xb0'] }\n for (char, accented_chars) in accents.iteritems():\n for accented_char in accented_chars:\n str_to_format = str_to_format.replace(accented_char, char) \n # str_to_format.encode('latin-1').replace(accented_char, char)\n return str_to_format.replace(u'&#039;', u' ').strip().upper()", "def remove_diacritic(input):\n return unicodedata.normalize('NFKD', input).encode('ASCII', 'ignore')", "def toChar(s):\n s = s.lower()\n ans = \"\"\n for c in s:\n if c in \"abcdefghijklmnopqrstuvwxyz\":\n ans+=c\n return ans", "def _run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == \"Mn\":\n continue # pragma: no cover\n output.append(char)\n return \"\".join(output)", "def strip_accents(text):\n text = six.ensure_text(text)\n text = unicodedata.normalize('NFD', text)\n text = text.encode('ascii', 'ignore')\n return str(text)", "def replace_special_chars(self, word):\n try:\n if (self.lang==\"tr\"):\n word = re.sub(u\"\\^db\", u\"+db\", word)\n word = re.sub(u\"\\^\", u\"¬\", word)\n word = re.sub(u\"\\$\", u\"£\", word)\n except UnicodeDecodeError:\n word = ''\n return word", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def _run_strip_accents(self, text):\n text = unicodedata.normalize('NFD', text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == 'Mn':\n continue\n output.append(char)\n return ''.join(output)", "def lower_without_diacritics(s):\n return filter(lambda u: not combining(u), normalize('NFKD', s)).lower()", "def _run_strip_accents(self, text):\n text = unicodedata.normalize(\"NFD\", text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == 
\"Mn\":\n continue\n output.append(char)\n return \"\".join(output)", "def normalize_txt(txt):\n return unicodedata.normalize('NFD', txt).encode('ascii', 'ignore').decode('utf-8', 'ignoree').lower()", "def remove_unicode_diac(text):\n # Replace diacritics with nothing\n text = text.replace(u\"\\u064B\", \"\") # fatHatayn\n text = text.replace(u\"\\u064C\", \"\") # Dammatayn\n text = text.replace(u\"\\u064D\", \"\") # kasratayn\n text = text.replace(u\"\\u064E\", \"\") # fatHa\n text = text.replace(u\"\\u064F\", \"\") # Damma\n text = text.replace(u\"\\u0650\", \"\") # kasra\n text = text.replace(u\"\\u0651\", \"\") # shaddah\n text = text.replace(u\"\\u0652\", \"\") # sukuun\n text = text.replace(u\"\\u0670\", \"`\") # dagger 'alif\n return text", "def encode(phrase):\n\n encode_dict = {\n 'e': 'p',\n 'a': 'd',\n 't': 'o',\n 'i': 'u'\n }\n\n new_phrase = \"\"\n\n # iterate through letters and replace with desired letter.\n for letter in phrase:\n if letter in encode_dict:\n letter = encode_dict[letter]\n new_phrase += letter\n\n return new_phrase", "def remove_accents(text):\n return ''.join(c for c in unicodedata.normalize('NFKD', text)\n if unicodedata.category(c) != 'Mn')", "def _normalize_asian(cls, sentence: str) ->str:\n sentence = re.sub('([\\\\u4e00-\\\\u9fff\\\\u3400-\\\\u4dbf])', ' \\\\1 ', sentence)\n sentence = re.sub('([\\\\u31c0-\\\\u31ef\\\\u2e80-\\\\u2eff])', ' \\\\1 ', sentence)\n sentence = re.sub('([\\\\u3300-\\\\u33ff\\\\uf900-\\\\ufaff\\\\ufe30-\\\\ufe4f])', ' \\\\1 ', sentence)\n sentence = re.sub('([\\\\u3200-\\\\u3f22])', ' \\\\1 ', sentence)\n sentence = re.sub('(^|^[\\\\u3040-\\\\u309f])([\\\\u3040-\\\\u309f]+)(?=$|^[\\\\u3040-\\\\u309f])', '\\\\1 \\\\2 ', sentence)\n sentence = re.sub('(^|^[\\\\u30a0-\\\\u30ff])([\\\\u30a0-\\\\u30ff]+)(?=$|^[\\\\u30a0-\\\\u30ff])', '\\\\1 \\\\2 ', sentence)\n sentence = re.sub('(^|^[\\\\u31f0-\\\\u31ff])([\\\\u31f0-\\\\u31ff]+)(?=$|^[\\\\u31f0-\\\\u31ff])', '\\\\1 \\\\2 ', sentence)\n sentence = re.sub(cls._ASIAN_PUNCTUATION, ' \\\\1 ', sentence)\n sentence = re.sub(cls._FULL_WIDTH_PUNCTUATION, ' \\\\1 ', sentence)\n return sentence", "def normalize_alef_ar(s):\n\n return _ALEF_NORMALIZE_AR_RE.sub(u'\\u0627', s)", "def umlautHelper(text):\n text = text.replace(\"&#196;\", \"Ä\")\n text = text.replace(\"&#214;\", \"Ö\")\n text = text.replace(\"&#220;\", \"Ü\")\n text = text.replace(\"&#223;\", \"ß\")\n text = text.replace(\"&#228;\", \"ä\")\n text = text.replace(\"&#252;\", \"ü\")\n text = text.replace(\"&#246;\", \"ö\")\n\n return text", "def latinize_word(word):\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()", "def applyCoder(text, coder):\n res=''\n for ch in text:\n if ch in string.ascii_lowercase:\n res = res + coder[ch]\n elif ch in string.ascii_uppercase:\n res = res + coder[ch]\n else:\n res = res + ch\n return res", "def removeAccent(v):\n\tif v == u'á':\n\t\treturn u'a'\n\telif v == u'é':\n\t\treturn u'é'\n\telif v == u'í':\n\t\treturn u'í'\n\telif v == u'ó':\n\t\treturn u'ó'\n\telif v == u'ú':\n\t\treturn u'ú'\n\telse:\n\t\treturn v", "def clean_text(text):\n new_text = \"\"\n text = text.lower()\n for character in text:\n if character.isalpha():\n new_text = new_text + character\n return new_text", "def strip_accents(text):\n try:\n text = unicode(text, 'utf-8')\n except (TypeError, NameError): # unicode is a default on python 3\n pass\n text = unicodedata.normalize('NFD', text)\n text = text.encode('ascii', 'ignore')\n text = 
text.decode(\"utf-8\")\n return str(text)", "def encrypt(self, text):\n text = text.upper()\n output = []\n text_list = list(text)\n for letter in text_list:\n output.append(self.atbash_dict.get(letter, letter))\n return ''.join(output)", "def encoding(text: str) -> str:\n text = [text[i:i + 3] for i in range(0, len(text), 3)]\n encoded_text = []\n for letter in text:\n completed = False\n for coding in Encoder.__ALPHABET:\n if coding.encode == letter:\n completed = True\n encoded_text.append(coding.code)\n if completed:\n break\n if not completed:\n encoded_text.append(letter)\n encoded_string = \"\".join(encoded_text)\n return encoded_string.lower()", "def replaceNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else ' ' for i in text])", "def convert_abbrev_in_text(text):\r\n tokens = word_tokenize(text)\r\n tokens = [convert_abbrev(word) for word in tokens]\r\n text = ' '.join(tokens)\r\n return text", "def translate(inp: str) -> str:\n\t# list for encdoe cirylic symbols in latinc.\n\tsymbols = (u\"абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯöÖåÅ\",\n\t\t\tu\"abvgdeejzijklmnoprstufhzcss_y_euaABVGDEEJZIJKLMNOPRSTUFHZCSS_Y_EUAoOaA\")\n\t# generate dict like {\"a\":\"a\",\"б\":\"\",...}\n\ttr = {ord(a):ord(b) for a, b in zip(*symbols)}\n\t# switch all symbols\n\toutput = inp.translate(tr)\n\treturn output", "def replace_greek_latin(s):\n for greek_spelled_out, latin in greek_to_latin.items():\n s = s.replace(greek_spelled_out, latin)\n return s", "def remove_accronymes(txt):\n return re.sub(r'(?<!\\w)([A-Z])\\.', r'\\1', txt)", "def tr_upper_to_lower(text):\n out = []\n for ch in text:\n if ch in tr_upper_to_lower_dict:\n out.append(tr_upper_to_lower_dict[ch])\n else:\n out.append(ch.lower())\n \n return \"\".join(out)", "def __unicode_to_ascii(text):\n line = unicodedata.normalize('NFKD', text)\n return ''.join(c for c in line if not unicodedata.combining(c))", "def alphabet_position(text):\n return ' '.join(str(ord(c) - 96) for c in text.lower() if c.isalpha())\n # return ' '.join(str(string.ascii_lowercase.index(s.lower())+1) for s in text if s.lower() in string.ascii_lowercase)", "def normalize_alef_maksura_ar(s):\n\n return s.replace(u'\\u0649', u'\\u064a')", "def to_ascii(string):\n\tfor key in UNICODE_TO_ASCII:\n\t\tstring = string.replace(key, UNICODE_TO_ASCII[key])\n\n\treturn string", "def replace_greek_spelled_out(s):\n for greek_uni, greek_spelled_out in greek_alphabet.items():\n s = s.replace(greek_uni, greek_spelled_out)\n return s", "def asciify(s):\n # http://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms\n return unicodedata.normalize('NFKD', s).encode('ascii', 'ignore')", "def force_ascii(text):\n return \"\".join([c for c in text if ord(c) < 128])", "def task18_letter_replacement(text):\n if text and isinstance(text, str):\n new_text = []\n for char in text:\n new_char_index = ascii_lowercase.index(char) + 1\n new_char = ascii_lowercase[new_char_index]\n if new_char in 'aeiou':\n new_char = new_char.upper()\n new_text.append(new_char)\n return ''.join(new_text)\n else:\n raise ValueError", "def encrypt(word):\r\n if len(word) == 1:\r\n if word.islower() and word !='z':#only encode lower case letters\r\n return chr(ord(word) + 1)\r\n elif word.isupper and word != 'z':\r\n return word\r\n elif word == 'z': # special case: z\r\n return chr(ord(word) -25)\r\n else:\r\n myChar = word[0] #first get first chararacter in the word\r\n if myChar.islower() and myChar != 'z':\r\n myChar = chr(ord(word[0])+1)\r\n elif myChar == 'z': # 
special case: z\r\n myChar = chr(ord(word[0])-25)\r\n elif myChar.isupper:\r\n pass \r\n return myChar + encrypt(word[1:])", "def non_letter_removal(text):\n return re.sub('[^a-zA-Z]', ' ', text)", "def lire_texte_brut(nom_de_fichier1,nom_de_fichier2):\n with open(nom_de_fichier1,'r') as f :\n texte = f.read()\n texte = texte.replace('.',' point').replace(',', ' virgule').replace(';', 'point virgule').replace('œ','oe').replace(\"'\",\" \").replace(\"-\",\" \").replace(\"’\",\" \")\n accents = {'a' : ['à','â'],\n 'e' : ['é','è','ê'],\n 'i' : ['î'],\n 'u' : ['ù','û'],\n 'o' : ['ô']}\n for lettre in accents :\n for acc in accents[lettre] :\n texte = texte.replace(acc,lettre)\n with open(nom_de_fichier2,'w') as f :\n f.write(texte.lower())\n return None", "def remove_diacritics(self, text):\n text = re.sub(self._arabic_diacritics, '', text)\n return text", "def to_ascii(word_str: str):\n # grammars/ definitions for the mapping of characters\n non_ascii = 'âàêèëéîïôçûùü'\n ascii_mapping = {'âà': 'a',\n 'êèëé': 'e',\n 'îï': 'i',\n 'ô': 'o',\n 'ç': 'c',\n 'ûùü': 'u'}\n non_ascii_upper = non_ascii.upper()\n ascii_mapping_upper = {k.upper(): ascii_mapping[k].upper() for k in ascii_mapping.keys()}\n # building the ascii string\n ret_str = ''\n for char in word_str:\n # lower case french\n if char in non_ascii:\n k = None\n for k_chars in ascii_mapping.keys():\n if char in k_chars:\n k = k_chars\n break\n if k is not None:\n ret_str += ascii_mapping[k]\n # upper case french\n elif char in non_ascii_upper:\n k = None\n for k_chars in ascii_mapping_upper.keys():\n if char in k_chars:\n k = k_chars\n break\n if k is not None:\n ret_str += ascii_mapping_upper[k]\n # regular ascii\n else:\n ret_str += char\n # ascii encoding of replaces characters\n ascii_str = ret_str.encode('utf-8', 'ignore').decode('utf-8', 'ignore')\n # if '?' 
in ret_str.encode('ascii', 'replace').decode('ascii', 'ignore'):\n # #print(word_str, ret_str.encode('ascii', 'replace').decode('ascii', 'ignore'), ascii_str)\n # print(word_str, ret_str.encode('ascii', 'replace').decode('ascii', 'ignore'), ascii_str)\n # pass\n # return ret_str.encode('ascii', 'replace').decode('ascii', 'ignore')\n return ascii_str", "def replace_bad_characters(self, str):\n\n str = unicode(BeautifulStoneSoup(str,\n convertEntities=BeautifulStoneSoup.HTML_ENTITIES))\n str = unicodedata.normalize('NFKD', str).encode('ascii', 'ignore')\n str = unicode(re.sub('[^\\w\\s-]', '', str).strip().lower())\n str = unicode(str.replace(' ', '-'))\n return str", "def clean_str(s):\n s = re.sub(r\"[^\\\\p{L}\\\\s]\", \" \", s) # This removes accents, which we want.\n s = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", s) #This removes accents, which we want.\n s = re.sub(r\"\\'s\", \"\", s)\n s = re.sub(r\"\\'ve\", \"have\", s)\n s = re.sub(r\"n\\'t\", \" not\", s)\n s = re.sub(r\"\\'re\", \" are\", s)\n s = re.sub(r\"\\'d\", \" would\", s)\n s = re.sub(r\"\\'ll\", \" will\", s)\n s = re.sub(r\",\", \"\", s) #s = re.sub(r\",\", \" ,\", s)\n s = re.sub(r\"!\", \"\", s)\n # s = re.sub(r\"\\(\", \"\\(\", s)\n # s = re.sub(r\"\\)\", \"\\) \", s)\n s = re.sub(r\"\\?\", \"\", s)\n s = re.sub(r\"\\s{2,}\", \" \", s)\n s = re.sub(r\" \", \" \", s)\n return s.strip().lower()", "def unicode_to_ascii(s: str) -> str:\n chars = []\n for c in unicodedata.normalize('NFD', s):\n if unicodedata.category(c) != 'Mn' and c in ALLOWED_CHARS:\n chars.append(c)\n return \"\".join(chars)", "def convert_latin_to_english(text):\n try:\n text = text.decode('UTF-8')\n except (UnicodeDecodeError, AttributeError):\n pass\n return \"\".join(char for char in\n unicodedata.normalize('NFKD', text)\n if unicodedata.category(char) != 'Mn')", "def preprocess(text):\n return text.lower()", "def fix_characters(title):\n return re.sub('[^0-9a-zA-Z]+', ' ', title)", "def _alphanum(x):\n return lower(sub(r'\\W+', '', str(x)))", "def md_latex_accents(text):\n\n knowl_content = text\n\n knowl_content = re.sub(r'\\\\\"([a-zA-Z])',r\"&\\1uml;\",knowl_content)\n knowl_content = re.sub(r'\\\\\"{([a-zA-Z])}',r\"&\\1uml;\",knowl_content)\n knowl_content = re.sub(r\"\\\\'([a-zA-Z])\",r\"&\\1acute;\",knowl_content)\n knowl_content = re.sub(r\"\\\\'{([a-zA-Z])}\",r\"&\\1acute;\",knowl_content)\n knowl_content = re.sub(r\"\\\\`([a-zA-Z])\",r\"&\\1grave;\",knowl_content)\n knowl_content = re.sub(r\"\\\\`{([a-zA-Z])}\",r\"&\\1grave;\",knowl_content)\n knowl_content = re.sub(r\"``(?P<a>[\\S\\s]*?)''\", r\"&ldquo;\\1&rdquo;\", knowl_content)\n\n return knowl_content", "def convert_to_alphabet(c, avoid_tab_and_lf=False):\n if c == 1:\n return 32 if avoid_tab_and_lf else 9 # space instead of TAB\n if c == 127 - 30:\n return 92 if avoid_tab_and_lf else 10 # \\ instead of LF\n if 32 <= c + 30 <= 126:\n return c + 30\n else:\n return 0 # unknown", "def replace_characters(content_to_change):\n\tfor old_char, new_char in CHARS_TO_REPLACE.iteritems():\n\t\tcontent_to_change = content_to_change.replace(old_char, new_char)\n\n\treturn unicode(content_to_change, DESTINATION_ENCODING)", "def alpha_chars (text):\n for letter in text:\n if letter.isalpha ():\n yield letter", "def map_caesar(key, plaintext):\n letters = string.ascii_lowercase\n mask = letters[key:] + letters[:key]\n transtab = str.maketrans(letters, mask)\n return plaintext.translate(transtab)", "def modify(str1):\r\n str2 = \"\"\r\n for char in str1.lower():\r\n if char == \"a\":\r\n str2 += 
\"1\"\r\n elif char == \"e\":\r\n str2 += \"2\"\r\n elif char == \"i\":\r\n str2 += \"3\"\r\n elif char == \"o\":\r\n str2 += \"4\"\r\n elif char == \"u\":\r\n str2 += \"5\"\r\n else:\r\n str2 += char\r\n return str2", "def normalize_acronym(self, acronym: str):\n return self.tknzr.tokenize(acronym, to_lower=False)", "def normalize_latin(raw_word):\n nfkd = unicodedata.normalize('NFKD', raw_word)\n lowercased = nfkd.lower()\n no_digits = DIGITS.sub('', lowercased)\n j_to_i = re.sub('j', 'i', no_digits)\n v_to_u = re.sub('v', 'u', j_to_i)\n return NONWORDS.sub('', v_to_u)", "def _format(string):\n return str(filter(str.isalnum, string)).lower()", "def decode_to_text(c, avoid_tab_and_lf=False):\n return \"\".join(map(lambda a: chr(convert_to_alphabet(a, avoid_tab_and_lf)), c))", "def _replace_non_alnum(self):\n no_punct = [x if x.isalnum() else ' ' for x in self._phrase.lower()]\n return ''.join(no_punct) # Convert an array of char to string", "def _transform(self, original, code):\n\n msg = list(original)\n for k in range(len(msg)):\n\n if msg[k].isupper():\n j = ord(msg[k]) - ord(\"A\") # Determining correct index for new character.\n msg[k] = code[j]\n\n return \"\".join(msg)", "def replace_chars(field, esc_chars, rep_ch):\n res_field = \"P\"\n if field is not None:\n res_field = re.sub(esc_chars, rep_ch, field).upper()\n # res_field = \"\".join([rep_ch if ch in esc_chars else ch for ch in field.strip()])\n return res_field", "def clean(name):\n name = remove_extra(name)\n name = unidecode.unidecode(name) # Remove diacritics\n name = \"\".join(\n list(filter(lambda c: c in (string.ascii_letters + string.digits + \" \"), name))\n )\n name = name.lower().strip()\n return name", "def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")", "def toAscii(s):\n return ''.join(\n char for char in unicodedata.normalize('NFD', s)\n if unicodedata.category(char) != 'Mn'\n and char in letters\n )", "def strip_accents(s):\n \n return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')", "def replace_umlauts(text):\n res = text\n res = res.replace('ä', 'ae').replace('ö', 'oe').replace('ü', 'ue')\n res = res.replace('Ä', 'Ae').replace('Ö', 'Oe').replace('Ü', 'Ue')\n res = res.replace('ß', 'ss')\n return res", "def clean_text_from_latin_supplement_unicode(text):\n return re.sub(r\"([\\u0080-\\u00FF])\", \" \", text)", "def clean_text_from_latin_supplement_unicode(text):\n return re.sub(r\"([\\u0080-\\u00FF])\", \" \", text)", "def _cleanse(text):\n return ''.join([character for character in text\n if character.isalnum()]).lower()", "def sanitize(instring):\r\n return instring.encode('ascii','replace')", "def alphametics(s):\n words = re.findall(\"[A-Za-z]+\", s)\n chars = set(\"\".join(words)) # Characters to be substituted.\n assert len(chars) <= 10 # There are only ten possible digits.\n firsts = set(w[0] for w in words) # First letters of each of word.\n chars = \"\".join(firsts) + \"\".join(chars - firsts)\n n = len(firsts) # chars[:n] cannot be assigned zero.\n for perm in permutations(\"0123456789\", len(chars)):\n if \"0\" not in perm[:n]:\n trans = str.maketrans(chars, \"\".join(perm))\n equation = s.translate(trans)\n if eval(equation):\n yield equation", "def _capitalize_name(player_name: str) -> str:\n # Remove accents and replace final sigmas with normal ones\n player_name = player_name.translate(\n str.maketrans(\n {\n \"ά\": \"α\",\n \"Ά\": \"α\",\n \"έ\": \"ε\",\n \"Έ\": \"ε\",\n \"ί\": \"ι\",\n \"Ί\": \"ι\",\n 
\"ή\": \"η\",\n \"Ή\": \"η\",\n \"ύ\": \"υ\",\n \"Ύ\": \"υ\",\n \"ό\": \"ο\",\n \"Ό\": \"o\",\n \"ώ\": \"ω\",\n \"Ώ\": \"ω\",\n \"ς\": \"σ\",\n }\n )\n )\n\n player_name = player_name.upper()\n return player_name", "def coding(text: str) -> str:\n text = list(itertools.chain(text.upper()))\n coded_text = []\n for letter in text:\n completed = False\n for coding in Encoder.__ALPHABET:\n if coding.code == letter:\n completed = True\n coded_text.append(coding.encode)\n if completed:\n break\n if not completed:\n coded_text.append(letter)\n coded_string = \"\".join(coded_text)\n return coded_string", "def clean_text(self, text):\n return \"\".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))", "def replace_special(text):\r\n text = text.replace('\\r\\n', ' ')\r\n text = text.replace('\\n', ' ')\r\n text = text.replace('``', \"''\")\r\n text = text.replace('`', \"'\")\r\n text = text.replace('“', '\"')\r\n text = text.replace('”', '\"')\r\n text = text.replace('’', \"'\")\r\n text = text.replace('‘', \"'\")\r\n text = text.replace(\"'\", \"'\")\r\n text = text.replace('–', \"-\")\r\n text = text.replace('\\\"', '\"')\r\n text = text.replace(\"\\'\", \"'\")\r\n return text", "def normalize(text):\n return text.lower().translate(TRANSLATION_TABLE)", "def __init__(self, encoding):\n self.trans = {}\n for char in u\"ÀÁÂẦẤẪẨẬÃĀĂẰẮẴẶẲȦǠẠḀȂĄǍẢ\":\n self.trans[char] = u\"A\"\n for char in u\"ȀǞ\":\n self.trans[char] = u\"Ä\"\n self.trans[u\"Ǻ\"] = u\"Å\"\n self.trans[u\"Ä\"] = u\"Ae\"\n self.trans[u\"Å\"] = u\"Aa\"\n for char in u\"àáâầấẫẩậãāăằắẵặẳȧǡạḁȃąǎảẚ\":\n self.trans[char] = u\"a\"\n for char in u\"ȁǟ\":\n self.trans[char] = u\"ä\"\n self.trans[u\"ǻ\"] = u\"å\"\n self.trans[u\"ä\"] = u\"ae\"\n self.trans[u\"å\"] = u\"aa\"\n for char in u\"ḂḄḆƁƂ\":\n self.trans[char] = u\"B\"\n for char in u\"ḃḅḇƀɓƃ\":\n self.trans[char] = u\"b\"\n for char in u\"ĆĈĊÇČƇ\":\n self.trans[char] = u\"C\"\n for char in u\"ćĉċçčƈȼ\":\n self.trans[char] = u\"c\"\n self.trans[u\"Ḉ\"] = u\"Ç\"\n self.trans[u\"ḉ\"] = u\"ç\"\n self.trans[u\"Ð\"] = u\"Dh\"\n self.trans[u\"ð\"] = u\"dh\"\n for char in u\"ĎḊḌḎḐḒĐƉƊƋ\":\n self.trans[char] = u\"D\"\n for char in u\"ďḋḍḏḑḓđɖɗƌ\":\n self.trans[char] = u\"d\"\n for char in u\"ÈȄÉÊḚËĒḔḖĔĖẸE̩ȆȨḜĘĚẼḘẺ\":\n self.trans[char] = u\"E\"\n for char in u\"ỀẾỄỆỂ\":\n self.trans[char] = u\"Ê\"\n for char in u\"èȅéêḛëēḕḗĕėẹe̩ȇȩḝęěẽḙẻ\":\n self.trans[char] = u\"e\"\n for char in u\"ềếễệể\":\n self.trans[char] = u\"ê\"\n for char in u\"ḞƑ\":\n self.trans[char] = u\"F\"\n for char in u\"ḟƒ\":\n self.trans[char] = u\"f\"\n for char in u\"ǴḠĞĠĢǦǤƓ\":\n self.trans[char] = u\"G\"\n for char in u\"ǵḡğġģǧǥɠ\":\n self.trans[char] = u\"g\"\n self.trans[u\"Ĝ\"] = u\"Gx\"\n self.trans[u\"ĝ\"] = u\"gx\"\n for char in u\"ḢḤḦȞḨḪH̱ĦǶ\":\n self.trans[char] = u\"H\"\n for char in u\"ḣḥḧȟḩḫ̱ẖħƕ\":\n self.trans[char] = u\"h\"\n for char in u\"IÌȈÍÎĨḬÏḮĪĬȊĮǏİỊỈƗ\":\n self.trans[char] = u\"I\"\n for char in u\"ıìȉíîĩḭïḯīĭȋįǐiịỉɨ\":\n self.trans[char] = u\"i\"\n for char in u\"ĴJ\":\n self.trans[char] = u\"J\"\n for char in u\"ɟĵ̌ǰ\":\n self.trans[char] = u\"j\"\n for char in u\"ḰǨĶḲḴƘ\":\n self.trans[char] = u\"K\"\n for char in u\"ḱǩķḳḵƙ\":\n self.trans[char] = u\"k\"\n for char in u\"ĹĻĽḶḸḺḼȽŁ\":\n self.trans[char] = u\"L\"\n for char in u\"ĺļľḷḹḻḽƚłɫ\":\n self.trans[char] = u\"l\"\n for char in u\"ḾṀṂ\":\n self.trans[char] = u\"M\"\n for char in u\"ḿṁṃɱ\":\n self.trans[char] = u\"m\"\n for char in u\"ǸŃÑŅŇṄṆṈṊŊƝɲȠ\":\n self.trans[char] = u\"N\"\n for char in u\"ǹńñņňṅṇṉṋŋɲƞ\":\n self.trans[char] = u\"n\"\n for 
char in u\"ÒÓÔÕṌṎȬÖŌṐṒŎǑȮȰỌǪǬƠỜỚỠỢỞỎƟØǾ\":\n self.trans[char] = u\"O\"\n for char in u\"òóôõṍṏȭöōṑṓŏǒȯȱọǫǭơờớỡợởỏɵøǿ\":\n self.trans[char] = u\"o\"\n for char in u\"ȌŐȪ\":\n self.trans[char] = u\"Ö\"\n for char in u\"ȍőȫ\":\n self.trans[char] = u\"ö\"\n for char in u\"ỒỐỖỘỔȎ\":\n self.trans[char] = u\"Ô\"\n for char in u\"ồốỗộổȏ\":\n self.trans[char] = u\"ô\"\n for char in u\"ṔṖƤ\":\n self.trans[char] = u\"P\"\n for char in u\"ṕṗƥ\":\n self.trans[char] = u\"p\"\n self.trans[u\"ᵽ\"] = u\"q\"\n for char in u\"ȐŔŖŘȒṘṚṜṞ\":\n self.trans[char] = u\"R\"\n for char in u\"ȑŕŗřȓṙṛṝṟɽ\":\n self.trans[char] = u\"r\"\n for char in u\"ŚṤŞȘŠṦṠṢṨ\":\n self.trans[char] = u\"S\"\n for char in u\"śṥşșšṧṡṣṩȿ\":\n self.trans[char] = u\"s\"\n self.trans[u\"Ŝ\"] = u\"Sx\"\n self.trans[u\"ŝ\"] = u\"sx\"\n for char in u\"ŢȚŤṪṬṮṰŦƬƮ\":\n self.trans[char] = u\"T\"\n for char in u\"ţțťṫṭṯṱŧȾƭʈ\":\n self.trans[char] = u\"t\"\n for char in u\"ÙÚŨṸṴÜṲŪṺŬỤŮŲǓṶỦƯỮỰỬ\":\n self.trans[char] = u\"U\"\n for char in u\"ùúũṹṵüṳūṻŭụůųǔṷủưữựửʉ\":\n self.trans[char] = u\"u\"\n for char in u\"ȔŰǛǗǕǙ\":\n self.trans[char] = u\"Ü\"\n for char in u\"ȕűǜǘǖǚ\":\n self.trans[char] = u\"ü\"\n self.trans[u\"Û\"] = u\"Ux\"\n self.trans[u\"û\"] = u\"ux\"\n self.trans[u\"Ȗ\"] = u\"Û\"\n self.trans[u\"ȗ\"] = u\"û\"\n self.trans[u\"Ừ\"] = u\"Ù\"\n self.trans[u\"ừ\"] = u\"ù\"\n self.trans[u\"Ứ\"] = u\"Ú\"\n self.trans[u\"ứ\"] = u\"ú\"\n for char in u\"ṼṾ\":\n self.trans[char] = u\"V\"\n for char in u\"ṽṿ\":\n self.trans[char] = u\"v\"\n for char in u\"ẀẂŴẄẆẈ\":\n self.trans[char] = u\"W\"\n for char in u\"ẁẃŵẅẇẉ\":\n self.trans[char] = u\"w\"\n for char in u\"ẊẌ\":\n self.trans[char] = u\"X\"\n for char in u\"ẋẍ\":\n self.trans[char] = u\"x\"\n for char in u\"ỲÝŶŸỸȲẎỴỶƳ\":\n self.trans[char] = u\"Y\"\n for char in u\"ỳýŷÿỹȳẏỵỷƴ\":\n self.trans[char] = u\"y\"\n for char in u\"ŹẐŻẒŽẔƵȤ\":\n self.trans[char] = u\"Z\"\n for char in u\"źẑżẓžẕƶȥ\":\n self.trans[char] = u\"z\"\n self.trans[u\"ɀ\"] = u\"zv\"\n\n # Latin: extended Latin alphabet\n self.trans[u\"ɑ\"] = u\"a\"\n for char in u\"ÆǼǢ\":\n self.trans[char] = u\"AE\"\n for char in u\"æǽǣ\":\n self.trans[char] = u\"ae\"\n self.trans[u\"Ð\"] = u\"Dh\"\n self.trans[u\"ð\"] = u\"dh\"\n for char in u\"ƎƏƐ\":\n self.trans[char] = u\"E\"\n for char in u\"ǝəɛ\":\n self.trans[char] = u\"e\"\n for char in u\"ƔƢ\":\n self.trans[char] = u\"G\"\n for char in u\"ᵷɣƣᵹ\":\n self.trans[char] = u\"g\"\n self.trans[u\"Ƅ\"] = u\"H\"\n self.trans[u\"ƅ\"] = u\"h\"\n self.trans[u\"Ƕ\"] = u\"Wh\"\n self.trans[u\"ƕ\"] = u\"wh\"\n self.trans[u\"Ɩ\"] = u\"I\"\n self.trans[u\"ɩ\"] = u\"i\"\n self.trans[u\"Ŋ\"] = u\"Ng\"\n self.trans[u\"ŋ\"] = u\"ng\"\n self.trans[u\"Œ\"] = u\"OE\"\n self.trans[u\"œ\"] = u\"oe\"\n self.trans[u\"Ɔ\"] = u\"O\"\n self.trans[u\"ɔ\"] = u\"o\"\n self.trans[u\"Ȣ\"] = u\"Ou\"\n self.trans[u\"ȣ\"] = u\"ou\"\n self.trans[u\"Ƽ\"] = u\"Q\"\n for char in u\"ĸƽ\":\n self.trans[char] = u\"q\"\n self.trans[u\"ȹ\"] = u\"qp\"\n self.trans[u\"\"] = u\"r\"\n self.trans[u\"ſ\"] = u\"s\"\n self.trans[u\"ß\"] = u\"ss\"\n self.trans[u\"Ʃ\"] = u\"Sh\"\n for char in u\"ʃᶋ\":\n self.trans[char] = u\"sh\"\n self.trans[u\"Ʉ\"] = u\"U\"\n self.trans[u\"ʉ\"] = u\"u\"\n self.trans[u\"Ʌ\"] = u\"V\"\n self.trans[u\"ʌ\"] = u\"v\"\n for char in u\"ƜǷ\":\n self.trans[char] = u\"W\"\n for char in u\"ɯƿ\":\n self.trans[char] = u\"w\"\n self.trans[u\"Ȝ\"] = u\"Y\"\n self.trans[u\"ȝ\"] = u\"y\"\n self.trans[u\"IJ\"] = u\"IJ\"\n self.trans[u\"ij\"] = u\"ij\"\n self.trans[u\"Ƨ\"] = u\"Z\"\n for char in u\"ʮƨ\":\n self.trans[char] 
= u\"z\"\n self.trans[u\"Ʒ\"] = u\"Zh\"\n self.trans[u\"ʒ\"] = u\"zh\"\n self.trans[u\"Ǯ\"] = u\"Dzh\"\n self.trans[u\"ǯ\"] = u\"dzh\"\n for char in u\"ƸƹʔˀɁɂ\":\n self.trans[char] = u\"'\"\n self.trans['Þ'] = 'Th'\n self.trans['þ'] = 'th'\n for char in u\"Cʗǃ\":\n self.trans[char] = u\"!\"\n\n # Punctuation and typography\n for char in u\"«»“”„¨\":\n self.trans[char] = u'\"'\n for char in u\"‘’′\":\n self.trans[char] = u\"'\"\n self.trans[u\"•\"] = u\"*\"\n self.trans[u\"@\"] = u\"(at)\"\n self.trans[u\"¤\"] = u\"$\"\n self.trans[u\"¢\"] = u\"c\"\n self.trans[u\"€\"] = u\"E\"\n self.trans[u\"£\"] = u\"L\"\n self.trans[u\"¥\"] = u\"yen\"\n self.trans[u\"†\"] = u\"+\"\n self.trans[u\"‡\"] = u\"++\"\n self.trans[u\"°\"] = u\":\"\n self.trans[u\"¡\"] = u\"!\"\n self.trans[u\"¿\"] = u\"?\"\n self.trans[u\"‰\"] = u\"o/oo\"\n self.trans[u\"‱\"] = u\"o/ooo\"\n for char in u\"¶§\":\n self.trans[char] = u\">\"\n self.trans['…'] = '...'\n for char in u\"‒–—―\":\n self.trans[char] = u\"-\"\n self.trans['·'] = ' '\n self.trans[u\"¦\"] = u\"|\"\n self.trans[u\"⁂\"] = u\"***\"\n self.trans[u\"◊\"] = u\"<>\"\n self.trans[u\"‽\"] = u\"?!\"\n self.trans[u\"؟\"] = u\";-)\"\n self.trans[u\"¹\"] = u\"1\"\n self.trans[u\"²\"] = u\"2\"\n self.trans[u\"³\"] = u\"3\"\n\n # Cyrillic\n self.trans.update({u\"А\": u\"A\", u\"а\": u\"a\", u\"Б\": u\"B\", u\"б\": u\"b\",\n u\"В\": u\"V\", u\"в\": u\"v\", u\"Г\": u\"G\", u\"г\": u\"g\",\n u\"Д\": u\"D\", u\"д\": u\"d\", u\"Е\": u\"E\", u\"е\": u\"e\",\n u\"Ж\": u\"Zh\", u\"ж\": u\"zh\", u\"З\": u\"Z\", u\"з\": u\"z\",\n u\"И\": u\"I\", u\"и\": u\"i\", u\"Й\": u\"J\", u\"й\": u\"j\",\n u\"К\": u\"K\", u\"к\": u\"k\", u\"Л\": u\"L\", u\"л\": u\"l\",\n u\"М\": u\"M\", u\"м\": u\"m\", u\"Н\": u\"N\", u\"н\": u\"n\",\n u\"О\": u\"O\", u\"о\": u\"o\", u\"П\": u\"P\", u\"п\": u\"p\",\n u\"Р\": u\"R\", u\"р\": u\"r\", u\"С\": u\"S\", u\"с\": u\"s\",\n u\"Т\": u\"T\", u\"т\": u\"t\", u\"У\": u\"U\", u\"у\": u\"u\",\n u\"Ф\": u\"F\", u\"ф\": u\"f\", u\"х\": u\"kh\", u\"Ц\": u\"C\",\n u\"ц\": u\"c\", u\"Ч\": u\"Ch\", u\"ч\": u\"ch\", u\"Ш\": u\"Sh\",\n u\"ш\": u\"sh\", u\"Щ\": u\"Shch\", u\"щ\": u\"shch\", u\"Ь\": u\"'\",\n u\"ь\": \"'\", u\"Ъ\": u'\"', u\"ъ\": '\"', u\"Ю\": u\"Yu\",\n u\"ю\": u\"yu\", u\"Я\": u\"Ya\", u\"я\": u\"ya\", u\"Х\": u\"Kh\",\n u\"Χ\": u\"Kh\"})\n\n # Additional Cyrillic letters, most occuring in only one or a few languages\n self.trans.update({u\"Ы\": u\"Y\", u\"ы\": u\"y\", u\"Ё\": u\"Ë\", u\"ё\": u\"ë\",\n u\"Э\": u\"È\", u\"Ѐ\": u\"È\", u\"э\": u\"è\", u\"ѐ\": u\"è\",\n u\"І\": u\"I\", u\"і\": u\"i\", u\"Ї\": u\"Ji\", u\"ї\": u\"ji\",\n u\"Є\": u\"Je\", u\"є\": u\"je\", u\"Ґ\": u\"G\", u\"Ҝ\": u\"G\",\n u\"ґ\": u\"g\", u\"ҝ\": u\"g\", u\"Ђ\": u\"Dj\", u\"ђ\": u\"dj\",\n \"Љ\": \"Lj\", \"љ\": \"lj\",\n u\"Њ\": u\"Nj\", u\"њ\": u\"nj\", u\"Ћ\": u\"Cj\", u\"ћ\": u\"cj\",\n 'Җ': 'Zhj', 'Ѓ': 'Gj', 'ѓ': 'gj',\n u\"Ќ\": u\"Kj\", u\"ќ\": u\"kj\", u\"Ӣ\": u\"Ii\", u\"ӣ\": u\"ii\",\n \"Ҳ\": \"H\", \"ҳ\": \"h\",\n u\"Ҷ\": u\"Dz\", u\"ҷ\": u\"dz\", u\"Ө\": u\"Ô\", u\"Ӫ\": u\"Ô\",\n u\"ө\": u\"ô\", u\"ӫ\": u\"ô\", u\"Ү\": u\"Y\", u\"ү\": u\"y\", u\"Һ\": u\"H\",\n u\"һ\": u\"h\", u\"Ә\": u\"AE\", u\"Ӕ\": u\"AE\", u\"ә\": u\"ae\",\n 'Ӛ': 'Ë', 'Ӭ': 'Ë', 'ӛ': 'ë', 'ӭ': 'ë',\n 'җ': 'zhj', 'Ұ': 'U', 'ў': 'ù', 'Ў': 'Ù',\n u\"ѝ\": u\"ì\", u\"Ѝ\": u\"Ì\", u\"Ӑ\": u\"A\", u\"ă\": u\"a\", u\"Ӓ\": u\"Ä\",\n \"Ҽ\": \"Ts\", \"Ҿ\": \"Ts\", \"ҽ\": \"ts\", \"ҿ\": \"ts\",\n u\"Ҙ\": u\"Dh\", u\"ҙ\": u\"dh\", u\"Ӏ\": u\"\", u\"ӏ\": u\"\", u\"Ӆ\": u\"L\",\n u\"ӆ\": u\"l\", u\"Ӎ\": 
u\"M\", u\"ӎ\": u\"m\", u\"Ӧ\": u\"Ö\", u\"ӧ\": u\"ö\",\n u\"Ҩ\": u\"u\", u\"ҩ\": u\"u\", u\"Ҧ\": u\"Ph\", u\"ҧ\": u\"ph\", u\"Ҏ\": u\"R\",\n u\"ҏ\": u\"r\", u\"Ҫ\": u\"Th\", u\"ҫ\": u\"th\", u\"Ҭ\": u\"T\", u\"ҭ\": u\"t\",\n 'Ӯ': 'Û', 'ӯ': 'û', 'Ӹ': 'U', 'ұ': 'u',\n u\"ӹ\": u\"u\", u\"Ҵ\": u\"Tts\", u\"ҵ\": u\"tts\", u\"Ӵ\": u\"Ch\", u\"ӵ\": u\"ch\"})\n\n for char in u\"ЈӤҊ\":\n self.trans[char] = u\"J\"\n for char in u\"јӥҋ\":\n self.trans[char] = u\"j\"\n for char in u\"ЏӁӜҶ\":\n self.trans[char] = u\"Dzh\"\n for char in u\"џӂӝҷ\":\n self.trans[char] = u\"dzh\"\n for char in u\"ЅӞӠӋҸ\":\n self.trans[char] = u\"Dz\"\n for char in u\"ѕӟӡӌҹ\":\n self.trans[char] = u\"dz\"\n for char in u\"ҒӶҔ\":\n self.trans[char] = u\"G\"\n for char in u\"ғӷҕ\":\n self.trans[char] = u\"g\"\n for char in u\"ҚҞҠӃ\":\n self.trans[char] = u\"Q\"\n for char in u\"қҟҡӄ\":\n self.trans[char] = u\"q\"\n for char in u\"ҢҤӉӇ\":\n self.trans[char] = u\"Ng\"\n for char in u\"ңҥӊӈ\":\n self.trans[char] = u\"ng\"\n for char in u\"ӖѢҌ\":\n self.trans[char] = u\"E\"\n for char in u\"ӗѣҍ\":\n self.trans[char] = u\"e\"\n for char in u\"ӲӰҮ\":\n self.trans[char] = u\"Ü\"\n for char in u\"ӳӱү\":\n self.trans[char] = u\"ü\"\n\n # Archaic Cyrillic letters\n self.trans.update({u\"Ѹ\": u\"Ou\", u\"ѹ\": u\"ou\", u\"Ѡ\": u\"O\", u\"Ѻ\": u\"O\", u\"ѡ\": u\"o\",\n u\"ѻ\": u\"o\", u\"Ѿ\": u\"Ot\", u\"ѿ\": u\"ot\", u\"Ѣ\": u\"E\", u\"ѣ\": u\"e\",\n u\"Ѥ\": u\"Ei\", u\"Ѧ\": u\"Ei\", u\"ѥ\": u\"ei\", u\"ѧ\": u\"ei\", u\"Ѫ\": u\"Ai\",\n u\"ѫ\": u\"ai\", u\"Ѯ\": u\"X\", u\"ѯ\": u\"x\", u\"Ѱ\": u\"Ps\", u\"ѱ\": u\"ps\",\n u\"Ѳ\": u\"Th\", u\"ѳ\": u\"th\", u\"Ѵ\": u\"Ü\", u\"Ѷ\": u\"Ü\", u\"ѵ\": u\"ü\"})\n\n # Hebrew alphabet\n for char in u\"אע\":\n self.trans[char] = u\"'\"\n self.trans[u\"ב\"] = u\"b\"\n self.trans[u\"ג\"] = u\"g\"\n self.trans[u\"ד\"] = u\"d\"\n self.trans[u\"ה\"] = u\"h\"\n self.trans[u\"ו\"] = u\"v\"\n self.trans[u\"ז\"] = u\"z\"\n self.trans[u\"ח\"] = u\"kh\"\n self.trans[u\"ט\"] = u\"t\"\n self.trans[u\"י\"] = u\"y\"\n for char in u\"ךכ\":\n self.trans[char] = u\"k\"\n self.trans[u\"ל\"] = u\"l\"\n for char in u\"םמ\":\n self.trans[char] = u\"m\"\n for char in u\"ןנ\":\n self.trans[char] = u\"n\"\n self.trans[u\"ס\"] = u\"s\"\n for char in u\"ףפ\":\n self.trans[char] = u\"ph\"\n for char in u\"ץצ\":\n self.trans[char] = u\"ts\"\n self.trans[u\"ק\"] = u\"q\"\n self.trans[u\"ר\"] = u\"r\"\n self.trans[u\"ש\"] = u\"sh\"\n self.trans[u\"ת\"] = u\"th\"\n\n # Arab alphabet\n for char in u\"اﺍﺎ\":\n self.trans[char] = u\"a\"\n for char in u\"بﺏﺐﺒﺑ\":\n self.trans[char] = u\"b\"\n for char in u\"تﺕﺖﺘﺗ\":\n self.trans[char] = u\"t\"\n for char in u\"ثﺙﺚﺜﺛ\":\n self.trans[char] = u\"th\"\n for char in u\"جﺝﺞﺠﺟ\":\n self.trans[char] = u\"g\"\n for char in u\"حﺡﺢﺤﺣ\":\n self.trans[char] = u\"h\"\n for char in u\"خﺥﺦﺨﺧ\":\n self.trans[char] = u\"kh\"\n for char in u\"دﺩﺪ\":\n self.trans[char] = u\"d\"\n for char in u\"ذﺫﺬ\":\n self.trans[char] = u\"dh\"\n for char in u\"رﺭﺮ\":\n self.trans[char] = u\"r\"\n for char in u\"زﺯﺰ\":\n self.trans[char] = u\"z\"\n for char in u\"سﺱﺲﺴﺳ\":\n self.trans[char] = u\"s\"\n for char in u\"شﺵﺶﺸﺷ\":\n self.trans[char] = u\"sh\"\n for char in u\"صﺹﺺﺼﺻ\":\n self.trans[char] = u\"s\"\n for char in u\"ضﺽﺾﻀﺿ\":\n self.trans[char] = u\"d\"\n for char in u\"طﻁﻂﻄﻃ\":\n self.trans[char] = u\"t\"\n for char in u\"ظﻅﻆﻈﻇ\":\n self.trans[char] = u\"z\"\n for char in u\"عﻉﻊﻌﻋ\":\n self.trans[char] = u\"'\"\n for char in u\"غﻍﻎﻐﻏ\":\n self.trans[char] = u\"gh\"\n for char in u\"فﻑﻒﻔﻓ\":\n self.trans[char] = 
u\"f\"\n for char in u\"قﻕﻖﻘﻗ\":\n self.trans[char] = u\"q\"\n for char in u\"كﻙﻚﻜﻛک\":\n self.trans[char] = u\"k\"\n for char in u\"لﻝﻞﻠﻟ\":\n self.trans[char] = u\"l\"\n for char in u\"مﻡﻢﻤﻣ\":\n self.trans[char] = u\"m\"\n for char in u\"نﻥﻦﻨﻧ\":\n self.trans[char] = u\"n\"\n for char in u\"هﻩﻪﻬﻫ\":\n self.trans[char] = u\"h\"\n for char in u\"وﻭﻮ\":\n self.trans[char] = u\"w\"\n for char in u\"یيﻱﻲﻴﻳ\":\n self.trans[char] = u\"y\"\n # Arabic - additional letters, modified letters and ligatures\n self.trans[u\"ﺀ\"] = u\"'\"\n for char in u\"آﺁﺂ\":\n self.trans[char] = u\"'a\"\n for char in u\"ةﺓﺔ\":\n self.trans[char] = u\"th\"\n for char in u\"ىﻯﻰ\":\n self.trans[char] = u\"á\"\n for char in u\"یﯼﯽﯿﯾ\":\n self.trans[char] = u\"y\"\n self.trans[u\"؟\"] = u\"?\"\n # Arabic - ligatures\n for char in u\"ﻻﻼ\":\n self.trans[char] = u\"la\"\n self.trans[u\"ﷲ\"] = u\"llah\"\n for char in u\"إأ\":\n self.trans[char] = u\"a'\"\n self.trans[u\"ؤ\"] = u\"w'\"\n self.trans[u\"ئ\"] = u\"y'\"\n for char in u\"◌◌\":\n self.trans[char] = u\"\" # indicates absence of vowels\n # Arabic vowels\n self.trans[u\"◌\"] = u\"a\"\n self.trans[u\"◌\"] = u\"u\"\n self.trans[u\"◌\"] = u\"i\"\n self.trans[u\"◌\"] = u\"a\"\n self.trans[u\"◌\"] = u\"ay\"\n self.trans[u\"◌\"] = u\"ay\"\n self.trans[u\"◌\"] = u\"u\"\n self.trans[u\"◌\"] = u\"iy\"\n # Arab numerals\n for char in u\"٠۰\":\n self.trans[char] = u\"0\"\n for char in u\"١۱\":\n self.trans[char] = u\"1\"\n for char in u\"٢۲\":\n self.trans[char] = u\"2\"\n for char in u\"٣۳\":\n self.trans[char] = u\"3\"\n for char in u\"٤۴\":\n self.trans[char] = u\"4\"\n for char in u\"٥۵\":\n self.trans[char] = u\"5\"\n for char in u\"٦۶\":\n self.trans[char] = u\"6\"\n for char in u\"٧۷\":\n self.trans[char] = u\"7\"\n for char in u\"٨۸\":\n self.trans[char] = u\"8\"\n for char in u\"٩۹\":\n self.trans[char] = u\"9\"\n # Perso-Arabic\n for char in u\"پﭙﭙپ\":\n self.trans[char] = u\"p\"\n for char in u\"چچچچ\":\n self.trans[char] = u\"ch\"\n for char in u\"ژژ\":\n self.trans[char] = u\"zh\"\n for char in u\"گﮔﮕﮓ\":\n self.trans[char] = u\"g\"\n\n # Greek\n self.trans.update({u\"Α\": u\"A\", u\"α\": u\"a\", u\"Β\": u\"B\", u\"β\": u\"b\", u\"Γ\": u\"G\",\n u\"γ\": u\"g\", u\"Δ\": u\"D\", u\"δ\": u\"d\", u\"Ε\": u\"E\", u\"ε\": u\"e\",\n u\"Ζ\": u\"Z\", u\"ζ\": u\"z\", u\"Η\": u\"I\", u\"η\": u\"i\", u\"θ\": u\"th\",\n u\"Θ\": u\"Th\", u\"Ι\": u\"I\", u\"ι\": u\"i\", u\"Κ\": u\"K\", u\"κ\": u\"k\",\n u\"Λ\": u\"L\", u\"λ\": u\"l\", u\"Μ\": u\"M\", u\"μ\": u\"m\", u\"Ν\": u\"N\",\n u\"ν\": u\"n\", u\"Ξ\": u\"X\", u\"ξ\": u\"x\", u\"Ο\": u\"O\", u\"ο\": u\"o\",\n u\"Π\": u\"P\", u\"π\": u\"p\", u\"Ρ\": u\"R\", u\"ρ\": u\"r\", u\"Σ\": u\"S\",\n u\"σ\": u\"s\", u\"ς\": u\"s\", u\"Τ\": u\"T\", u\"τ\": u\"t\", u\"Υ\": u\"Y\",\n u\"υ\": u\"y\", u\"Φ\": u\"F\", u\"φ\": u\"f\", u\"Ψ\": u\"Ps\", u\"ψ\": u\"ps\",\n u\"Ω\": u\"O\", u\"ω\": u\"o\", u\"ϗ\": u\"&\", u\"Ϛ\": u\"St\", u\"ϛ\": u\"st\",\n u\"Ϙ\": u\"Q\", u\"Ϟ\": u\"Q\", u\"ϙ\": u\"q\", u\"ϟ\": u\"q\", u\"Ϻ\": u\"S\",\n u\"ϻ\": u\"s\", u\"Ϡ\": u\"Ss\", u\"ϡ\": u\"ss\", u\"Ϸ\": u\"Sh\", u\"ϸ\": u\"sh\",\n u\"·\": u\":\", u\"Ά\": u\"Á\", u\"ά\": u\"á\", u\"Έ\": u\"É\", u\"Ή\": u\"É\",\n u\"έ\": u\"é\", u\"ή\": u\"é\", u\"Ί\": u\"Í\", u\"ί\": u\"í\", u\"Ϊ\": u\"Ï\",\n u\"ϊ\": u\"ï\", u\"ΐ\": u\"ï\", u\"Ό\": u\"Ó\", u\"ό\": u\"ó\", u\"Ύ\": u\"Ý\",\n u\"ύ\": u\"ý\", u\"Ϋ\": u\"Y\", u\"ϋ\": u\"ÿ\", u\"ΰ\": u\"ÿ\", u\"Ώ\": u\"Ó\",\n u\"ώ\": u\"ó\"})\n\n # Japanese (katakana and hiragana)\n for char in u\"アァあ\":\n self.trans[char] = 
u\"a\"\n for char in u\"イィい\":\n self.trans[char] = u\"i\"\n for char in u\"ウう\":\n self.trans[char] = u\"u\"\n for char in u\"エェえ\":\n self.trans[char] = u\"e\"\n for char in u\"オォお\":\n self.trans[char] = u\"o\"\n for char in u\"ャや\":\n self.trans[char] = u\"ya\"\n for char in u\"ュゆ\":\n self.trans[char] = u\"yu\"\n for char in u\"ョよ\":\n self.trans[char] = u\"yo\"\n for char in u\"カか\":\n self.trans[char] = u\"ka\"\n for char in u\"キき\":\n self.trans[char] = u\"ki\"\n for char in u\"クく\":\n self.trans[char] = u\"ku\"\n for char in u\"ケけ\":\n self.trans[char] = u\"ke\"\n for char in u\"コこ\":\n self.trans[char] = u\"ko\"\n for char in u\"サさ\":\n self.trans[char] = u\"sa\"\n for char in u\"シし\":\n self.trans[char] = u\"shi\"\n for char in u\"スす\":\n self.trans[char] = u\"su\"\n for char in u\"セせ\":\n self.trans[char] = u\"se\"\n for char in u\"ソそ\":\n self.trans[char] = u\"so\"\n for char in u\"タた\":\n self.trans[char] = u\"ta\"\n for char in u\"チち\":\n self.trans[char] = u\"chi\"\n for char in u\"ツつ\":\n self.trans[char] = u\"tsu\"\n for char in u\"テて\":\n self.trans[char] = u\"te\"\n for char in u\"トと\":\n self.trans[char] = u\"to\"\n for char in u\"ナな\":\n self.trans[char] = u\"na\"\n for char in u\"ニに\":\n self.trans[char] = u\"ni\"\n for char in u\"ヌぬ\":\n self.trans[char] = u\"nu\"\n for char in u\"ネね\":\n self.trans[char] = u\"ne\"\n for char in u\"ノの\":\n self.trans[char] = u\"no\"\n for char in u\"ハは\":\n self.trans[char] = u\"ha\"\n for char in u\"ヒひ\":\n self.trans[char] = u\"hi\"\n for char in u\"フふ\":\n self.trans[char] = u\"fu\"\n for char in u\"ヘへ\":\n self.trans[char] = u\"he\"\n for char in u\"ホほ\":\n self.trans[char] = u\"ho\"\n for char in u\"マま\":\n self.trans[char] = u\"ma\"\n for char in u\"ミみ\":\n self.trans[char] = u\"mi\"\n for char in u\"ムむ\":\n self.trans[char] = u\"mu\"\n for char in u\"メめ\":\n self.trans[char] = u\"me\"\n for char in u\"モも\":\n self.trans[char] = u\"mo\"\n for char in u\"ラら\":\n self.trans[char] = u\"ra\"\n for char in u\"リり\":\n self.trans[char] = u\"ri\"\n for char in u\"ルる\":\n self.trans[char] = u\"ru\"\n for char in u\"レれ\":\n self.trans[char] = u\"re\"\n for char in u\"ロろ\":\n self.trans[char] = u\"ro\"\n for char in u\"ワわ\":\n self.trans[char] = u\"wa\"\n for char in u\"ヰゐ\":\n self.trans[char] = u\"wi\"\n for char in u\"ヱゑ\":\n self.trans[char] = u\"we\"\n for char in u\"ヲを\":\n self.trans[char] = u\"wo\"\n for char in u\"ンん\":\n self.trans[char] = u\"n\"\n for char in u\"ガが\":\n self.trans[char] = u\"ga\"\n for char in u\"ギぎ\":\n self.trans[char] = u\"gi\"\n for char in u\"グぐ\":\n self.trans[char] = u\"gu\"\n for char in u\"ゲげ\":\n self.trans[char] = u\"ge\"\n for char in u\"ゴご\":\n self.trans[char] = u\"go\"\n for char in u\"ザざ\":\n self.trans[char] = u\"za\"\n for char in u\"ジじ\":\n self.trans[char] = u\"ji\"\n for char in u\"ズず\":\n self.trans[char] = u\"zu\"\n for char in u\"ゼぜ\":\n self.trans[char] = u\"ze\"\n for char in u\"ゾぞ\":\n self.trans[char] = u\"zo\"\n for char in u\"ダだ\":\n self.trans[char] = u\"da\"\n for char in u\"ヂぢ\":\n self.trans[char] = u\"dji\"\n for char in u\"ヅづ\":\n self.trans[char] = u\"dzu\"\n for char in u\"デで\":\n self.trans[char] = u\"de\"\n for char in u\"ドど\":\n self.trans[char] = u\"do\"\n for char in u\"バば\":\n self.trans[char] = u\"ba\"\n for char in u\"ビび\":\n self.trans[char] = u\"bi\"\n for char in u\"ブぶ\":\n self.trans[char] = u\"bu\"\n for char in u\"ベべ\":\n self.trans[char] = u\"be\"\n for char in u\"ボぼ\":\n self.trans[char] = u\"bo\"\n for char in u\"パぱ\":\n self.trans[char] = u\"pa\"\n for 
char in u\"ピぴ\":\n self.trans[char] = u\"pi\"\n for char in u\"プぷ\":\n self.trans[char] = u\"pu\"\n for char in u\"ペぺ\":\n self.trans[char] = u\"pe\"\n for char in u\"ポぽ\":\n self.trans[char] = u\"po\"\n for char in u\"ヴゔ\":\n self.trans[char] = u\"vu\"\n self.trans[u\"ヷ\"] = u\"va\"\n self.trans[u\"ヸ\"] = u\"vi\"\n self.trans[u\"ヹ\"] = u\"ve\"\n self.trans[u\"ヺ\"] = u\"vo\"\n\n # Japanese and Chinese punctuation and typography\n for char in u\"・·\":\n self.trans[char] = u\" \"\n for char in u\"〃『』《》\":\n self.trans[char] = u'\"'\n for char in u\"「」〈〉〘〙〚〛\":\n self.trans[char] = u\"'\"\n for char in u\"(〔\":\n self.trans[char] = u\"(\"\n for char in u\")〕\":\n self.trans[char] = u\")\"\n for char in u\"[【〖\":\n self.trans[char] = u\"[\"\n for char in u\"]】〗\":\n self.trans[char] = u\"]\"\n self.trans['{'] = '{'\n self.trans['}'] = '}'\n self.trans['っ'] = ':'\n self.trans['ー'] = 'h'\n self.trans['゛'] = \"'\"\n self.trans['゜'] = 'p'\n self.trans['。'] = '. '\n self.trans['、'] = ', '\n self.trans['・'] = ' '\n self.trans['〆'] = 'shime'\n self.trans['〜'] = '-'\n self.trans['…'] = '...'\n self.trans['‥'] = '..'\n self.trans['ヶ'] = 'months'\n for char in u\"•◦\":\n self.trans[char] = u\"_\"\n for char in u\"※*\":\n self.trans[char] = u\"*\"\n self.trans['Ⓧ'] = '(X)'\n self.trans['Ⓨ'] = '(Y)'\n self.trans['!'] = '!'\n self.trans['?'] = '?'\n self.trans[';'] = ';'\n self.trans[':'] = ':'\n self.trans['。'] = '.'\n for char in u\",、\":\n self.trans[char] = u\",\"\n\n # Georgian\n self.trans['ა'] = 'a'\n self.trans['ბ'] = 'b'\n self.trans['გ'] = 'g'\n self.trans['დ'] = 'd'\n for char in u\"ეჱ\":\n self.trans[char] = u\"e\"\n self.trans['ვ'] = 'v'\n self.trans['ზ'] = 'z'\n self.trans['თ'] = 'th'\n self.trans['ი'] = 'i'\n self.trans['კ'] = 'k'\n self.trans['ლ'] = 'l'\n self.trans['მ'] = 'm'\n self.trans['ნ'] = 'n'\n self.trans['ო'] = 'o'\n self.trans['პ'] = 'p'\n self.trans['ჟ'] = 'zh'\n self.trans['რ'] = 'r'\n self.trans['ს'] = 's'\n self.trans['ტ'] = 't'\n self.trans['უ'] = 'u'\n self.trans['ფ'] = 'ph'\n self.trans['ქ'] = 'q'\n self.trans['ღ'] = 'gh'\n for char in u\"ყ\":\n self.trans[char] = u\"q'\"\n self.trans['შ'] = 'sh'\n self.trans['ჩ'] = 'ch'\n self.trans['ც'] = 'ts'\n self.trans['ძ'] = 'dz'\n for char in u\"წ\":\n self.trans[char] = u\"ts'\"\n for char in u\"ჭ\":\n self.trans[char] = u\"ch'\"\n self.trans['ხ'] = 'kh'\n self.trans['ჯ'] = 'j'\n self.trans['ჰ'] = 'h'\n self.trans['ჳ'] = 'w'\n self.trans['ჵ'] = 'o'\n self.trans['ჶ'] = 'f'\n\n # Devanagari\n for char in u\"पप\":\n self.trans[char] = u\"p\"\n self.trans['अ'] = 'a'\n for char in u\"आा\":\n self.trans[char] = u\"aa\"\n self.trans['प'] = 'pa'\n for char in u\"इि\":\n self.trans[char] = u\"i\"\n for char in u\"ईी\":\n self.trans[char] = u\"ii\"\n for char in u\"उु\":\n self.trans[char] = u\"u\"\n for char in u\"ऊू\":\n self.trans[char] = u\"uu\"\n for char in u\"एे\":\n self.trans[char] = u\"e\"\n for char in u\"ऐै\":\n self.trans[char] = u\"ai\"\n for char in u\"ओो\":\n self.trans[char] = u\"o\"\n for char in u\"औौ\":\n self.trans[char] = u\"au\"\n for char in u\"ऋृर\":\n self.trans[char] = u\"r\"\n for char in u\"ॠॄ\":\n self.trans[char] = u\"rr\"\n for char in u\"ऌॢल\":\n self.trans[char] = u\"l\"\n for char in u\"ॡॣ\":\n self.trans[char] = u\"ll\"\n self.trans['क'] = 'k'\n self.trans['ख'] = 'kh'\n self.trans['ग'] = 'g'\n self.trans['घ'] = 'gh'\n self.trans['ङ'] = 'ng'\n self.trans['च'] = 'c'\n self.trans['छ'] = 'ch'\n self.trans['ज'] = 'j'\n self.trans['झ'] = 'jh'\n self.trans['ञ'] = 'ñ'\n for char in u\"टत\":\n self.trans[char] = 
u\"t\"\n for char in u\"ठथ\":\n self.trans[char] = u\"th\"\n for char in u\"डद\":\n self.trans[char] = u\"d\"\n for char in u\"ढध\":\n self.trans[char] = u\"dh\"\n for char in u\"णन\":\n self.trans[char] = u\"n\"\n self.trans['फ'] = 'ph'\n self.trans['ब'] = 'b'\n self.trans['भ'] = 'bh'\n self.trans['म'] = 'm'\n self.trans['य'] = 'y'\n self.trans['व'] = 'v'\n self.trans['श'] = 'sh'\n for char in u\"षस\":\n self.trans[char] = u\"s\"\n self.trans['ह'] = 'h'\n self.trans['क'] = 'x'\n self.trans['त'] = 'tr'\n self.trans['ज'] = 'gj'\n for char in u\"क़\":\n self.trans[char] = u\"q\"\n self.trans['फ'] = 'f'\n self.trans['ख'] = 'hh'\n self.trans['H'] = 'gh'\n self.trans['ज'] = 'z'\n for char in u\"डढ\":\n self.trans[char] = u\"r\"\n # Devanagari ligatures (possibly incomplete and/or incorrect)\n for char in u\"ख्\":\n self.trans[char] = u\"khn\"\n self.trans['त'] = 'tn'\n for char in u\"द्\":\n self.trans[char] = u\"dn\"\n self.trans['श'] = 'cn'\n for char in u\"ह्\":\n self.trans[char] = u\"fn\"\n for char in u\"अँ\":\n self.trans[char] = u\"m\"\n for char in u\"॒॑\":\n self.trans[char] = u\"\"\n self.trans['०'] = '0'\n self.trans['१'] = '1'\n self.trans['२'] = '2'\n self.trans['३'] = '3'\n self.trans['४'] = '4'\n self.trans['५'] = '5'\n self.trans['६'] = '6'\n self.trans['७'] = '7'\n self.trans['८'] = '8'\n self.trans['९'] = '9'\n\n # Armenian\n self.trans['Ա'] = 'A'\n self.trans['ա'] = 'a'\n self.trans['Բ'] = 'B'\n self.trans['բ'] = 'b'\n self.trans['Գ'] = 'G'\n self.trans['գ'] = 'g'\n self.trans['Դ'] = 'D'\n self.trans['դ'] = 'd'\n self.trans['Ե'] = 'Je'\n self.trans['ե'] = 'e'\n self.trans['Զ'] = 'Z'\n self.trans['զ'] = 'z'\n self.trans['Է'] = 'É'\n self.trans['է'] = 'é'\n self.trans['Ը'] = 'Ë'\n self.trans['ը'] = 'ë'\n self.trans['Թ'] = 'Th'\n self.trans['թ'] = 'th'\n self.trans['Ժ'] = 'Zh'\n self.trans['ժ'] = 'zh'\n self.trans['Ի'] = 'I'\n self.trans['ի'] = 'i'\n self.trans['Լ'] = 'L'\n self.trans['լ'] = 'l'\n self.trans['Խ'] = 'Ch'\n self.trans['խ'] = 'ch'\n self.trans['Ծ'] = 'Ts'\n self.trans['ծ'] = 'ts'\n self.trans['Կ'] = 'K'\n self.trans['կ'] = 'k'\n self.trans['Հ'] = 'H'\n self.trans['հ'] = 'h'\n self.trans['Ձ'] = 'Dz'\n self.trans['ձ'] = 'dz'\n self.trans['Ղ'] = 'R'\n self.trans['ղ'] = 'r'\n self.trans['Ճ'] = 'Cz'\n self.trans['ճ'] = 'cz'\n self.trans['Մ'] = 'M'\n self.trans['մ'] = 'm'\n self.trans['Յ'] = 'J'\n self.trans['յ'] = 'j'\n self.trans['Ն'] = 'N'\n self.trans['ն'] = 'n'\n self.trans['Շ'] = 'S'\n self.trans['շ'] = 's'\n self.trans['Շ'] = 'Vo'\n self.trans['շ'] = 'o'\n self.trans['Չ'] = 'Tsh'\n self.trans['չ'] = 'tsh'\n self.trans['Պ'] = 'P'\n self.trans['պ'] = 'p'\n self.trans['Ջ'] = 'Dz'\n self.trans['ջ'] = 'dz'\n self.trans['Ռ'] = 'R'\n self.trans['ռ'] = 'r'\n self.trans['Ս'] = 'S'\n self.trans['ս'] = 's'\n self.trans['Վ'] = 'V'\n self.trans['վ'] = 'v'\n for char in u\"Տ\":\n self.trans[char] = u\"T'\"\n for char in u\"տ\":\n self.trans[char] = u\"t'\"\n self.trans['Ր'] = 'R'\n self.trans['ր'] = 'r'\n self.trans['Ց'] = 'Tsh'\n self.trans['ց'] = 'tsh'\n self.trans['Ւ'] = 'V'\n self.trans['ւ'] = 'v'\n self.trans['Փ'] = 'Ph'\n self.trans['փ'] = 'ph'\n self.trans['Ք'] = 'Kh'\n self.trans['ք'] = 'kh'\n self.trans['Օ'] = 'O'\n self.trans['օ'] = 'o'\n self.trans['Ֆ'] = 'F'\n self.trans['ֆ'] = 'f'\n self.trans['և'] = '&'\n self.trans['՟'] = '.'\n self.trans['՞'] = '?'\n self.trans['՝'] = ';'\n self.trans['՛'] = ''\n\n # Tamil\n for char in u\"க்\":\n self.trans[char] = u\"k\"\n for char in u\"ஙண்ந்ன்\":\n self.trans[char] = u\"n\"\n self.trans['ச'] = 'c'\n for char in u\"ஞ்\":\n 
self.trans[char] = u\"ñ\"\n for char in u\"ட்\":\n self.trans[char] = u\"th\"\n self.trans['த'] = 't'\n self.trans['ப'] = 'p'\n for char in u\"ம்\":\n self.trans[char] = u\"m\"\n for char in u\"ய்\":\n self.trans[char] = u\"y\"\n for char in u\"ர்ழ்ற\":\n self.trans[char] = u\"r\"\n for char in u\"ல்ள\":\n self.trans[char] = u\"l\"\n for char in u\"வ்\":\n self.trans[char] = u\"v\"\n self.trans['ஜ'] = 'j'\n self.trans['ஷ'] = 'sh'\n self.trans['ஸ'] = 's'\n self.trans['ஹ'] = 'h'\n for char in u\"க்ஷ\":\n self.trans[char] = u\"x\"\n self.trans['அ'] = 'a'\n self.trans['ஆ'] = 'aa'\n self.trans['இ'] = 'i'\n self.trans['ஈ'] = 'ii'\n self.trans['உ'] = 'u'\n self.trans['ஊ'] = 'uu'\n self.trans['எ'] = 'e'\n self.trans['ஏ'] = 'ee'\n self.trans['ஐ'] = 'ai'\n self.trans['ஒ'] = 'o'\n self.trans['ஓ'] = 'oo'\n self.trans['ஔ'] = 'au'\n self.trans['ஃ'] = ''\n\n # Bengali\n self.trans['অ'] = 'ô'\n for char in u\"আা\":\n self.trans[char] = u\"a\"\n for char in u\"ইিঈী\":\n self.trans[char] = u\"i\"\n for char in u\"উুঊূ\":\n self.trans[char] = u\"u\"\n for char in u\"ঋৃ\":\n self.trans[char] = u\"ri\"\n for char in u\"এেয়\":\n self.trans[char] = u\"e\"\n for char in u\"ঐৈ\":\n self.trans[char] = u\"oi\"\n for char in u\"ওো\":\n self.trans[char] = u\"o\"\n for char in u\"ঔৌ\":\n self.trans[char] = \"ou\"\n self.trans['্'] = ''\n self.trans['ৎ'] = 't'\n self.trans['ং'] = 'n'\n self.trans['ঃ'] = 'h'\n self.trans['ঁ'] = 'ñ'\n self.trans['ক'] = 'k'\n self.trans['খ'] = 'kh'\n self.trans['গ'] = 'g'\n self.trans['ঘ'] = 'gh'\n self.trans['ঙ'] = 'ng'\n self.trans['চ'] = 'ch'\n self.trans['ছ'] = 'chh'\n self.trans['জ'] = 'j'\n self.trans['ঝ'] = 'jh'\n self.trans['ঞ'] = 'n'\n for char in u\"টত\":\n self.trans[char] = u\"t\"\n for char in u\"ঠথ\":\n self.trans[char] = u\"th\"\n for char in u\"ডদ\":\n self.trans[char] = u\"d\"\n for char in u\"ঢধ\":\n self.trans[char] = u\"dh\"\n for char in u\"ণন\":\n self.trans[char] = u\"n\"\n self.trans['প'] = 'p'\n self.trans['ফ'] = 'ph'\n self.trans['ব'] = 'b'\n self.trans['ভ'] = 'bh'\n self.trans['ম'] = 'm'\n self.trans['য'] = 'dzh'\n self.trans['র'] = 'r'\n self.trans['ল'] = 'l'\n self.trans['শ'] = 's'\n self.trans['হ'] = 'h'\n for char in u\"য়\":\n self.trans[char] = u\"-\"\n for char in u\"ড়\":\n self.trans[char] = u\"r\"\n self.trans['ঢ'] = 'rh'\n self.trans['০'] = '0'\n self.trans['১'] = '1'\n self.trans['২'] = '2'\n self.trans['৩'] = '3'\n self.trans['৪'] = '4'\n self.trans['৫'] = '5'\n self.trans['৬'] = '6'\n self.trans['৭'] = '7'\n self.trans['৮'] = '8'\n self.trans['৯'] = '9'\n\n # Thai (because of complications of the alphabet, self.transliterations\n # are very imprecise here)\n self.trans['ก'] = 'k'\n for char in u\"ขฃคฅฆ\":\n self.trans[char] = u\"kh\"\n self.trans['ง'] = 'ng'\n for char in u\"จฉชฌ\":\n self.trans[char] = u\"ch\"\n for char in u\"ซศษส\":\n self.trans[char] = u\"s\"\n for char in u\"ญย\":\n self.trans[char] = u\"y\"\n for char in u\"ฎด\":\n self.trans[char] = u\"d\"\n for char in u\"ฏต\":\n self.trans[char] = u\"t\"\n for char in u\"ฐฑฒถทธ\":\n self.trans[char] = u\"th\"\n for char in u\"ณน\":\n self.trans[char] = u\"n\"\n self.trans['บ'] = 'b'\n self.trans['ป'] = 'p'\n for char in u\"ผพภ\":\n self.trans[char] = u\"ph\"\n for char in u\"ฝฟ\":\n self.trans[char] = u\"f\"\n self.trans['ม'] = 'm'\n self.trans['ร'] = 'r'\n self.trans['ฤ'] = 'rue'\n self.trans['ๅ'] = ':'\n for char in u\"ลฬ\":\n self.trans[char] = u\"l\"\n self.trans['ฦ'] = 'lue'\n self.trans['ว'] = 'w'\n for char in u\"หฮ\":\n self.trans[char] = u\"h\"\n self.trans['อ'] = ''\n 
self.trans['ร'] = 'ü'\n self.trans['ว'] = 'ua'\n for char in u\"อวโิ\":\n self.trans[char] = u\"o\"\n for char in u\"ะัา\":\n self.trans[char] = u\"a\"\n self.trans['ว'] = 'u'\n self.trans['ำ'] = 'am'\n self.trans['ิ'] = 'i'\n self.trans['ี'] = 'i:'\n self.trans['ึ'] = 'ue'\n self.trans['ื'] = 'ue:'\n self.trans['ุ'] = 'u'\n self.trans['ู'] = 'u:'\n for char in u\"เ็\":\n self.trans[char] = u\"e\"\n self.trans['แ'] = 'ae'\n for char in u\"ใไ\":\n self.trans[char] = u\"ai\"\n for char in u\"่้๊๋็์\":\n self.trans[char] = u\"\"\n self.trans['ฯ'] = '.'\n self.trans['ๆ'] = '(2)'\n\n # Korean (Revised Romanization system within possible, incomplete)\n self.trans['국'] = 'guk'\n self.trans['명'] = 'myeong'\n self.trans['검'] = 'geom'\n self.trans['타'] = 'ta'\n self.trans['분'] = 'bun'\n self.trans['사'] = 'sa'\n self.trans['류'] = 'ryu'\n self.trans['포'] = 'po'\n self.trans['르'] = 'reu'\n self.trans['투'] = 'tu'\n self.trans['갈'] = 'gal'\n self.trans['어'] = 'eo'\n self.trans['노'] = 'no'\n self.trans['웨'] = 'we'\n self.trans['이'] = 'i'\n self.trans['라'] = 'ra'\n self.trans['틴'] = 'tin'\n self.trans['루'] = 'ru'\n self.trans['마'] = 'ma'\n self.trans['니'] = 'ni'\n self.trans['아'] = 'a'\n self.trans['독'] = 'dok'\n self.trans['일'] = 'il'\n self.trans['모'] = 'mo'\n self.trans['크'] = 'keu'\n self.trans['샤'] = 'sya'\n self.trans['영'] = 'yeong'\n self.trans['불'] = 'bul'\n self.trans['가'] = 'ga'\n self.trans['리'] = 'ri'\n self.trans['그'] = 'geu'\n self.trans['지'] = 'ji'\n self.trans['야'] = 'ya'\n self.trans['바'] = 'ba'\n self.trans['슈'] = 'syu'\n self.trans['키'] = 'ki'\n self.trans['프'] = 'peu'\n self.trans['랑'] = 'rang'\n self.trans['스'] = 'seu'\n self.trans['로'] = 'ro'\n self.trans['메'] = 'me'\n self.trans['역'] = 'yeok'\n self.trans['도'] = 'do'\n\n # Kannada\n self.trans[u\"ಅ\"] = u\"a\"\n for char in u\"ಆಾ\":\n self.trans[char] = u\"aa\"\n for char in u\"ಇಿ\":\n self.trans[char] = u\"i\"\n for char in u\"ಈೀ\":\n self.trans[char] = u\"ii\"\n for char in u\"ಉು\":\n self.trans[char] = u\"u\"\n for char in u\"ಊೂ\":\n self.trans[char] = u\"uu\"\n for char in u\"ಋೂ\":\n self.trans[char] = u\"r'\"\n for char in u\"ಎೆ\":\n self.trans[char] = u\"e\"\n for char in u\"ಏೇ\":\n self.trans[char] = u\"ee\"\n for char in u\"ಐೈ\":\n self.trans[char] = u\"ai\"\n for char in u\"ಒೊ\":\n self.trans[char] = u\"o\"\n for char in u\"ಓೋ\":\n self.trans[char] = u\"oo\"\n for char in u\"ಔೌ\":\n self.trans[char] = u\"au\"\n self.trans[u\"ಂ\"] = u\"m'\"\n self.trans[u\"ಃ\"] = u\"h'\"\n self.trans[u\"ಕ\"] = u\"k\"\n self.trans[u\"ಖ\"] = u\"kh\"\n self.trans[u\"ಗ\"] = u\"g\"\n self.trans[u\"ಘ\"] = u\"gh\"\n self.trans[u\"ಙ\"] = u\"ng\"\n self.trans[u\"ಚ\"] = u\"c\"\n self.trans[u\"ಛ\"] = u\"ch\"\n self.trans[u\"ಜ\"] = u\"j\"\n self.trans[u\"ಝ\"] = u\"ny\"\n self.trans[u\"ಟ\"] = u\"tt\"\n self.trans[u\"ಠ\"] = u\"tth\"\n self.trans[u\"ಡ\"] = u\"dd\"\n self.trans[u\"ಢ\"] = u\"ddh\"\n self.trans[u\"ಣ\"] = u\"nn\"\n self.trans[u\"ತ\"] = u\"t\"\n self.trans[u\"ಥ\"] = u\"th\"\n self.trans[u\"ದ\"] = u\"d\"\n self.trans[u\"ಧ\"] = u\"dh\"\n self.trans[u\"ನ\"] = u\"n\"\n self.trans[u\"ಪ\"] = u\"p\"\n self.trans[u\"ಫ\"] = u\"ph\"\n self.trans[u\"ಬ\"] = u\"b\"\n self.trans[u\"ಭ\"] = u\"bh\"\n self.trans[u\"ಮ\"] = u\"m\"\n self.trans[u\"ಯ\"] = u\"y\"\n self.trans[u\"ರ\"] = u\"r\"\n self.trans[u\"ಲ\"] = u\"l\"\n self.trans[u\"ವ\"] = u\"v\"\n self.trans[u\"ಶ\"] = u\"sh\"\n self.trans[u\"ಷ\"] = u\"ss\"\n self.trans[u\"ಸ\"] = u\"s\"\n self.trans[u\"ಹ\"] = u\"h\"\n self.trans[u\"ಳ\"] = u\"ll\"\n self.trans[u\"೦\"] = u\"0\"\n self.trans[u\"೧\"] = u\"1\"\n 
self.trans[u\"೨\"] = u\"2\"\n self.trans[u\"೩\"] = u\"3\"\n self.trans[u\"೪\"] = u\"4\"\n self.trans[u\"೫\"] = u\"5\"\n self.trans[u\"೬\"] = u\"6\"\n self.trans[u\"೭\"] = u\"7\"\n self.trans[u\"೮\"] = u\"8\"\n self.trans[u\"೯\"] = u\"9\"\n # Telugu\n self.trans['అ'] = 'a'\n for char in u\"ఆా\":\n self.trans[char] = u\"aa\"\n for char in u\"ఇి\":\n self.trans[char] = u\"i\"\n for char in u\"ఈీ\":\n self.trans[char] = u\"ii\"\n for char in u\"ఉు\":\n self.trans[char] = u\"u\"\n for char in u\"ఊూ\":\n self.trans[char] = u\"uu\"\n for char in u\"ఋృ\":\n self.trans[char] = u\"r'\"\n for char in u\"ౠౄ\":\n self.trans[char] = u'r\"'\n self.trans[u\"ఌ\"] = u\"l'\"\n self.trans[u\"ౡ\"] = u'l\"'\n for char in u\"ఎె\":\n self.trans[char] = u\"e\"\n for char in u\"ఏే\":\n self.trans[char] = u\"ee\"\n for char in u\"ఐై\":\n self.trans[char] = u\"ai\"\n for char in u\"ఒొ\":\n self.trans[char] = u\"o\"\n for char in u\"ఓో\":\n self.trans[char] = u\"oo\"\n for char in u\"ఔౌ\":\n self.trans[char] = u\"au\"\n self.trans[u\"ం\"] = u\"'\"\n self.trans[u\"ః\"] = u'\"'\n self.trans[u\"క\"] = u\"k\"\n self.trans[u\"ఖ\"] = u\"kh\"\n self.trans[u\"గ\"] = u\"g\"\n self.trans[u\"ఘ\"] = u\"gh\"\n self.trans[u\"ఙ\"] = u\"ng\"\n self.trans[u\"చ\"] = u\"ts\"\n self.trans[u\"ఛ\"] = u\"tsh\"\n self.trans[u\"జ\"] = u\"j\"\n self.trans[u\"ఝ\"] = u\"jh\"\n self.trans[u\"ఞ\"] = u\"ñ\"\n for char in u\"టత\":\n self.trans[char] = u\"t\"\n for char in u\"ఠథ\":\n self.trans[char] = u\"th\"\n for char in u\"డద\":\n self.trans[char] = u\"d\"\n for char in u\"ఢధ\":\n self.trans[char] = u\"dh\"\n for char in u\"ణన\":\n self.trans[char] = u\"n\"\n self.trans[u\"ప\"] = u\"p\"\n self.trans[u\"ఫ\"] = u\"ph\"\n self.trans[u\"బ\"] = u\"b\"\n self.trans[u\"భ\"] = u\"bh\"\n self.trans[u\"మ\"] = u\"m\"\n self.trans[u\"య\"] = u\"y\"\n for char in u\"రఱ\":\n self.trans[char] = u\"r\"\n for char in u\"లళ\":\n self.trans[char] = u\"l\"\n self.trans[u\"వ\"] = u\"v\"\n self.trans[u\"శ\"] = u\"sh\"\n for char in u\"షస\":\n self.trans[char] = u\"s\"\n self.trans[u\"హ\"] = u\"h\"\n self.trans[u\"్\"] = \"\"\n for char in u\"ంఁ\":\n self.trans[char] = u\"^\"\n self.trans[u\"ః\"] = u\"-\"\n self.trans[u\"౦\"] = u\"0\"\n self.trans[u\"౧\"] = u\"1\"\n self.trans[u\"౨\"] = u\"2\"\n self.trans[u\"౩\"] = u\"3\"\n self.trans[u\"౪\"] = u\"4\"\n self.trans[u\"౫\"] = u\"5\"\n self.trans[u\"౬\"] = u\"6\"\n self.trans[u\"౭\"] = u\"7\"\n self.trans[u\"౮\"] = u\"8\"\n self.trans[u\"౯\"] = u\"9\"\n self.trans[u\"౹\"] = u\"1/4\"\n self.trans[u\"౺\"] = u\"1/2\"\n self.trans[u\"౻\"] = u\"3/4\"\n self.trans[u\"౼\"] = u\"1/16\"\n self.trans[u\"౽\"] = u\"1/8\"\n self.trans[u\"౾\"] = u\"3/16\"\n # Lao - note: pronounciation in initial position is used;\n # different pronounciation in final position is ignored\n self.trans[u\"ກ\"] = \"k\"\n for char in u\"ຂຄ\":\n self.trans[char] = \"kh\"\n self.trans[u\"ງ\"] = \"ng\"\n self.trans[u\"ຈ\"] = \"ch\"\n for char in u\"ສຊ\":\n self.trans[char] = \"s\"\n self.trans[u\"ຍ\"] = \"ny\"\n self.trans[u\"ດ\"] = \"d\"\n self.trans[u\"ຕ\"] = \"t\"\n for char in u\"ຖທ\":\n self.trans[char] = \"th\"\n self.trans[u\"ນ\"] = \"n\"\n self.trans[u\"ບ\"] = \"b\"\n self.trans[u\"ປ\"] = \"p\"\n for char in u\"ຜພ\":\n self.trans[char] = \"ph\"\n for char in u\"ຝຟ\":\n self.trans[char] = \"f\"\n for char in u\"ມໝ\":\n self.trans[char] = \"m\"\n self.trans[u\"ຢ\"] = \"y\"\n for char in u\"ຣຼ\":\n self.trans[char] = \"r\"\n for char in u\"ລຼ\":\n self.trans[char] = \"l\"\n self.trans[u\"ວ\"] = \"v\"\n self.trans['ຮ'] = 'h'\n self.trans[u\"ອ\"] = \"'\"\n 
for char in u\"ະັ\":\n self.trans[char] = \"a\"\n self.trans[u\"ິ\"] = \"i\"\n self.trans[u\"ຶ\"] = \"ue\"\n self.trans[u\"ຸ\"] = \"u\"\n self.trans[u\"ເ\"] = u\"é\"\n self.trans[u\"ແ\"] = u\"è\"\n for char in u\"ໂົາໍ\":\n self.trans[char] = \"o\"\n self.trans[u\"ຽ\"] = \"ia\"\n self.trans[u\"ເຶ\"] = \"uea\"\n self.trans[u\"ຍ\"] = \"i\"\n for char in u\"ໄໃ\":\n self.trans[char] = \"ai\"\n self.trans[u\"ຳ\"] = \"am\"\n self.trans[u\"າ\"] = \"aa\"\n self.trans[u\"ີ\"] = \"ii\"\n self.trans[u\"ື\"] = \"yy\"\n self.trans[u\"ູ\"] = \"uu\"\n self.trans[u\"ເ\"] = \"e\"\n self.trans[u\"ແ\"] = \"ei\"\n self.trans[u\"໐\"] = \"0\"\n self.trans[u\"໑\"] = \"1\"\n self.trans[u\"໒\"] = \"2\"\n self.trans[u\"໓\"] = \"3\"\n self.trans[u\"໔\"] = \"4\"\n self.trans[u\"໕\"] = \"5\"\n self.trans[u\"໖\"] = \"6\"\n self.trans[u\"໗\"] = \"7\"\n self.trans[u\"໘\"] = \"8\"\n self.trans[u\"໙\"] = \"9\"\n # Chinese -- note: incomplete\n for char in u\"埃挨哎唉哀皑癌蔼矮艾碍爱隘\":\n self.trans[char] = u\"ai\"\n for char in u\"鞍氨安俺按暗岸胺案\":\n self.trans[char] = u\"an\"\n for char in u\"肮昂盎\":\n self.trans[char] = u\"ang\"\n for char in u\"凹敖熬翱袄傲奥懊澳\":\n self.trans[char] = u\"ao\"\n for char in u\"芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸\":\n self.trans[char] = u\"ba\"\n for char in u\"白柏百摆佰败拜稗\":\n self.trans[char] = u\"bai\"\n for char in u\"斑班搬扳般颁板版扮拌伴瓣半办绊\":\n self.trans[char] = u\"ban\"\n for char in u\"邦帮梆榜膀绑棒磅蚌镑傍谤\":\n self.trans[char] = u\"bang\"\n for char in u\"苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆\":\n self.trans[char] = u\"bao\"\n for char in u\"杯碑悲卑北辈背贝钡倍狈备惫焙被\":\n self.trans[char] = u\"bei\"\n for char in u\"奔苯本笨\":\n self.trans[char] = u\"ben\"\n for char in u\"崩绷甭泵蹦迸\":\n self.trans[char] = u\"beng\"\n for char in u\"逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛\":\n self.trans[char] = u\"bi\"\n for char in u\"鞭边编贬扁便变卞辨辩辫遍\":\n self.trans[char] = u\"bian\"\n for char in u\"标彪膘表\":\n self.trans[char] = u\"biao\"\n for char in u\"鳖憋别瘪\":\n self.trans[char] = u\"bie\"\n for char in u\"彬斌濒滨宾摈\":\n self.trans[char] = u\"bin\"\n for char in u\"兵冰柄丙秉饼炳病并\":\n self.trans[char] = u\"bing\"\n for char in u\"玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜亳\":\n self.trans[char] = u\"bo\"\n for char in u\"哺补埠不布步簿部怖\":\n self.trans[char] = u\"bu\"\n for char in u\"猜裁材才财睬踩采彩菜蔡\":\n self.trans[char] = u\"cai\"\n for char in u\"餐参蚕残惭惨灿\":\n self.trans[char] = u\"can\"\n for char in u\"苍舱仓沧藏\":\n self.trans[char] = u\"cang\"\n for char in u\"操糙槽曹草\":\n self.trans[char] = u\"cao\"\n for char in u\"厕策侧册测\":\n self.trans[char] = u\"ce\"\n for char in u\"层蹭\":\n self.trans[char] = u\"ceng\"\n for char in u\"插叉茬茶查碴搽察岔差诧\":\n self.trans[char] = u\"cha\"\n for char in u\"拆柴豺\":\n self.trans[char] = u\"chai\"\n for char in u\"搀掺蝉馋谗缠铲产阐颤\":\n self.trans[char] = u\"chan\"\n for char in u\"昌猖场尝常长偿肠厂敞畅唱倡\":\n self.trans[char] = u\"chang\"\n for char in u\"超抄钞朝嘲潮巢吵炒\":\n self.trans[char] = u\"chao\"\n for char in u\"车扯撤掣彻澈\":\n self.trans[char] = u\"che\"\n for char in u\"郴臣辰尘晨忱沉陈趁衬\":\n self.trans[char] = u\"chen\"\n for char in u\"撑称城橙成呈乘程惩澄诚承逞骋秤\":\n self.trans[char] = u\"cheng\"\n for char in u\"吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽\":\n self.trans[char] = u\"chi\"\n for char in u\"充冲虫崇宠\":\n self.trans[char] = u\"chong\"\n for char in u\"抽酬畴踌稠愁筹仇绸瞅丑臭\":\n self.trans[char] = u\"chou\"\n for char in u\"初出橱厨躇锄雏滁除楚储矗搐触处\":\n self.trans[char] = u\"chu\"\n self.trans['揣'] = 'chuai'\n for char in u\"川穿椽传船喘串\":\n self.trans[char] = u\"chuan\"\n for char in u\"疮窗幢床闯创\":\n self.trans[char] = u\"chuang\"\n for char in u\"吹炊捶锤垂\":\n self.trans[char] = u\"chui\"\n for char in u\"春椿醇唇淳纯蠢\":\n self.trans[char] = u\"chun\"\n for char in u\"戳绰\":\n self.trans[char] 
= u\"chuo\"\n for char in u\"疵茨磁雌辞慈瓷词此刺赐次\":\n self.trans[char] = u\"ci\"\n for char in u\"聪葱囱匆从丛\":\n self.trans[char] = u\"cong\"\n self.trans['凑'] = 'cou'\n for char in u\"粗醋簇促\":\n self.trans[char] = u\"cu\"\n for char in u\"蹿篡窜\":\n self.trans[char] = u\"cuan\"\n for char in u\"摧崔催脆瘁粹淬翠\":\n self.trans[char] = u\"cui\"\n for char in u\"村存寸\":\n self.trans[char] = u\"cun\"\n for char in u\"磋撮搓措挫错\":\n self.trans[char] = u\"cuo\"\n for char in u\"搭达答瘩打大\":\n self.trans[char] = u\"da\"\n for char in u\"呆歹傣戴带殆代贷袋待逮怠\":\n self.trans[char] = u\"dai\"\n for char in u\"耽担丹单郸掸胆旦氮但惮淡诞弹蛋儋\":\n self.trans[char] = u\"dan\"\n for char in u\"当挡党荡档\":\n self.trans[char] = u\"dang\"\n for char in u\"刀捣蹈倒岛祷导到稻悼道盗\":\n self.trans[char] = u\"dao\"\n for char in u\"德得的\":\n self.trans[char] = u\"de\"\n for char in u\"蹬灯登等瞪凳邓\":\n self.trans[char] = u\"deng\"\n for char in u\"堤低滴迪敌笛狄涤翟嫡抵底地蒂第帝弟递缔\":\n self.trans[char] = u\"di\"\n for char in u\"颠掂滇碘点典靛垫电佃甸店惦奠淀殿\":\n self.trans[char] = u\"dian\"\n for char in u\"碉叼雕凋刁掉吊钓调\":\n self.trans[char] = u\"diao\"\n for char in u\"跌爹碟蝶迭谍叠\":\n self.trans[char] = u\"die\"\n for char in u\"丁盯叮钉顶鼎锭定订\":\n self.trans[char] = u\"ding\"\n self.trans['丢'] = 'diu'\n for char in u\"东冬董懂动栋侗恫冻洞\":\n self.trans[char] = u\"dong\"\n for char in u\"兜抖斗陡豆逗痘\":\n self.trans[char] = u\"dou\"\n for char in u\"都督毒犊独读堵睹赌杜镀肚度渡妒\":\n self.trans[char] = u\"du\"\n for char in u\"端短锻段断缎\":\n self.trans[char] = u\"duan\"\n for char in u\"堆兑队对\":\n self.trans[char] = u\"dui\"\n for char in u\"墩吨蹲敦顿囤钝盾遁\":\n self.trans[char] = u\"dun\"\n for char in u\"掇哆多夺垛躲朵跺舵剁惰堕\":\n self.trans[char] = u\"duo\"\n for char in u\"蛾峨鹅俄额讹娥恶厄扼遏鄂饿\":\n self.trans[char] = u\"e\"\n for char in u\"恩嗯\":\n self.trans[char] = u\"en\"\n for char in u\"而儿耳尔饵洱二贰\":\n self.trans[char] = u\"er\"\n for char in u\"发罚筏伐乏阀法珐\":\n self.trans[char] = u\"fa\"\n for char in u\"藩帆番翻樊矾钒繁凡烦反返范贩犯饭泛\":\n self.trans[char] = u\"fan\"\n for char in u\"坊芳方肪房防妨仿访纺放\":\n self.trans[char] = u\"fang\"\n for char in u\"菲非啡飞肥匪诽吠肺废沸费\":\n self.trans[char] = u\"fei\"\n for char in u\"芬酚吩氛分纷坟焚汾粉奋份忿愤粪\":\n self.trans[char] = u\"fen\"\n for char in u\"丰封枫蜂峰锋风疯烽逢冯缝讽奉凤\":\n self.trans[char] = u\"feng\"\n self.trans['佛'] = 'fo'\n self.trans['否'] = 'fou'\n for char in u\"夫敷肤孵扶拂辐幅氟符伏俘服浮涪福袱弗甫抚辅俯釜斧脯腑府腐赴副覆赋复傅付阜父腹负富讣附妇缚咐\":\n self.trans[char] = u\"fu\"\n for char in u\"噶嘎\":\n self.trans[char] = u\"ga\"\n for char in u\"该改概钙盖溉\":\n self.trans[char] = u\"gai\"\n for char in u\"干甘杆柑竿肝赶感秆敢赣\":\n self.trans[char] = u\"gan\"\n for char in u\"冈刚钢缸肛纲岗港杠\":\n self.trans[char] = u\"gang\"\n for char in u\"篙皋高膏羔糕搞镐稿告\":\n self.trans[char] = u\"gao\"\n for char in u\"哥歌搁戈鸽胳疙割革葛格蛤阁隔铬个各\":\n self.trans[char] = u\"ge\"\n self.trans['给'] = 'gei'\n for char in u\"根跟\":\n self.trans[char] = u\"gen\"\n for char in u\"耕更庚羹埂耿梗\":\n self.trans[char] = u\"geng\"\n for char in u\"工攻功恭龚供躬公宫弓巩汞拱贡共\":\n self.trans[char] = u\"gong\"\n for char in u\"钩勾沟苟狗垢构购够\":\n self.trans[char] = u\"gou\"\n for char in u\"辜菇咕箍估沽孤姑鼓古蛊骨谷股故顾固雇\":\n self.trans[char] = u\"gu\"\n for char in u\"刮瓜剐寡挂褂\":\n self.trans[char] = u\"gua\"\n for char in u\"乖拐怪\":\n self.trans[char] = u\"guai\"\n for char in u\"棺关官冠观管馆罐惯灌贯\":\n self.trans[char] = u\"guan\"\n for char in u\"光广逛\":\n self.trans[char] = u\"guang\"\n for char in u\"瑰规圭硅归龟闺轨鬼诡癸桂柜跪贵刽\":\n self.trans[char] = u\"gui\"\n for char in u\"辊滚棍\":\n self.trans[char] = u\"gun\"\n for char in u\"锅郭国果裹过\":\n self.trans[char] = u\"guo\"\n self.trans['哈'] = 'ha'\n for char in u\"骸孩海氦亥害骇\":\n self.trans[char] = u\"hai\"\n for char in u\"酣憨邯韩含涵寒函喊罕翰撼捍旱憾悍焊汗汉\":\n 
self.trans[char] = u\"han\"\n for char in u\"夯杭航\":\n self.trans[char] = u\"hang\"\n for char in u\"壕嚎豪毫郝好耗号浩\":\n self.trans[char] = u\"hao\"\n for char in u\"呵喝荷菏核禾和何合盒貉阂河涸赫褐鹤贺\":\n self.trans[char] = u\"he\"\n for char in u\"嘿黑\":\n self.trans[char] = u\"hei\"\n for char in u\"痕很狠恨\":\n self.trans[char] = u\"hen\"\n for char in u\"哼亨横衡恒\":\n self.trans[char] = u\"heng\"\n for char in u\"轰哄烘虹鸿洪宏弘红\":\n self.trans[char] = u\"hong\"\n for char in u\"喉侯猴吼厚候后\":\n self.trans[char] = u\"hou\"\n for char in u\"呼乎忽瑚壶葫胡蝴狐糊湖弧虎唬护互沪户\":\n self.trans[char] = u\"hu\"\n for char in u\"花哗华猾滑画划化话\":\n self.trans[char] = u\"hua\"\n for char in u\"槐徊怀淮坏\":\n self.trans[char] = u\"huai\"\n for char in u\"欢环桓还缓换患唤痪豢焕涣宦幻\":\n self.trans[char] = u\"huan\"\n for char in u\"荒慌黄磺蝗簧皇凰惶煌晃幌恍谎\":\n self.trans[char] = u\"huang\"\n for char in u\"灰挥辉徽恢蛔回毁悔慧卉惠晦贿秽会烩汇讳诲绘\":\n self.trans[char] = u\"hui\"\n for char in u\"荤昏婚魂浑混\":\n self.trans[char] = u\"hun\"\n for char in u\"豁活伙火获或惑霍货祸\":\n self.trans[char] = u\"huo\"\n for char in u\"击圾基机畸稽积箕肌饥迹激讥鸡姬绩缉吉极棘辑籍集及急疾汲即嫉级挤几脊己蓟技冀季伎祭剂悸济寄寂计记既忌际妓继纪\":\n self.trans[char] = u\"ji\"\n for char in u\"嘉枷夹佳家加荚颊贾甲钾假稼价架驾嫁\":\n self.trans[char] = u\"jia\"\n for char in u\"歼监坚尖笺间煎兼肩艰奸缄茧检柬碱硷拣捡简俭剪减荐槛鉴践贱见键箭件健舰剑饯渐溅涧建\":\n self.trans[char] = u\"jian\"\n for char in u\"僵姜将浆江疆蒋桨奖讲匠酱降\":\n self.trans[char] = u\"jiang\"\n for char in u\"蕉椒礁焦胶交郊浇骄娇嚼搅铰矫侥脚狡角饺缴绞剿教酵轿较叫窖\":\n self.trans[char] = u\"jiao\"\n for char in u\"揭接皆秸街阶截劫节桔杰捷睫竭洁结解姐戒藉芥界借介疥诫届\":\n self.trans[char] = u\"jie\"\n for char in u\"巾筋斤金今津襟紧锦仅谨进靳晋禁近烬浸尽劲\":\n self.trans[char] = u\"jin\"\n for char in u\"荆兢茎睛晶鲸京惊精粳经井警景颈静境敬镜径痉靖竟竞净\":\n self.trans[char] = u\"jing\"\n for char in u\"囧炯窘\":\n self.trans[char] = u\"jiong\"\n for char in u\"揪究纠玖韭久灸九酒厩救旧臼舅咎就疚\":\n self.trans[char] = u\"jiu\"\n for char in u\"鞠拘狙疽居驹菊局咀矩举沮聚拒据巨具距踞锯俱句惧炬剧\":\n self.trans[char] = u\"ju\"\n for char in u\"捐鹃娟倦眷卷绢\":\n self.trans[char] = u\"juan\"\n for char in u\"撅攫抉掘倔爵觉决诀绝\":\n self.trans[char] = u\"jue\"\n for char in u\"均菌钧军君峻俊竣浚郡骏\":\n self.trans[char] = u\"jun\"\n for char in u\"喀咖卡咯\":\n self.trans[char] = u\"ka\"\n for char in u\"开揩楷凯慨\":\n self.trans[char] = u\"kai\"\n for char in u\"刊堪勘坎砍看\":\n self.trans[char] = u\"kan\"\n for char in u\"康慷糠扛抗亢炕\":\n self.trans[char] = u\"kang\"\n for char in u\"考拷烤靠\":\n self.trans[char] = u\"kao\"\n for char in u\"坷苛柯棵磕颗科壳咳可渴克刻客课\":\n self.trans[char] = u\"ke\"\n for char in u\"肯啃垦恳\":\n self.trans[char] = u\"ken\"\n for char in u\"坑吭\":\n self.trans[char] = u\"keng\"\n for char in u\"空恐孔控\":\n self.trans[char] = u\"kong\"\n for char in u\"抠口扣寇\":\n self.trans[char] = u\"kou\"\n for char in u\"枯哭窟苦酷库裤\":\n self.trans[char] = u\"ku\"\n for char in u\"夸垮挎跨胯\":\n self.trans[char] = u\"kua\"\n for char in u\"块筷侩快\":\n self.trans[char] = u\"kuai\"\n for char in u\"宽款\":\n self.trans[char] = u\"kuan\"\n for char in u\"匡筐狂框矿眶旷况\":\n self.trans[char] = u\"kuang\"\n for char in u\"亏盔岿窥葵奎魁傀馈愧溃\":\n self.trans[char] = u\"kui\"\n for char in u\"坤昆捆困\":\n self.trans[char] = u\"kun\"\n for char in u\"括扩廓阔\":\n self.trans[char] = u\"kuo\"\n for char in u\"垃拉喇蜡腊辣啦\":\n self.trans[char] = u\"la\"\n for char in u\"莱来赖\":\n self.trans[char] = u\"lai\"\n for char in u\"蓝婪栏拦篮阑兰澜谰揽览懒缆烂滥\":\n self.trans[char] = u\"lan\"\n for char in u\"琅榔狼廊郎朗浪\":\n self.trans[char] = u\"lang\"\n for char in u\"捞劳牢老佬姥酪烙涝\":\n self.trans[char] = u\"lao\"\n for char in u\"勒乐\":\n self.trans[char] = u\"le\"\n for char in u\"雷镭蕾磊累儡垒擂肋类泪\":\n self.trans[char] = u\"lei\"\n for char in u\"棱楞冷\":\n self.trans[char] = u\"leng\"\n for char in 
u\"厘梨犁黎篱狸离漓理李里鲤礼莉荔吏栗丽厉励砾历利傈例俐痢立粒沥隶力璃哩\":\n self.trans[char] = u\"li\"\n self.trans['俩'] = 'lia'\n for char in u\"联莲连镰廉怜涟帘敛脸链恋炼练\":\n self.trans[char] = u\"lian\"\n for char in u\"粮凉梁粱良两辆量晾亮谅\":\n self.trans[char] = u\"liang\"\n for char in u\"撩聊僚疗燎寥辽潦了撂镣廖料\":\n self.trans[char] = u\"liao\"\n for char in u\"列裂烈劣猎\":\n self.trans[char] = u\"lie\"\n for char in u\"琳林磷霖临邻鳞淋凛赁吝拎\":\n self.trans[char] = u\"lin\"\n for char in u\"玲菱零龄铃伶羚凌灵陵岭领另令\":\n self.trans[char] = u\"ling\"\n for char in u\"溜琉榴硫馏留刘瘤流柳六\":\n self.trans[char] = u\"liu\"\n for char in u\"龙聋咙笼窿隆垄拢陇\":\n self.trans[char] = u\"long\"\n for char in u\"楼娄搂篓漏陋\":\n self.trans[char] = u\"lou\"\n for char in u\"芦卢颅庐炉掳卤虏鲁麓碌露路赂鹿潞禄录陆戮泸\":\n self.trans[char] = u\"lu\"\n for char in u\"峦挛孪滦卵乱\":\n self.trans[char] = u\"luan\"\n for char in u\"掠略\":\n self.trans[char] = u\"lue\"\n for char in u\"抡轮伦仑沦纶论\":\n self.trans[char] = u\"lun\"\n for char in u\"萝螺罗逻锣箩骡裸落洛骆络漯\":\n self.trans[char] = u\"luo\"\n for char in u\"驴吕铝侣旅履屡缕虑氯律率滤绿\":\n self.trans[char] = u\"lv\"\n for char in u\"妈麻玛码蚂马骂嘛吗\":\n self.trans[char] = u\"ma\"\n for char in u\"埋买麦卖迈脉\":\n self.trans[char] = u\"mai\"\n for char in u\"瞒馒蛮满蔓曼慢漫谩\":\n self.trans[char] = u\"man\"\n for char in u\"芒茫盲氓忙莽\":\n self.trans[char] = u\"mang\"\n for char in u\"猫茅锚毛矛铆卯茂冒帽貌贸\":\n self.trans[char] = u\"mao\"\n self.trans['么'] = 'me'\n for char in u\"玫枚梅酶霉煤没眉媒镁每美昧寐妹媚\":\n self.trans[char] = u\"mei\"\n for char in u\"门闷们\":\n self.trans[char] = u\"men\"\n for char in u\"萌蒙檬盟锰猛梦孟\":\n self.trans[char] = u\"meng\"\n for char in u\"眯醚靡糜迷谜弥米秘觅泌蜜密幂\":\n self.trans[char] = u\"mi\"\n for char in u\"棉眠绵冕免勉娩缅面\":\n self.trans[char] = u\"mian\"\n for char in u\"苗描瞄藐秒渺庙妙\":\n self.trans[char] = u\"miao\"\n for char in u\"蔑灭\":\n self.trans[char] = u\"mie\"\n for char in u\"民抿皿敏悯闽\":\n self.trans[char] = u\"min\"\n for char in u\"明螟鸣铭名命\":\n self.trans[char] = u\"ming\"\n self.trans['谬'] = 'miu'\n for char in u\"摸摹蘑模膜磨摩魔抹末莫墨默沫漠寞陌\":\n self.trans[char] = u\"mo\"\n for char in u\"谋牟某\":\n self.trans[char] = u\"mou\"\n for char in u\"拇牡亩姆母墓暮幕募慕木目睦牧穆\":\n self.trans[char] = u\"mu\"\n for char in u\"拿哪呐钠那娜纳\":\n self.trans[char] = u\"na\"\n for char in u\"氖乃奶耐奈\":\n self.trans[char] = u\"nai\"\n for char in u\"南男难\":\n self.trans[char] = u\"nan\"\n self.trans['囊'] = 'nang'\n for char in u\"挠脑恼闹淖\":\n self.trans[char] = u\"nao\"\n self.trans['呢'] = 'ne'\n for char in u\"馁内\":\n self.trans[char] = u\"nei\"\n self.trans['嫩'] = 'nen'\n self.trans['能'] = 'neng'\n for char in u\"妮霓倪泥尼拟你匿腻逆溺\":\n self.trans[char] = u\"ni\"\n for char in u\"蔫拈年碾撵捻念\":\n self.trans[char] = u\"nian\"\n for char in u\"娘酿\":\n self.trans[char] = u\"niang\"\n for char in u\"鸟尿\":\n self.trans[char] = u\"niao\"\n for char in u\"捏聂孽啮镊镍涅\":\n self.trans[char] = u\"nie\"\n self.trans['您'] = 'nin'\n for char in u\"柠狞凝宁拧泞\":\n self.trans[char] = u\"ning\"\n for char in u\"牛扭钮纽\":\n self.trans[char] = u\"niu\"\n for char in u\"脓浓农弄\":\n self.trans[char] = u\"nong\"\n for char in u\"奴努怒\":\n self.trans[char] = u\"nu\"\n self.trans['暖'] = 'nuan'\n for char in u\"虐疟\":\n self.trans[char] = u\"nue\"\n for char in u\"挪懦糯诺\":\n self.trans[char] = u\"nuo\"\n self.trans['女'] = 'nv'\n self.trans['哦'] = 'o'\n for char in u\"欧鸥殴藕呕偶沤\":\n self.trans[char] = u\"ou\"\n for char in u\"啪趴爬帕怕琶\":\n self.trans[char] = u\"pa\"\n for char in u\"拍排牌徘湃派\":\n self.trans[char] = u\"pai\"\n for char in u\"攀潘盘磐盼畔判叛\":\n self.trans[char] = u\"pan\"\n for char in u\"乓庞旁耪胖\":\n self.trans[char] = u\"pang\"\n for char in u\"抛咆刨炮袍跑泡\":\n self.trans[char] = u\"pao\"\n for char in 
u\"呸胚培裴赔陪配佩沛\":\n self.trans[char] = u\"pei\"\n for char in u\"喷盆\":\n self.trans[char] = u\"pen\"\n for char in u\"砰抨烹澎彭蓬棚硼篷膨朋鹏捧碰\":\n self.trans[char] = u\"peng\"\n for char in u\"坯砒霹批披劈琵毗啤脾疲皮匹痞僻屁譬\":\n self.trans[char] = u\"pi\"\n for char in u\"篇偏片骗\":\n self.trans[char] = u\"pian\"\n for char in u\"飘漂瓢票\":\n self.trans[char] = u\"piao\"\n for char in u\"撇瞥\":\n self.trans[char] = u\"pie\"\n for char in u\"拼频贫品聘\":\n self.trans[char] = u\"pin\"\n for char in u\"乒坪苹萍平凭瓶评屏\":\n self.trans[char] = u\"ping\"\n for char in u\"坡泼颇婆破魄迫粕剖\":\n self.trans[char] = u\"po\"\n for char in u\"扑铺仆莆葡菩蒲埔朴圃普浦谱曝瀑濮\":\n self.trans[char] = u\"pu\"\n for char in u\"期欺栖戚妻七凄漆柒沏其棋奇歧畦崎脐齐旗祈祁骑起岂乞企启契砌器气迄弃汽泣讫\":\n self.trans[char] = u\"qi\"\n for char in u\"掐恰洽\":\n self.trans[char] = u\"qia\"\n for char in u\"牵扦钎铅千迁签仟谦乾黔钱钳前潜遣浅谴堑嵌欠歉\":\n self.trans[char] = u\"qian\"\n for char in u\"枪呛腔羌墙蔷强抢\":\n self.trans[char] = u\"qiang\"\n for char in u\"橇锹敲悄桥瞧乔侨巧鞘撬翘峭俏窍\":\n self.trans[char] = u\"qiao\"\n for char in u\"切茄且怯窃\":\n self.trans[char] = u\"qie\"\n for char in u\"钦侵亲秦琴勤芹擒禽寝沁\":\n self.trans[char] = u\"qin\"\n for char in u\"青轻氢倾卿清擎晴氰情顷请庆\":\n self.trans[char] = u\"qing\"\n for char in u\"琼穷\":\n self.trans[char] = u\"qiong\"\n for char in u\"秋丘邱球求囚酋泅\":\n self.trans[char] = u\"qiu\"\n for char in u\"趋区蛆曲躯屈驱渠取娶龋趣去\":\n self.trans[char] = u\"qu\"\n for char in u\"圈颧权醛泉全痊拳犬券劝\":\n self.trans[char] = u\"quan\"\n for char in u\"缺炔瘸却鹊榷确雀\":\n self.trans[char] = u\"que\"\n for char in u\"裙群\":\n self.trans[char] = u\"qun\"\n for char in u\"然燃冉染\":\n self.trans[char] = u\"ran\"\n for char in u\"瓤壤攘嚷让\":\n self.trans[char] = u\"rang\"\n for char in u\"饶扰绕\":\n self.trans[char] = u\"rao\"\n for char in u\"惹热\":\n self.trans[char] = u\"re\"\n for char in u\"壬仁人忍韧任认刃妊纫\":\n self.trans[char] = u\"ren\"\n for char in u\"扔仍\":\n self.trans[char] = u\"reng\"\n self.trans['日'] = 'ri'\n for char in u\"戎茸蓉荣融熔溶容绒冗\":\n self.trans[char] = u\"rong\"\n for char in u\"揉柔肉\":\n self.trans[char] = u\"rou\"\n for char in u\"茹蠕儒孺如辱乳汝入褥\":\n self.trans[char] = u\"ru\"\n for char in u\"软阮\":\n self.trans[char] = u\"ruan\"\n for char in u\"蕊瑞锐\":\n self.trans[char] = u\"rui\"\n for char in u\"闰润\":\n self.trans[char] = u\"run\"\n for char in u\"若弱\":\n self.trans[char] = u\"ruo\"\n for char in u\"撒洒萨\":\n self.trans[char] = u\"sa\"\n for char in u\"腮鳃塞赛\":\n self.trans[char] = u\"sai\"\n for char in u\"三叁伞散\":\n self.trans[char] = u\"san\"\n for char in u\"桑嗓丧\":\n self.trans[char] = u\"sang\"\n for char in u\"搔骚扫嫂\":\n self.trans[char] = u\"sao\"\n for char in u\"瑟色涩\":\n self.trans[char] = u\"se\"\n self.trans['森'] = 'sen'\n self.trans['僧'] = 'seng'\n for char in u\"莎砂杀刹沙纱傻啥煞\":\n self.trans[char] = u\"sha\"\n for char in u\"筛晒\":\n self.trans[char] = u\"shai\"\n for char in u\"珊苫杉山删煽衫闪陕擅赡膳善汕扇缮\":\n self.trans[char] = u\"shan\"\n for char in u\"墒伤商赏晌上尚裳\":\n self.trans[char] = u\"shang\"\n for char in u\"梢捎稍烧芍勺韶少哨邵绍\":\n self.trans[char] = u\"shao\"\n for char in u\"奢赊蛇舌舍赦摄射慑涉社设\":\n self.trans[char] = u\"she\"\n for char in u\"砷申呻伸身深娠绅神沈审婶甚肾慎渗\":\n self.trans[char] = u\"shen\"\n for char in u\"声生甥牲升绳省盛剩胜圣\":\n self.trans[char] = u\"sheng\"\n for char in u\"师失狮施湿诗尸虱十石拾时什食蚀实识史矢使屎驶始式示士世柿事拭誓逝势是嗜噬适仕侍释饰氏市恃室视试\":\n self.trans[char] = u\"shi\"\n for char in u\"收手首守寿授售受瘦兽\":\n self.trans[char] = u\"shou\"\n for char in u\"蔬枢梳殊抒输叔舒淑疏书赎孰熟薯暑曙署蜀黍鼠属术述树束戍竖墅庶数漱恕\":\n self.trans[char] = u\"shu\"\n for char in u\"刷耍\":\n self.trans[char] = u\"shua\"\n for char in u\"摔衰甩帅\":\n self.trans[char] = u\"shuai\"\n for char in u\"栓拴\":\n self.trans[char] = 
u\"shuan\"\n for char in u\"霜双爽\":\n self.trans[char] = u\"shuang\"\n for char in u\"谁水睡税\":\n self.trans[char] = u\"shui\"\n for char in u\"吮瞬顺舜\":\n self.trans[char] = u\"shun\"\n for char in u\"说硕朔烁\":\n self.trans[char] = u\"shuo\"\n for char in u\"斯撕嘶思私司丝死肆寺嗣四伺似饲巳\":\n self.trans[char] = u\"si\"\n for char in u\"松耸怂颂送宋讼诵\":\n self.trans[char] = u\"song\"\n for char in u\"搜艘擞\":\n self.trans[char] = u\"sou\"\n for char in u\"嗽苏酥俗素速粟僳塑溯宿诉肃\":\n self.trans[char] = u\"su\"\n for char in u\"酸蒜算\":\n self.trans[char] = u\"suan\"\n for char in u\"虽隋随绥髓碎岁穗遂隧祟\":\n self.trans[char] = u\"sui\"\n for char in u\"孙损笋\":\n self.trans[char] = u\"sun\"\n for char in u\"蓑梭唆缩琐索锁所\":\n self.trans[char] = u\"suo\"\n for char in u\"塌他它她塔獭挞蹋踏\":\n self.trans[char] = u\"ta\"\n for char in u\"胎苔抬台泰酞太态汰\":\n self.trans[char] = u\"tai\"\n for char in u\"坍摊贪瘫滩坛檀痰潭谭谈坦毯袒碳探叹炭\":\n self.trans[char] = u\"tan\"\n for char in u\"汤塘搪堂棠膛唐糖倘躺淌趟烫\":\n self.trans[char] = u\"tang\"\n for char in u\"掏涛滔绦萄桃逃淘陶讨套\":\n self.trans[char] = u\"tao\"\n self.trans['特'] = 'te'\n for char in u\"藤腾疼誊\":\n self.trans[char] = u\"teng\"\n for char in u\"梯剔踢锑提题蹄啼体替嚏惕涕剃屉\":\n self.trans[char] = u\"ti\"\n for char in u\"兲天添填田甜恬舔腆\":\n self.trans[char] = u\"tian\"\n for char in u\"挑条迢眺跳\":\n self.trans[char] = u\"tiao\"\n for char in u\"贴铁帖\":\n self.trans[char] = u\"tie\"\n for char in u\"厅听烃汀廷停亭庭挺艇\":\n self.trans[char] = u\"ting\"\n for char in u\"通桐酮瞳同铜彤童桶捅筒统痛\":\n self.trans[char] = u\"tong\"\n for char in u\"偷投头透\":\n self.trans[char] = u\"tou\"\n for char in u\"凸秃突图徒途涂屠土吐兔\":\n self.trans[char] = u\"tu\"\n for char in u\"湍团\":\n self.trans[char] = u\"tuan\"\n for char in u\"推颓腿蜕褪退\":\n self.trans[char] = u\"tui\"\n for char in u\"吞屯臀\":\n self.trans[char] = u\"tun\"\n for char in u\"拖托脱鸵陀驮驼椭妥拓唾\":\n self.trans[char] = u\"tuo\"\n for char in u\"挖哇蛙洼娃瓦袜\":\n self.trans[char] = u\"wa\"\n for char in u\"歪外\":\n self.trans[char] = u\"wai\"\n for char in u\"豌弯湾玩顽丸烷完碗挽晚皖惋宛婉万腕莞\":\n self.trans[char] = u\"wan\"\n for char in u\"汪王亡枉网往旺望忘妄\":\n self.trans[char] = u\"wang\"\n for char in u\"威巍微危韦违桅围唯惟为潍维苇萎委伟伪尾纬未蔚味畏胃喂魏位渭谓尉慰卫\":\n self.trans[char] = u\"wei\"\n for char in u\"瘟温蚊文闻纹吻稳紊问\":\n self.trans[char] = u\"wen\"\n for char in u\"嗡翁瓮\":\n self.trans[char] = u\"weng\"\n for char in u\"挝蜗涡窝我斡卧握沃\":\n self.trans[char] = u\"wo\"\n for char in u\"巫呜钨乌污诬屋无芜梧吾吴毋武五捂午舞伍侮坞戊雾晤物勿务悟误\":\n self.trans[char] = u\"wu\"\n for char in u\"昔熙析西硒矽晰嘻吸锡牺稀息希悉膝夕惜熄烯溪汐犀檄袭席习媳喜铣洗系隙戏细\":\n self.trans[char] = u\"xi\"\n for char in u\"瞎虾匣霞辖暇峡侠狭下厦夏吓\":\n self.trans[char] = u\"xia\"\n for char in u\"掀锨先仙鲜纤咸贤衔舷闲涎弦嫌显险现献县腺馅羡宪陷限线\":\n self.trans[char] = u\"xian\"\n for char in u\"相厢镶香箱襄湘乡翔祥详想响享项巷橡像向象\":\n self.trans[char] = u\"xiang\"\n for char in u\"萧硝霄削哮嚣销消宵淆晓小孝校肖啸笑效\":\n self.trans[char] = u\"xiao\"\n for char in u\"楔些歇蝎鞋协挟携邪斜胁谐写械卸蟹懈泄泻谢屑\":\n self.trans[char] = u\"xie\"\n for char in u\"薪芯锌欣辛新忻心信衅\":\n self.trans[char] = u\"xin\"\n for char in u\"星腥猩惺兴刑型形邢行醒幸杏性姓\":\n self.trans[char] = u\"xing\"\n for char in u\"兄凶胸匈汹雄熊\":\n self.trans[char] = u\"xiong\"\n for char in u\"休修羞朽嗅锈秀袖绣\":\n self.trans[char] = u\"xiu\"\n for char in u\"墟戌需虚嘘须徐许蓄酗叙旭序畜恤絮婿绪续\":\n self.trans[char] = u\"xu\"\n for char in u\"轩喧宣悬旋玄选癣眩绚\":\n self.trans[char] = u\"xuan\"\n for char in u\"靴薛学穴雪血\":\n self.trans[char] = u\"xue\"\n for char in u\"勋熏循旬询寻驯巡殉汛训讯逊迅\":\n self.trans[char] = u\"xun\"\n for char in u\"压押鸦鸭呀丫芽牙蚜崖衙涯雅哑亚讶\":\n self.trans[char] = u\"ya\"\n for char in u\"焉咽阉烟淹盐严研蜒岩延言颜阎炎沿奄掩眼衍演艳堰燕厌砚雁唁彦焰宴谚验\":\n self.trans[char] = u\"yan\"\n for char in u\"殃央鸯秧杨扬佯疡羊洋阳氧仰痒养样漾\":\n self.trans[char] = u\"yang\"\n 
for char in u\"邀腰妖瑶摇尧遥窑谣姚咬舀药要耀\":\n self.trans[char] = u\"yao\"\n for char in u\"椰噎耶爷野冶也页掖业叶曳腋夜液\":\n self.trans[char] = u\"ye\"\n for char in u\"一壹医揖铱依伊衣颐夷遗移仪胰疑沂宜姨彝椅蚁倚已乙矣以艺抑易邑屹亿役臆逸肄疫亦裔意毅忆义益溢诣议谊译异翼翌绎\":\n self.trans[char] = u\"yi\"\n for char in u\"茵荫因殷音阴姻吟银淫寅饮尹引隐印\":\n self.trans[char] = u\"yin\"\n for char in u\"英樱婴鹰应缨莹萤营荧蝇迎赢盈影颖硬映\":\n self.trans[char] = u\"ying\"\n self.trans['哟'] = 'yo'\n for char in u\"拥佣臃痈庸雍踊蛹咏泳涌永恿勇用\":\n self.trans[char] = u\"yong\"\n for char in u\"幽优悠忧尤由邮铀犹油游酉有友右佑釉诱又幼迂\":\n self.trans[char] = u\"you\"\n for char in u\"淤于盂榆虞愚舆余俞逾鱼愉渝渔隅予娱雨与屿禹宇语羽玉域芋郁吁遇喻峪御愈欲狱育誉浴寓裕预豫驭\":\n self.trans[char] = u\"yu\"\n for char in u\"鸳渊冤元垣袁原援辕园员圆猿源缘远苑愿怨院\":\n self.trans[char] = u\"yuan\"\n for char in u\"曰约越跃钥岳粤月悦阅\":\n self.trans[char] = u\"yue\"\n for char in u\"耘云郧匀陨允运蕴酝晕韵孕\":\n self.trans[char] = u\"yun\"\n for char in u\"匝砸杂\":\n self.trans[char] = u\"za\"\n for char in u\"栽哉灾宰载再在\":\n self.trans[char] = u\"zai\"\n for char in u\"咱攒暂赞\":\n self.trans[char] = u\"zan\"\n for char in u\"赃脏葬\":\n self.trans[char] = u\"zang\"\n for char in u\"遭糟凿藻枣早澡蚤躁噪造皂灶燥\":\n self.trans[char] = u\"zao\"\n for char in u\"责择则泽\":\n self.trans[char] = u\"ze\"\n self.trans['贼'] = 'zei'\n self.trans['怎'] = 'zen'\n for char in u\"增憎曾赠\":\n self.trans[char] = u\"zeng\"\n for char in u\"扎喳渣札轧铡闸眨栅榨咋乍炸诈\":\n self.trans[char] = u\"zha\"\n for char in u\"摘斋宅窄债寨\":\n self.trans[char] = u\"zhai\"\n for char in u\"瞻毡詹粘沾盏斩辗崭展蘸栈占战站湛绽\":\n self.trans[char] = u\"zhan\"\n for char in u\"樟章彰漳张掌涨杖丈帐账仗胀瘴障\":\n self.trans[char] = u\"zhang\"\n for char in u\"招昭找沼赵照罩兆肇召\":\n self.trans[char] = u\"zhao\"\n for char in u\"遮折哲蛰辙者锗蔗这浙\":\n self.trans[char] = u\"zhe\"\n for char in u\"珍斟真甄砧臻贞针侦枕疹诊震振镇阵圳\":\n self.trans[char] = u\"zhen\"\n for char in u\"蒸挣睁征狰争怔整拯正政帧症郑证\":\n self.trans[char] = u\"zheng\"\n for char in u\"芝枝支吱蜘知肢脂汁之织职直植殖执值侄址指止趾只旨纸志挚掷至致置帜峙制智秩稚质炙痔滞治窒\":\n self.trans[char] = u\"zhi\"\n for char in u\"中盅忠钟衷终种肿重仲众\":\n self.trans[char] = u\"zhong\"\n for char in u\"舟周州洲诌粥轴肘帚咒皱宙昼骤\":\n self.trans[char] = u\"zhou\"\n for char in u\"珠株蛛朱猪诸诛逐竹烛煮拄瞩嘱主著柱助蛀贮铸筑住注祝驻\":\n self.trans[char] = u\"zhu\"\n for char in u\"抓爪\":\n self.trans[char] = u\"zhua\"\n self.trans['拽'] = 'zhuai'\n for char in u\"专砖转撰赚篆\":\n self.trans[char] = u\"zhuan\"\n for char in u\"桩庄装妆撞壮状\":\n self.trans[char] = u\"zhuang\"\n for char in u\"椎锥追赘坠缀\":\n self.trans[char] = u\"zhui\"\n for char in u\"谆准\":\n self.trans[char] = u\"zhun\"\n for char in u\"捉拙卓桌琢茁酌啄着灼浊\":\n self.trans[char] = u\"zhuo\"\n for char in u\"兹咨资姿滋淄孜紫仔籽滓子自渍字\":\n self.trans[char] = u\"zi\"\n for char in u\"鬃棕踪宗综总纵\":\n self.trans[char] = u\"zong\"\n for char in u\"邹走奏揍\":\n self.trans[char] = u\"zou\"\n for char in u\"租足卒族祖诅阻组\":\n self.trans[char] = u\"zu\"\n for char in u\"钻纂\":\n self.trans[char] = u\"zuan\"\n for char in u\"嘴醉最罪\":\n self.trans[char] = u\"zui\"\n for char in u\"尊遵\":\n self.trans[char] = u\"zun\"\n for char in u\"昨左佐柞做作坐座\":\n self.trans[char] = u\"zuo\"\n # from: https://www.wikidata.org/wiki/MediaWiki:Gadget-SimpleTransliterate.js\n self.trans[u\"ଂ\"] = \"anusvara\"\n self.trans[u\"ઇ\"] = \"i\"\n self.trans[u\"എ\"] = \"e\"\n self.trans[u\"ગ\"] = \"ga\"\n self.trans[u\"ਜ\"] = \"ja\"\n self.trans[u\"ഞ\"] = \"nya\"\n self.trans[u\"ଢ\"] = \"ddha\"\n self.trans[u\"ધ\"] = \"dha\"\n self.trans[u\"ਬ\"] = \"ba\"\n self.trans[u\"മ\"] = \"ma\"\n self.trans[u\"ଲ\"] = \"la\"\n self.trans[u\"ષ\"] = \"ssa\"\n self.trans[u\"਼\"] = \"nukta\"\n self.trans[u\"ാ\"] = \"aa\"\n self.trans[u\"ୂ\"] = \"uu\"\n self.trans[u\"ે\"] = \"e\"\n self.trans[u\"ੌ\"] = \"au\"\n self.trans[u\"ൎ\"] = 
\"reph\"\n self.trans[u\"ੜ\"] = \"rra\"\n self.trans[u\"՞\"] = \"?\"\n self.trans[u\"ୢ\"] = \"l\"\n self.trans[u\"૧\"] = \"1\"\n self.trans[u\"੬\"] = \"6\"\n self.trans[u\"൮\"] = \"8\"\n self.trans[u\"୲\"] = \"quarter\"\n self.trans[u\"ൾ\"] = \"ll\"\n self.trans[u\"ਇ\"] = \"i\"\n self.trans[u\"ഉ\"] = \"u\"\n self.trans[u\"ઌ\"] = \"l\"\n self.trans[u\"ਗ\"] = \"ga\"\n self.trans[u\"ങ\"] = \"nga\"\n self.trans[u\"ଝ\"] = \"jha\"\n self.trans[u\"જ\"] = \"ja\"\n self.trans[u\"؟\"] = \"?\"\n self.trans[u\"ਧ\"] = \"dha\"\n self.trans[u\"ഩ\"] = \"nnna\"\n self.trans[u\"ଭ\"] = \"bha\"\n self.trans[u\"બ\"] = \"ba\"\n self.trans[u\"ഹ\"] = \"ha\"\n self.trans[u\"ଽ\"] = \"avagraha\"\n self.trans[u\"઼\"] = \"nukta\"\n self.trans[u\"ੇ\"] = \"ee\"\n self.trans[u\"୍\"] = \"virama\"\n self.trans[u\"ૌ\"] = \"au\"\n self.trans[u\"੧\"] = \"1\"\n self.trans[u\"൩\"] = \"3\"\n self.trans[u\"୭\"] = \"7\"\n self.trans[u\"૬\"] = \"6\"\n self.trans[u\"൹\"] = \"mark\"\n self.trans[u\"ਖ਼\"] = \"khha\"\n self.trans[u\"ਂ\"] = \"bindi\"\n self.trans[u\"ഈ\"] = \"ii\"\n self.trans[u\"ઍ\"] = \"e\"\n self.trans[u\"ଌ\"] = \"l\"\n self.trans[u\"ഘ\"] = \"gha\"\n self.trans[u\"ઝ\"] = \"jha\"\n self.trans[u\"ଡ଼\"] = \"rra\"\n self.trans[u\"ਢ\"] = \"ddha\"\n self.trans[u\"ന\"] = \"na\"\n self.trans[u\"ભ\"] = \"bha\"\n self.trans[u\"ବ\"] = \"ba\"\n self.trans[u\"ਲ\"] = \"la\"\n self.trans[u\"സ\"] = \"sa\"\n self.trans[u\"ઽ\"] = \"avagraha\"\n self.trans[u\"଼\"] = \"nukta\"\n self.trans[u\"ੂ\"] = \"uu\"\n self.trans[u\"ൈ\"] = \"ai\"\n self.trans[u\"્\"] = \"virama\"\n self.trans[u\"ୌ\"] = \"au\"\n self.trans[u\"൨\"] = \"2\"\n self.trans[u\"૭\"] = \"7\"\n self.trans[u\"୬\"] = \"6\"\n self.trans[u\"ੲ\"] = \"iri\"\n self.trans[u\"ഃ\"] = \"visarga\"\n self.trans[u\"ં\"] = \"anusvara\"\n self.trans[u\"ଇ\"] = \"i\"\n self.trans[u\"ഓ\"] = \"oo\"\n self.trans[u\"ଗ\"] = \"ga\"\n self.trans[u\"ਝ\"] = \"jha\"\n self.trans[u\"?\"] = \"?\"\n self.trans[u\"ണ\"] = \"nna\"\n self.trans[u\"ઢ\"] = \"ddha\"\n self.trans[u\"ଧ\"] = \"dha\"\n self.trans[u\"ਭ\"] = \"bha\"\n self.trans[u\"ള\"] = \"lla\"\n self.trans[u\"લ\"] = \"la\"\n self.trans[u\"ଷ\"] = \"ssa\"\n self.trans[u\"ൃ\"] = \"r\"\n self.trans[u\"ૂ\"] = \"uu\"\n self.trans[u\"େ\"] = \"e\"\n self.trans[u\"੍\"] = \"virama\"\n self.trans[u\"ୗ\"] = \"mark\"\n self.trans[u\"ൣ\"] = \"ll\"\n self.trans[u\"ૢ\"] = \"l\"\n self.trans[u\"୧\"] = \"1\"\n self.trans[u\"੭\"] = \"7\"\n self.trans[u\"൳\"] = \"1/4\"\n self.trans[u\"୷\"] = \"sixteenths\"\n self.trans[u\"ଆ\"] = \"aa\"\n self.trans[u\"ઋ\"] = \"r\"\n self.trans[u\"ഊ\"] = \"uu\"\n self.trans[u\"ਐ\"] = \"ai\"\n self.trans[u\"ଖ\"] = \"kha\"\n self.trans[u\"છ\"] = \"cha\"\n self.trans[u\"ച\"] = \"ca\"\n self.trans[u\"ਠ\"] = \"ttha\"\n self.trans[u\"ଦ\"] = \"da\"\n self.trans[u\"ફ\"] = \"pha\"\n self.trans[u\"പ\"] = \"pa\"\n self.trans[u\"ਰ\"] = \"ra\"\n self.trans[u\"ଶ\"] = \"sha\"\n self.trans[u\"ഺ\"] = \"ttta\"\n self.trans[u\"ੀ\"] = \"ii\"\n self.trans[u\"ો\"] = \"o\"\n self.trans[u\"ൊ\"] = \"o\"\n self.trans[u\"ୖ\"] = \"mark\"\n self.trans[u\"୦\"] = \"0\"\n self.trans[u\"૫\"] = \"5\"\n self.trans[u\"൪\"] = \"4\"\n self.trans[u\"ੰ\"] = \"tippi\"\n self.trans[u\"୶\"] = \"eighth\"\n self.trans[u\"ൺ\"] = \"nn\"\n self.trans[u\"ଁ\"] = \"candrabindu\"\n self.trans[u\"അ\"] = \"a\"\n self.trans[u\"ઐ\"] = \"ai\"\n self.trans[u\"ക\"] = \"ka\"\n self.trans[u\"ਸ਼\"] = \"sha\"\n self.trans[u\"ਛ\"] = \"cha\"\n self.trans[u\"ଡ\"] = \"dda\"\n self.trans[u\"ઠ\"] = \"ttha\"\n self.trans[u\"ഥ\"] = \"tha\"\n self.trans[u\"ਫ\"] = \"pha\"\n self.trans[u\"ર\"] = \"ra\"\n 
self.trans[u\"വ\"] = \"va\"\n self.trans[u\"ୁ\"] = \"u\"\n self.trans[u\"ી\"] = \"ii\"\n self.trans[u\"ੋ\"] = \"oo\"\n self.trans[u\"ૐ\"] = \"om\"\n self.trans[u\"ୡ\"] = \"ll\"\n self.trans[u\"ૠ\"] = \"rr\"\n self.trans[u\"੫\"] = \"5\"\n self.trans[u\"ୱ\"] = \"wa\"\n self.trans[u\"૰\"] = \"sign\"\n self.trans[u\"൵\"] = \"quarters\"\n self.trans[u\"ਫ਼\"] = \"fa\"\n self.trans[u\"ઁ\"] = \"candrabindu\"\n self.trans[u\"ਆ\"] = \"aa\"\n self.trans[u\"ઑ\"] = \"o\"\n self.trans[u\"ଐ\"] = \"ai\"\n self.trans[u\"ഔ\"] = \"au\"\n self.trans[u\"ਖ\"] = \"kha\"\n self.trans[u\"ડ\"] = \"dda\"\n self.trans[u\"ଠ\"] = \"ttha\"\n self.trans[u\"ത\"] = \"ta\"\n self.trans[u\"ਦ\"] = \"da\"\n self.trans[u\"ର\"] = \"ra\"\n self.trans[u\"ഴ\"] = \"llla\"\n self.trans[u\"ુ\"] = \"u\"\n self.trans[u\"ୀ\"] = \"ii\"\n self.trans[u\"ൄ\"] = \"rr\"\n self.trans[u\"ૡ\"] = \"ll\"\n self.trans[u\"ୠ\"] = \"rr\"\n self.trans[u\"੦\"] = \"0\"\n self.trans[u\"૱\"] = \"sign\"\n self.trans[u\"୰\"] = \"isshar\"\n self.trans[u\"൴\"] = \"1/2\"\n self.trans[u\"ਁ\"] = \"bindi\"\n self.trans[u\"આ\"] = \"aa\"\n self.trans[u\"ଋ\"] = \"r\"\n self.trans[u\"ഏ\"] = \"ee\"\n self.trans[u\"ખ\"] = \"kha\"\n self.trans[u\"ଛ\"] = \"cha\"\n self.trans[u\"ട\"] = \"tta\"\n self.trans[u\"ਡ\"] = \"dda\"\n self.trans[u\"દ\"] = \"da\"\n self.trans[u\"ଫ\"] = \"pha\"\n self.trans[u\"യ\"] = \"ya\"\n self.trans[u\"શ\"] = \"sha\"\n self.trans[u\"ി\"] = \"i\"\n self.trans[u\"ੁ\"] = \"u\"\n self.trans[u\"ୋ\"] = \"o\"\n self.trans[u\"ੑ\"] = \"udaat\"\n self.trans[u\"૦\"] = \"0\"\n self.trans[u\"୫\"] = \"5\"\n self.trans[u\"൯\"] = \"9\"\n self.trans[u\"ੱ\"] = \"addak\"\n self.trans[u\"ൿ\"] = \"k\"\n self.trans[u\"ആ\"] = \"aa\"\n self.trans[u\"ଊ\"] = \"uu\"\n self.trans[u\"એ\"] = \"e\"\n self.trans[u\"ਔ\"] = \"au\"\n self.trans[u\"ഖ\"] = \"kha\"\n self.trans[u\"ଚ\"] = \"ca\"\n self.trans[u\"ટ\"] = \"tta\"\n self.trans[u\"ਤ\"] = \"ta\"\n self.trans[u\"ദ\"] = \"da\"\n self.trans[u\"ପ\"] = \"pa\"\n self.trans[u\"ય\"] = \"ya\"\n self.trans[u\"ശ\"] = \"sha\"\n self.trans[u\"િ\"] = \"i\"\n self.trans[u\"െ\"] = \"e\"\n self.trans[u\"൦\"] = \"0\"\n self.trans[u\"୪\"] = \"4\"\n self.trans[u\"૯\"] = \"9\"\n self.trans[u\"ੴ\"] = \"onkar\"\n self.trans[u\"ଅ\"] = \"a\"\n self.trans[u\"ਏ\"] = \"ee\"\n self.trans[u\"କ\"] = \"ka\"\n self.trans[u\"ઔ\"] = \"au\"\n self.trans[u\"ਟ\"] = \"tta\"\n self.trans[u\"ഡ\"] = \"dda\"\n self.trans[u\"ଥ\"] = \"tha\"\n self.trans[u\"ત\"] = \"ta\"\n self.trans[u\"ਯ\"] = \"ya\"\n self.trans[u\"റ\"] = \"rra\"\n self.trans[u\"ଵ\"] = \"va\"\n self.trans[u\"ਿ\"] = \"i\"\n self.trans[u\"ു\"] = \"u\"\n self.trans[u\"ૄ\"] = \"rr\"\n self.trans[u\"ൡ\"] = \"ll\"\n self.trans[u\"੯\"] = \"9\"\n self.trans[u\"൱\"] = \"100\"\n self.trans[u\"୵\"] = \"sixteenth\"\n self.trans[u\"અ\"] = \"a\"\n self.trans[u\"ਊ\"] = \"uu\"\n self.trans[u\"ഐ\"] = \"ai\"\n self.trans[u\"ક\"] = \"ka\"\n self.trans[u\"ଔ\"] = \"au\"\n self.trans[u\"ਚ\"] = \"ca\"\n self.trans[u\"ഠ\"] = \"ttha\"\n self.trans[u\"થ\"] = \"tha\"\n self.trans[u\"ତ\"] = \"ta\"\n self.trans[u\"ਪ\"] = \"pa\"\n self.trans[u\"ര\"] = \"ra\"\n self.trans[u\"વ\"] = \"va\"\n self.trans[u\"ീ\"] = \"ii\"\n self.trans[u\"ૅ\"] = \"e\"\n self.trans[u\"ୄ\"] = \"rr\"\n self.trans[u\"ൠ\"] = \"rr\"\n self.trans[u\"ਜ਼\"] = \"za\"\n self.trans[u\"੪\"] = \"4\"\n self.trans[u\"൰\"] = \"10\"\n self.trans[u\"୴\"] = \"quarters\"\n self.trans[u\"ਅ\"] = \"a\"\n self.trans[u\"ഋ\"] = \"r\"\n self.trans[u\"ઊ\"] = \"uu\"\n self.trans[u\"ଏ\"] = \"e\"\n self.trans[u\"ਕ\"] = \"ka\"\n self.trans[u\"ഛ\"] = \"cha\"\n self.trans[u\"ચ\"] = 
\"ca\"\n self.trans[u\"ଟ\"] = \"tta\"\n self.trans[u\"ਥ\"] = \"tha\"\n self.trans[u\"ഫ\"] = \"pha\"\n self.trans[u\"પ\"] = \"pa\"\n self.trans[u\"ଯ\"] = \"ya\"\n self.trans[u\"ਵ\"] = \"va\"\n self.trans[u\"ି\"] = \"i\"\n self.trans[u\"ോ\"] = \"oo\"\n self.trans[u\"ୟ\"] = \"yya\"\n self.trans[u\"൫\"] = \"5\"\n self.trans[u\"૪\"] = \"4\"\n self.trans[u\"୯\"] = \"9\"\n self.trans[u\"ੵ\"] = \"yakash\"\n self.trans[u\"ൻ\"] = \"n\"\n self.trans[u\"ઃ\"] = \"visarga\"\n self.trans[u\"ം\"] = \"anusvara\"\n self.trans[u\"ਈ\"] = \"ii\"\n self.trans[u\"ઓ\"] = \"o\"\n self.trans[u\"ഒ\"] = \"o\"\n self.trans[u\"ਘ\"] = \"gha\"\n self.trans[u\"ଞ\"] = \"nya\"\n self.trans[u\"ણ\"] = \"nna\"\n self.trans[u\"ഢ\"] = \"ddha\"\n self.trans[u\"ਲ਼\"] = \"lla\"\n self.trans[u\"ਨ\"] = \"na\"\n self.trans[u\"ମ\"] = \"ma\"\n self.trans[u\"ળ\"] = \"lla\"\n self.trans[u\"ല\"] = \"la\"\n self.trans[u\"ਸ\"] = \"sa\"\n self.trans[u\"¿\"] = \"?\"\n self.trans[u\"ା\"] = \"aa\"\n self.trans[u\"ૃ\"] = \"r\"\n self.trans[u\"ൂ\"] = \"uu\"\n self.trans[u\"ੈ\"] = \"ai\"\n self.trans[u\"ૣ\"] = \"ll\"\n self.trans[u\"ൢ\"] = \"l\"\n self.trans[u\"੨\"] = \"2\"\n self.trans[u\"୮\"] = \"8\"\n self.trans[u\"൲\"] = \"1000\"\n self.trans[u\"ਃ\"] = \"visarga\"\n self.trans[u\"ଉ\"] = \"u\"\n self.trans[u\"ઈ\"] = \"ii\"\n self.trans[u\"ਓ\"] = \"oo\"\n self.trans[u\"ଙ\"] = \"nga\"\n self.trans[u\"ઘ\"] = \"gha\"\n self.trans[u\"ഝ\"] = \"jha\"\n self.trans[u\"ਣ\"] = \"nna\"\n self.trans[u\"ન\"] = \"na\"\n self.trans[u\"ഭ\"] = \"bha\"\n self.trans[u\"ଜ\"] = \"ja\"\n self.trans[u\"ହ\"] = \"ha\"\n self.trans[u\"સ\"] = \"sa\"\n self.trans[u\"ഽ\"] = \"avagraha\"\n self.trans[u\"ૈ\"] = \"ai\"\n self.trans[u\"്\"] = \"virama\"\n self.trans[u\"୩\"] = \"3\"\n self.trans[u\"૨\"] = \"2\"\n self.trans[u\"൭\"] = \"7\"\n self.trans[u\"ੳ\"] = \"ura\"\n self.trans[u\"ൽ\"] = \"l\"\n self.trans[u\"ઉ\"] = \"u\"\n self.trans[u\"ଈ\"] = \"ii\"\n self.trans[u\"ഌ\"] = \"l\"\n self.trans[u\"ઙ\"] = \"nga\"\n self.trans[u\"ଘ\"] = \"gha\"\n self.trans[u\"ജ\"] = \"ja\"\n self.trans[u\"ਞ\"] = \"nya\"\n self.trans[u\"ନ\"] = \"na\"\n self.trans[u\"ബ\"] = \"ba\"\n self.trans[u\"ਮ\"] = \"ma\"\n self.trans[u\"હ\"] = \"ha\"\n self.trans[u\"ସ\"] = \"sa\"\n self.trans[u\"ਾ\"] = \"aa\"\n self.trans[u\"ૉ\"] = \"o\"\n self.trans[u\"ୈ\"] = \"ai\"\n self.trans[u\"ൌ\"] = \"au\"\n self.trans[u\"૩\"] = \"3\"\n self.trans[u\"୨\"] = \"2\"\n self.trans[u\"൬\"] = \"6\"\n self.trans[u\"੮\"] = \"8\"\n self.trans[u\"ർ\"] = \"rr\"\n self.trans[u\"ଃ\"] = \"visarga\"\n self.trans[u\"ഇ\"] = \"i\"\n self.trans[u\"ਉ\"] = \"u\"\n self.trans[u\"ଓ\"] = \"o\"\n self.trans[u\"ഗ\"] = \"ga\"\n self.trans[u\"ਙ\"] = \"nga\"\n self.trans[u\"ઞ\"] = \"nya\"\n self.trans[u\"ଣ\"] = \"nna\"\n self.trans[u\"ധ\"] = \"dha\"\n self.trans[u\"મ\"] = \"ma\"\n self.trans[u\"ଳ\"] = \"lla\"\n self.trans[u\"ഷ\"] = \"ssa\"\n self.trans[u\"ਹ\"] = \"ha\"\n self.trans[u\"ਗ਼\"] = \"ghha\"\n self.trans[u\"ા\"] = \"aa\"\n self.trans[u\"ୃ\"] = \"r\"\n self.trans[u\"േ\"] = \"ee\"\n self.trans[u\"ൗ\"] = \"mark\"\n self.trans[u\"ଢ଼\"] = \"rha\"\n self.trans[u\"ୣ\"] = \"ll\"\n self.trans[u\"൧\"] = \"1\"\n self.trans[u\"੩\"] = \"3\"\n self.trans[u\"૮\"] = \"8\"\n self.trans[u\"୳\"] = \"half\"\n for char in self.trans:\n value = self.trans[char]\n if value == \"?\":\n continue\n while value.encode(encoding, 'replace').decode(encoding) == \"?\" and value in self.trans:\n assert value != self.trans[value], \"%r == self.trans[%r]!\" % (value, value)\n value = self.trans[value]\n self.trans[char] = value" ]
[ "0.7602486", "0.7276023", "0.7271902", "0.7019935", "0.6963169", "0.6874722", "0.682965", "0.67585826", "0.66468215", "0.6607765", "0.6562943", "0.6530856", "0.64882517", "0.6410148", "0.640093", "0.63787353", "0.628986", "0.6249889", "0.6244368", "0.6241801", "0.61995006", "0.6197868", "0.61901504", "0.6157522", "0.6150554", "0.61498815", "0.6148272", "0.61392754", "0.6115296", "0.6100756", "0.6098456", "0.6063924", "0.6053002", "0.6048332", "0.6044439", "0.60411775", "0.60292566", "0.6028794", "0.60163367", "0.60038483", "0.5995028", "0.59868675", "0.5985984", "0.5976899", "0.59378755", "0.58994675", "0.58891433", "0.58775276", "0.5876529", "0.5872697", "0.5870476", "0.5869475", "0.58613825", "0.5861359", "0.5853602", "0.58514845", "0.5836595", "0.58364606", "0.5822983", "0.57900286", "0.5779874", "0.57796484", "0.57770646", "0.57746464", "0.57642907", "0.5758675", "0.57546896", "0.57479274", "0.5741631", "0.5733694", "0.5722566", "0.57162964", "0.57131696", "0.570947", "0.5665182", "0.5662729", "0.56627244", "0.5662262", "0.56597984", "0.5656646", "0.56509185", "0.56489617", "0.5645384", "0.56336474", "0.56230253", "0.5619114", "0.5618494", "0.56115144", "0.5603867", "0.55914897", "0.55914897", "0.5590722", "0.5577751", "0.55733824", "0.5565884", "0.5562279", "0.5557774", "0.555422", "0.55521566", "0.5551484" ]
0.6086533
31
Removes any occurrence of digits from the text
def _remove_digits(self, text: str) -> str: return re.sub(r"\d+", " ", str(text))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_digits(self, text):\n return re.sub('\\d+', '', text)", "def remove_numbers(text):\n return re.sub(r'\\d+', '',text)", "def remove_numbers(text):\n result = re.sub(r'\\d+', '', text)\n return result", "def remove_digits(text):\n return re.sub(r'[\\d]', '', text)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_numbers(text):\n return ''.join([i for i in text if not i.isdigit()])", "def remove_numbers_fun(self):\n self.doc = re.sub(\"[0-9]\", \"\", self.doc)", "def remove_words_digits(text):\n return \" \".join([word for word in str(text).split() if not any(c.isdigit() for c in word) ])", "def remove_free_digits(text):\n return RegexFilters.replace_free_digits(text, \" \")", "def _remove_digits(text: str) -> str:\n table = str.maketrans('', '', digits)\n\n return text.translate(table)", "def clean_numbers(text):\n return regex.sub(\"\\d+\", ' NUM', text)", "def sanitize_text(text):\n return re.sub(r\"\\d+\", \"\", text)", "def _remove_digit_blocks(self, text: str) -> str:\n return re.sub(r\"\\b\\d+\\b\", \" \", str(text))", "def remove_flight_numbers(text):\n return ' '.join(word for word in text.split() if not any(char.isdigit() for char in word))", "def strip_numbers(text):\n if text is np.nan:\n return text\n regex = re.compile(r\"-?\\d+\")\n return re.sub(regex, \"\", text)", "def replace_digits(text):\n text = re.sub(r\"\\d+\", \"number\", text)\n \n return text", "def removeNumbers(self, words):\n\t\treturn re.sub(r'\\d', '', words)", "def remove_numbers(self, doc):\n regex = re.compile('[%s]' % re.escape(self.numbers))\n return regex.sub('', doc)", "def strip_non_digits(x: str) -> str:\n exp = re.compile(\"[^\\d]+\")\n return re.sub(exp, \"\", x)", "def replace_digits(text):\n text = re.sub('[0-9]', '5', text)\n return text", "def clean_numbers(self, x):\n\n # remove \"th\" after a number\n matches = re.findall(r'\\b\\d+\\s*th\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*th\\b', \" \", x)\n\n # remove \"rd\" after a number\n matches = re.findall(r'\\b\\d+\\s*rd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*rd\\b', \" \", x)\n\n # remove \"st\" after a number\n matches = re.findall(r'\\b\\d+\\s*st\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*st\\b', \" \", x)\n\n # remove \"nd\" after a number\n matches = re.findall(r'\\b\\d+\\s*nd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*nd\\b', \" \", x)\n\n # replace standalone numbers higher than 10 by #\n # this function does not touch numbers linked to words like \"G-20\"\n matches = re.findall(r'^\\d+\\s+|\\s+\\d+\\s+|\\s+\\d+$', x)\n if len(matches) != 0:\n x = re.sub('^[0-9]{5,}\\s+|\\s+[0-9]{5,}\\s+|\\s+[0-9]{5,}$', ' ##### ', x)\n x = re.sub('^[0-9]{4}\\s+|\\s+[0-9]{4}\\s+|\\s+[0-9]{4}$', ' #### ', x)\n x = re.sub('^[0-9]{3}\\s+|\\s+[0-9]{3}\\s+|\\s+[0-9]{3}$', ' ### ', x)\n x = re.sub('^[0-9]{2}\\s+|\\s+[0-9]{2}\\s+|\\s+[0-9]{2}$', ' ## ', x)\n # we do include the range from 1 to 10 as all word-vectors include them\n # x = re.sub('[0-9]{1}', '#', x)\n\n return x", "def keep_digits(x: str) -> str:\n return \"\".join([c for c in x if c.isdigit()]).strip()", "def strip_numbers(s):\n if s:\n s = u' '.join([x for x in s.split(' ') if not x.isdigit()])\n return s", "def strip_non_num(phone):\n return ''.join([i for i in phone if i.isdigit()])", "def only_numbers(text):\n text = text.lower()\n text = re.sub(r'\\d+', '', text)\n 
text = remove_special_characters(text)\n text = text.strip()\n text = re.sub(r' ', '', text)\n return len(text)", "def integers_only(text) -> str:\n return ''.join(x for x in text if x.isdigit())", "def collapse_numbers(text: str):\n groups = re.findall(r\"[\\d|\\s]{1,}\", text)\n\n results = list()\n for numbers in groups:\n squashed = squash(numbers)\n if squashed != \"\":\n results.append(squashed)\n\n return results", "def replace_numbers(words):\n p = inflect.engine()\n remove_numbers = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n remove_numbers.append(new_word)\n else:\n remove_numbers.append(word)\n return remove_numbers", "def strip_leading_chars(val):\n for i, c in enumerate(val):\n if c in \"0123456789.\":\n return val[i:]\n return \"\"", "def get_nummeric_only(text):\n\n nummeric_string =\"\"\n \n for character in text:\n if character.isnumeric():\n \n nummeric_string+=character\n \n return nummeric_string", "def tweet_clean_numbers(word):\n if not re.search(r'[0-9]+', word):\n return word\n if len(word)==4 and re.search(r'[0-9]{4}', word) and 1900 < int(word) < 2019:\n return word\n word = re.sub(r'^([0-9]|[\\+\\-%/\\*\\.:])+[0-9%/\\+\\*\\.x:]*$', '<number>', word)\n return word", "def remove_numbers(self, docs):\n \n new_docs = []\n \n for text in docs:\n \n text = re.sub(r'\\b\\d+\\b',' ',text)\n text = re.sub(r'\\s+',' ',text)\n \n new_docs.append(text)\n\n return pd.Series(new_docs)", "def cleanup(text):\n\n\tRE_D = re.compile('\\d')\n\n\ttokens = text.split()\n\tnew_tokens = list()\n\tfor t in tokens:\n\t\tif RE_D.search(t):\n\t\t\tcontinue\n\t\tfor p in string.punctuation:\n\t\t\tif p == \".\":\n\t\t\t\tcontinue\n\t\t\tt=t.replace(p,\"\")\n\t\tnew_tokens.append(t.lower().strip())\n\n\treturn \" \".join(new_tokens)", "def strip(phone):\n return re.sub('\\D', '', Phone.normalize(phone))", "def cleanInteger(number):\n \n number = str(number).replace(' ', '')\n \n test = number\n for i in range(10):\n test = test.replace(str(i), '')\n \n if test:\n return None\n \n return number", "def clean_text(self, num='substitute'):\n for i, doc in enumerate(self.documents):\n if num is 'spell':\n doc = doc.replace('0', ' zero ')\n doc = doc.replace('1', ' one ')\n doc = doc.replace('2', ' two ')\n doc = doc.replace('3', ' three ')\n doc = doc.replace('4', ' four ')\n doc = doc.replace('5', ' five ')\n doc = doc.replace('6', ' six ')\n doc = doc.replace('7', ' seven ')\n doc = doc.replace('8', ' eight ')\n doc = doc.replace('9', ' nine ')\n elif num is 'substitute':\n doc = re.sub('(\\\\d+)', ' NUM ', doc)\n elif num is 'remove':\n doc = re.sub('[0-9]', ' ', doc)\n doc = doc.replace('$', ' dollar ')\n doc = doc.lower()\n doc = re.sub('[^a-z]', ' ', doc)\n doc = ' '.join(doc.split())\n self.documents[i] = doc", "def replace_numbers(text, replace_with=\"_NUMBER_\"):\n return RE_NUMBER.sub(replace_with, text)", "def remove_punct(self,text):", "def remove_tokens_with_letters_and_numbers(text_list):\n for i in range(len(text_list)):\n t = text_list[i]\n if any(c.isdigit() for c in t) and any(c.isalpha() for c in t):\n text_list[i] = REPLACEMENT_STRING\n return text_list", "def clear_punctuation(document):\n return re.sub(r'\\D', '', str(document))", "def __cleanText(self,stripNonAlphaNumeric=False, stripNumbers=False):\n if stripNonAlphaNumeric:\n txt = r1.sub(\" \",self.getRawText() )\n else:\n txt = self.getRawText()\n # clean up white spaces\n txt = r2.sub(\" \",txt)\n if stripNumbers:\n txt = r3.sub(\"\",txt)\n self.graph[\"__txt\"] = txt\n 
self.graph[\"__scope\"] = (0,len(txt))", "def remove_first_digits(text_path):\n\n out = ''\n\n for i, t in enumerate(text_path):\n \n if t == '_':\n out = text_path[i+1:]\n break\n \n return out", "def compact(number):\n return clean(number, ' -./,').strip()", "def replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return ' '.join(new_words)", "def replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return ' '.join(new_words)", "def clean(number):\n digits = [c for c in number if c.isdigit()]\n if len(digits) == 11 and digits[0] == \"1\":\n return ''.join(digits[1:])\n elif len(digits) != 10:\n return \"0000000000\"\n else:\n return ''.join(digits)", "def remove_special_characters(text, remove_digits=False):\n pattern = r'[^a-zA-z0-9\\s]' if not remove_digits else r'[^a-zA-z\\s]'\n text = re.sub(pattern, '', text)\n return text", "def _replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n try:\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n except:\n pass\n else:\n new_words.append(word)\n return new_words", "def onlynumbers(name):\n return re.sub(\"[a-zA-Z:]\\(\\)\\:\",\"\",name)", "def replace_any_non_letter_or_number_character(text):\n text = text.strip()\n text = re.sub('[^A-Za-z0-9 ]+', '', text)\n return text", "def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")", "def remove_digit(self, values, box, digit):\n values[box] = values[box].replace(digit, '')\n return values", "def clean_num(quote):\n for char in ROMAN:\n quote = quote.replace(*char)\n return quote", "def compact(number):\n return clean(number, ' -').strip()", "def stripword( s ) :\n return re.sub( '[\\W\\d]', '', s )", "def find_only_numbers(detected_message_with_numbers):\n detected_message_only_numbers = re.sub(r\"[^\\d \\._]\", \"\", detected_message_with_numbers)\n return \" \".join(split_words(detected_message_only_numbers, only_unique=True))", "def replace_numbers(words):\r\n p = inflect.engine()\r\n new_words = []\r\n for word in words:\r\n if word.isdigit():\r\n new_word = p.number_to_words(word)\r\n new_words.append(new_word)\r\n else:\r\n new_words.append(word)\r\n return new_words", "def replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return new_words", "def replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return new_words", "def replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return new_words", "def replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return new_words", "def replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if 
word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return new_words", "def replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return new_words", "def replace_numbers(words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return new_words", "def clean_text(text):\n text = text.replace(\"\\uf0b7\", \" \")\n text = text.replace(\":\", \" \")\n text = text.replace(\".\", \" \")\n text = text.replace(\",\", \" \")\n text = text.replace(\"/\", \" \")\n text = text.replace(\"(\", \" \")\n text = text.replace(\")\", \" \")\n text = text.replace(\"[\", \" \")\n text = text.replace(\"]\", \" \")\n text = text.replace(\"+\", \" \")\n text = text.replace(\"?\", \" \")\n text = text.replace(\"*\", \" \")\n text = text.replace(\"#\", \" \")\n\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n\n text = re.sub(\" $\", \"\", text)\n return text", "def remove_repeating_characters(text):\n return RegexFilters.replace_repeating_characters(text, \"$1\")", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def _replace_numbers(self, words):\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return new_words", "def clean(text):\n\n # removing paragraph numbers\n text = re.sub('[0-9]+.\\t', '', str(text))\n # removing new line characters\n text = re.sub('\\n ', ' ', str(text))\n text = re.sub('\\n', ' ', str(text))\n # removing apostrophes\n text = re.sub(\"'s\", '', str(text))\n # removing hyphens\n text = re.sub(\"-\", '', str(text))\n text = re.sub(\"— \", '', str(text))\n # removing quotation marks\n text = re.sub('\\\"', '', str(text))\n # removing salutations\n text = re.sub(\"Mr\\.\", 'Mr', str(text))\n text = re.sub(\"Mrs\\.\", 'Mrs', str(text))\n # removing any reference to outside text\n text = re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", str(text))\n\n return text", "def remove_nonalpha(text):\n text = ''.join(c for c in text if c.isalpha() or c == ' ')\n return re.sub(\" +\", \" \", text)", "def retain_alpha_nums(s):\n return re.sub(r'[^a-zA-Z0-9]', ' ', s)", "def trimAlphaNum(self, value):\n\n while value and value[-1].isalnum():\n value = value[:-1]\n\n while value and value[0].isalnum():\n value = value[1:]\n\n return value", "def stripname(name, stripnums = True):\n\tfor pattern in removestuffregex:\n\t\tname = re.sub(pattern, \"\", name)\n\tif stripnums:\n\t\tname = re.sub(numberregex, \"\", name)\n\tfor pattern in removestuff:\n\t\tname = name.replace(pattern, \"\")\n\treturn name", "def cleanText(markup,stripNonAlphaNumeric=False, stripNumbers=False):\n markupNew = markup.copy()\n if( stripNonAlphaNumeric ):\n txt = r1.sub(\" \",markupNew.getRawText() )\n else:\n txt = markupNew.getRawText()\n\n # clean up white spaces\n txt = r2.sub(\" \",txt)\n if( stripNumbers ):\n txt = r3.sub(\"\",txt)\n\n markupNew.graph[\"__txt\"] = txt\n if( markupNew.getVerbose() ):\n print u\"cleaned text is now\",markupNew.getText()\n return markupNew", "def compact(number):\n return clean(number, ' -.').upper().strip()", "def sanitize(value: str) -> str:\n return str(re.sub(r'\\d+', 
'<counter>', value))", "def page_number_remover(file: str) -> str:\n with open(file, 'r') as f:\n content = f.read()\n content_new = page_number_RE.sub(repl='', string=content)\n return content_new", "def remove_non_alpha(self,text):\n \n removelist=\"-\\.\\/\\?\\@\"\n re_alpha_numeric1=r\"[^0-9a-zA-Z\"+removelist+\" ]\"\n clean_text=re.sub(re_alpha_numeric1,'',text)\n clean_text=clean_text.replace('/',' ')\n clean_text=re.sub(' +', ' ', clean_text)\n return clean_text", "def _removeRepetitions(s, encod='utf-8'): \n if not isinstance(s, unicode):\n s = unicode(s, encod,'replace')\n \n # Remove additional caracters \n s = re.sub(r'(\\w)\\1{2,100}', r'\\1', s) \n # Remove additional white spaces \n s = re.sub( '\\s+', ' ', s ).strip() \n \n return s", "def _fix_surprising_number(val, s):\n if (\n isinstance(val, (int, float)) and \"!!\" not in s\n and _contains_non_numeric_chars(s)\n ):\n return s\n return val", "def remove_phone(body):\r\n phone = re.compile('[0-9]{7}|[0-9]{3}[\\- ][0-9]{3}[\\- ][0-9]{4}|[0-9]{10}|\\([0-9]{3}\\)[\\- ][0-9]{3}[\\- ][0-9]{4}')\r\n body = re.sub(phone, 'phone', body)\r\n return body", "def normalize_text(text,pad_punc='!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~',remove_punc='!\"#$%&\\'()*+,-/:;<=>?@[\\\\]^_`{|}~',remove_number='[0-9]',chars=False):\n punc_spaces = re.compile('([%s])' % re.escape(pad_punc))\n punc = re.compile('[%s]' % re.escape(remove_punc))\n text = text.lower()\n if chars:\n text = re.sub(punc,'',text)\n else:\n text = re.sub('\\.{3,}',' dots',text)\n text = re.sub(punc_spaces, r' \\1 ', text)\n text = re.sub(remove_number,'',text)\n text = re.sub(punc,'',text)\n text = re.sub(r'\\b((?![ai])[a-z])\\b','',text)\n text = re.sub('\\s{2,}', ' ', text)\n text = re.sub('\\n', ' ', text)\n text = re.sub('\\t', ' ', text)\n text=text.strip()\n \n return text", "def replace_numbers(words: list) -> list:\n p = inflect.engine()\n new_words = []\n for word in words:\n if word.isdigit():\n new_word = p.number_to_words(word)\n new_words.append(new_word)\n else:\n new_words.append(word)\n return new_words", "def replace_spelled_numbers(sentence):\n def try_spelled_num_to_digits(text):\n try:\n return spelled_num_to_digits(text)\n except NumberError:\n return text\n return SPELLED_NUMBER_RE.sub(\n lambda m: str(try_spelled_num_to_digits(m.group())), sentence)", "def cleaning(string):\n\n if type(string) == float or type(string) == int:\n return string\n res = ''\n if string != string:\n return string\n string = string.replace(\"\\\\r\", \"\")\n string = string.replace(\"\\\\n\", \"\")\n string = string.replace(\"\\\\b\", \"\")\n string = string.replace(\"\\\\t\", \"\")\n for i in string:\n if i.isalpha():\n res = res + i\n return res.lower()", "def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data", "def cleanup(text):\n with open(text, 'r') as uncleaned_text:\n no_chapters = re.sub('[A-Z]{3,}', ' ', uncleaned_text.read())\n remove_periods = re.sub('(\\s\\.){4,}', '', no_chapters)\n new_text = re.sub('\\*', '', remove_periods)\n return new_text", "def remove_inner_word_characters(text):\n return RegexFilters.replace_inner_word_characters(text, \"\")", "def clean_training_text(txt):\n return re.sub('[^A-Za-z0-9]+', ' ', str(txt)).strip()", "def cleanText(self, stripNonAlphaNumeric=False, stripNumod_byers=False):\n if stripNonAlphaNumeric:\n txt = REG_CLEAN1.sub(\" \", self.getRawText())\n else:\n txt = self.getRawText()\n\n # clean up white spaces\n txt = REG_CLEAN2.sub(\" \", txt)\n if stripNumod_byers:\n txt = 
REG_CLEAN3.sub(\"\", txt)\n\n self.graph[\"__scope\"] = (0, len(txt))\n self.graph[\"__txt\"] = txt\n if self.getVerbose():\n print(\"cleaned text is now\", self.getText())", "def clean_phone(number):\n numberlist = re.findall(\"\\d\",number)\n new_number = \"\".join(numberlist)\n if len(new_number) == 8:\n \tnew_number = \"010\" + new_number\n\tnew_number = new_number[-11:]\n\tif new_number.startswith('1'):\n\t\tnew_number = \"+86-\" + new_number\n\telse:\n\t\tnew_number = \"+86-10-\" + new_number[-8:]\n\treturn new_number", "def sanitize_text(text: str) -> str:\n for r in [RE_NOISE, RE_EMAIL, RE_REFERENCE]:\n text = r.sub(\"\", text)\n return text", "def digits_only(self, mystring):\r\n result = \"\"\r\n for ch in mystring:\r\n if ch.isdigit() or ch == '-':\r\n result += ch\r\n return result", "def non_letter_removal(text):\n return re.sub('[^a-zA-Z]', ' ', text)", "def replace_numbers(self, words):\n\t\tp = inflect.engine()\n\t\tnew_words = []\n\t\tfor word in words:\n\t\t\tif word.isdigit():\n\t\t\t\tnew_word = p.number_to_words(word)\n\t\t\t\tnew_words += new_word.split(' ')\n\t\t\telse:\n\t\t\t\tnew_words.append(word)\n\t\treturn new_words", "def removeNumbersLine(self, wordLines):\n\t\treturn self._doPerLine(wordLines, self.removeNumbers)", "def clean_isbn(isbn):\n digits = set(\"0123456789\")\n return [int(x if x in digits else 10) for x in isbn.translate(None, \" -\")]", "def __replace_negative_for_n__(self, text):\n # | - __replace_negative_for_n__\n lst = [pos for pos, char in enumerate(text) if char == \"n\"]\n\n for lett in lst:\n if text[lett + 1].isdigit() is True:\n text = text[:lett] + \"-\" + text[lett + 1:]\n\n return(text)\n # __|" ]
[ "0.8916652", "0.8736428", "0.8688471", "0.8684917", "0.8524612", "0.8524612", "0.8524612", "0.8419046", "0.82202196", "0.82007706", "0.8171787", "0.8140685", "0.8085484", "0.80677295", "0.7969214", "0.79090506", "0.7773248", "0.7762869", "0.76908314", "0.7537781", "0.7432999", "0.7341637", "0.72814894", "0.72429883", "0.7063063", "0.7021244", "0.7000969", "0.6927128", "0.68944126", "0.6878092", "0.68160224", "0.6740509", "0.66927546", "0.66792935", "0.6657428", "0.65412176", "0.6419793", "0.64118874", "0.6411115", "0.6385519", "0.6346063", "0.62931913", "0.6279979", "0.62798136", "0.627879", "0.62395674", "0.62395674", "0.6226036", "0.62099236", "0.6199024", "0.61903334", "0.6182626", "0.6152039", "0.61518145", "0.6136047", "0.61213595", "0.61081946", "0.609694", "0.6082203", "0.6075311", "0.6075311", "0.6075311", "0.6075311", "0.6075311", "0.6075311", "0.6075311", "0.60630506", "0.6058604", "0.60479784", "0.6039944", "0.6035757", "0.60265577", "0.60186094", "0.60140836", "0.6004707", "0.59470224", "0.5936303", "0.59167075", "0.59106225", "0.59066397", "0.59037787", "0.5896447", "0.58889115", "0.5883702", "0.5837743", "0.5836132", "0.5833643", "0.5829138", "0.58266246", "0.5823494", "0.58193797", "0.58089584", "0.5791173", "0.5772315", "0.57551146", "0.57462406", "0.5745288", "0.57440615", "0.57347864", "0.57319623" ]
0.8508615
7
Removes isolated blocks of digits
def _remove_digit_blocks(self, text: str) -> str: return re.sub(r"\b\d+\b", " ", str(text))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_free_digits(text):\n return RegexFilters.replace_free_digits(text, \" \")", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_nums(self, text):\r\n return text.translate(None, digits)", "def remove_digits(self, text):\n return re.sub('\\d+', '', text)", "def remove_digit(self, values, box, digit):\n values[box] = values[box].replace(digit, '')\n return values", "def remove_numbers(self, doc):\n regex = re.compile('[%s]' % re.escape(self.numbers))\n return regex.sub('', doc)", "def _remove_digits(text: str) -> str:\n table = str.maketrans('', '', digits)\n\n return text.translate(table)", "def _remove_digits(self, text: str) -> str:\n return re.sub(r\"\\d+\", \" \", str(text))", "def remove_numbers_fun(self):\n self.doc = re.sub(\"[0-9]\", \"\", self.doc)", "def remove_digits(text):\n return re.sub(r'[\\d]', '', text)", "def compact(number):\n return clean(number, ' -./,').strip()", "def keep_digits(x: str) -> str:\n return \"\".join([c for c in x if c.isdigit()]).strip()", "def compact(number):\n return clean(number, ' -').strip()", "def strip_non_digits(x: str) -> str:\n exp = re.compile(\"[^\\d]+\")\n return re.sub(exp, \"\", x)", "def strip_leading_chars(val):\n for i, c in enumerate(val):\n if c in \"0123456789.\":\n return val[i:]\n return \"\"", "def delete_first_zeros(digit_with_zeros): \n \n digit_without_zeros = \"\"\n\n snap = 1\n \n d = 0\n\n for d in digit_with_zeros:\n\n if d != \"0\":\n snap = 0\n if snap == 0:\n digit_without_zeros +=d\n \n return digit_without_zeros", "def remove_numbers(text):\n return re.sub(r'\\d+', '',text)", "def remove_numbers(text):\n result = re.sub(r'\\d+', '', text)\n return result", "def strip_numbers(text):\n if text is np.nan:\n return text\n regex = re.compile(r\"-?\\d+\")\n return re.sub(regex, \"\", text)", "def remove_numbers(text):\n return ''.join([i for i in text if not i.isdigit()])", "def remove_flight_numbers(text):\n return ' '.join(word for word in text.split() if not any(char.isdigit() for char in word))", "def clean_numbers(self, x):\n\n # remove \"th\" after a number\n matches = re.findall(r'\\b\\d+\\s*th\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*th\\b', \" \", x)\n\n # remove \"rd\" after a number\n matches = re.findall(r'\\b\\d+\\s*rd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*rd\\b', \" \", x)\n\n # remove \"st\" after a number\n matches = re.findall(r'\\b\\d+\\s*st\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*st\\b', \" \", x)\n\n # remove \"nd\" after a number\n matches = re.findall(r'\\b\\d+\\s*nd\\b', x)\n if len(matches) != 0:\n x = re.sub(r'\\s*nd\\b', \" \", x)\n\n # replace standalone numbers higher than 10 by #\n # this function does not touch numbers linked to words like \"G-20\"\n matches = re.findall(r'^\\d+\\s+|\\s+\\d+\\s+|\\s+\\d+$', x)\n if len(matches) != 0:\n x = re.sub('^[0-9]{5,}\\s+|\\s+[0-9]{5,}\\s+|\\s+[0-9]{5,}$', ' ##### ', x)\n x = re.sub('^[0-9]{4}\\s+|\\s+[0-9]{4}\\s+|\\s+[0-9]{4}$', ' #### ', x)\n x = re.sub('^[0-9]{3}\\s+|\\s+[0-9]{3}\\s+|\\s+[0-9]{3}$', ' ### ', x)\n x = re.sub('^[0-9]{2}\\s+|\\s+[0-9]{2}\\s+|\\s+[0-9]{2}$', ' ## ', x)\n # we do include the range from 1 to 10 as all word-vectors include them\n # x = re.sub('[0-9]{1}', '#', x)\n\n return x", "def removeNumbers(self, words):\n\t\treturn re.sub(r'\\d', '', words)", "def remove_letter(letter, strng):", "def strip(phone):\n return re.sub('\\D', '', Phone.normalize(phone))", "def 
strip_non_num(phone):\n return ''.join([i for i in phone if i.isdigit()])", "def remove_numbers(self):\n for i in range(len(self.board.board[0])):\n while self.board.board[i].count(0) < 6:\n random_val = random.randint(0, 8)\n self.board.update_board((i, random_val), 0)", "def collapse_numbers(text: str):\n groups = re.findall(r\"[\\d|\\s]{1,}\", text)\n\n results = list()\n for numbers in groups:\n squashed = squash(numbers)\n if squashed != \"\":\n results.append(squashed)\n\n return results", "def compact(number):\n return clean(number, ' -.').upper().strip()", "def remove_words_digits(text):\n return \" \".join([word for word in str(text).split() if not any(c.isdigit() for c in word) ])", "def clean_numbers(text):\n return regex.sub(\"\\d+\", ' NUM', text)", "def compact(number):\n number = clean(number, ' ').upper().strip()\n if number.startswith('AL'):\n number = number[2:]\n if number.startswith('(AL)'):\n number = number[4:]\n return number", "def remove_numbers_from_grid(self):\n #get all non-empty squares from the grid\n non_empty_squares = self.get_non_empty_squares(self.grid)\n non_empty_squares_count = len(non_empty_squares)\n rounds = 3\n while rounds > 0 and non_empty_squares_count >= 17:\n #there should be at least 17 clues\n row,col = non_empty_squares.pop()\n non_empty_squares_count -= 1\n #might need to put the square value back if there is more than one solution\n removed_square = self.grid[row][col]\n self.grid[row][col]=0\n #make a copy of the grid to solve\n grid_copy = copy.deepcopy(self.grid)\n #initialize solutions counter to zero\n self.counter=0\n self.solve_puzzle(grid_copy)\n #if there is more than one solution, put the last removed cell back into the grid\n if self.counter!=1:\n self.grid[row][col]=removed_square\n non_empty_squares_count += 1\n rounds -=1\n return", "def compact(number):\n number = clean(number).strip().replace(' ', '-').split('-')\n if len(number) == 4:\n # zero pad the different sections if they are found\n lengths = (2, 4, 7, 3)\n return ''.join(n.zfill(l) for n, l in zip(number, lengths))\n else:\n # otherwise zero pad the account type\n number = ''.join(number)\n return number[:13] + number[13:].zfill(3)", "def removeNumbersLine(self, wordLines):\n\t\treturn self._doPerLine(wordLines, self.removeNumbers)", "def scratch(line):\n if line.count('~~') >= 2:\n for i in range(0, line.count('~~') - line.count('~~') % 2):\n if i % 2 == 0:\n line = line.replace('~~', '<del>', 1)\n else:\n line = line.replace('~~', '</del>', 1)\n return line", "def clean(number):\n digits = [c for c in number if c.isdigit()]\n if len(digits) == 11 and digits[0] == \"1\":\n return ''.join(digits[1:])\n elif len(digits) != 10:\n return \"0000000000\"\n else:\n return ''.join(digits)", "def removeOneDigit(self, s:str, t:str) -> int:\n count = 0\n for i in range(len(s)):\n if s[:i] + s[i + 1:] > t:\n # print(s[:i] + s[i + 1:], t)\n count = count + 1\n for j in range(len(t)):\n if s > t[:j] + t[j + 1:]:\n # print(s, t[:j] + t[j + 1:])\n count = count + 1\n\n return count", "def _remove_area_code(phone):\n\n if not phone.startswith('+46'):\n return phone\n else:\n return '0' + phone[3:]", "def trim_decreasing_digits(self):\n vals_to_del = defaultdict(list)\n for key in self.Poss_Tree:\n for choice in self.Poss_Tree[key]:\n if choice < int(str(key)[-1]):\n vals_to_del[key].append(choice)\n for key in vals_to_del:\n for val in vals_to_del[key]:\n self.Poss_Tree[key].remove(val)", "def clean_num(quote):\n for char in ROMAN:\n quote = quote.replace(*char)\n return quote", 
"def cleanInteger(number):\n \n number = str(number).replace(' ', '')\n \n test = number\n for i in range(10):\n test = test.replace(str(i), '')\n \n if test:\n return None\n \n return number", "def replace_digits(text):\n text = re.sub('[0-9]', '5', text)\n return text", "def remove_DL_RS(x):\n REPLACE_GROUP = r'[LSRD]{2}[ -]'\n x = strip_stoich_wrapper(x)\n x = re.sub(r'[LSRD]{2}[ -]', '', x)\n return x", "def clean_phone(number):\n numberlist = re.findall(\"\\d\",number)\n new_number = \"\".join(numberlist)\n if len(new_number) == 8:\n \tnew_number = \"010\" + new_number\n\tnew_number = new_number[-11:]\n\tif new_number.startswith('1'):\n\t\tnew_number = \"+86-\" + new_number\n\telse:\n\t\tnew_number = \"+86-10-\" + new_number[-8:]\n\treturn new_number", "def strip(self, x, y, number):\n return int('-' + str(int(x)) + str(int(y)) + str(number))", "def _removeRepetitions(s, encod='utf-8'): \n if not isinstance(s, unicode):\n s = unicode(s, encod,'replace')\n \n # Remove additional caracters \n s = re.sub(r'(\\w)\\1{2,100}', r'\\1', s) \n # Remove additional white spaces \n s = re.sub( '\\s+', ' ', s ).strip() \n \n return s", "def clear_numbers(self):\r\n self.numbers.clear()", "def sanitize_text(text):\n return re.sub(r\"\\d+\", \"\", text)", "def clear_bpbynumber(self, arg):\n try:\n bp = self.get_bpbynumber(arg)\n except ValueError as err:\n return str(err)\n bp.deleteMe()\n self._prune_breaks(bp.file, bp.line)\n return None", "def clean_number_plate(self, vrn):\n cleaned = re.sub(r'[^\\dA-Z]', '', vrn)\n\n if re.match(r'^[A-Z]{2}', cleaned) and len(cleaned) == 7:\n if cleaned[2] == 'O':\n cleaned = cleaned[:2] + '0' + cleaned[3:]\n if cleaned[2] == 'I':\n cleaned = cleaned[:2] + '1' + cleaned[3:]\n if cleaned[3] == 'O':\n cleaned = cleaned[:3] + '0' + cleaned[4:]\n if cleaned[3] == 'I':\n cleaned = cleaned[:3] + '1' + cleaned[4:]\n\n if re.match(r'^B', cleaned) and len(cleaned) == 7:\n if cleaned[1] == 'O':\n cleaned = cleaned[:1] + '0' + cleaned[2:]\n if cleaned[1] == 'I':\n cleaned = cleaned[:1] + '1' + cleaned[2:]\n if cleaned[2] == 'O':\n cleaned = cleaned[:2] + '0' + cleaned[3:]\n if cleaned[2] == 'I':\n cleaned = cleaned[:2] + '1' + cleaned[3:]\n if cleaned[3] == 'O':\n cleaned = cleaned[:3] + '0' + cleaned[4:]\n if cleaned[3] == 'I':\n cleaned = cleaned[:3] + '1' + cleaned[4:]\n\n if re.match(r'^[A-Z]{2}', cleaned) and len(cleaned) == 8:\n if cleaned[0] == 'Y':\n cleaned = 'V' + cleaned[1:]\n if cleaned[1] == 'Y':\n cleaned = cleaned[0] + 'V' + cleaned[2:]\n\n return cleaned", "def remove_mask_when_empty(self, text):\n if text in ['()-', '.-', '..-']:\n return ''\n else:\n return text", "def strip_numbers(s):\n if s:\n s = u' '.join([x for x in s.split(' ') if not x.isdigit()])\n return s", "def del_decim_zeros(num):\r\n \r\n if \".\" in num: \r\n \r\n num_zeros = num_zeros_end(num) \r\n\r\n if num_zeros != 0: \r\n num = num[:-num_zeros] \r\n \r\n if num[-1] == \".\": \r\n num = num[:-1]\r\n \r\n return num\r\n\r\n else:\r\n return num", "def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n # values[peer] = values[peer].replace(digit, '')\n new_value = values[peer].replace(digit, '')\n assign_value(values, peer, new_value)\n return values", "def _chop_end_codes(line):\n return re.sub(r\"\\s\\s\\s\\s+[\\w]{4}.\\s+\\d*\\Z\", \"\", line)", "def __remove_line_numbers(file_contents: str) -> str:\n\n spaces = ' ' * 6\n result = ''\n\n for line in 
file_contents.splitlines():\n new_line = spaces + line[6:72].rstrip()\n result += new_line + '\\n'\n\n return result", "def clean_isbn(isbn):\n digits = set(\"0123456789\")\n return [int(x if x in digits else 10) for x in isbn.translate(None, \" -\")]", "def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n values[peer] = values[peer].replace(digit,'')\n return values", "def eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n digit = values[box]\n for peer in peers[box]:\n values[peer] = values[peer].replace(digit,'')\n return values", "def safe_number(self):\n mask = '*' * (len(self.card_number) - 4)\n return '{0}{1}'.format(mask, self.card_number[-4:])", "def clean_posiResNums(self) -> None:\n position_copy = self.POSITION\n pos = position_copy.content\n tmpN = \"\"\n tmpID = 0\n tmpOldID = pos[0].resID\n\n for p in pos:\n # print(p)\n # print(tmpN,tmpID)\n if p.resName == tmpN and p.resID == tmpOldID: # same residue as before\n p.resID = tmpID\n elif (\n p.resName == tmpN and p.resID != tmpOldID): # same resiname but diff ID (double? - this is a problem!)\n tmpOldID = p.resID\n tmpID += 1\n p.resID = tmpID\n else: # next name and residue id\n tmpID += 1\n tmpN = p.resName\n tmpOldID = p.resID\n p.resID = tmpID\n\n self.POSITION.content = pos", "def zzx_strip(f):\n if not f or f[0]:\n return f\n\n k = 0\n\n for coeff in f:\n if coeff:\n break\n else:\n k += 1\n\n return f[k:]", "def replace_digits(text):\n text = re.sub(r\"\\d+\", \"number\", text)\n \n return text", "def stripname(name, stripnums = True):\n\tfor pattern in removestuffregex:\n\t\tname = re.sub(pattern, \"\", name)\n\tif stripnums:\n\t\tname = re.sub(numberregex, \"\", name)\n\tfor pattern in removestuff:\n\t\tname = name.replace(pattern, \"\")\n\treturn name", "def onlynumbers(name):\n return re.sub(\"[a-zA-Z:]\\(\\)\\:\",\"\",name)", "def _clean_subtable(chunk):\n chunk = re.sub(r',Breakout', ',5000%', chunk)\n chunk = re.sub(r'(,[+-]?[1-4]),(\\d{3}%\\n)', r'\\1\\2', chunk)\n return chunk", "def remove(self,values):\n for box, value in values.items():\n if len(value) == 1:\n for peer in self.peers[box]:\n values = self.remove_digit(values, peer, value)\n return values", "def remove_repeating_char(self, text):\n return re.sub(r'(.)\\2+', r'\\1', text)", "def eliminate(self, values, square, dig):\n if dig not in values[square]:\n return values\n values[square] = values[square].replace(dig, '')\n if len(values[square]) == 0:\n return False\n elif len(values[square]) == 1:\n other_digit = values[square]\n if not all(self.eliminate(values, other_square, other_digit) \\\n for other_square in self.peers[square]):\n return False\n\n return values", "def safe_number(self):\n mask = '*' * (len(self.account_number) - 4)\n return '{0}{1}'.format(mask, self.account_number[-4:])", "def _chop_end_misc(line):\n return re.sub(r\"\\s+\\d\\d-\\w\\w\\w-\\d\\d\\s+[1-9][0-9A-Z]{3}\\s*\\Z\", \"\", line)", "def _hide_numbers(self):\n global counter\n\n # num of attempts allow for more blocks to be removed\n attempts = self._difficulty\n\n while attempts > 0:\n # selecting random cell and rotational counterpart\n row = randint(0, 8)\n col = randint(0, 8)\n while self._grid_init[row][col] == 0:\n row = randint(0, 8)\n col = randint(0, 8)\n\n # backing up in case removal is gives multiple solutions\n backupone = self._grid_init[row][col]\n backuptwo = 
self._grid_init[8 - row][8 - col]\n self._grid_init[row][col] = 0\n self._grid_init[8 - row][8 - col] = 0\n\n # cloning grid to test number of solutions\n test_puzzle = []\n for r in range(0, 9):\n test_puzzle.append(self._grid_init[r][:])\n\n # counter for num solutions is set to 0\n counter = 0\n\n # check num of solutions\n self._solve_puzzle(test_puzzle)\n\n # if num of solutions is not one, replace the two blocks\n if counter != 1:\n self._grid_init[row][col] = backupone\n self._grid_init[8 - row][8 - col] = backuptwo\n attempts -= 1", "def clean_puzzle(puzzle):\n output = ''\n for val in puzzle.values():\n if val == '':\n output += '.'\n elif int(val) in range(1, 10):\n output += val\n return output", "def remove_citations(text: str) -> str:\n text = re.sub(\"\\[[a-zA-Z]\\]\", \"\", text)\n return re.sub(r\"\\[(\\s|\\w)*\\d+(\\s|\\w)*\\]\", \"\", text)", "def remove_zeroes(s: str) -> str:\n tup = s.split(\"e\")\n if len(tup) == 2:\n mantissa = tup[0].rstrip(\"0\").rstrip(\".\")\n exponent = int(tup[1])\n if exponent:\n s = \"%se%d\" % (mantissa, exponent)\n else:\n s = mantissa\n return s", "def strip_zeros(a):\n\n return np.trim_zeros(a, trim='b')", "def remove_plus_minus(x):\n x = strip_stoich_wrapper(x)\n REPLACE_GROUP = r'([LSRD])[ -](\\([+-]\\))'\n # Adding in the r'\\1' ensures that we keep the L component, sub the rest\n new_x = re.sub(REPLACE_GROUP, r'\\1', x)\n return new_x", "def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data", "def strip_stoich(mol: str) -> Tuple[int, str]:\n # First, replace the number in the beginnin\n # check for multiple substrates or products, e.g. 2 NAD+\n stoich_coeff = re.search(STOICH_RE, mol)\n if stoich_coeff:\n stoich_coeff = int(stoich_coeff.group()[0].strip())\n mol = re.sub(STOICH_RE, '', mol)\n else:\n stoich_coeff = 1\n return stoich_coeff, mol", "def clean(c):", "def remove_4s_every_other_in_between(seq):\n seq_copy = seq [4:-4:2]\n return seq_copy", "def strip_space():\n pass", "def __cleanText(self,stripNonAlphaNumeric=False, stripNumbers=False):\n if stripNonAlphaNumeric:\n txt = r1.sub(\" \",self.getRawText() )\n else:\n txt = self.getRawText()\n # clean up white spaces\n txt = r2.sub(\" \",txt)\n if stripNumbers:\n txt = r3.sub(\"\",txt)\n self.graph[\"__txt\"] = txt\n self.graph[\"__scope\"] = (0,len(txt))", "def remove_padding(self, data):\n pad_len = ord(data[-1])\n return data[:-pad_len]", "def __unpad(self, data):\n return data[0:-ord(data[-1])]", "def strip_other_charcter():\n pass", "def ungapped(self):\n s = self.sequence\n for sGapChar in GAP_CHARACTERS:\n s = s.replace(sGapChar, '')\n return s", "def delete_num(self, num):\r\n saved = task2.ListADT()\r\n saved.append(\"d\")\r\n if num == \"\":\r\n saved.append(0)\r\n for line_num in range(len(self.text_lines)):\r\n saved.append(self.text_lines[0])\r\n self.text_lines.delete(0)\r\n else:\r\n num = int(num)\r\n if num == 0:\r\n raise ValueError(\"Zero is not a valid line number\")\r\n elif num > 0:\r\n num -= 1\r\n saved.append(num)\r\n saved.append(self.text_lines[num])\r\n self.text_lines.delete(num)\r\n self.memory.push(saved)", "def remove_repeating_characters(text):\n return RegexFilters.replace_repeating_characters(text, \"$1\")", "def test_strip_degenerate(self):\n self.assertEqual(self.RNA(\"UCAG-\").strip_degenerate(), \"UCAG-\")\n self.assertEqual(self.RNA(\"NRYSW\").strip_degenerate(), \"\")\n self.assertEqual(self.RNA(\"USNG\").strip_degenerate(), \"UG\")", "def _fix_surprising_number(val, s):\n if (\n isinstance(val, (int, 
float)) and \"!!\" not in s\n and _contains_non_numeric_chars(s)\n ):\n return s\n return val", "def unpad(plain):\n return plain[:-ord(plain[-1])]", "def remove_numbers(self, docs):\n \n new_docs = []\n \n for text in docs:\n \n text = re.sub(r'\\b\\d+\\b',' ',text)\n text = re.sub(r'\\s+',' ',text)\n \n new_docs.append(text)\n\n return pd.Series(new_docs)", "def rstrip(self) -> String:\n pass", "def pkcs5_unpad(self,s):\n return s[0:-ord(s[-1])]", "def _removeIndent(self, block, count=1):\n return re.compile(r\"^%s\" % \" \" * count, re.M).sub(\"\", block)", "def remove_phone(body):\r\n phone = re.compile('[0-9]{7}|[0-9]{3}[\\- ][0-9]{3}[\\- ][0-9]{4}|[0-9]{10}|\\([0-9]{3}\\)[\\- ][0-9]{3}[\\- ][0-9]{4}')\r\n body = re.sub(phone, 'phone', body)\r\n return body", "def tidy(number):\n return np.around(number, 2)" ]
[ "0.7002717", "0.6815302", "0.6815302", "0.6815302", "0.6682333", "0.6597962", "0.6564372", "0.6525351", "0.6459419", "0.6430528", "0.6379692", "0.63564974", "0.6303089", "0.6271742", "0.6265842", "0.6203773", "0.6179784", "0.6123884", "0.6112976", "0.610751", "0.6047446", "0.60082716", "0.6007147", "0.59223837", "0.5857433", "0.58574295", "0.58335686", "0.5807342", "0.58047295", "0.580168", "0.5786732", "0.5719731", "0.5713579", "0.56752396", "0.56716007", "0.5651356", "0.56391597", "0.5635632", "0.5587494", "0.5574101", "0.5564009", "0.5529222", "0.5525492", "0.55184704", "0.5503484", "0.55010927", "0.5500569", "0.54773015", "0.5474191", "0.54570585", "0.54495466", "0.5448377", "0.5447089", "0.5439863", "0.5430112", "0.54172194", "0.53810984", "0.5373443", "0.53568965", "0.5333412", "0.5333412", "0.5327249", "0.5315946", "0.53159213", "0.530326", "0.5295002", "0.5291244", "0.5274106", "0.52471906", "0.5243635", "0.5243407", "0.5232287", "0.52008426", "0.51552224", "0.5151913", "0.51327664", "0.51320356", "0.51301634", "0.51286435", "0.51229805", "0.5100598", "0.5099697", "0.5090663", "0.5089672", "0.50857663", "0.5082613", "0.50807935", "0.50803226", "0.5080155", "0.5076454", "0.50702614", "0.5061066", "0.5057244", "0.50545394", "0.505322", "0.50504726", "0.5045436", "0.5031031", "0.5028135", "0.5017331" ]
0.7632309
0
Removes special characters as defined by the pattern in self.special_chars_pattern
def _remove_special_chars(self, text: str) -> str: pattern = re.compile(self.special_chars_pattern) text = re.sub(pattern, " ", text) return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def remove_special_characters(text, remove_digits=False):\n pattern = r'[^a-zA-z0-9\\s]' if not remove_digits else r'[^a-zA-z\\s]'\n text = re.sub(pattern, '', text)\n return text", "def remove_special_chars(text):\n \n text = re.sub(' +', ' ', re.sub('[^A-Za-z ]+', ' ', text).strip())\n return text", "def remove_special_chars(text):\n schars = ''.join([a for a in string.punctuation if a not in \".,?\"])\n\n text = re.sub('[%s]' % re.escape(schars), '', text)\n return text", "def remove_special_characters_from_text(text) -> str:\n return re.sub(r'[^\\w\\s]', '', text.strip())", "def remove_special_characters(string_list):", "def _remove_custom_chars(self, text: str) -> str:\n patterns = \"|\".join([x for x in self.custom_chars])\n return re.sub(patterns, \"\", str(text), flags=re.IGNORECASE)", "def remove_special(s):\n return ansi_escape_chars.sub('', s)", "def remove_special_char(self,text):\n modified_text = re.sub(',|;|#,$','',text)\n return modified_text", "def remove_special_chars(s):\n stripped = re.sub('[^\\w\\s]', ' ', s)\n stripped = re.sub('_', ' ', stripped)\n\n # Make all whitespaces only one space\n stripped = re.sub('\\s+', ' ', stripped)\n\n stripped = stripped.strip()\n\n return stripped", "def _remove_special_chars(self, doc: str):\n processed_tweet = re.sub('[\\.,!#¡\\?¿%:;´\"@”“&()\\|]', '', doc)\n return processed_tweet", "def remove_special_characters(text):\n soup = BeautifulSoup(text, \"html.parser\")\n review = soup.get_text()\n review = r\"[^a-zA-z0-9\\s]\"\n review = re.sub(review, \"\", text)\n return review.lower()", "def remove_special_chars(sentence):\r\n result = re.sub(r\"[^a-zA-Z0-9.]+\", ' ', re.sub('\\.\\.+', ' ', sentence))\r\n return result", "def remove_specials(sentence):\n sentence = sentence.replace('-', ' ')\n sentence = re.sub(r'[^\\w\\s]', '', sentence)\n return sentence", "def remove_string_special_characters(s):\n stripped = re.sub('[^\\w\\s]', '', s)\n stripped = re.sub('_', '', stripped)\n stripped = re.sub('\\s+', ' ', stripped)\n stripped = stripped.strip()\n\n return stripped", "def remove_non_alpha(self,text):\n \n removelist=\"-\\.\\/\\?\\@\"\n re_alpha_numeric1=r\"[^0-9a-zA-Z\"+removelist+\" ]\"\n clean_text=re.sub(re_alpha_numeric1,'',text)\n clean_text=clean_text.replace('/',' ')\n clean_text=re.sub(' +', ' ', clean_text)\n return clean_text", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def _remove_special_chars(sentence, replace_with=\"\"):\n sentence = sentence.replace('\\n', replace_with).replace('\\t', replace_with)\n return sentence", "def handle_special_symbols(text: str\n ) -> str:\n valid_special_symbols = {' ', '_'}\n\n def criteria(c: str\n ) -> str:\n return c if c.isalnum() or c in valid_special_symbols else ' '\n\n return ''.join(criteria(c) for c in list(text))", "def sanitize(text):\n #text = re.sub(r'[*]',r'\\*',text) \n text = re.sub(r'~',r'\\~',text) \n #text = re.sub(r'<',r'\\textless',text) \n #text = re.sub(r'>',r'\\textgreater',text) \n text = re.sub(r'\\|',r'\\|',text) \n text = re.sub(r'_',r'\\\\_',text) \n return text", "def replace_special_chars(self, word):\n try:\n if (self.lang==\"tr\"):\n word = re.sub(u\"\\^db\", u\"+db\", word)\n word = re.sub(u\"\\^\", u\"¬\", word)\n word = re.sub(u\"\\$\", u\"£\", word)\n except UnicodeDecodeError:\n word = ''\n return word", "def remove_special_chars(self, text_list):\n return [self._remove_special_chars(text) for 
text in text_list]", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "def remove_special_tags(text):\n clean = re.compile('{.*?}')\n return re.sub(clean, '', text)", "def remove_non_alphabetic_text(text):\n return RegexFilters.replace_non_alphabetic_text(text, \"\")", "def _remove_bad_chars(self, expression):\n\n bad_chars = ['\"', \"'\", '/', ',', '.', '(', ')', '—', '&', ';', '$', '%', '‘', '’', '!', '?', '«', '»', '-', '<', '>',\n '+', '#', '|', ':', '_', '°', 'ª', 'º', '*', '{', '}', '[', ']']\n\n if isinstance(expression, str):\n for char in bad_chars:\n expression = expression.replace(char, ' ')\n elif isinstance(expression, list):\n expression = [token.replace(char, '') for char in bad_chars\n for token in expression]\n else:\n raise ValueError(f'expression must be a string or list. '\n 'type {type(expression)} was passed')\n\n return expression", "def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text", "def string_cleanup(s, garbage=\":,-()&\"):\n s_new = ''\n for x in s:\n if x not in garbage:\n s_new += x\n\n return s_new", "def test_special_characters(self):\n testString = sanitize('[-;]\\`{\\}')\n self.assertEqual(testString, '_________')", "def remove_special_chars(company_names):\n regex_remove_special_chars = '([\\.&,/\\'])'\n regex_replace_special_chars = '[-–]'\n regex_replace_multiple_spaces = '[\\s]{2,}'\n feature_as_list = remove_sub_string(regex_remove_special_chars, company_names, False)\n feature_as_list = remove_sub_string(regex_replace_special_chars, feature_as_list, False, \" \")\n feature_as_list = remove_sub_string(regex_replace_multiple_spaces, feature_as_list, False, \" \")\n return feature_as_list", "def _replace_non_alnum(self):\n no_punct = [x if x.isalnum() else ' ' for x in self._phrase.lower()]\n return ''.join(no_punct) # Convert an array of char to string", "def remove_punct(self,text):", "def replace_any_non_letter_or_number_character(text):\n text = text.strip()\n text = re.sub('[^A-Za-z0-9 ]+', '', text)\n return text", "def CLEAN(text):\n return _control_char_re.sub('', text)", "def non_letter_removal(text):\n return re.sub('[^a-zA-Z]', ' ', text)", "def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def replace_special(text):\r\n text = text.replace('\\r\\n', ' ')\r\n text = text.replace('\\n', ' ')\r\n text = text.replace('``', \"''\")\r\n text = text.replace('`', \"'\")\r\n text = text.replace('“', '\"')\r\n text = text.replace('”', '\"')\r\n text = text.replace('’', \"'\")\r\n text = text.replace('‘', \"'\")\r\n text = text.replace(\"'\", \"'\")\r\n text = text.replace('–', \"-\")\r\n text = text.replace('\\\"', '\"')\r\n text = text.replace(\"\\'\", \"'\")\r\n return text", "def remove_characters(tokens):\n pattern = re.compile('[{}]'.format(re.escape(string.punctuation)))\n no_char_tokens = filter(None, [pattern.sub('', token) for token in tokens])\n return no_char_tokens", 
"def remove_special_tokens(text: str, special_tokens: List[str]) -> str:\n token_idxs: Set[int] = set()\n for token in special_tokens:\n start_idx = text.find(token)\n assert start_idx != -1, f\"{token} not found in text.\"\n end_idx = start_idx + len(token)\n assert text[end_idx:].find(token) == -1, f\"{token} duplicated in text.\"\n idxs = range(start_idx, end_idx)\n overlap = token_idxs.intersection(idxs)\n assert len(overlap) == 0, f\"{token} overlaps another special token at {idxs}.\"\n token_idxs.update(idxs)\n text = \"\".join([c for i, c in enumerate(text) if i not in token_idxs])\n return text", "def get_clean_text(messy_text: str) -> str:\n new_text = \"\"\n replace = {\n \"*\": \"\\\"\",\n \"!\": \"?\",\n \"/\": ',',\n \"?\": \"!\"\n }\n remove = \"1234567890&@#$%^()_+|><~\"\n pls_do_upper = False\n for l in messy_text:\n if l in replace:\n new_text += replace[l]\n elif l not in remove:\n if pls_do_upper:\n new_text += l.upper()\n else:\n new_text += l\n return new_text", "def _clean_term(self, term):\n return filter(lambda char: char in allowed_chars, term)", "def remove_diacritics(self, text):\n text = re.sub(self._arabic_diacritics, '', text)\n return text", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def remove_punctuation_and_splchars(words):\n new_words = []\n for word in words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_word = remove_special_characters(new_word, True)\n new_words.append(new_word)\n return new_words", "def clean_word(self, word):\n return self.filter_pattern.sub(u'', word.lower())", "def Clean(s):\n for c in BAD_CHARACTERS:\n s = s.replace(c, '_')\n return s", "def remove_extra_characters(self, text):\n if text:\n parsed_text = text\n parsed_text = parsed_text.replace(\"[\", \"\")\n parsed_text = parsed_text.replace(\"]\", \"\")\n parsed_text = parsed_text.replace(\"{\", \"\")\n parsed_text = parsed_text.replace(\"}\", \"\")\n parsed_text = parsed_text.replace(\"|\", \" \")\n parsed_text = parsed_text.replace(\"-\", \"\")\n parsed_text = parsed_text.replace(\"&nbsp;\", \"\")\n parsed_text = parsed_text.replace(\":'\", \"\")\n parsed_text = parsed_text.replace(\"'\", \"\")\n parsed_text = parsed_text.replace(\"#\", \"\")\n parsed_text = parsed_text.replace(\"':\", \"\")\n parsed_text = parsed_text.replace(\"=\", \"\")\n parsed_text = parsed_text.replace(\"*\", \"\")\n parsed_text = parsed_text.replace(\"/\", \"\")\n parsed_text = parsed_text.replace(\"<--\", \"\")\n parsed_text = parsed_text.replace(\"-->\", \"\")\n parsed_text = parsed_text.replace(\"<!--\", \"\")\n parsed_text = parsed_text.replace(\">\", \"\")\n parsed_text = parsed_text.replace(\"<\", \"\")\n\n parsed_text = parsed_text.replace('__NOTOC__', '')\n\n return parsed_text", "def wipe_bad_chars(filename):\n return multi_replace(filename, {'(': '', ' ': '_', ')': '', '/': '_'})", "def escape_special_characters_for_regex(expression):\n spec_char_escaper = re.compile(r\"[^a-zA-Z0-9]\", re.IGNORECASE)\n expression = re.sub(spec_char_escaper, r'\\1', expression)\n return expression", "def _remove_diacritics(self, text: str) -> str:\n nfkd_form = unicodedata.normalize(\"NFKD\", text)\n return \"\".join([char for char in nfkd_form if not unicodedata.combining(char)])", "def _strip_text(text):\n text = re.sub(r'[ ,?:]|%s', \"\", text.lower())\n for chr in \"-%\":\n 
new_text = text.replace(chr, \"\")\n if new_text:\n text = new_text\n return text.lower()", "def clean(sent):\n p1 = re.compile('\\W')\n p2 = re.compile('\\s+')\n sent = re.sub(r\"http\\S+\", \"\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n sent = re.sub(r'[A-Za-z0-9]', r'', sent)\n sent = re.sub(p1, ' ', sent)\n sent = re.sub(p2, ' ', sent)\n return sent", "def sanitize(wl):\n s = []\n for word in wl:\n for symbol in ['.', '!', ',', '\\n', '\\r', '?']:\n if symbol in word:\n s.append(symbol)\n word = word.replace(symbol, '')\n \n s.append(word)\n return s", "def clean_text(self, text):\n return \"\".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))", "def _cleanse(text):\n return ''.join([character for character in text\n if character.isalnum()]).lower()", "def safe_name(self, name):\n\n output = \"\"\n for char in name:\n if char not in '\\\\/<>:\"|?*':\n output += char\n\n return output", "def sanitize_input(term: str) -> str:\n return term.strip().replace(\"*\", \"\").replace(\"'\", \"\\\\'\").replace(\"~\", \"\")", "def sanitize(mystr):\n retainlist = \"_-\"\n return re.sub(r'[^\\w' + retainlist + ']', '_', mystr)", "def strip(self, str_text):\n punct_chars = [' ', '.', ',', '!', '?', '&', '\"', \"'\", '-', ':']\n str_text = [i for i in str_text if i not in punct_chars]\n str_text = ''.join(str_text)\n return str_text", "def fix_characters(title):\n return re.sub('[^0-9a-zA-Z]+', ' ', title)", "def sanitize_text(text: str) -> str:\n for r in [RE_NOISE, RE_EMAIL, RE_REFERENCE]:\n text = r.sub(\"\", text)\n return text", "def strip_other_charcter():\n pass", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def remove_nonalpha(text):\n text = ''.join(c for c in text if c.isalpha() or c == ' ')\n return re.sub(\" +\", \" \", text)", "def removeUnicode(text):\n text = re.sub(r'(\\\\u[0-9A-Fa-f]+)',r'', text) \n text = re.sub(r'[^\\x00-\\x7f]',r'',text)\n return text", "def remove(string, list_of_unwanted_car, replacement_char=\"_\"):\n new_string = string\n for unwanted_char in list_of_unwanted_car:\n new_string = new_string.replace(unwanted_char, replacement_char)\n return new_string", "def _removeDiacritics(self, text):\n norm_txt = unicodedata.normalize('NFD', text)\n shaved = ''.join(c for c in norm_txt if not unicodedata.combining(c))\n # remove accents and other diacritics, replace spaces with \"_\" because identifiers can't have spaces\n no_spaces = unicodedata.normalize(\n 'NFC', shaved).lower().replace(\" \", \"_\")\n final_text = no_spaces\n # only allow [a-z], [0-9] and _\n p = re.compile('[a-z0-9_]+')\n for i in range(0, len(no_spaces)):\n if not (p.match(no_spaces[i])):\n final_text = final_text[:i] + '_' + final_text[i+1:]\n # i the first char is not a-z then replaceit (all identifiers must start with a letter)\n p2 = re.compile('[a-z]+')\n if not p2.match(final_text[0]):\n final_text = 'a' + final_text[1:]\n return 
final_text", "def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt", "def sanitise(string: str) -> str:\n return \"_\".join(re.findall(re.compile(\"[^ @&()/]+\"), string))", "def remove_punctuation(text):\n return re.sub('[,.?\";:\\-!@#$%^&*()]', '', text)", "def filter_invalid_characters(self, string):\n valid_chars = \"abcdefghijklmnopqrstuvwxyz0123456789-.\"\n newstring = \"\"\n for char in string:\n use_char = char\n if char not in valid_chars:\n use_char = '-'\n newstring = newstring + use_char\n\n return newstring", "def replace_bad_characters(self, str):\n\n str = unicode(BeautifulStoneSoup(str,\n convertEntities=BeautifulStoneSoup.HTML_ENTITIES))\n str = unicodedata.normalize('NFKD', str).encode('ascii', 'ignore')\n str = unicode(re.sub('[^\\w\\s-]', '', str).strip().lower())\n str = unicode(str.replace(' ', '-'))\n return str", "def removeApostrophes(self, words):\n\t\treturn self.__apostropheRegex.sub('', words)", "def remove_inner_word_characters(text):\n return RegexFilters.replace_inner_word_characters(text, \"\")", "def clean(str):\n str = str.replace(u\"“\",u\"``\")\n str = str.replace(u\"”\",u\"''\")\n str = str.replace(u' \"',u\" ``\")\n str = str.replace(u'\"',u\"''\")\n str = str.replace(u'fi',u\"fi\")\n str = str.replace(u'fl',u\"fl\")\n str = str.replace(u'’',u\"'\")\n str = str.replace(u'–',u\"---\")\n str = str.replace(u'&',u\"\\\\&\")\n str = str.replace(u'#',u\"\\\\#\")\n str = str.replace(u'_',u\"\\\\_\")\n \n return str", "def _clean_non_alphanumeric_chars(self):\n\n for i,variable in enumerate(self.model_description.modelVariables):\n clean_name = re.sub(r'[^a-zA-Z0-9_]', '', variable.name)\n if clean_name != variable.name:\n log = \"Sim variable '{}' has been renamed to '{}' \".format(variable.name, clean_name)\n log += \"to comply with Bonsai naming requirements.\"\n print(log)\n self.model_description.modelVariables[i].name = clean_name\n\n return", "def clean_txt(txt):\n r = txt.encode(\"utf-8\", errors=\"backslashreplace\").decode('utf-8').replace(\"\\\\u0144\", \"\")\n return r", "def clean_text(txt):\n\n cleaned_txt = ''\n for character in txt:\n if character not in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVQXWY ': #punctuation\n character = ''\n cleaned_txt += character\n elif character == character.upper(): #uppercase\n character = character.lower()\n cleaned_txt += character\n else:\n cleaned_txt += character\n return cleaned_txt", "def _only_letters(s):\r\n\treturn _regex_non_letters.sub('', s)", "def preprocess_input(self, text):\n text = re.sub(r\"([^a-zA-Z0-9 -]+ +[^a-zA-Z0-9 -]*|[^a-zA-Z0-9 -]*\" +\n \" +[^a-zA-Z0-9 -]+)\", ' ', text, flags=re.UNICODE)\n text = re.sub(r\"([^a-zA-Z0-9 -]+$|^[^a-zA-Z0-9 -]+)\", '', text)\n text = re.sub(r\"([a-zA-Z0-9 -]+?)([^a-zA-Z0-9 -])([a-zA-Z0-9 -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE)\n text = re.sub(r\"([\\x00-\\x7F -]+?)([^a-zA-Z0-9 -]+)([\\x00-\\x7F -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE).encode(\"utf-8\")\n return re.sub(r\"([^a-zA-Z0-9 \\-\\'])\", '', text, flags=re.UNICODE)", "def strip_characters(input_string):\n\n allowed_characters = re.compile(\"[^a-zA-Z0-9_-]\")\n subbed_string = 
allowed_characters.sub(\"_\", input_string)\n\n string_to_shorten = re.compile(\"__+\")\n shortened_string = string_to_shorten.sub(\"_\", subbed_string)\n\n return shortened_string", "def filter(string):\n # remove all unwanted characters\n return regex2.sub(' ', string)", "def basic_cleaning2(string):\n\n string = string.lower()\n string = re.sub('[0-9\\(\\)\\!\\^\\%\\$\\'\\\"\\.;,-\\?\\{\\}\\[\\]\\\\/]', ' ', string)\n string = re.sub(' +', ' ', string)\n return string", "def remove_escape_characters(text):\n text_removed_escape = list(map(lambda x: x.replace(\"\\\\\", \"\").replace(\"'\", \"\").strip().lower(), re.split(r\"(?<=\\\\)[a-z]{1}\", repr(text))))\n text_removed_extra_spaces = list(filter(lambda x: x != \"\", text_removed_escape))\n return \" \".join(text_removed_extra_spaces)", "def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? \", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()", "def filter_characters(self, allow_chars=string.printable, drop_chars=None):\n\n if allow_chars is not None:\n if not isinstance(allow_chars, set):\n allow_chars = set(allow_chars)\n\n drop_chars = ''.join(self.unique_characters - allow_chars)\n else:\n if isinstance(drop_chars, (set, list, tuple)):\n drop_chars = ''.join(drop_chars)\n\n if not isinstance(drop_chars, str):\n raise ValueError('`drop_chars` must be a sequence, set or string if `allow_chars` is not given')\n\n return self.replace_characters(str.maketrans(drop_chars, drop_chars, drop_chars))", "def removeNonAsciiFromText(self, text):\n\t\treturn ''.join([i if ord(i) < 128 else '' for i in text])", "def clean(value):\r\n return re.sub('_+', '_', INVALID_CHARS.sub('_', value))", "def strip_unsafe_characters(filename: str):\n return \"\".join([c for c in filename if c.isalpha() or c.isdigit() or c==' ' or c=='_']).rstrip()", "def clean_non_chinese_symbols(text):\n text = regex.sub('[!!]+', \"!\", text)\n text = regex.sub('[??]+', \"?\", text)\n text = regex.sub(\"[a-zA-Z#$%&\\'()*+,-./:;:<=>@,。★、…【】《》“”‘’[\\\\]^_`{|}~]+\", \" UNK \", text)\n return regex.sub(\"\\s+\", \" \", text)", "def cleanASJP(word):\n word = re.sub(r\",\", \"-\", word)\n word = re.sub(r\"\\%\", \"\", word)\n word = re.sub(r\"\\*\", \"\", word)\n word = re.sub(r\"\\\"\", \"\", word)\n word = re.sub(r\".~\", \"\", word)\n word = re.sub(r\"(.)(.)(.)\\$\", r\"\\2\", word)\n word = re.sub(r\"\\$\", \"\", word)\n word = re.sub(r\"\\s+\", \"\", word)\n return word.replace('~', '')", "def _strip_invalid_characters(self: object) -> None:\n for current_invalid_character in Episode._invalid_characters:\n self.episode_broadcast = self.episode_broadcast.replace(current_invalid_character, \" \").strip()\n self.episode_inspectors = self.episode_inspectors.replace(current_invalid_character, \" \").strip()\n self.episode_name = self.episode_name.replace(current_invalid_character, \" \").strip()\n self.episode_sequence = self.episode_sequence.replace(current_invalid_character, \"-\").strip()", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = 
ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def removeChars(inStr, chars):\n newStr = inStr\n for char in chars:\n newStr = newStr.replace(char, \"\")\n return newStr", "def clean_user_input(self, user_input):\n legal_chars = re.compile(r'^[a-z0-9]$')\n return filter(lambda c: re.match(legal_chars, c), user_input.lower())", "def remove_unicode(text):\n regex = r\"(\\\\u....)\"\n text = re.sub(regex, ' ', text)\n return text" ]
[ "0.8309218", "0.7856203", "0.7768344", "0.76876336", "0.7650726", "0.76494753", "0.76118165", "0.7510932", "0.74934494", "0.7467364", "0.74673146", "0.72453177", "0.72366244", "0.71997553", "0.7166965", "0.71665037", "0.7158556", "0.7108985", "0.70958817", "0.6979312", "0.68859047", "0.68012077", "0.6785976", "0.67122483", "0.6678896", "0.66683275", "0.66525376", "0.66440725", "0.66388977", "0.6635551", "0.66243774", "0.6620327", "0.66136354", "0.6534348", "0.65256846", "0.6523468", "0.64947426", "0.64947426", "0.6487875", "0.64854413", "0.6465042", "0.64164585", "0.64132935", "0.6403196", "0.6402464", "0.64002913", "0.6376954", "0.6374678", "0.6374345", "0.6372491", "0.63663805", "0.6348938", "0.6347113", "0.63150656", "0.63027227", "0.6284581", "0.62819916", "0.6268356", "0.626061", "0.6246665", "0.6226451", "0.6221492", "0.6218171", "0.62066644", "0.61956507", "0.61886954", "0.61886954", "0.61835873", "0.617216", "0.61712116", "0.616932", "0.6160545", "0.6156398", "0.61514723", "0.61509645", "0.61450154", "0.61245507", "0.612241", "0.6115574", "0.6105417", "0.61043453", "0.608907", "0.6087141", "0.6085466", "0.6084943", "0.60841024", "0.60725653", "0.60660625", "0.60648865", "0.6059675", "0.60552186", "0.6051383", "0.6047443", "0.6046776", "0.60439193", "0.6038836", "0.6006534", "0.6002457", "0.59959537", "0.5990928" ]
0.85286283
0
Removes special characters with whitespace on left
def _remove_left_padded_special_chars(self, text: str) -> str: pattern = re.compile("\ +[^A-Za-z0-9\n]") text = re.sub(pattern, " ", text) return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def _remove_special_chars(self, text: str) -> str:\n pattern = re.compile(self.special_chars_pattern)\n text = re.sub(pattern, \" \", text)\n return text", "def remove_special_chars(text):\n \n text = re.sub(' +', ' ', re.sub('[^A-Za-z ]+', ' ', text).strip())\n return text", "def remove_special_chars(s):\n stripped = re.sub('[^\\w\\s]', ' ', s)\n stripped = re.sub('_', ' ', stripped)\n\n # Make all whitespaces only one space\n stripped = re.sub('\\s+', ' ', stripped)\n\n stripped = stripped.strip()\n\n return stripped", "def remove_special_characters_from_text(text) -> str:\n return re.sub(r'[^\\w\\s]', '', text.strip())", "def remove_special_characters(text):\n soup = BeautifulSoup(text, \"html.parser\")\n review = soup.get_text()\n review = r\"[^a-zA-z0-9\\s]\"\n review = re.sub(review, \"\", text)\n return review.lower()", "def remove_string_special_characters(s):\n stripped = re.sub('[^\\w\\s]', '', s)\n stripped = re.sub('_', '', stripped)\n stripped = re.sub('\\s+', ' ', stripped)\n stripped = stripped.strip()\n\n return stripped", "def remove_special(s):\n return ansi_escape_chars.sub('', s)", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def remove_special_characters(string_list):", "def remove_specials(sentence):\n sentence = sentence.replace('-', ' ')\n sentence = re.sub(r'[^\\w\\s]', '', sentence)\n return sentence", "def remove_special_char(self,text):\n modified_text = re.sub(',|;|#,$','',text)\n return modified_text", "def sanitize(text):\n #text = re.sub(r'[*]',r'\\*',text) \n text = re.sub(r'~',r'\\~',text) \n #text = re.sub(r'<',r'\\textless',text) \n #text = re.sub(r'>',r'\\textgreater',text) \n text = re.sub(r'\\|',r'\\|',text) \n text = re.sub(r'_',r'\\\\_',text) \n return text", "def remove_special_characters(text, remove_digits=False):\n pattern = r'[^a-zA-z0-9\\s]' if not remove_digits else r'[^a-zA-z\\s]'\n text = re.sub(pattern, '', text)\n return text", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "def strip_other_charcter():\n pass", "def remove_non_alpha(self,text):\n \n removelist=\"-\\.\\/\\?\\@\"\n re_alpha_numeric1=r\"[^0-9a-zA-Z\"+removelist+\" ]\"\n clean_text=re.sub(re_alpha_numeric1,'',text)\n clean_text=clean_text.replace('/',' ')\n clean_text=re.sub(' +', ' ', clean_text)\n return clean_text", "def _remove_special_chars(sentence, replace_with=\"\"):\n sentence = sentence.replace('\\n', replace_with).replace('\\t', replace_with)\n return sentence", "def _remove_custom_chars(self, text: str) -> str:\n patterns = \"|\".join([x for x in self.custom_chars])\n return re.sub(patterns, \"\", str(text), flags=re.IGNORECASE)", "def _cleanse(text):\n return ''.join([character for character in text\n if character.isalnum()]).lower()", "def remove_special_chars(sentence):\r\n result = re.sub(r\"[^a-zA-Z0-9.]+\", ' ', re.sub('\\.\\.+', ' ', sentence))\r\n return result", "def _replace_non_alnum(self):\n no_punct = [x if x.isalnum() else ' ' for x in self._phrase.lower()]\n return ''.join(no_punct) # Convert an array of char to string", "def non_letter_removal(text):\n return re.sub('[^a-zA-Z]', ' ', text)", "def sanitize(wl):\n s = []\n for word in wl:\n for symbol in ['.', '!', ',', '\\n', '\\r', '?']:\n if symbol in word:\n s.append(symbol)\n word = 
word.replace(symbol, '')\n \n s.append(word)\n return s", "def _removeWhitespaces(self, s):\n return s.translate({ord(c): None for c in string.whitespace})", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def string_cleanup(s, garbage=\":,-()&\"):\n s_new = ''\n for x in s:\n if x not in garbage:\n s_new += x\n\n return s_new", "def clean_text(text):\n\n\n regex = re.compile('[\\.|\\-|\\,|\\?|\\_|\\:|\\\"|\\)|\\(\\)\\/|\\\\|\\>|\\<]')\n text = text.lower() # Turn everything to lower case\n text = regex.sub(' ', text).strip()\n out = re.sub(' +', ' ', text) # Reduce whitespace down to one\n \n return out", "def filter(string):\n # remove all unwanted characters\n return regex2.sub(' ', string)", "def clean_text(data):\r\n data = data.replace('\\n', ' ') #remove new lines\r\n replace_l = [\"'\",'!','/','\\\\','=',',',':', '<','>','?','.','\"',')','(','|','-','#','*','+', '_'] #list of characters to remove\r\n data = data.lower() #Convert all the words to lower case\r\n for i in replace_l:\r\n data = data.replace(i,' ') #replace words with blank character\r\n return data #return clean data\r", "def _remove_special_chars(self, doc: str):\n processed_tweet = re.sub('[\\.,!#¡\\?¿%:;´\"@”“&()\\|]', '', doc)\n return processed_tweet", "def _strip_text(text):\n text = re.sub(r'[ ,?:]|%s', \"\", text.lower())\n for chr in \"-%\":\n new_text = text.replace(chr, \"\")\n if new_text:\n text = new_text\n return text.lower()", "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n string = re.sub(r\"\\s{2,}\", \" \", string) \n return string.strip().lower()", "def clean(sent):\n p1 = re.compile('\\W')\n p2 = re.compile('\\s+')\n sent = re.sub(r\"http\\S+\", \"\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n sent = re.sub(r'[A-Za-z0-9]', r'', sent)\n sent = re.sub(p1, ' ', sent)\n sent = re.sub(p2, ' ', sent)\n return sent", "def handle_special_symbols(text: str\n ) -> str:\n valid_special_symbols = {' ', '_'}\n\n def criteria(c: str\n ) -> str:\n return c if c.isalnum() or c in valid_special_symbols else ' '\n\n return ''.join(criteria(c) for c in list(text))", "def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? 
\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()", "def remove_nonalpha(text):\n text = ''.join(c for c in text if c.isalpha() or c == ' ')\n return re.sub(\" +\", \" \", text)", "def replace_special(text):\r\n text = text.replace('\\r\\n', ' ')\r\n text = text.replace('\\n', ' ')\r\n text = text.replace('``', \"''\")\r\n text = text.replace('`', \"'\")\r\n text = text.replace('“', '\"')\r\n text = text.replace('”', '\"')\r\n text = text.replace('’', \"'\")\r\n text = text.replace('‘', \"'\")\r\n text = text.replace(\"'\", \"'\")\r\n text = text.replace('–', \"-\")\r\n text = text.replace('\\\"', '\"')\r\n text = text.replace(\"\\'\", \"'\")\r\n return text", "def preprocess_input(self, text):\n text = re.sub(r\"([^a-zA-Z0-9 -]+ +[^a-zA-Z0-9 -]*|[^a-zA-Z0-9 -]*\" +\n \" +[^a-zA-Z0-9 -]+)\", ' ', text, flags=re.UNICODE)\n text = re.sub(r\"([^a-zA-Z0-9 -]+$|^[^a-zA-Z0-9 -]+)\", '', text)\n text = re.sub(r\"([a-zA-Z0-9 -]+?)([^a-zA-Z0-9 -])([a-zA-Z0-9 -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE)\n text = re.sub(r\"([\\x00-\\x7F -]+?)([^a-zA-Z0-9 -]+)([\\x00-\\x7F -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE).encode(\"utf-8\")\n return re.sub(r\"([^a-zA-Z0-9 \\-\\'])\", '', text, flags=re.UNICODE)", "def remove_special_chars(text):\n schars = ''.join([a for a in string.punctuation if a not in \".,?\"])\n\n text = re.sub('[%s]' % re.escape(schars), '', text)\n return text", "def remove_escape_characters(text):\n text_removed_escape = list(map(lambda x: x.replace(\"\\\\\", \"\").replace(\"'\", \"\").strip().lower(), re.split(r\"(?<=\\\\)[a-z]{1}\", repr(text))))\n text_removed_extra_spaces = list(filter(lambda x: x != \"\", text_removed_escape))\n return \" \".join(text_removed_extra_spaces)", "def stripword( s ) :\n return re.sub( '[\\W\\d]', '', s )", "def get_clean_text(messy_text: str) -> str:\n new_text = \"\"\n replace = {\n \"*\": \"\\\"\",\n \"!\": \"?\",\n \"/\": ',',\n \"?\": \"!\"\n }\n remove = \"1234567890&@#$%^()_+|><~\"\n pls_do_upper = False\n for l in messy_text:\n if l in replace:\n new_text += replace[l]\n elif l not in remove:\n if pls_do_upper:\n new_text += l.upper()\n else:\n new_text += l\n return new_text", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def clean(s):\n punctuations = \"-,.?!;:\\n\\t()[]\\\"-\"\n return s.translate(None, string.punctuation).lower()", "def fix_characters(title):\n return re.sub('[^0-9a-zA-Z]+', ' ', title)", "def preprocess(x):\n\n\tres = re.sub(r'[^\\w\\s]', '', x)\n\tres = res.strip('\\n')\n\n\treturn res", "def replace_any_non_letter_or_number_character(text):\n text = text.strip()\n text = re.sub('[^A-Za-z0-9 ]+', '', text)\n return text", "def basic_cleaning2(string):\n\n string = string.lower()\n string = re.sub('[0-9\\(\\)\\!\\^\\%\\$\\'\\\"\\.;,-\\?\\{\\}\\[\\]\\\\/]', ' ', string)\n string = re.sub(' +', ' ', string)\n return string", "def processword(word):\n word = word.lower()\n word = word.strip('()?,!`.-:\\\"\\n \\'')\n return word", "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "def remove_punct(self,text):", "def clean_text(txt):\n\n for symbol in \"\"\".,'?!()/-:;\"\"\":\n txt = txt.replace(symbol, '')\n txt = txt.lower()\n txt = txt.split()\n return txt", 
"def sanitize_txt(x):\n return '_'.join(smart_split(x.lower()))", "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n 
output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line", "def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def remove_non_ascii(text):\n return re.sub(r'[^\\x00-\\x7F]', ' ', text)", "def replace_bad_characters(self, str):\n\n str = unicode(BeautifulStoneSoup(str,\n convertEntities=BeautifulStoneSoup.HTML_ENTITIES))\n str = unicodedata.normalize('NFKD', str).encode('ascii', 'ignore')\n str = unicode(re.sub('[^\\w\\s-]', '', str).strip().lower())\n str = unicode(str.replace(' ', '-'))\n return str", "def clean_str(self,string):\r\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\r\n string = re.sub(r\"\\'s\", \" \\'s\", string)\r\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\r\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\r\n string = re.sub(r\"\\'re\", \" \\'re\", string)\r\n string = re.sub(r\"\\'d\", \" \\'d\", string)\r\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\r\n string = re.sub(r\",\", \" , \", string)\r\n string = re.sub(r\"!\", \" ! \", string)\r\n string = re.sub(r\"\\(\", \" \\( \", string)\r\n string = re.sub(r\"\\)\", \" \\) \", string)\r\n string = re.sub(r\"\\?\", \" \\? \", string)\r\n string = re.sub(r\"\\s{2,}\", \" \", string)\r\n return string.strip().lower()", "def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt", "def CLEAN(text):\n return _control_char_re.sub('', text)", "def cleaner(self, w_old):\n w_new = re.sub('[\\(\\)]', '', w_old)\n w_new = re.sub('[^А-Яа-яЁё ]', 'ъ', w_new)\n w_new = re.sub(' ', ' ', w_new)\n return w_new", "def clean_training_text(txt):\n return re.sub('[^A-Za-z0-9]+', ' ', str(txt)).strip()", "def remove_extra_characters(self, text):\n if text:\n parsed_text = text\n parsed_text = parsed_text.replace(\"[\", \"\")\n parsed_text = parsed_text.replace(\"]\", \"\")\n parsed_text = parsed_text.replace(\"{\", \"\")\n parsed_text = parsed_text.replace(\"}\", \"\")\n parsed_text = parsed_text.replace(\"|\", \" \")\n parsed_text = parsed_text.replace(\"-\", \"\")\n parsed_text = parsed_text.replace(\"&nbsp;\", \"\")\n parsed_text = parsed_text.replace(\":'\", \"\")\n parsed_text = parsed_text.replace(\"'\", \"\")\n parsed_text = parsed_text.replace(\"#\", \"\")\n parsed_text = parsed_text.replace(\"':\", \"\")\n parsed_text = parsed_text.replace(\"=\", \"\")\n parsed_text = parsed_text.replace(\"*\", \"\")\n parsed_text = parsed_text.replace(\"/\", \"\")\n parsed_text = parsed_text.replace(\"<--\", \"\")\n parsed_text = parsed_text.replace(\"-->\", \"\")\n parsed_text = parsed_text.replace(\"<!--\", \"\")\n parsed_text = parsed_text.replace(\">\", \"\")\n parsed_text = parsed_text.replace(\"<\", \"\")\n\n parsed_text = parsed_text.replace('__NOTOC__', '')\n\n return parsed_text", "def remove_punctuation_and_splchars(words):\n new_words = []\n for word in words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n 
if new_word != '':\n new_word = remove_special_characters(new_word, True)\n new_words.append(new_word)\n return new_words", "def preprocess(self, s):\n stripped = re.sub(\"[^\\w\\s]\", \"\", s)\n stripped = re.sub(\"_\", \"\", stripped)\n\n stripped = re.sub(\"\\s+\", \" \", stripped)\n\n stripped = stripped.strip()\n\n return stripped.lower()", "def clean_string(s):\n c = re.sub(r'\\s+', ' ', re.sub(r'[^A-Za-z0-9 .:]', '', s))\n return c", "def clean_word(self, word):\n return self.filter_pattern.sub(u'', word.lower())", "def clean_word(word: str) -> str:\n\n cleaned_word = ''\n for char in word.lower():\n if char.isalnum():\n cleaned_word = cleaned_word + char\n return cleaned_word", "def clean_text(self, text):\n return \"\".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def preprocess_text(text):\n # replace non characers with space and lower case\n temp = re.sub(r\"[/W/D/S.,-]+\", \" \", str(text).lower())\n # merge multiple spaces to a single one\n return re.sub(r\"[ ]+\", \" \", temp)", "def sanitize(value):\n from re import sub\n from unicodedata import normalize\n value = normalize('NFKD', value).encode('ascii', 'ignore')\n value = sub('[^\\w\\s\\.-]', '', value.decode('utf-8')).strip().lower()\n return sub('[-_\\s]+', '_', value)", "def clean(text):\n new = text.replace(\"\\r\", \"\")\n new = new.replace(\"\\t\", \"\")\n new = new.replace(\"\\n\", \"\")\n new = new.replace(\"- \", \"-\")\n new = new.replace(\" \", \" \")\n return new", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def remove_special_chars(company_names):\n regex_remove_special_chars = '([\\.&,/\\'])'\n regex_replace_special_chars = '[-–]'\n regex_replace_multiple_spaces = '[\\s]{2,}'\n feature_as_list = remove_sub_string(regex_remove_special_chars, company_names, False)\n feature_as_list = remove_sub_string(regex_replace_special_chars, feature_as_list, False, \" \")\n feature_as_list = remove_sub_string(regex_replace_multiple_spaces, feature_as_list, False, \" \")\n return feature_as_list", "def cleanASJP(word):\n word = re.sub(r\",\", \"-\", word)\n word = re.sub(r\"\\%\", \"\", word)\n word = re.sub(r\"\\*\", \"\", word)\n word = re.sub(r\"\\\"\", \"\", word)\n word = re.sub(r\".~\", \"\", word)\n word = re.sub(r\"(.)(.)(.)\\$\", r\"\\2\", word)\n word = re.sub(r\"\\$\", \"\", word)\n word = re.sub(r\"\\s+\", \"\", word)\n return word.replace('~', '')", "def wipe_bad_chars(filename):\n return multi_replace(filename, {'(': '', ' ': '_', ')': '', '/': '_'})", "def cleanSents(row, field):\n\n text = str(row[field]).lower()\n clean_text = re.sub('[^A-Za-z0-9]+', ' ', text).strip()\n return clean_text", "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = 
re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip()", "def text_prepare(text):\n \n replace_by_space_re = re.compile('[/(){}\\[\\]\\|@,;]')\n bad_symbols_re = re.compile('[^0-9a-z #+_]')\n\n text = text.lower()\n text = replace_by_space_re.sub(' ', text)\n text = bad_symbols_re.sub('', text)\n text = ' '.join([x for x in text.split() if x])\n\n return text.strip()", "def sanitize(mystr):\n retainlist = \"_-\"\n return re.sub(r'[^\\w' + retainlist + ']', '_', mystr)", "def clean_text_from_latin_supplement_unicode(text):\n return re.sub(r\"([\\u0080-\\u00FF])\", \" \", text)", "def clean_text_from_latin_supplement_unicode(text):\n return re.sub(r\"([\\u0080-\\u00FF])\", \" \", text)", "def clean_str(string):\r\n\t\t\tstring = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\r\n\t\t\tstring = re.sub(r\"\\'s\", \" \\'s\", string)\r\n\t\t\tstring = re.sub(r\"\\'ve\", \" \\'ve\", string)\r\n\t\t\tstring = re.sub(r\"n\\'t\", \" n\\'t\", string)\r\n\t\t\tstring = re.sub(r\"\\'re\", \" \\'re\", string)\r\n\t\t\tstring = re.sub(r\"\\'d\", \" \\'d\", string)\r\n\t\t\tstring = re.sub(r\"\\'ll\", \" \\'ll\", string)\r\n\t\t\tstring = re.sub(r\",\", \" , \", string)\r\n\t\t\tstring = re.sub(r\"!\", \" ! \", string)\r\n\t\t\tstring = re.sub(r\"\\(\", \" \\( \", string)\r\n\t\t\tstring = re.sub(r\"\\)\", \" \\) \", string)\r\n\t\t\tstring = re.sub(r\"\\?\", \" \\? \", string)\r\n\t\t\tstring = re.sub(r\"\\s{2,}\", \" \", string)\r\n\t\t\treturn string.strip()", "def sanitize(string):\n retval = string.lower()\n retval = re.sub(r\"[^\\w\\s]\", '', retval)\n retval = re.sub(r\"\\s+\", '_', retval)\n return retval", "def clean(name):\n name = remove_extra(name)\n name = unidecode.unidecode(name) # Remove diacritics\n name = \"\".join(\n list(filter(lambda c: c in (string.ascii_letters + string.digits + \" \"), name))\n )\n name = name.lower().strip()\n return name", "def clean_text(text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or is_control(char):\n continue\n if is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)" ]
[ "0.8474704", "0.8094379", "0.80897444", "0.8077067", "0.79241997", "0.7823599", "0.77800685", "0.77404356", "0.7715935", "0.76775664", "0.7647973", "0.7613351", "0.7509526", "0.74965906", "0.7397818", "0.73908854", "0.73888534", "0.73783976", "0.733939", "0.73306227", "0.72967815", "0.72813517", "0.7269577", "0.72642905", "0.7248617", "0.7210239", "0.7210239", "0.72047293", "0.71951056", "0.719493", "0.7186802", "0.71741074", "0.71703255", "0.71593404", "0.7145363", "0.71437025", "0.713015", "0.71278137", "0.7124845", "0.7123982", "0.7107399", "0.7106364", "0.71038264", "0.7101113", "0.70996547", "0.7091471", "0.70834094", "0.7075652", "0.70664835", "0.7050472", "0.7048727", "0.70456785", "0.70317405", "0.7019656", "0.69839203", "0.6979551", "0.69661087", "0.69661087", "0.69638747", "0.69638747", "0.69638747", "0.69638747", "0.69638747", "0.69638747", "0.69620174", "0.6951321", "0.69452024", "0.69450533", "0.69286126", "0.6913035", "0.68953586", "0.6893166", "0.6890997", "0.68890744", "0.68783367", "0.686837", "0.6863597", "0.68513775", "0.6851352", "0.683619", "0.6828", "0.68270534", "0.68237513", "0.6820259", "0.6817219", "0.68170786", "0.6812292", "0.6802562", "0.67933863", "0.67902184", "0.6788007", "0.67801154", "0.6779996", "0.6776842", "0.6776316", "0.6776316", "0.6771188", "0.67710125", "0.677021", "0.6767872" ]
0.76449716
11
Removes stopwords as defined by self.stop_words
def _remove_stopwords(self, text: str) -> str: pattern = r""" (?x) # Set flag to allow verbose regexps \w+(?:-\w+)* # Words with optional internal hyphens | \s* # Any space | [][!"#$%&'*+,-./:;<=>?@\\^():_`{|}~] # Any symbol """ symbol = " " return "".join( t if t not in self.stop_words else symbol for t in re.findall(pattern, text) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_stopwords_fun(self):\n tokens = str(self.doc).split()\n cleaned_tokens = [token for token in tokens\n if token.lower() not in self.stopword_list]\n self.doc = ' '.join(cleaned_tokens)", "def rm_stop_words(self, words):\n return [word for word in words if word.lower() not in self.stopwords]", "def _remove_stopwords(self, words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def removeStopwords(self, words):\n\t\twordList = [w.strip() for w in words.split(' ')]\n\t\trtnWords = []\n\t\tfor word in wordList:\n\t\t\tif word.lower() not in self._stopwords:\n\t\t\t\trtnWords.append(word)\n\t\treturn \" \".join(rtnWords)", "def remove_stop_words(self):\n self.word_list = [word for word in self.word_list if len(word) > 1 and word not in STOP_WORDS] #The len(word) check is here because there's still one piece of white space I haven't pinned down in each file. I haven't taken the time to figure out a quick way to look at all the whitespace characters yet, but none of the ones I included takes care of that one lonely space. Will keep on it.\n self.word_list.sort()", "def _remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n removed_stopwords = []\n for word in words:\n if word not in stopwords.words('english'):\n removed_stopwords.append(word)\n return removed_stopwords", "def removeStopwords(self, words):\n\t\tnewWords = []\n\t\tfor word in words:\n\t\t\tif word not in stopwords.words('english'):\n\t\t\t\tnewWords.append(word)\n\t\treturn newWords", "def stopword_removal(words):\n stops = set(stopwords.words('english'))\n words = [w for w in words if w not in stops]\n return words", "def remove_stopwords(words):\n stopwords = nltk.corpus.stopwords.words('english')\n return [w for w in words if w not in stopwords]", "def remove_stopwords(words):\r\n new_words = []\r\n for word in words:\r\n if word not in stopwords.words('english'):\r\n new_words.append(word)\r\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopword_list:\n new_words.append(word)\n return new_words", "def removeStopWords(self,phrase):\n if(\"stopWord\" in self._classes):\n return self._stopWord.removeStopWord(phrase)", "def remove_stop_words(document):\n\n stop_words = stopwords.words(\"english\")\n stop_words = set(stop_words + EXTENDED_STOP_WORDS)\n return [token for token in document if token not in stop_words]", "def remove_stopwords(words: list) -> list:\n new_words = []\n for word in words:\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(self,text):\n return \" \".join([word for word in str(text).split() if word not in self.STOPWORDS])", "def 
remove_stopwords(words):\n new_words = []\n for word in words:\n # print(word)\n if word not in stopwords.words('english'):\n new_words.append(word)\n return new_words", "def remove_stopwords(self, text):\n stopwords_list = stopwords.words('english')\n whitelist = [\"n't\", \"not\", \"no\"]\n words = text.split()\n clean_words = [word for word in words if (word not in stopwords_list or word in whitelist) and len(word) > 1]\n return \" \".join(clean_words)", "def remove_stopwords(text):\n tokens = word_tokenize(text)\n filtered = [word for word in tokens if word not in stop_words]\n filtered = ' '.join(filtered)\n return filtered", "def removeCustomStopwords(self, words, stopWords):\n\t\tremoved = [word for word in words if word not in stopWords]\t\t\n\t\treturn removed", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stopwords.words('spanish'):\n new_words.append(word)\n return new_words", "def removeStopWords(self, text=None, sort=True, lc=False):\n\n\t\tif type(text) == type(str()):\n\t\t\ttext = text.split()\n\n\t\ttextWithStopWords = text\n\t\ttextWithoutStopWords = list()\n\n\t\tif sort:\n\t\t\ttextWithStopWords = sorted(textWithStopWords)\n\n\t\tappend = textWithoutStopWords.append\n\t\tlower = str.lower\n\n\t\t# Loop through all the words in the text\n\t\tfor word in textWithStopWords:\n\n\t\t\t# If the word is not a stop word, add it to textWithoutStopWords\n\t\t\tif lower(word) not in self.stop_words:\n\t\t\t\tif lc==True:\n\t\t\t\t\tappend(lower(word))\n\t\t\t\telse:\n\t\t\t\t\tappend(word)\n\n\t\treturn textWithoutStopWords", "def remove_stopwords(tweet, stopwords):\n\n\timport re\n\n\tstopwords_set = set(stopwords)\n\tsplit_tweet = [word for word in re.split('\\W+', tweet) if word \\\n\t\t\t\t\t\t\t\t\t\t\t not in stopwords_set]\n\treturn ' '.join(split_tweet)", "def remove_stopwords(document):\n return list(filter(lambda x: x not in BasicNL.words, document))", "def remove_stopwords(text):\n stopwords = [\"i\", \"me\", \"my\", \"myself\", \"we\", \"our\", \"ours\", \"ourselves\", \"you\", \"your\", \"yours\", \"yourself\", \"yourselves\", \"he\", \"him\", \"his\", \"himself\", \"she\", \"her\", \"hers\", \"herself\", \"it\", \"its\", \"itself\", \"they\", \"them\", \"their\", \"theirs\", \"themselves\", \"what\", \"which\", \"who\", \"whom\", \"this\", \"that\", \"these\", \"those\", \"am\", \"is\", \"are\", \"was\", \"were\", \"be\", \"been\", \"being\", \"have\", \"has\", \"had\", \"having\", \"do\", \"does\", \"did\", \"doing\", \"a\", \"an\", \"the\", \"and\", \"but\", \"if\", \"or\", \"because\", \"as\", \"until\", \"while\", \"of\", \"at\", \"by\", \"for\", \"with\", \"about\", \"against\", \"between\", \"into\", \"through\", \"during\", \"before\", \"after\", \"above\", \"below\", \"to\", \"from\", \"up\", \"down\", \"in\", \"out\", \"on\", \"off\", \"over\", \"under\", \"again\", \"further\", \"then\", \"once\", \"here\", \"there\", \"when\", \"where\", \"why\", \"how\", \"all\", \"any\", \"both\", \"each\", \"few\", \"more\", \"most\", \"other\", \"some\", \"such\", \"no\", \"nor\", \"not\", \"only\", \"own\", \"same\", \"so\", \"than\", \"too\", \"very\", \"s\", \"t\", \"can\", \"will\", \"just\", \"don\", \"should\", \"now\"]\n return \" \".join([word for word in text.split() if word not in stopwords])", "def remove_stopwords(tokens):\n stop_words = set(stopwords.words('english'))\n\n tokens = [w for w in tokens if w not in stop_words]\n\n return tokens", "def _remove_stopwords(self, doc: str):\n processed_tweet = [word for word 
in doc.split() if\n word not in stopwords.words('spanish') and\n len(word) > 1]\n return ' '.join(processed_tweet)", "def remove_stopwords(text):\n text = \" \"+text\n text = text.upper()\n for word in STOP_WORDS:\n text = text.replace(word.upper(),\" \")\n return text", "def remove_stopwords(text):\n return \" \".join([word for word in str(text).split() if word not in STOPWORDS])", "def remove_stopwords(tokens):\n\n return [t for t in tokens if t not in stopwords.words('english')]", "def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in nltk.corpus.stopwords.words('french'):\n new_words.append(word)\n return new_words", "def remove_stopwords(self, *args):\n if self.remove_stopwords is False:\n raise Exception(\"Error - enable stopword removal functionality\")\n if type(args) != list:\n raise Exception(\"Error - expected a list\")\n if args == []:\n raise Exception(\"Error - no items to remove from stopword list\")\n for arg in args:\n if arg in self.stopword_list:\n self.stopword_list.remove(arg)\n else:\n raise Exception(arg+\" not in list\")", "def remove_stop_words(text_tokens):\n\n return [words for words in text_tokens if words not in stop_words]", "def removeOwnStopWords(self, sort=True, lc=False):\n\t\tself.textFile = self.removeStopWords(text=self.textFile, sort=sort, lc=lc)", "def remove_stopwords(data):\n stop_words = stopwords.words('english')\n words = word_tokenize(str(data))\n new = \"\"\n for word in words:\n if word not in stop_words and len(word) > 1:\n new = new + \" \" + word\n return new", "def removeStopWords(self, words):\n line = []\n for w in words:\n if w not in self.stop_words:\n line.append(w)\n return line", "def remove_stop_words(self, content):\n stop_words = set(stopwords.words('english'))\n words = nltk.word_tokenize(content)\n words = [word.lower() for word in words if word.isalpha()]\n return [w for w in words if not w in stop_words]", "def clean_stopwords(text):\n tokens = tokenize(text)\n tokens = stopwordsRem(tokens)\n return tokens", "def remove_stopwords(texts):\n return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]", "def remove_stop_words(tokens, language):\n if not remove_stop_words_activated or language.__eq__(\"Catalan\"):\n return tokens\n output = []\n stop = stopwords.words(language.lower())\n for token in tokens:\n if token not in stop:\n output.append(token)\n return output", "def remove_stopwords(lista,stopwords):\n lista_out = list()\n for idx, text in enumerate(lista):\n text = ' '.join([word for word in text.split() if word not in stopwords])\n text = text.strip()\n lista_out.append(text)\n #print(\"Len original: {} - Len processed stopwords: {}\".format(len(lista),len(lista_out)))\n return lista_out", "def stopword_removal_from_taggedwords(tagged_words):\n stops = set(stopwords.words('english'))\n tagged_words = [w for w in tagged_words if not w[0] in stops]\n return tagged_words", "def deleteStopWord(self, stopWordToRemove):\n\t\tself.stop_words.remove(stopWordToRemove.lower())", "def removeStopword(text, stop_words=stop_words):\n word_tokens = word_tokenize(text)\n filtered_sentence = [w for w in word_tokens if not w in stop_words]\n return ' '.join(filtered_sentence)", "def remove_stopwords(tokens):\n stopword_list = make_stopwords()\n no_stop_tokens = [token for token in tokens if token not in stopword_list]\n return no_stop_tokens", "def delete_words(self, words=None):\n\n if words is None:\n words = self.stopwords\n\n self.__corpora = [\n sub(r' ({0}) 
'.format('|'.join(words)), ' ', string) for string in self.__corpora\n ]", "def remove_stopwords(setence):\n sent = setence.lower().strip()\n stopwords = load_stopwords(stopword_path)\n tokens = tokenizer.tokenize(sent)\n tokens_filter_stopwords = [word for word in tokens if word not in stopwords]\n string = \" \".join(word for word in tokens_filter_stopwords)\n return string", "def stopWord_removal(list_of_words):\n curated_list = [w for w in list_of_words if not w in STOP_WORDS]\n return curated_list", "def remove_stopwords(text):\r\n text_split=text.split()\r\n text_split=[word for word in text_split if word not in stopwords.words('spanish')]\r\n return text_split", "def remove_stop_words(text):\n return ' '.join(\n [word for word in text.split(' ') if word not in final_stop_words])", "def filter_stop_words(self, word_list):\n punctuation = list(string.punctuation)\n file = open(\"stopwords.txt\")\n stopwords = []\n strippables = string.punctuation + string.whitespace\n for line in file:\n stopwords.append(line.strip(strippables))\n stopwords.extend(punctuation)\n\n terms_without_stop = [word for word in word_list if word not in stopwords]\n\n return terms_without_stop", "def remove_stopwords(text, is_lower_case=False):\n tokens = tokenizer.tokenize(text)\n tokens = [token.strip() for token in tokens]\n if is_lower_case:\n filtered_tokens = [token for token in tokens if token not in stopword_list]\n else:\n filtered_tokens = [token for token in tokens if token.lower() not in stopword_list]\n filtered_text = ' '.join(filtered_tokens)\n return filtered_text", "def remove_stopwords(self, value):\n with open(\"grandpy/stop_words.json\", encoding=\"utf-8\") as json_file:\n stopwords = json.load(json_file)\n key_words = [word for word in value if word not in stopwords[\"stop_words\"]]\n return key_words", "def stopwords_removal(text_vector):\n\n text_vector = [\n i for i in text_vector if i not in stopwords.words('english')]\n return text_vector", "def remove_stopwords_set(sentence: str, stop_words: Collection[str]) -> str:\n return \" \".join([w for w in word_tokenize(sentence) if w not in stop_words])", "def stop_words_remover(tokenized_sent):\n # Convert string back to list\n\n filtered_sentence = []\n stop_words = set(stopwords.words(\"english\"))\n for word in literal_eval(tokenized_sent):\n if word not in stop_words:\n filtered_sentence.append(word)\n return filtered_sentence", "def remove_stopwords(text: Iterable[str]) -> Generator[str, None, None]:\n stop_words = set(stopwords.words(\"english\"))\n return (\n word\n for word in text\n if word not in stop_words\n )", "def remove_stop_words(tokenized_word_list):\n stop_words = set(nltk.corpus.stopwords.words(\"english\"))\n filtered_tokens = [word for word in tokenized_word_list if word not in stop_words]\n return filtered_tokens", "def _remove_stopwords(data, settings):\n column = settings['input_col']\n output_col = settings['output_col']\n frag = settings['id_frag']\n\n stopwords = settings['news_stops_words']\n stopwords += settings['stopwords']\n stopwords = np.unique(stopwords)\n\n tmp = []\n if data.shape[0] > 0:\n if settings['case_sensitive']:\n stopwords = set(stopwords)\n for tokens in data[column].values:\n tmp.append(list(set(tokens).difference(stopwords)))\n\n else:\n stopwords = set([tok.lower() for tok in stopwords])\n\n for tokens in data[column].values:\n entry = [tok.lower() for tok in tokens]\n tmp.append(list(set(entry).difference(stopwords)))\n\n else:\n tmp = np.nan\n\n if output_col in data.columns:\n 
data.drop([output_col], axis=1, inplace=True)\n\n data[output_col] = tmp\n\n info = generate_info(data, frag)\n return data, info", "def remove_stop_words(stop_list, tokens):\n return [t for t in tokens if len(t) > 2 and not t in stop_list]", "def remove_stop_words(tweet):\n tokens_without_sw = \"\"\n for word in tweet.split():\n if not word.lower() in STOPWORDS:\n tokens_without_sw += word.lower() + \" \"\n return tokens_without_sw", "def remove_stop_words(self, document_tokens=None, sentences=None):\n if sentences is not None or (\n sentences is not None and document_tokens is not None):\n sentences_ = []\n for sentence in sentences:\n sentences_.append(\n [word for word in sentence if word not in self.stop_words])\n return sentences_\n elif document_tokens is not None:\n return [word for word in document_tokens if\n word not in self.stop_words]\n else:\n er_msg = 'Wrong parameters for this methods'\n logging.error(er_msg)\n raise Exception(er_msg)", "def remove_stopwords(word: str) -> str:\n stop_words = stopwords.words('english')\n if not word in stop_words:\n return word\n else:\n return '0'", "def remove_stop_words(self, query):\n ans=\"\"\n words= query.split()\n for word in words:\n if word.lower() not in self.stop_words:\n ans+=word+\" \"\n return ans", "def stopwordsRem(tokens):\n no_sw = [t for t in tokens if not t in stopwords.words('english')]\n return no_sw", "def remove_stopwords(dataset_path: str) -> str:\n dtypes = {\n \"id\": int,\n \"keyword\": str,\n \"location\": str,\n \"text\": str,\n \"text_stemmed\": str,\n \"text_lemmatized\": str,\n }\n\n if \"train\" in dataset_path:\n dtypes[\"target\"] = int\n\n def _rm_stopwords(tokens: List[str]):\n return [w for w in tokens\n if w not in nltk.corpus.stopwords.words('english')]\n\n new_path = _make_new_filepath(dataset_path, \"nostopwords\")\n df = pd.read_csv(\n f\"/data/{dataset_path}\",\n index_col=\"id\",\n dtype=dtypes,\n converters={\"tokens\": ast.literal_eval})\n df[\"tokens\"] = df[\"tokens\"].apply(_rm_stopwords)\n df.to_csv(f\"/data/{new_path}\")\n return new_path", "def cut_words(self, doc):\n return [word for word in jieba.cut(doc) if not word in self.stopwords]", "def removeStopWords(input_str, rm_words=[]): \n filtered_msg = []\n #check if string, and split on spaces\n if isinstance(input_str, basestring):\n input_str = tokenize(input_str)\n #check each word against nltk stopwords and specified input list\n for word in input_str:\n if word.lower() not in stopwords.words('english') and word.lower() not in rm_words:\n filtered_msg.append(word)\n return \" \".join(filtered_msg)", "def remove_stopwords(ingredient, stopwords):\n ingredient = ingredient.lower() # normalizes to lower case\n no_stops = [gram for gram in ingredient.split(\" \") if gram not in stopwords]\n new_ingredient = \" \".join(no_stops)\n return new_ingredient", "def remove_stopwords(data: pd.Series) -> pd.Series:\n pattern = r'\\b(?:{})\\b'.format('|'.join(stopwords.words('english')))\n return data.str.replace(pattern, '')", "def article_stop_words_removal(article,\n preprocess_type=PreprocessWordType.LEMMATIZE):\n stop_words = set(stopwords.words('english'))\n article_words = []\n for word in article.words:\n preprocessed_word = query_utils.preprocess_word(word, preprocess_type)\n if preprocessed_word not in stop_words:\n article_words.append(preprocessed_word)\n return article_words", "def remove_stop_words(\n token_counter: Dict[str, int], words: Set[str] = None\n ) -> Dict[str, int]:\n if words is None:\n words = stop_words\n\n return 
set(token_counter.keys()).difference(words)", "def removeStopwordsLine(self, wordLines):\n\t\treturn self._doPerLine(wordLines, self.removeStopwords)", "def remove_stopwords(string):\n swords = set(stopwords.words(\"english\"))\n return \" \".join([w for w in word_tokenize(string) if w not in swords])", "def get_stop_words(self):\n self._normalize_params()\n return self.stop_words", "def stopword_filter(words):\n new_words = []\n for w in words:\n if w in stopwords.words(\"german\"): continue\n else: new_words += [w]\n return new_words", "def removestopwords(query):\n wordlist = [word for word in query.split() if word not in stopwords.words('english')]\n return \" \".join(wordlist)", "def stop_words_remover(df):\n stop_words = stop_words_dict['stopwords']\n\n df['Without Stop Words'] = [' '.join([w for w in x.lower().split()\n if w not in stop_words])\n for x in df['Tweets'].tolist()\n ]\n result = []\n l1 = df['Without Stop Words']\n for tweet in l1:\n result.append(tweet.split(' '))\n df['Without Stop Words'] = result\n return df", "def filter_stop_words(self, content, stop_words):\n content = re.sub(r\"[^\\w\\s]\", \"\", content)\n content = re.sub(r\"[0-9]+\", \"\", content)\n new_sent = [\n Word(word).singularize()\n for word in content.lower().split()\n if Word(word).singularize() not in stop_words\n ]\n new_cont = \" \".join(new_sent)\n return new_cont", "def stop_words_remover(df):\n \n df['Without Stop Words'] = df['Tweets'].apply(str.lower).apply(str.split)\n\n for i in range(len(twitter_df)):\n df['Without Stop Words'][i] = [x for x in df['Without Stop Words'][i] if x not in stop_words_dict['stopwords']]\n return df\n pass", "def remove_stopwords(text: str, basic_stopwords: Set[str] = None, additional_stopwords=True) -> str:\n if basic_stopwords is None:\n basic_stopwords = _BASIC_STOPWORDS\n\n _stopwords = basic_stopwords\n\n if additional_stopwords:\n with open(_ADDITIONAL_STOPWORDS_PATH) as f:\n additional_stopwords = set(line.strip() for line in f)\n _stopwords = _stopwords.union(additional_stopwords)\n\n return ' '.join(word for word in text.split()\n if word not in _stopwords)", "def handle_stop_words(self,\n text: str,\n stop_words: Set[str]\n ) -> Union[str, List[str]]:\n if not self.tokenise:\n return ' '.join(\n w for w in word_tokenize(text) if w not in stop_words\n )\n return [w for w in text if w not in stop_words]", "def delete_stop_words(list, wordset):\n for word in wordset:\n list.delete(word)", "def remove_stop_words(dataset):\n for n in range(len(dataset)):\n try:\n # concatenate the title and keywords\n current_title = dataset.iloc[n][\"Title of Post\"]\n current_description = dataset.iloc[n][\"Post Description\"]\n\n token_title = word_tokenize(current_title)\n token_description = word_tokenize(current_description)\n filtered_title = []\n filtered_description = []\n\n for word in token_description:\n if word not in stop_words:\n filtered_description.append(word)\n\n filtered_description = listToString(filtered_description)\n\n for word in token_title:\n if word not in stop_words:\n filtered_title.append(word)\n\n filtered_title = listToString(filtered_title)\n\n dataset.iloc[n][\"Title of Post\"] = filtered_title\n dataset.iloc[n][\"Post Description\"] = filtered_description\n\n except:\n pass\n\n return dataset", "def remove_stop(text):\n STOPWORDS = ['a' ,'b', 'c','d','e','f' ,'g' ,'h','i','j','k','l','m','n','o' ,'p' ,'q','r','s','t','u' ,'v' ,'w','x','y','z']\n return \" \".join([word for word in str(text).split() if word not in STOPWORDS])", "def 
remove_stop_words(raw_corpus, doc_freq=0.75):\n vectorizer = TfidfVectorizer()\n vectors = vectorizer.fit_transform([doc.lower() for doc in raw_corpus])\n feature_names = vectorizer.get_feature_names()\n dense = vectors.todense()\n denselist = dense.tolist()\n words_tfidf = pd.DataFrame(denselist, columns=feature_names)\n\n new_stopwords = dict.fromkeys(feature_names, 0)\n for (word, data) in words_tfidf.iteritems():\n for num in data.values:\n if num > 0:\n new_stopwords[word] +=1\n\n new_sw = []\n for word, count in new_stopwords.items():\n if count > doc_freq*len(raw_corpus):\n new_sw.append(word)\n stopw = stopwords.words('english')\n stopw = [*stopw, *new_sw]\n text_nostop = []\n for doc in raw_corpus:\n doc_bag = make_bag(doc, stopw)\n text_nostop.append(\" \".join(doc_bag))\n return(text_nostop)", "def stop_words():\n return get_stop_words('es') + get_stop_words('ca') + get_stop_words('en')", "def filter_stopwords(tagged_records):\r\n print('Filtering stopwords')\r\n stop_words = list(stopwords.words('english'))\r\n stop_words.extend(string.punctuation)\r\n stop_words.extend(constants.CONTRACTIONS)\r\n stop_words.extend(constants.MYSQL_STOPWORDS)\r\n dictionary_words = set(nltk_words.words())\r\n\r\n def not_dictionary_word(word): \r\n return word[0] not in dictionary_words and word[1] not in ['NNP', 'NNPS']\r\n\r\n filtered_records = [filter(lambda word: word[0] not in stop_words, record) for record in tagged_records]\r\n filtered_records = [filter(lambda word: not_dictionary_word, record) for record in filtered_records]\r\n filtered_records = [filter(lambda word: not word[0].replace('.', '', 1).isdigit(), record)\r\n for record in filtered_records] # see https://stackoverflow.com/a/23639915/5760608\r\n filtered_records = [list(filter(lambda word: word[1] in POS_TRANSLATOR.keys(), record))\r\n for record in filtered_records]\r\n return filtered_records", "def stopWord(text):\r\n\r\n\ttext = removePunc(text) # memanggil fungsi untuk menghapus tanda baca\r\n\t\r\n\tfile = open('TP2-stopword.txt', 'r')\t\t\t\t # membuka file stopword\r\n\tstopWord = file.read().split()\r\n\r\n\tfor word in text:\t\t\t\t\t \t# menghapus stopword dalam teks\r\n\t\tif word in stopWord:\r\n\t\t\ttext.remove(word)\r\n\r\n\tfile.close()\r\n\r\n\treturn text", "def build_stopwords():\r\n\tprint('\\nbuilding stopwords')\r\n\t\r\n\tif load_stopwords():\r\n\t\treturn\r\n\r\n\tglobal stopwords\r\n\tstopwords = nltk.corpus.stopwords.words('english')\r\n\tfor f in os.listdir(paths.path_data_stopwords):\r\n\t\tpath_stopwords = paths.path_data_stopwords + '/' + f\r\n\t\twith open(path_stopwords,'r') as f:\r\n\t\t\tfor l in f:\r\n\t\t\t\tw = l.strip()\r\n\t\t\t\tw = re.sub(r\"[\\x80-\\xff]\",\" \",w)\r\n\t\t\t\tif (w not in stopwords):\r\n\t\t\t\t\tstopwords.append(w)\r\n\t\r\n\t# wip improve with POS and remove numbers\r\n\twith open(paths.path_data_stopwords_txt,'w') as outf:\r\n\t\toutf.write('\\n'.join(stopwords))\r\n\t\r\n\tprint('\\nstopword count : ' + str(len(stopwords)))", "def rm_stopwords(file_path, word_dict):\n\n # read stop word dict and save in stop_dict\n stop_dict = {}\n with open(word_dict) as d:\n for word in d:\n stop_dict[word.strip(\"\\n\")] = 1\n # remove tmp file if exists\n if os.path.exists(file_path + \".tmp\"):\n os.remove(file_path + \".tmp\")\n\n print(\"now remove stop words in %s.\" % file_path)\n # read source file and rm stop word for each line.\n with open(file_path) as f1, open(file_path + \".tmp\", \"w\") as f2:\n for line in f1:\n tmp_list = [] # save words not in stop dict\n 
words = line.split()\n for word in words:\n if word not in stop_dict:\n tmp_list.append(word)\n words_without_stop = \" \".join(tmp_list)\n to_write = words_without_stop + \"\\n\"\n f2.write(to_write)\n\n # overwrite origin file with file been removed stop words\n shutil.move(file_path + \".tmp\", file_path)\n print(\"stop words in %s has been removed.\" % file_path)", "def stopword_removal_on_corpus(text_corpus):\n\n text_corpus[text_column_name] = text_corpus[\n text_column_name].apply(stopwords_removal)\n return text_corpus", "def stop_words_remover(df):\n\n # dictionary of english stopwords\n stop_words_dict = {\n 'stopwords':[\n 'where', 'done', 'if', 'before', 'll', 'very', 'keep', 'something', 'nothing', 'thereupon', \n 'may', 'why', '’s', 'therefore', 'you', 'with', 'towards', 'make', 'really', 'few', 'former', \n 'during', 'mine', 'do', 'would', 'of', 'off', 'six', 'yourself', 'becoming', 'through', \n 'seeming', 'hence', 'us', 'anywhere', 'regarding', 'whole', 'down', 'seem', 'whereas', 'to', \n 'their', 'various', 'thereafter', '‘d', 'above', 'put', 'sometime', 'moreover', 'whoever', 'although', \n 'at', 'four', 'each', 'among', 'whatever', 'any', 'anyhow', 'herein', 'become', 'last', 'between', 'still', \n 'was', 'almost', 'twelve', 'used', 'who', 'go', 'not', 'enough', 'well', '’ve', 'might', 'see', 'whose', \n 'everywhere', 'yourselves', 'across', 'myself', 'further', 'did', 'then', 'is', 'except', 'up', 'take', \n 'became', 'however', 'many', 'thence', 'onto', '‘m', 'my', 'own', 'must', 'wherein', 'elsewhere', 'behind', \n 'becomes', 'alone', 'due', 'being', 'neither', 'a', 'over', 'beside', 'fifteen', 'meanwhile', 'upon', 'next', \n 'forty', 'what', 'less', 'and', 'please', 'toward', 'about', 'below', 'hereafter', 'whether', 'yet', 'nor', \n 'against', 'whereupon', 'top', 'first', 'three', 'show', 'per', 'five', 'two', 'ourselves', 'whenever', \n 'get', 'thereby', 'noone', 'had', 'now', 'everyone', 'everything', 'nowhere', 'ca', 'though', 'least', \n 'so', 'both', 'otherwise', 'whereby', 'unless', 'somewhere', 'give', 'formerly', '’d', 'under', \n 'while', 'empty', 'doing', 'besides', 'thus', 'this', 'anyone', 'its', 'after', 'bottom', 'call', \n 'n’t', 'name', 'even', 'eleven', 'by', 'from', 'when', 'or', 'anyway', 'how', 'the', 'all', \n 'much', 'another', 'since', 'hundred', 'serious', '‘ve', 'ever', 'out', 'full', 'themselves', \n 'been', 'in', \"'d\", 'wherever', 'part', 'someone', 'therein', 'can', 'seemed', 'hereby', 'others', \n \"'s\", \"'re\", 'most', 'one', \"n't\", 'into', 'some', 'will', 'these', 'twenty', 'here', 'as', 'nobody', \n 'also', 'along', 'than', 'anything', 'he', 'there', 'does', 'we', '’ll', 'latterly', 'are', 'ten', \n 'hers', 'should', 'they', '‘s', 'either', 'am', 'be', 'perhaps', '’re', 'only', 'namely', 'sixty', \n 'made', \"'m\", 'always', 'those', 'have', 'again', 'her', 'once', 'ours', 'herself', 'else', 'has', 'nine', \n 'more', 'sometimes', 'your', 'yours', 'that', 'around', 'his', 'indeed', 'mostly', 'cannot', '‘ll', 'too', \n 'seems', '’m', 'himself', 'latter', 'whither', 'amount', 'other', 'nevertheless', 'whom', 'for', 'somehow', \n 'beforehand', 'just', 'an', 'beyond', 'amongst', 'none', \"'ve\", 'say', 'via', 'but', 'often', 're', 'our', \n 'because', 'rather', 'using', 'without', 'throughout', 'on', 'she', 'never', 'eight', 'no', 'hereupon', \n 'them', 'whereafter', 'quite', 'which', 'move', 'thru', 'until', 'afterwards', 'fifty', 'i', 'itself', 'n‘t',\n 'him', 'could', 'front', 'within', '‘re', 'back', 'such', 'already', 
'several', 'side', 'whence', 'me', \n 'same', 'were', 'it', 'every', 'third', 'together'\n ]\n }\n \n # Create 'Without Stop Words' column: Mikael\n df['Without Stop Words'] = df['Tweets'].str.lower().str.split()\n\n # Extract stop words from 'Without Stop Words' column: Monica\n for row in df['Without Stop Words']:\n for word in row:\n #find stop word in stop word dictionary\n for stop_word in stop_words_dict['stopwords']:\n if word == stop_word:\n #remove stop word from current row\n row.remove(word)\n \n #loop again in case dictionary missed a word\n for row in df['Without Stop Words']:\n for word in row:\n #find stop word in stop word dictionary\n for stop_word in stop_words_dict['stopwords']:\n if word == stop_word:\n #remove stop word from current row\n row.remove(word)\n return df", "def df_remove_stopwords(df, col_name):\n stopwords = getStopWords()\n\n # Create column 'stopwords_removed' on df\n df['stopwords_removed'] = list(map(lambda doc: [word for word in doc if word not in stopwords], df[col_name]))", "def remove_stopwords(text, use_stopwords = None, df = True, exclude_number = True):\n from nltk.corpus import stopwords\n from nltk.tokenize import word_tokenize\n \n if use_stopwords is None:\n use_stopwords = set(stopwords.words(\"english\"))\n \n if df:\n new_text = word_tokenize(text)\n if exclude_number:\n new_text = [word for word in new_text if not word.isnumeric()]\n new_text = \" \".join([word for word in new_text if word not in use_stopwords])\n else:\n new_text = \"\"\n for word in text:\n if word not in use_stopwords:\n new_text += word + \" \"\n\n return new_text", "def stopwords(self):\n with open(STOPWORDS_LIST, 'r') as content:\n return content.read().splitlines()" ]
[ "0.85951626", "0.8196474", "0.8164273", "0.813838", "0.81174606", "0.80364287", "0.80274475", "0.7989631", "0.7988493", "0.7958696", "0.79517734", "0.7930331", "0.7930331", "0.7930331", "0.7930331", "0.7913143", "0.7845168", "0.7843913", "0.7841652", "0.7822895", "0.7813601", "0.77971965", "0.77822906", "0.7713955", "0.7661834", "0.76438123", "0.7630866", "0.7629355", "0.7628658", "0.7619094", "0.7601635", "0.7587071", "0.7583836", "0.75802493", "0.756036", "0.75248563", "0.7509719", "0.7500653", "0.7474536", "0.7462295", "0.741482", "0.7407447", "0.74047685", "0.74039686", "0.73776823", "0.7373431", "0.7344591", "0.73407614", "0.7323358", "0.73143554", "0.73079044", "0.73034024", "0.7290523", "0.7289575", "0.72868866", "0.7270902", "0.7263391", "0.72554183", "0.7254475", "0.7250199", "0.72321934", "0.7223051", "0.722138", "0.7218211", "0.7214426", "0.7188828", "0.7181927", "0.71419406", "0.7114871", "0.70888", "0.7059023", "0.70533484", "0.70191485", "0.6967964", "0.6966029", "0.69534355", "0.6950539", "0.6946359", "0.68935", "0.68751854", "0.68344545", "0.6798618", "0.6782418", "0.6733931", "0.67326117", "0.6709406", "0.6696837", "0.666691", "0.66539574", "0.66129404", "0.66119325", "0.65943843", "0.65828806", "0.6554132", "0.65516484", "0.653142", "0.6516446", "0.65083027", "0.6496509", "0.64921165" ]
0.7446543
40
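For illustration only, a minimal stopword-removal sketch in the spirit of the negative snippets listed above (the stopword set here is an assumption for the example, not taken from the dataset; real pipelines typically use NLTK's English list):

    STOPWORDS = {"the", "is", "a", "an", "of", "and", "to"}

    def remove_stopwords(line: str) -> str:
        # Keep only tokens that are not in the stopword set.
        return " ".join(w for w in line.split() if w.lower() not in STOPWORDS)

    print(remove_stopwords("the quick brown fox is a friend of mine"))
    # -> "quick brown fox friend mine"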
Removes tabs, newlines and any kind of space characters
def _remove_whitespaces(self, text: str) -> str: return " ".join(re.sub("\xa0", " ", str(text)).split())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_whitespace(text):\n return text\n #return re.sub(r'\\r\\n|\\n', \"\\t\", text)", "def clean(text):\n new = text.replace(\"\\r\", \"\")\n new = new.replace(\"\\t\", \"\")\n new = new.replace(\"\\n\", \"\")\n new = new.replace(\"- \", \"-\")\n new = new.replace(\" \", \" \")\n return new", "def _clean(s):\n return re.sub(r'\\s+', ' ', s.strip())", "def cleanup_sentence(s):\n return re.sub(\"\\s+\", \" \", s.replace(\"\\t\", \"\").strip())", "def removeMultipleSpaces(self) -> None:\n self.text = re.sub('\\s+', ' ', self.text)", "def normalize_text(text):\n text = re.sub(r'[ \\t]+', ' ', text)\n text = re.sub(r'\\r', '', text)\n\n # Remove whitespace in the middle of text.\n text = re.sub(r'[ \\t]+\\n', '\\n', text)\n # Remove whitespace at the end of the text.\n text = text.rstrip()\n\n return text", "def trim_spaces_and_tabs_from_lines_in_txt(txt: str) -> str:\n\n clean_txt = ''\n\n # convert the text string into a buffer so that we can read lines.\n txt_buffer = io.StringIO(txt)\n\n # iterate through each line trimming whitespace.\n next_line = txt_buffer.readline()\n while next_line:\n stripped_next_line = next_line.lstrip(' \\t') # just want spaces and tabs.\n clean_txt += stripped_next_line\n\n # grab the next line.\n next_line = txt_buffer.readline()\n\n return clean_txt", "def remove_space(text):\n for space in spaces:\n text = text.replace(space, ' ')\n text = text.strip()\n text = re.sub('\\s+', ' ', text)\n return text", "def remove_space(text):\n for space in spaces:\n text = text.replace(space, ' ')\n text = text.strip()\n text = re.sub('\\s+', ' ', text)\n return text", "def clean_text(some_text):\n # import re\n some_clean_text = re.sub(r'\\n|\\t', '', some_text) # Remove new line and tabs\n some_clean_text = re.sub(' +', ' ', some_clean_text) # Replace multiple spaces with one space\n return some_clean_text", "def del_whitespace(selfs, text):\n\t\treturn text.replace(' ', '')", "def normalize_whitespace(text):\n return RE_NONBREAKING_SPACE.sub(\" \", RE_LINEBREAK.sub(r\"\\n\", text)).strip()", "def delete_whitespace(text: str) -> str:\n return re.sub(r'\\s+', '', text).strip()", "def strip_space():\n pass", "def removeSingleChars(self) -> None:\n self.text = re.sub('\\s[^\\n\\s]\\s', ' ', self.text)", "def normalize_whitespace(text):\n text = str(text)\n return re.sub(white_space_regex, ' ', text).strip()", "def removeExtraSpaces(text):\n\tone_space = re.sub(r'\\s+',' ', text)\n\treturn one_space", "def _squeeze_whitespace(text):\n return re.sub(r'\\s+', ' ', text)", "def normalize_space(text):\n return re.sub(r\"\\s+\", \" \", text.strip(), flags=re.UNICODE)", "def normalize_space (text):\n return RE_WS.sub (' ', text.strip ())", "def clean_text(text):\n return text.replace('\\n', ' ').replace('\\r', ' ')", "def _clean(self, string):\n return re.sub('\\s+', ' ', string).strip()", "def despace(txt):\n pre, c = splitdefines(txt)\n pre = \"\\n\".join(pre)\n txt = \" \".join(c)\n txt = txt.replace(\"\\t\", \" \")\n txt = re.sub(r\"\\s+\", \" \", txt, flags=re.S)\n txt = re.sub(r\"([a-zA-Z0-9_])\\s+([^a-zA-Z0-9_\\s])\", r\"\\1\\2\", txt, flags=re.S)\n txt = re.sub(r\"([^a-zA-Z0-9_\\s])\\s+([a-zA-Z0-9_])\", r\"\\1\\2\", txt, flags=re.S)\n txt = re.sub(r\"([^a-zA-Z0-9_\\s])\\s+([^a-zA-Z0-9_\\s])\", r\"\\1\\2\", txt, flags=re.S)\n txt = re.sub(r\"([^a-zA-Z0-9_\\s])\\s+([^a-zA-Z0-9_\\s])\", r\"\\1\\2\", txt, flags=re.S)\n return pre + \"\\n\" + txt", "def _removeWhitespaces(self, s):\n return s.translate({ord(c): None for c in string.whitespace})", "def 
normalize_whitespace(text):\n return NORMALIZE_WHITESPACE_REGEX.sub(' ', text)", "def parse_text(text):\n return re.sub(r'\\s+', \" \", text)", "def remove_whitespaces(text: str) -> str:\n return text.lstrip().rstrip()", "def strip_whitespace(self, text):\n\t\treturn text.strip()", "def remove_whitespace(text):\n text = text.strip()\n return \" \".join(text.split())", "def _StripWS(s):\r\n return re.sub('\\s+', '', s)", "def _StripWS(s):\r\n return re.sub('\\s+', '', s)", "def normalize_whitespace(text):\n text = text.replace('\"', '').replace(\"'\", '')\n return ' '.join(text.split())", "def clean_tabs(self, text):\n\n if text:\n # lines = text.split('\\n')\n lines = text.splitlines()\n lines = [l for l in lines if l]\n\n # if not lines[0]:\n # lines = lines[1:]\n\n # if not lines[0].startswith(' ') and not lines[0].startswith('\\t') and len(lines) > 1:\n # q = self.indent_width(lines[1])\n # lines[0] = ('\\t' * q) + lines[0]\n # print(q, 523523)\n\n # if not lines[0]:\n # if len(lines[0]) < 2:\n # lines = lines[1:]\n # y = lines[0] if len(lines) < 2 else lines[1]\n y = lines[0]\n # print(lines[0].count('\\t'))\n tabs = self.indent_width(y)\n return '\\n'.join([l[tabs:] for l in lines])\n else:\n return ''", "def cleanup (text) :\n l_idx = 1\n lines = text.split ('\\n')\n\n # count leading non-empty lines\n for line in lines :\n if not line.strip () :\n l_idx += 1\n else :\n break\n\n # check if there is anything more to evaluate\n if len (lines) <= l_idx :\n return text\n\n # determine indentation of that line\n indent = 0\n for c in lines[l_idx] :\n if c == ' ' :\n indent += 1\n else : \n break\n\n # if nothing found, check the following line\n if not indent :\n\n if len (lines) <= l_idx + 1:\n return text\n for c in lines[l_idx + 1] :\n if c == ' ' :\n indent += 1\n else : \n break\n\n # if still nothing found, give up\n if not indent :\n return text\n\n\n # oitherwise trim all lines by that indentation\n out = \"\"\n replace = ' ' * indent\n for line in lines :\n out += re.sub (\"%s\" % ' ' * indent, \"\", line)\n out += \"\\n\"\n\n return out", "def remove_extra_space(text):\n text = \" \".join(text.split())\n return text", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def removeExtraSpaces(self, words):\n\t\treturn re.sub(r'\\s+', ' ', words.strip()).strip()", "def normalise_whitespace(strg):\n return re.sub(r\"\\s+\", \" \", strg).strip()", "def clean_text(text):\n return(re.sub(\" {2,}|\\r|\\n\",\"\", text))", "def remove_extra_space(text):\n return re.sub(' +', ' ', text)", "def clean_text(data):\r\n data = data.replace('\\n', ' ') #remove new lines\r\n replace_l = [\"'\",'!','/','\\\\','=',',',':', '<','>','?','.','\"',')','(','|','-','#','*','+', '_'] #list of characters to remove\r\n data = data.lower() #Convert all the words to lower case\r\n for i in replace_l:\r\n data = data.replace(i,' ') #replace words with blank character\r\n return data #return clean data\r", "def converttabs(text, spaces=4):\n\n return text.replace('\\t', ' ' * spaces)", "def clean_whitespaces(text):\n length = len(text)\n i = 0\n prev_char = None\n while i < length:\n curr_char = text[i]\n return_char = curr_char if curr_char not in string.whitespace else \" \"\n\n if not (prev_char == \" \" and return_char == \" \"):\n yield return_char\n\n prev_char = return_char\n i += 1", 
"def compact_spaces(st: str) -> str:\n st = st.strip()\n ret = ''\n in_quotes = False\n added_space = False\n for x in st:\n if x == '\"':\n in_quotes = not in_quotes\n added_space = False\n ret += x\n elif in_quotes:\n ret += x\n elif x in ('\\t', '\\n', '\\r', ' '):\n if not added_space:\n ret += ' '\n added_space = True\n else:\n added_space = False\n ret += x\n\n return ret", "def distillTabs(someString):\n if someString is None:\n return None\n while True:\n if someString.find('\\t\\t') == 0:\n someString = someString.replace('\\t\\t','\\t')\n continue\n return someString.replace('\\t', ' ')", "def _detab(self, text):\r\n if '\\t' not in text:\r\n return text\r\n return self._detab_re.subn(self._detab_sub, text)[0]", "def TRIM(text):\n return _trim_re.sub(' ', text.strip())", "def normalize(s):\n return s.strip(inline_whitespace)", "def clean_up_tokenization_spaces(out_string):\n out_string = out_string.replace('<unk>', '')\n out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','\n ).replace(\" ' \", \"'\").replace(\" n't\", \"n't\").replace(\" 'm\", \"'m\").replace(\" do not\", \" don't\"\n ).replace(\" 's\", \"'s\").replace(\" 've\", \"'ve\").replace(\" 're\", \"'re\")\n return out_string", "def strip_space(string):\n return string.replace(' ', '')", "def clean_indent(txt):\n return \"\\n\".join(x.strip() for x in txt.splitlines())", "def detab(self, text):\r\n newtext = []\r\n lines = text.split('\\n')\r\n for line in lines:\r\n if line.startswith(' '*self.tab_length):\r\n newtext.append(line[self.tab_length:])\r\n elif not line.strip():\r\n newtext.append('')\r\n else:\r\n break\r\n return '\\n'.join(newtext), '\\n'.join(lines[len(newtext):])", "def clean(line):\n line = line.lower().replace(\"\\n\",\" \").replace(\"\\r\",\"\").replace(',',\"\").replace(\">\",\"> \").replace(\"<\", \" <\").replace(\"|\",\" \")\n return line", "def stripchars(string):\n return ''.join(re.split('\\t+|\\n+', string))", "def remove_tab_space(self):\n self.result_code = open(\"result.c\", \"r\") # Opening the intermediate file in 'read' mode.\n self.line_array = self.result_code.readlines() # Obtaining an array of strings, where each string is a line from the intermediate file.\n self.result_code.close() # Closing the intermediate file.\n\n self.result_code = open(\"result.c\", \"w\") # Opening the intermediate file in 'write' mode.\n # Looping over all the lines in the input file.\n for line in self.line_array:\n # Checking if the line begins with a white space.\n if line[0] == \" \":\n # Checking from which position the code begins over a loop, in order to remove the tab space.\n for c in range(1, len(line)):\n if line[c] != \" \":\n index = c # Making note of the position from which the code begins in the line.\n break\n self.result_code.write(line[index:]) # Writing the line without the tab space into the intermediate file.\n else:\n self.result_code.write(line) # Writing the entire line into the intermediate file in case there is no tab space at the beginning.\n\n self.result_code.close() # Closing the intermediate file.", "def _remove_special_chars(sentence, replace_with=\"\"):\n sentence = sentence.replace('\\n', replace_with).replace('\\t', replace_with)\n return sentence", "def _remove_extra_whitespaces(self, text: str) -> str:\n return re.sub(\" +\", \" \", text)", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n 
output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "def condense_html_whitespace(html): # first space between tags, then empty new lines and in-between.\n log.debug(\"Removing unnecessary HTML White Spaces and Empty New Lines.\")\n is_ok = \"<textarea\" not in html.lower() and \"<pre\" not in html.lower()\n html = re.sub(r'>\\s+<', '> <', html) if is_ok else html\n return re.sub(r'\\s{2,}|[\\r\\n]', ' ', html) if is_ok else html.strip()", "def normalizeSpaces(strText, bDouble=False):\n if bDouble:\n strText = re.sub(r\"[ ]+\", r\" \", strText, flags=re.UNICODE)\n # Remove double spaces from groups\n return re.sub(r\"([(|]) ([|)])\", r\"\\g<1> \\g<2>\", strText, flags=re.UNICODE)\n\n return re.sub(r\"[ ]+\", r\" \", strText, flags=re.UNICODE)", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_header(klass, s):\n return re.sub(r\"[\\n\\r\\t]+\", \" \", s).strip()", "def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text", "def clean_training_text(txt):\n return re.sub('[^A-Za-z0-9]+', ' ', str(txt)).strip()", "def cleaningIndent(text):\n\n text = re.sub(r'^[\\s \\t]+', r'', text)\n text = re.sub(r'[\\s \\t]+$', r'', text)\n text = re.sub(r'[\\r\\n]+', r'\\r\\n', text)\n text = re.sub(r'(<(/p|/h[1-6]|/?div|/head|/l|/?lg|/?body|/?back|/?text|/?front)>)', r'\\1\\r\\n', text, flags=re.DOTALL|re.IGNORECASE)\n text = re.sub(r'([^\\r\\n<>])[\\r\\n]+([^\\r\\n<>])', r'\\1 \\2', text, flags=re.DOTALL|re.IGNORECASE)\n text = re.sub(r'([^>$])\\r\\n *(<seg)', r'\\1 \\2', text, flags=re.DOTALL|re.IGNORECASE)\n text = re.sub(r'(>)[\\r\\n]+([^\\s<>])', r'\\1 \\2', text, flags=re.DOTALL|re.IGNORECASE)\n text = re.sub(r'<p> +', r'<p>', text, flags=re.DOTALL|re.IGNORECASE)\n text = re.sub(r'[\\r\\n]+', r'\\r\\n', text)\n text = re.sub(r' +', r' ', text)\n text = re.sub(r'<p(>| [^>]*>)\\s*</p>', r' ', text)\n return text", "def NormalizeWhitespace (text, preserve=False, replace=False, collapse=False):\n if preserve:\n return text\n text = __TabCRLF_re.sub(' ', text)\n if replace:\n return text\n if collapse:\n return __MultiSpace_re.sub(' ', text).strip()\n # pyxb not imported here; could be.\n raise Exception('NormalizeWhitespace: No normalization specified')", "def rem_whitespace(string):\n unwanted_chars = punctuation + whitespace\n\n pat_l = [r'[' + unwanted_chars + ']',\n r'\\s+',\n r' ',\n r' \\\\',\n r' \\ '\n ]\n\n for p in pat_l:\n rx = re.compile(p)\n string = re.sub(rx, ' ', string)\n\n return string.strip()", "def remove_excess_white_space(lines: str):\n two_plus_white_space = r\"\\s{2,}\"\n return re.sub(two_plus_white_space, \"\", lines)", "def test_remove_multiple_spaces():\n questions_parser = QuestionsParser()\n assert 
questions_parser.remove_multiple_spaces('Sentence with multiple spaces') == 'Sentence with multiple spaces'", "def clean_text(text):\n\n\n regex = re.compile('[\\.|\\-|\\,|\\?|\\_|\\:|\\\"|\\)|\\(\\)\\/|\\\\|\\>|\\<]')\n text = text.lower() # Turn everything to lower case\n text = regex.sub(' ', text).strip()\n out = re.sub(' +', ' ', text) # Reduce whitespace down to one\n \n return out", "def space_strip(string):\n string= re.sub(\"(?m)^\\s+\", \"\", string)\n return re.sub(\"(?m)\\s+$\", \"\", string)", "def remove_whitespace(text):\n # type (str) -> str\n if text is None:\n return \"\"\n return \"\".join(text.split())", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def _text_remove_s(all_text):\n\t# on n'agit que s'il y a au moins un cara plein\n\t\t# => pas les elts vides, ni \\s dont saut de ligne\n\tif len(all_text) and search('[^\\s]', all_text, flags=MULTILINE):\n\t\tflat_alltext = sub(r'\\n', '¤', all_text, flags=MULTILINE)\n\t\tflat_alltext = sub(r'[¤\\s]+$', '', flat_alltext)\n\t\tflat_alltext = sub(r'^[¤\\s]+', '', flat_alltext)\n\telse:\n\t\tflat_alltext = ''\n\treturn flat_alltext", "def CleanText(text):\n\n pretty_issue = text.lower().strip()\n\n quoteless_issue = re.sub('\\'', '', pretty_issue)\n no_punctuation_issue = re.sub('[^\\w\\s]|_+', ' ', quoteless_issue)\n one_space_issue = ' '.join(no_punctuation_issue.split())\n\n return one_space_issue", "def remove_white_space(ls):\r\n for i in range(len(ls)):\r\n ls[i] = re.sub(r'\\s+', '', ls[i])\r\n\r\n return ls", "def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt", "def reduce_spaces(tweet):\r\n text = tweet.strip()\r\n text = \" \".join(text.split())\r\n return re.sub(' +', ' ', text)", "def compress(clean):\n if clean is None:\n return None\n clean = re.sub(r'[\\r\\n\\t\\xa0]', ' ', clean)\n clean = re.sub(r'&nbsp;?', ' ', clean)\n clean = re.sub(r'\\s+', ' ', clean)\n return clean.strip()", "def clean_text(text, pattern):\n \n text = unidecode.unidecode(text)\n text.replace('\\\\n', '')\n text.strip(' \\\\n')\n text = re.sub(pattern, ' ', str(text))\n text = re.sub('(\\\\*n)', ' ', str(text))\n text = re.sub('\\w*\\d\\w*', ' ', str(text))\n text = re.sub(' ', ' ', str(text))\n return text", "def remove_spaces(user_data):\n fixed_user_input = re.sub(r\"\\s+\", \"\", user_data)\n return fixed_user_input", "def cleanText(self, stripNonAlphaNumeric=False, stripNumod_byers=False):\n if stripNonAlphaNumeric:\n txt = REG_CLEAN1.sub(\" \", self.getRawText())\n else:\n txt = self.getRawText()\n\n # clean up white spaces\n txt = REG_CLEAN2.sub(\" \", txt)\n if stripNumod_byers:\n txt = REG_CLEAN3.sub(\"\", txt)\n\n self.graph[\"__scope\"] = (0, len(txt))\n self.graph[\"__txt\"] = txt\n if self.getVerbose():\n print(\"cleaned text is now\", self.getText())", "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)", "def __cleanText(self,stripNonAlphaNumeric=False, stripNumbers=False):\n if stripNonAlphaNumeric:\n txt = r1.sub(\" \",self.getRawText() )\n else:\n txt = self.getRawText()\n # clean up white spaces\n txt = r2.sub(\" \",txt)\n if stripNumbers:\n 
txt = r3.sub(\"\",txt)\n self.graph[\"__txt\"] = txt\n self.graph[\"__scope\"] = (0,len(txt))", "def clean_table_text(raw_table_text):\n # First we split the tables at all newline characters\n table_entries = raw_table_text.split('\\n')\n \n # Define matcher for alphabetical entries\n word_match = re.compile('[a-zA-Z]+')\n \n # For all table entries remove those which are not alphabetical\n for entry in table_entries[:]:\n if word_match.match(entry) == None:\n table_entries.remove(entry)\n cleaned_table_text = ' '.join(table_entries).strip()\n \n return (cleaned_table_text)", "def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? \", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()", "def clean_text(text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or is_control(char):\n continue\n if is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def _despace(statement):\n return re.sub(r' +', ' ', statement)", "def _preprocess(self, source):\n source = source.replace(u'\\n', u'').strip()\n source = re.sub(r'<br\\s*\\/?\\s*>', u' ', source, re.I)\n source = re.sub(r'\\s\\s+', u' ', source)\n return source", "def clean_unnecessary_whitespaces(self, tweet):\n tweet = ' '.join(tweet.split())\n\n return tweet", "def clean_text_for_skill_extraction(text):\n multi_space_regex = re.compile(r\"[,;?!()\\\\/]\", re.IGNORECASE)\n text = re.sub(multi_space_regex, ' ', text)\n\n text = clean_text_from_private_unicode(text)\n text = clean_text_from_geometrical_shape_unicode(text)\n\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n\n return text", "def _sanitize(text):\n # TODO: any cleanup needed here?\n if text is None:\n return None\n text = text.replace('\\n', ' ')\n return text", "def cleanSents(row, field):\n\n text = str(row[field]).lower()\n clean_text = re.sub('[^A-Za-z0-9]+', ' ', text).strip()\n return clean_text", "def clean_string(s):\n c = re.sub(r'\\s+', ' ', re.sub(r'[^A-Za-z0-9 .:]', '', s))\n return c", "def replace_newline_with_space(text):\n return re.sub(\"[\\n\\r]\", \" \", text)", "def compress_whitespace(cls, s):\n # Cast to string\n s = str(s).strip()\n\n # Sanity check\n if (len(s) == 0):\n return ''\n\n s = re.sub(r'\\s', ' ', s)\n s = re.sub(r' +', ' ', s)\n\n return s.strip()", "def clean_text(text):\n text = text.replace(\"\\uf0b7\", \" \")\n text = text.replace(\":\", \" \")\n text = text.replace(\".\", \" \")\n text = text.replace(\",\", \" \")\n text = text.replace(\"/\", \" \")\n text = text.replace(\"(\", \" \")\n text = text.replace(\")\", \" \")\n text = text.replace(\"[\", \" \")\n text = text.replace(\"]\", \" \")\n text = text.replace(\"+\", \" \")\n text = text.replace(\"?\", \" \")\n text = text.replace(\"*\", \" \")\n text = text.replace(\"#\", \" \")\n\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n\n text = re.sub(\" $\", \"\", text)\n return text", "def remove_space(user_inputs):\r\n return user_inputs.replace(\" \", \"\")" ]
[ "0.8449928", "0.7658336", "0.73809946", "0.7345701", "0.7265191", "0.726251", "0.7227112", "0.7216958", "0.7216958", "0.7206436", "0.72063667", "0.7186538", "0.71344566", "0.7107262", "0.70810986", "0.7080624", "0.7078933", "0.6926076", "0.6899674", "0.68935776", "0.6882423", "0.6863396", "0.6850081", "0.6849867", "0.68335897", "0.6833248", "0.6811353", "0.6758969", "0.6755394", "0.67163104", "0.67163104", "0.67138255", "0.6708798", "0.66892254", "0.6678804", "0.6668556", "0.6658024", "0.665104", "0.6621658", "0.66175365", "0.65929353", "0.6585988", "0.657724", "0.65712315", "0.65635294", "0.65392375", "0.6529394", "0.6524259", "0.65198624", "0.6515746", "0.64979535", "0.64901626", "0.6487872", "0.6485144", "0.64759654", "0.64660686", "0.64619637", "0.6444567", "0.6422235", "0.64219964", "0.64208704", "0.6418293", "0.6418293", "0.6415514", "0.6413456", "0.64104944", "0.64057434", "0.6400132", "0.6396233", "0.6395783", "0.6383084", "0.63762003", "0.6372643", "0.6371321", "0.6357468", "0.6356785", "0.6356752", "0.63547343", "0.63535494", "0.63520795", "0.6342352", "0.6337893", "0.633758", "0.6328792", "0.6316854", "0.6316117", "0.6309601", "0.6305484", "0.62934786", "0.629284", "0.62919927", "0.6289938", "0.62890786", "0.6284796", "0.6279564", "0.6264051", "0.6260473", "0.62593675", "0.62493944", "0.6233949" ]
0.6948386
17
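A self-contained sketch of the whitespace-normalising document in the record above (rewritten as a plain function for illustration; the original is a class method), showing how non-breaking spaces, tabs and newlines all collapse to single spaces:

    import re

    def remove_whitespaces(text: str) -> str:
        # Replace non-breaking spaces, then let split()/join() collapse tabs,
        # newlines and runs of spaces into single spaces.
        return " ".join(re.sub("\xa0", " ", str(text)).split())

    print(remove_whitespaces("a\tb\n c\xa0 d"))  # -> "a b c d"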
Reduces multiple whitespaces to single whitespace
def _remove_extra_whitespaces(self, text: str) -> str: return re.sub(" +", " ", text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeMultipleSpaces(self) -> None:\n self.text = re.sub('\\s+', ' ', self.text)", "def condense_whitespace(css):\n log.debug(\"Condensing all unnecessary white spaces.\")\n return re.sub(r\"\\s+\", \" \", css)", "def normalize_whitespace(text):\n return NORMALIZE_WHITESPACE_REGEX.sub(' ', text)", "def removeSingleChars(self) -> None:\n self.text = re.sub('\\s[^\\n\\s]\\s', ' ', self.text)", "def _squeeze_whitespace(text):\n return re.sub(r'\\s+', ' ', text)", "def reduce_spaces(tweet):\r\n text = tweet.strip()\r\n text = \" \".join(text.split())\r\n return re.sub(' +', ' ', text)", "def normalize_whitespace(text):\n text = str(text)\n return re.sub(white_space_regex, ' ', text).strip()", "def normalize_whitespace(text):\n return RE_NONBREAKING_SPACE.sub(\" \", RE_LINEBREAK.sub(r\"\\n\", text)).strip()", "def normalize_space (text):\n return RE_WS.sub (' ', text.strip ())", "def del_whitespace(selfs, text):\n\t\treturn text.replace(' ', '')", "def normalise_whitespace(strg):\n return re.sub(r\"\\s+\", \" \", strg).strip()", "def removeExtraSpaces(self, words):\n\t\treturn re.sub(r'\\s+', ' ', words.strip()).strip()", "def removeExtraSpaces(text):\n\tone_space = re.sub(r'\\s+',' ', text)\n\treturn one_space", "def strip_space():\n pass", "def normalizeSpaces(strText, bDouble=False):\n if bDouble:\n strText = re.sub(r\"[ ]+\", r\" \", strText, flags=re.UNICODE)\n # Remove double spaces from groups\n return re.sub(r\"([(|]) ([|)])\", r\"\\g<1> \\g<2>\", strText, flags=re.UNICODE)\n\n return re.sub(r\"[ ]+\", r\" \", strText, flags=re.UNICODE)", "def clean_text_from_multiple_consecutive_whitespaces(text):\n multi_space_regex = re.compile(r\"\\s+\", re.IGNORECASE)\n return re.sub(multi_space_regex, ' ', text)", "def clean_whitespace(text):\n return text\n #return re.sub(r'\\r\\n|\\n', \"\\t\", text)", "def shrink_whitespace(data: pd.Series) -> pd.Series:\n data = data.replace(r'\\s+', value=' ', regex=True)\n return data.str.strip()", "def _clean(s):\n return re.sub(r'\\s+', ' ', s.strip())", "def remove_space(text):\n for space in spaces:\n text = text.replace(space, ' ')\n text = text.strip()\n text = re.sub('\\s+', ' ', text)\n return text", "def remove_space(text):\n for space in spaces:\n text = text.replace(space, ' ')\n text = text.strip()\n text = re.sub('\\s+', ' ', text)\n return text", "def remove_white_space(ls):\r\n for i in range(len(ls)):\r\n ls[i] = re.sub(r'\\s+', '', ls[i])\r\n\r\n return ls", "def remove_excess_white_space(lines: str):\n two_plus_white_space = r\"\\s{2,}\"\n return re.sub(two_plus_white_space, \"\", lines)", "def _despace(statement):\n return re.sub(r' +', ' ', statement)", "def _remove_whitespaces(self, text: str) -> str:\n return \" \".join(re.sub(\"\\xa0\", \" \", str(text)).split())", "def RemoveWhiteSpace(value):\n return \"\".join(value.split())", "def normalize_space(text):\n return re.sub(r\"\\s+\", \" \", text.strip(), flags=re.UNICODE)", "def clean_unnecessary_whitespaces(self, tweet):\n tweet = ' '.join(tweet.split())\n\n return tweet", "def compress_whitespace(cls, s):\n # Cast to string\n s = str(s).strip()\n\n # Sanity check\n if (len(s) == 0):\n return ''\n\n s = re.sub(r'\\s', ' ', s)\n s = re.sub(r' +', ' ', s)\n\n return s.strip()", "def NormalizeWhitespace (text, preserve=False, replace=False, collapse=False):\n if preserve:\n return text\n text = __TabCRLF_re.sub(' ', text)\n if replace:\n return text\n if collapse:\n return __MultiSpace_re.sub(' ', text).strip()\n # pyxb not imported here; could be.\n raise 
Exception('NormalizeWhitespace: No normalization specified')", "def remove_extra_space(text):\n return re.sub(' +', ' ', text)", "def remove_extra_middle_spaces(x):\n\n return \" \".join(x.split())", "def condense_html_whitespace(html): # first space between tags, then empty new lines and in-between.\n log.debug(\"Removing unnecessary HTML White Spaces and Empty New Lines.\")\n is_ok = \"<textarea\" not in html.lower() and \"<pre\" not in html.lower()\n html = re.sub(r'>\\s+<', '> <', html) if is_ok else html\n return re.sub(r'\\s{2,}|[\\r\\n]', ' ', html) if is_ok else html.strip()", "def remove_space(user_inputs):\r\n return user_inputs.replace(\" \", \"\")", "def test_remove_multiple_spaces():\n questions_parser = QuestionsParser()\n assert questions_parser.remove_multiple_spaces('Sentence with multiple spaces') == 'Sentence with multiple spaces'", "def normalize_whitespace(text):\n text = text.replace('\"', '').replace(\"'\", '')\n return ' '.join(text.split())", "def _removeWhitespaces(self, s):\n return s.translate({ord(c): None for c in string.whitespace})", "def normalize(s):\n return s.strip(inline_whitespace)", "def clean_whitespaces(text):\n length = len(text)\n i = 0\n prev_char = None\n while i < length:\n curr_char = text[i]\n return_char = curr_char if curr_char not in string.whitespace else \" \"\n\n if not (prev_char == \" \" and return_char == \" \"):\n yield return_char\n\n prev_char = return_char\n i += 1", "def remove_whitespace(text):\n text = text.strip()\n return \" \".join(text.split())", "def shrink_space(data):\n # remove leading and trailing spaces\n data = data.strip()\n # collapse multiple lines to one single line\n data = re.sub(\"\\n+\",\"\\n\",data)\n\n return data", "def remove_extra_space(text):\n text = \" \".join(text.split())\n return text", "def _clean(self, string):\n return re.sub('\\s+', ' ', string).strip()", "def remove_whitespaces(text: str) -> str:\n return text.lstrip().rstrip()", "def replace_whitespace(s, rep=' '):\n s = re.sub(r'\\s+', rep, s)\n return s", "def delete_whitespace(text: str) -> str:\n return re.sub(r'\\s+', '', text).strip()", "def remove_white_spaces(input_string):\n return re.sub(r'\\s+', ' ', input_string).strip()", "def strip_space(string):\n return string.replace(' ', '')", "def convert_spaces(msg_list):\n new_list = []\n for str in msg_list:\n new_list.append(str.replace(' ', '+'))\n return (new_list)", "def normalize_whitespace(doc):\n doc.xml_normalize() # Merge text nodes where possible\n for text in list(doc.xml_select('descendant::text()')):\n # If text node is all whitespace or empty, remove it.\n if not text.xml_value.strip():\n text.xml_parent.xml_remove(text)", "def preserve_whitespace(v, quote=True):\n if quote:\n v = html_quote(v)\n v = v.replace('\\n', '<br>\\n')\n v = re.sub(r'()( +)', _repl_nbsp, v)\n v = re.sub(r'(\\n)( +)', _repl_nbsp, v)\n v = re.sub(r'^()( +)', _repl_nbsp, v)\n return '<code>%s</code>' % v", "def collapse(s):\n return ' '.join(s.split()).strip()", "def add_spaces(text):\n return \" \".join(text)", "def remove_spaces(user_data):\n fixed_user_input = re.sub(r\"\\s+\", \"\", user_data)\n return fixed_user_input", "def comma_for_space(x):\n x = strip_stoich_wrapper(x)\n x = x.replace(\" \", \",\")\n return x", "def reduceBlank(text, keepNewLines=False):\n if text is None:\n return None\n text = text.strip()\n if not keepNewLines:\n return re.sub(r'\\s+', ' ', text)\n else:\n text = re.sub(r'\\r', '\\n', text)\n text = re.sub(r'\\s*\\n+\\s*', '\\n', text)\n text = re.sub(r'[ \\t\\f\\v]+', ' 
', text)\n return text", "def to_whitespace(self, empty=False, strip_nl=False, strip_comments=False):\n if strip_nl and self.type == tokenize.NL:\n self.string = \"\"\n return\n if strip_comments and self.type == tokenize.COMMENT:\n self.string = \"\"\n return\n if self.type in ignored_types_set:\n return\n if empty:\n self.string = \"\"\n else:\n self.string = \" \" * len(self.token_tuple[1])", "def cleanup_sentence(s):\n return re.sub(\"\\s+\", \" \", s.replace(\"\\t\", \"\").strip())", "def remove_repeated_spaces(text: str) -> str:\n\n return _repeated_spaces.sub(' ', text)", "def clean_up_tokenization_spaces(out_string):\n out_string = out_string.replace('<unk>', '')\n out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','\n ).replace(\" ' \", \"'\").replace(\" n't\", \"n't\").replace(\" 'm\", \"'m\").replace(\" do not\", \" don't\"\n ).replace(\" 's\", \"'s\").replace(\" 've\", \"'ve\").replace(\" 're\", \"'re\")\n return out_string", "def space_strip(string):\n string= re.sub(\"(?m)^\\s+\", \"\", string)\n return re.sub(\"(?m)\\s+$\", \"\", string)", "def fix_whitespace(lines: Sequence[str], eol: str, ends_with_eol: bool) -> str:\n lines = _strip(lines)\n lines = [i.expandtabs(4) for i in lines]\n result = eol.join(lines)\n if ends_with_eol:\n result += eol\n return result", "def replace_spaces_with_pluses(self, sample):\r\n changed = list(sample)\r\n for i, c in enumerate(changed):\r\n if(c == ' ' or c ==' ' or c ==' ' or c=='\\n' or c=='\\n\\n'):\r\n changed[i] = '+'\r\n return ''.join(changed)", "def remove_whitespace(text):\n # type (str) -> str\n if text is None:\n return \"\"\n return \"\".join(text.split())", "def _StripWS(s):\r\n return re.sub('\\s+', '', s)", "def _StripWS(s):\r\n return re.sub('\\s+', '', s)", "def clean(val):\n\n val = re.sub(r'/s+', r'/s', val)\n return val.strip()", "def TRIM(text):\n return _trim_re.sub(' ', text.strip())", "def strip_whitespace(self, text):\n\t\treturn text.strip()", "def simplify(text, whitespace=string.whitespace, delete=\"\"):\n result = []\n word = \"\"\n for char in text:\n if char in delete:\n continue\n elif char in whitespace:\n if word:\n result.append(word)\n word = \"\"\n else:\n word += char\n if word:\n result.append(word)\n return \" \".join(result)", "def remove_whitespace_rarity(s, i):\n text = s.replace(' ', '')\n if os.linesep.join([s for s in text.splitlines() if s]) == '':\n return('None')\n else:\n return(os.linesep.join([s for s in text.splitlines() if s]))", "def ignore_whitespaces(self):\n\n whitespaces = [' ', '\\t', '\\n', '\\r']\n while self.index < self.length and self.xtext[self.index] in whitespaces:\n self.index += 1", "def parse_text(text):\n return re.sub(r'\\s+', \" \", text)", "def preprocess_single_tweet(single_tweet):\n \n single_tweet = remove_stopwords_and_lemmatize(strip_links(single_tweet))\n single_tweet = (lambda single_twt: re.sub(r'[^a-zA-Z]', ' ', single_twt))(single_tweet)\n single_tweet = (lambda x: re.sub(' ', ' ', x))(single_tweet)\n \n return single_tweet", "def delete_whitespaces(str):\n global legal_white_spaces\n\n try:\n str_copy = str\n for i in legal_white_spaces:\n str_copy = str_copy.replace(i, '')\n return str_copy\n except Exception as e:\n print(e)\n return None", "def whitespace_sorter(sentence):\r\n sentence_copy = str(sentence)\r\n sentence_copy = sentence_copy.strip() #Remove leading and trailing whitespace (/s)\r\n sentence_copy = re.sub(\" +\", \" \", sentence_copy) #Coerces all multiple /s characters into a single /s\r\n #It 
identifies a /s followed by any nonzero number of /s and replaces this with a single /s \r\n return sentence_copy", "def compact_spaces(st: str) -> str:\n st = st.strip()\n ret = ''\n in_quotes = False\n added_space = False\n for x in st:\n if x == '\"':\n in_quotes = not in_quotes\n added_space = False\n ret += x\n elif in_quotes:\n ret += x\n elif x in ('\\t', '\\n', '\\r', ' '):\n if not added_space:\n ret += ' '\n added_space = True\n else:\n added_space = False\n ret += x\n\n return ret", "def _preprocess(self, source):\n source = source.replace(u'\\n', u'').strip()\n source = re.sub(r'<br\\s*\\/?\\s*>', u' ', source, re.I)\n source = re.sub(r'\\s\\s+', u' ', source)\n return source", "def ignore_whitespace(a):\n WHITE_MAP = dict.fromkeys(ord(c) for c in string.whitespace)\n return a.translate(WHITE_MAP)", "def _reset_leading_whitespace(self):\n self._leading_whitespace = ''", "def strip_whitespace(source_string):\n return replace_by_dict(source_string, replace_dict_whitespace)", "def _get_whitespace(line):\n return line[:-len(line.lstrip())]", "def skipWhiteSpace(self):\n pass", "def remove_double_spaces(conversion):\n # '\\n' chars get converted to double blanks ' ' + ' ' ==========================\n # So the below for loop removes one of those ' '\n then_a_space = 0\n length_of_conversion = len(conversion)\n deletion_list = []\n for o in range(length_of_conversion):\n if conversion[o] == ' ':\n then_a_space += 1\n if conversion[o] != ' ':\n then_a_space = 0\n if then_a_space == 2:\n # takes out the needed index for removal since I can't del an index in a for loop\n deletion_list.append(o)\n length_of_deletion_list = len(deletion_list)\n del_iteration = 1\n for u in range(length_of_deletion_list):\n del conversion[deletion_list[u]]\n if u + 1 == length_of_deletion_list: # to avoid an index error as u would become too high in the last portion\n continue\n deletion_list[u + 1] -= del_iteration\n del_iteration += 1", "def cleaner(self, w_old):\n w_new = re.sub('[\\(\\)]', '', w_old)\n w_new = re.sub('[^А-Яа-яЁё ]', 'ъ', w_new)\n w_new = re.sub(' ', ' ', w_new)\n return w_new", "def filter_blanks(user, str):\n return re.sub(r'\\n{2}\\n+', '\\n', str)", "def spacify(string):\n return \" \" + \" \".join(string.splitlines(True))", "def html_space(text):\r\n return cgi.escape(text).replace('\\t', ' ').replace(' ', '&nbsp;')", "def clean_white_spaces(string):\n try:\n # in case it is in byte value\n string = string.decode('utf-8')\n except:\n pass\n\n res = ''\n words = string.split()\n for word in words:\n res = res + str(word)\n return res", "def preprocess_nmt(text):\n def no_space(char, prev_char):\n return char in set(',.!?') and prev_char != ' '\n\n # Replace non-breaking space with space, and convert uppercase letters to\n # lowercase ones\n text = text.replace('\\u202f', ' ').replace('\\xa0', ' ').lower()\n # Insert space between words and punctuation marks\n out = [' ' + char if i > 0 and no_space(char, text[i - 1]) else char\n for i, char in enumerate(text)]\n return ''.join(out)", "def munge(self, stylesheet: str) -> str:\n s = ''.join([s.lstrip().replace(' ', ' ').replace(' \\n', '\\n')\n for s in g.splitLines(stylesheet)])\n return s.rstrip() # Don't care about ending newline.", "def normalise_query(query: str) -> str:\n return EXTRA_WHITE_SPACE.sub(' ', query.strip())", "def replace_whitespace(input: str, *, replace_str: str = '') -> str:\n return WHITESPACE_REGEX.sub(replace_str, input)", "def compress(clean):\n if clean is None:\n return None\n clean = 
re.sub(r'[\\r\\n\\t\\xa0]', ' ', clean)\n clean = re.sub(r'&nbsp;?', ' ', clean)\n clean = re.sub(r'\\s+', ' ', clean)\n return clean.strip()", "def space_detokenizer(batch: List[List[str]]) -> List[str]:\n return [\" \".join(tokens) for tokens in batch]", "def trim_whitespace(str):\n return str.strip()", "def rem_whitespace(string):\n unwanted_chars = punctuation + whitespace\n\n pat_l = [r'[' + unwanted_chars + ']',\n r'\\s+',\n r' ',\n r' \\\\',\n r' \\ '\n ]\n\n for p in pat_l:\n rx = re.compile(p)\n string = re.sub(rx, ' ', string)\n\n return string.strip()", "def unscorize(s):\n return s.replace(\" \", \"_\")", "def remove_space(line):\n split_line = line.split()\n return \"\".join(split_line)", "def replace_newline_with_space(text):\n return re.sub(\"[\\n\\r]\", \" \", text)" ]
[ "0.80924445", "0.7733814", "0.77270997", "0.7487072", "0.7446078", "0.73546934", "0.7331565", "0.73040843", "0.72991526", "0.72107947", "0.7194275", "0.7159008", "0.7136506", "0.7133596", "0.71249396", "0.70942706", "0.70841247", "0.70305616", "0.70219487", "0.69447887", "0.69447887", "0.6906426", "0.68875885", "0.68708366", "0.6835623", "0.68208146", "0.68182", "0.6811238", "0.67622745", "0.6745835", "0.6738393", "0.6730113", "0.6704597", "0.6672822", "0.66674745", "0.66228837", "0.6608093", "0.66078943", "0.6586128", "0.65749323", "0.65245503", "0.6510117", "0.64960843", "0.6474502", "0.641059", "0.6404469", "0.63932943", "0.6371563", "0.6313758", "0.63088685", "0.63044566", "0.63034797", "0.63012993", "0.6279003", "0.6266397", "0.62630486", "0.62499243", "0.6245446", "0.62385285", "0.62354934", "0.6234241", "0.62211317", "0.6212824", "0.62017584", "0.6196708", "0.6196708", "0.6177293", "0.61743957", "0.6169703", "0.61379856", "0.6075421", "0.60714746", "0.6052097", "0.60501736", "0.59961265", "0.5994637", "0.5990692", "0.5986462", "0.5976169", "0.5957996", "0.5953584", "0.5951157", "0.5943889", "0.59380907", "0.5916627", "0.5913751", "0.5911834", "0.59059846", "0.59011626", "0.590112", "0.58868957", "0.5874468", "0.5868052", "0.5864368", "0.585286", "0.58469456", "0.5844895", "0.58340883", "0.583259", "0.58308786" ]
0.68979657
22
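Likewise, a standalone sketch of the multiple-whitespace reducer in the record above (plain function instead of a method; note the pattern only collapses runs of ordinary space characters, not tabs or newlines):

    import re

    def remove_extra_whitespaces(text: str) -> str:
        # Collapse runs of spaces into a single space.
        return re.sub(" +", " ", text)

    print(remove_extra_whitespaces("too   many    spaces"))  # -> "too many spaces"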
This appears to be totally unused, and leftover from testing. It may, in fact, be better/more efficient than the used walk_tree() method.
def work_tree(obj, **kwargs): max_depth = 0 exclusions = kwargs.get('exclusions', {"groups": [], "classes": [], "params": []}) groups_done = {} classes = {"depths": {}, "content": {}} params = {"depths": {}, "content": {}} if hasattr(obj, 'hostname') and not hasattr(obj, 'name'): obj.name = obj.hostname to_index = [(obj, 1)] while to_index: (obj, depth) = to_index.pop() if obj.name in groups_done and groups_done[obj.name] <= depth: continue objclasses = obj.classes.exclude(classname__in=exclusions['classes']) updated_classes = update_values(objclasses, "classname", "classparams", depth=depth, results=classes) objparams = obj.parameters.exclude(paramkey__in=exclusions['params']) updated_params = update_values(objparams, "paramkey", "paramvalue", depth=depth, results=params) if not updated_classes or not updated_params: return ("Fail", "Fail") groups_done[obj.name] = depth depth += 1 for group in obj.groups.exclude(name__in=exclusions['groups']): to_index.append((group, depth)) if max_depth < depth: max_depth = depth params["content"]['max_depth'] = max_depth params["content"]['done_count'] = len(groups_done) return (classes["content"], params["content"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def traverse_tree(file, tree):\n\n\tfor node in tree.get_children():\n\t\tpass", "def __walk_tree(self):\n for root, dirnames, files in os.walk(self.path, topdown=True):\n self.dirCount += 1\n # Create a tuple with the file size, the file name and the files inode (for tracking hard links).\n files = [\n (os.lstat(os.path.join(root, fi)).st_size, os.path.join(root, fi), os.lstat(os.path.join(root, fi)).st_ino) for fi\n in files if (os.lstat(os.path.join(root, fi)).st_size > self.size)]\n self.fileList.extend(files)\n if len(self.excludeList) > 0:\n dirnames[:] = [dir for dir in dirnames if dir not in self.excludeList]\n if not self.cross_mount_points:\n dirnames[:] = [dir for dir in dirnames if not os.path.ismount(os.path.join(root, dir))]", "def _traverse_tree(self):\n if not self.children:\n yield self\n for child in self.children:\n yield from child._traverse_tree()", "def test_render_tree(self) -> None:\n\n def get_children(node):\n return node.children\n\n node, expect, withtags = self.tree_case_1()\n actual = render_tree(node, get_children)\n assert expect == actual, (expect, actual)\n\n node, expect, withtags = self.tree_case_2()\n actual = render_tree(node, get_children, 1)\n assert expect == actual, (expect, actual)\n\n # Ensure that we can call render_tree on the same Node\n # again. This wasn't possible in version 2.4.1 and earlier\n # due to a bug in render_tree (visited was set to {} as default\n # parameter)\n actual = render_tree(node, get_children, 1)\n assert expect == actual, (expect, actual)", "def walk(node):\r\n from collections import deque\r\n todo = deque([node])\r\n while todo:\r\n node = todo.popleft()\r\n todo.extend(iter_child_nodes(node))\r\n yield node", "def walk(self):\n pass", "def test_Tree():", "def walk(d):\n for parent, key, leaf in _walk({}, None, d):\n yield (d, parent, key, leaf)", "def improve_tree(tree, freq_dict):\n # todo", "def traverse_depth_first(self, fn):\n queue = deque([self.root])\n while len(queue) > 0:\n node = queue.popleft()\n fn(node)\n queue.extendleft(reversed(node.children))", "def walkTree(self):\n if self.parentId:\n print self.parentId, self.id, self.value\n for child in self.children.itervalues():\n child.walkTree()", "def test_print_tree(self) -> None:\n\n def get_children(node):\n return node.children\n\n save_stdout = sys.stdout\n\n try:\n node, expect, withtags = self.tree_case_1()\n\n IOStream = io.StringIO\n sys.stdout = IOStream()\n print_tree(node, get_children)\n actual = sys.stdout.getvalue()\n assert expect == actual, (expect, actual)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, showtags=1)\n actual = sys.stdout.getvalue()\n assert withtags == actual, (withtags, actual)\n\n # Test that explicitly setting prune to zero works\n # the same as the default (see above)\n node, expect, withtags = self.tree_case_2(prune=0)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, 0)\n actual = sys.stdout.getvalue()\n assert expect == actual, (expect, actual)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, 0, showtags=1)\n actual = sys.stdout.getvalue()\n assert withtags == actual, (withtags, actual)\n\n # Test output with prune=1\n node, expect, withtags = self.tree_case_2(prune=1)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, 1)\n actual = sys.stdout.getvalue()\n assert expect == actual, (expect, actual)\n\n # Ensure that we can call print_tree on the same Node\n # again. 
This wasn't possible in version 2.4.1 and earlier\n # due to a bug in print_tree (visited was set to {} as default\n # parameter)\n sys.stdout = IOStream()\n print_tree(node, get_children, 1)\n actual = sys.stdout.getvalue()\n assert expect == actual, (expect, actual)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, 1, showtags=1)\n actual = sys.stdout.getvalue()\n assert withtags == actual, (withtags, actual)\n finally:\n sys.stdout = save_stdout", "def traverse_tree(self, root):\n\n\t\tself.pre_stage()\n\t\troot.visit(self)\n\t\tself.post_stage()", "def traverse(self):\n if self.root is None:\n return ''\n return self.root.traverse()", "def binary_search_tree_run():\n\n # no need for Tree object as the Tree itself is a concept; its made of connected nodes\n # nodes are the object; connections are self contained\n\n def binary_insert(root, node):\n if root is None:\n root = node\n else:\n if root.data > node.data:\n if root.l_child is None:\n root.l_child = node\n else:\n binary_insert(root.l_child, node)\n else:\n if root.r_child is None:\n root.r_child = node\n else:\n binary_insert(root.r_child, node)\n\n def in_order_print(root):\n if not root:\n return\n in_order_print(root.l_child)\n print(root.data)\n in_order_print(root.r_child)", "def file_walker(root,**kwargs):\n\n # Get our keyword argunents, and do some initialization.\n max_depth=kwargs.get('depth',None)\n if max_depth==None:\n max_depth=sys.maxsize # I don't think we'll hit this limit in practice.\n follow_links=kwargs.get('follow_links',True)\n prune=compile_filename_patterns(kwargs.get('prune',[]))\n ignore=compile_filename_patterns(kwargs.get('ignore',[]))\n report_dirs=kwargs.get('report_dirs',False)\n if report_dirs not in (False,True,'first','last'):\n raise ValueError(\"report_dirs=%r is not one of False, True, 'first', or 'last'.\"%(report_dirs,))\n stack=[(0,root)] # Prime our stack with root (at depth 0).\n been_there=set([os.path.abspath(os.path.realpath(root))])\n dir_stack=[] # Stack of paths we're yielding after exhausting those directories.\n\n while stack:\n depth,path=stack.pop()\n if report_dirs in (True,'first'):\n yield path+os.sep\n elif report_dirs=='last':\n dir_stack.append(path+os.sep)\n flist=os.listdir(path)\n flist.sort()\n dlist=[]\n # First, let the caller iterate over these filenames.\n for fn in flist:\n p=os.path.join(path,fn)\n if os.path.isdir(p):\n # Just add this to this path's list of directories for now.\n dlist.insert(0,fn)\n continue\n pat,mat=first_match(fn,ignore)\n if not pat:\n yield p\n # Don't dig deeper than we've been told to.\n if depth<max_depth:\n # Now, let's deal with the directories we found.\n for fn in dlist:\n p=os.path.join(path,fn)\n # We might need to stack this path for our fake recursion.\n if os.path.islink(p) and not follow_links:\n # Nope. We're not following symlinks.\n continue\n rp=os.path.abspath(os.path.realpath(p))\n if rp in been_there:\n # Nope. We've already seen this path (and possibly processed it).\n continue\n m=None\n pat,mat=first_match(fn,prune)\n if pat:\n # Nope. This directory matches one of the prune patterns.\n continue\n # We have a keeper! 
Record the path and push it onto the stack.\n been_there.add(rp)\n stack.append((depth+1,p))\n while dir_stack:\n yield dir_stack.pop()", "def walk(node):\n\n traversed_nodes.append(node)\n \n # Do something with node value...\n print node.value\n\n # Recurse on each child node\n for child_node in node.child_nodes:\n if child_node not in traversed_nodes:\n walk(child_node)", "def test_list_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n assert tree.root.left.value == 5\n assert tree.root.right.value == 100", "def visit(self):\n self.tree = self.recursive_visit(self.tree)\n # assert self.current_line == self.tree.absolute_bounding_box.bottom_right.line", "def tree_probe(self, **kwargs):\n\n def nextSpinner(b_cursorToNextLine):\n \"\"\"Provide a rotating spinner to indicate activity by using a closure.\n\n Returns:\n inner : inner function\n \"\"\"\n spinner = '\\\\|/-'\n pos = 0\n def inner(b_cursorToNextLine):\n nonlocal pos, spinner\n if pos>=len(spinner): pos = 0\n if self.toConsole():\n self.dp.qprint('Probing filesystem... {}'.format(spinner[pos]), end = '')\n if not b_cursorToNextLine:\n self.dp.qprint('\\r', end = '', syslog = self.args['syslog'])\n else:\n self.dp.qprint('\\n', end = '', syslog = self.args['syslog'])\n pos += 1\n return inner\n return inner\n\n def path_shorten(str_path, length = 80) -> str:\n \"\"\"Shorten a Path string\n\n Returns:\n string : a shortened path\n \"\"\"\n if length < 0:\n length = os.get_terminal_size().columns + length\n if len(str_path) > length:\n l_parts = list(pathlib.PurePath(str_path).parts)\n l_copy = l_parts.copy()\n max = len(l_parts)\n offset = -1\n center = max // 2\n while len(str_path) > length:\n offset += 1\n l_shorten = [i % (max + 1) for i in range( center - offset,\n center + offset + 1)]\n for prt in l_shorten: l_copy[prt] = '...'\n str_path = str(pathlib.PurePath(*l_copy))\n return str_path\n\n def elements_flash(l_el, debugLevel):\n \"\"\"\n Flash elements in the passed list at the debugLevel\n \"\"\"\n if self.toConsole():\n for el in l_el:\n self.dp.qprint('%s (%d)\\033[K\\r' % \\\n (path_shorten(el, - len(str(len(l_el))) - 4), len(l_el)),\n level = debugLevel,\n end = '',\n syslog = self.args['syslog'])\n\n\n str_topDir = \".\"\n l_dirs = []\n l_files = []\n b_status = False\n str_path = ''\n l_dirsHere = []\n l_filesHere = []\n b_cursorToNextLine = False\n\n for k, v in kwargs.items():\n if k == 'root': str_topDir = v\n\n if int(self.verbosityLevel) >= 2:\n b_cursorToNextLine = True\n spinner = nextSpinner(b_cursorToNextLine)\n index:int = 0\n for root, dirs, files in pftree.walklevel(str_topDir,\n self.maxdepth,\n followlinks = self.b_followLinks):\n b_status = True\n if self.verbosityLevel >= 2: spinner(b_cursorToNextLine)\n str_path = root.split(os.sep)\n l_dirs.append(root)\n if self.verbosityLevel >= 2: elements_flash(l_dirs, 2)\n if index:\n l_filesHere = [root + '/' + y for y in files]\n else:\n l_filesHere = [root + '/' + y for y in dirs]\n if len(self.str_inputFile):\n l_hit = [s for s in l_filesHere if self.str_inputFile in s]\n if l_hit:\n l_filesHere = l_hit\n else:\n l_filesHere = []\n l_files.append(l_filesHere)\n if self.verbosityLevel >= 3: elements_flash(l_filesHere, 3)\n if self.toConsole() and self.verbosityLevel >=2:\n self.dp.qprint(\"\\033[A\" * 1,\n end = '',\n syslog = self.args['syslog'],\n level = 2 )\n index += 1\n if self.toConsole() and self.verbosityLevel >= 2:\n self.dp.qprint('Probing complete! 
', level = 1)\n return {\n 'status': b_status,\n 'l_dir': l_dirs,\n 'l_files': l_files\n }", "def _get_internals(tree):\r\n y = tree.yea\r\n n = tree.nay\r\n a = tree.abstain\r\n if (y.is_leaf == False):\r\n internal_nodes.append(y)\r\n _get_internals(y)\r\n if (n.is_leaf == False):\r\n internal_nodes.append(n)\r\n _get_internals(n)\r\n if (a.is_leaf == False):\r\n internal_nodes.append(a)\r\n _get_internals(a)\r\n return", "async def leaf_it(d):\n async for _parent, _key, leaf in _walk({}, None, d):\n yield leaf", "def walk_tree(top):\n nodes = [top]\n for dirpath, dirnames, filenames in os.walk(top):\n for dirname in dirnames:\n nodes.append(os.path.join(dirpath, dirname))\n for filename in filenames:\n nodes.append(os.path.join(dirpath, filename))\n\n return nodes", "def test_tree_mode2(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar\n xp.tree_mode(True, xp)\n bar = xpb.bar\n bar.tree_mode(True, xp)\n baz = xpb.baz\n baz.tree_mode(True, xp)\n foo_bar = xpb.foo.bar\n foo_bar.tree_mode(True, xp)\n self.assertTrue(xp.is_tree_mode())\n l = [bar, foo_bar, xp, baz]\n self.assertTrue(xp in l)\n l.remove(xp)\n self.assertTrue(len(l) == 3)\n self.assertFalse(xp in l)\n xp.tree_mode(False, xp)\n self.assertFalse(xp.is_tree_mode())", "def traverse_tree(tree, thisFolder, path, submission):\n\n # Get files directly underneath this folder.\n blobs = tree.blobs\n thisFolderName = tree.name\n\n # Add this folder to the path.\n path = os.path.join(path, thisFolderName)\n print(path)\n\n for blob in blobs:\n filepath = os.path.join(path, blob.name)\n add_source_file(blob.name, thisFolder, filepath, submission)\n\n # Get folders directly underneath this folder.\n folders = tree.trees\n for folder in folders:\n srcFolderObj = add_source_folder(folder.name, thisFolder)[0]\n traverse_tree(folder, srcFolderObj, path, submission)\n\n return", "def _traverse_uast(self, root, word2ind, dok_mat):\n stack = [root]\n new_stack = []\n\n while stack:\n for node in stack:\n children = self._process_node(node, word2ind, dok_mat)\n new_stack.extend(children)\n stack = new_stack\n new_stack = []", "def __call__(self, node):\n if not node.children: return;\n ochildren = node.children;\n for n in ochildren:\n mark = self.ProperContainsMarker(n);\n if mark: raise ValueError(\"not implemented\");", "def _find_one_tree(tree: dict,\n func: Callable,\n args: Tuple,\n kwargs: Mapping,\n ) -> Union[dict, None]:\n frontier = []\n explored = set()\n for uid, item in tree.items():\n frontier.append((uid, item))\n while frontier:\n uid, item = frontier.pop()\n explored.add(uid)\n if func(item, *args, **kwargs):\n return item\n if \"children\" in item:\n for child_uid, child_item in item[\"children\"].items():\n if child_uid not in explored:\n frontier.append((child_uid, child_item))", "def in_order_traverse(root):\n stack = deque([root])\n visited = set()\n while stack:\n node = stack.pop()\n if node is None:\n continue\n if node.index in visited:\n print(node.index, end=' ')\n continue\n visited.add(node.index)\n stack.append(node.right)\n stack.append(node)\n stack.append(node.left)", "def walk_tree(tree,\n leaf_func=lambda x: None,\n pre_nonleaf_func=lambda x: None,\n post_nonleaf_func=lambda x: None):\n tree = deepcopy(tree)\n\n def walk(node):\n # Depth First Traversal of an NLTK Tree.\n if is_leaf_node(node):\n leaf_func(node)\n else:\n pre_nonleaf_func(node)\n if len(node) > 0:\n for child in node:\n walk(child)\n post_nonleaf_func(node)\n\n walk(tree)\n return tree", "def apply(self, tree):\n raise NotImplementedError()", 
"def trie_walk_yielding(root, yieldfunc, seen=[], preceder=[], level=1, level_keys=[]):\n level_keys.append(list(root.keys()))\n subtrees = [root.get(k) for k in root.keys()]\n # yield subtrees\n for i, subtree in enumerate(subtrees):\n sk = list(root.keys())[i]\n seen.append(sk)\n if subtree == {None: None}:\n # the subtree is a leaf\n yield from yieldfunc(preceder, seen, level)\n gone = seen.pop() # leaf will not be remembered (after being shown)\n if i == len(subtrees) - 1:\n popped = seen.pop()\n preceder.pop()\n level_keys.pop()\n level -= 1\n if i == len(subtrees) - 1:\n if level_keys[len(preceder)][0] is None:\n while (\n level_keys[len(preceder)][0] is None\n and popped == level_keys[len(preceder)][-1]\n ):\n popped = seen.pop()\n preceder.pop()\n level_keys.pop()\n level -= 1\n elif popped == level_keys[len(preceder)][-1]:\n while popped == level_keys[len(preceder)][-1]:\n popped = seen.pop()\n preceder.pop()\n level_keys.pop()\n level -= 1\n continue\n elif subtree is None:\n # the 'subtree' is a 'null child' indicating the parent is 'also a leaf'\n popped = seen.pop() # leaf will not be remembered (nor shown at all)\n yield from yieldfunc(preceder, seen, level)\n continue\n subtree_keys = list(subtree.keys())\n preceder.append(sk)\n yield from trie_walk_yielding(\n subtree, yieldfunc, seen, preceder, level + 1, level_keys\n )", "def depth_first_search(self):\r\n queue = [self.root]\r\n ordered = []\r\n while queue:\r\n node = queue.pop()\r\n ordered.append(node)\r\n queue.extend(node.children)\r\n \r\n while ordered:\r\n yield ordered.pop()", "def _walk(self):\n while self._slice:\n new_slice = []\n for element in self._slice:\n if not isinstance(element, dict) or len(element) != 1:\n raise TreeIntegrityError\n key, sublist = tuple(element.items())[0]\n if not isinstance(sublist, list):\n raise TreeIntegrityError\n yield key\n new_slice.extend(sublist)\n self._slice = new_slice", "def walk(self):\n current = self\n yield current\n while current.parent:\n current = current.parent\n yield current", "def walk(self):\n yield self\n for child in self.children:\n for descendant in child.walk():\n yield descendant", "def traverse(tree):\n nonlocal result\n\n symbol, children, *_ = tree\n\n if children:\n for c in children:\n if c[0].startswith(\"<\"):\n if not c[0].startswith(symbol_name[:-1]):\n if next_leaf(c):\n result += c[0].replace(\"<\", \"\").replace(\">\", \": \") + next_leaf_content(c) + \"\\n\"\n else:\n result += c[0].replace(\"<\", \"\").replace(\">\", \"\") + \" {\" + \"\\n\"\n traverse(c)\n result += \"}\" + \"\\n\"\n else:\n traverse(c) # do not update anything, just traverse", "def visit(self, node):", "def visit(self, node):", "def tree(self) -> None:\n tree = Tree(self.root.path)\n self.root.walk_dir(tree)", "def inOrderTreeWalk(node: TreeNode, node_flat: TreeNode):\n if node is not None:\n node_flat.right = TreeNode(node.val)\n node_flat = node_flat.right\n node_flat = inOrderTreeWalk(node.left, node_flat)\n node_flat = inOrderTreeWalk(node.right, node_flat)\n return node_flat", "def test_scan_recursive(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount + 1)", "def _find_all_tree(tree: dict,\n func: Callable,\n args: Tuple,\n kwargs: Mapping\n ) -> Union[Sequence[dict], None]:\n frontier = []\n explored = set()\n found = []\n for uid, item in tree.items():\n frontier.append((uid, item))\n while frontier:\n uid, item = frontier.pop()\n explored.add(uid)\n if func(item, *args, **kwargs):\n found.append(item)\n if \"children\" in item:\n for 
child_uid, child_item in item[\"children\"].items():\n if child_uid not in explored:\n frontier.append((child_uid, child_item))\n return found", "def filetree(self) -> P:\n ...", "def get_tree_size(thread, root, top, path, docs, sizes, inodes, depth=0, maxdepth=999):\n global filecount\n global skipfilecount\n global inodecount\n global dircount\n global skipdircount\n global total_doc_count\n global warnings\n\n size = 0\n size_du = 0\n dirs = 0\n files = 0\n f_count = 0\n d_count = 0\n f_skip_count = 0\n d_skip_count = 0\n tot_doc_count = 0\n parent_path = None\n size_norecurs = 0\n size_du_norecurs = 0\n files_norecurs = 0\n dirs_norecurs = 0\n \n # use alt scanner\n # try to get stat info for dir path\n if options.altscanner:\n try:\n d_stat = alt_scanner.stat(path)\n except RuntimeError as e:\n logmsg = '[{0}] ALT SCANNER ERROR: {1}'.format(thread, e)\n logger.error(logmsg)\n if logtofile: logger_warn.error(logmsg)\n with crawl_thread_lock:\n warnings += 1\n return 0, 0, 0, 0\n except Exception as e:\n logmsg = '[{0}] ALT SCANNER EXCEPTION: {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n return 0, 0, 0, 0\n else:\n # try to get os stat info for dir path\n try:\n d_stat = os.stat(path)\n except OSError as e:\n logmsg = '[{0}] OS ERROR: {1}'.format(thread, e)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n return 0, 0, 0, 0\n \n # restore times (atime/mtime)\n if restore_times:\n res, err = set_times(path, d_stat.st_atime, d_stat.st_mtime)\n if not res:\n logmsg = 'OS ERROR setting file times for {0} (error {1})'.format(path, err)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n\n # scan directory\n try:\n logger.debug('[{0}] Scanning path {1}...'.format(thread, path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] Scanning path {1}...'.format(thread, path))\n for entry in os.scandir(path):\n logger.debug('[{0}] Scanning dir entry {1}...'.format(thread, entry.path))\n if options.vverbose:\n logger.info('[{0}] Scanning dir entry {1}...'.format(thread, entry.path)) \n \n if entry.is_symlink():\n logger.debug('[{0}] skipping symlink {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping symlink {1}'.format(thread, entry.path))\n pass\n elif entry.is_dir():\n d_count += 1\n if not dir_excluded(entry.path):\n dirs += 1\n dirs_norecurs += 1\n if maxdepth > 0:\n if depth < maxdepth:\n # recurse into subdir\n if not quit:\n s, sdu, fc, dc = get_tree_size(thread, root, top, entry.path, docs, sizes, inodes, depth+1, maxdepth)\n size += s\n size_du += sdu\n files += fc\n dirs += dc\n else:\n logger.debug('[{0}] not descending {1}, maxdepth {2} reached'.format(\n thread, entry.path, maxdepth))\n if options.verbose or options.vverbose:\n logger.info('[{0}] not descending {1}, maxdepth {2} reached'.format(\n thread, entry.path, maxdepth))\n else:\n logger.debug('[{0}] skipping dir {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping dir {1}'.format(thread, entry.path))\n d_skip_count += 1\n else:\n f_count += 1\n if not file_excluded(entry.name):\n f_stat = entry.stat()\n # restore times (atime/mtime)\n if restore_times and not options.altscanner:\n ret = set_times(entry.path, f_stat.st_atime, f_stat.st_mtime)\n if not ret:\n with crawl_thread_lock:\n warnings += 
1\n\n fsize = f_stat.st_size\n # calculate allocated file size (du size)\n if IS_WIN:\n fsize_du = fsize\n elif options.altscanner:\n fsize_du = f_stat.st_sizedu\n else:\n fsize_du = f_stat.st_blocks * blocksize\n # set fsize_du to 0 if inode in inodes list (hardlink)\n if f_stat.st_ino in inodes:\n fsize_du = 0\n # add inode to inodes list if hardlink count > 1\n elif f_stat.st_nlink > 1:\n with crawl_thread_lock:\n inodes.append(f_stat.st_ino)\n fmtime_sec = time.time() - f_stat.st_mtime\n fctime_sec = time.time() - f_stat.st_ctime\n fatime_sec = time.time() - f_stat.st_atime\n\n if not exc_empty_files or (exc_empty_files and fsize > 0):\n if fsize >= minfilesize and \\\n fmtime_sec > minmtime and \\\n fmtime_sec < maxmtime and \\\n fctime_sec > minctime and \\\n fctime_sec < maxctime and \\\n fatime_sec > minatime and \\\n fatime_sec < maxatime:\n size += fsize\n size_norecurs += fsize\n size_du += fsize_du\n size_du_norecurs += fsize_du\n files += 1\n files_norecurs += 1\n # get owner and group names\n if IS_WIN:\n # for windows just set both owner and group to 0, this is what scandir returns for Windows\n # and there is no known fast way to get Windows file owner (pywin32 is slow)\n owner = f_stat.st_uid\n group = f_stat.st_gid\n else:\n owner, group = get_owner_group_names(f_stat.st_uid, f_stat.st_gid)\n \n # check for bad Unicode utf-8 characters\n try:\n if parent_path is None:\n parent_path = get_parent_path(entry.path)\n file_name = get_file_name(entry.name)\n except UnicodeError:\n if parent_path is None:\n parent_path = get_parent_path(entry.path, ignore_errors=True)\n file_name = get_file_name(entry.name, ignore_errors=True)\n logmsg = '[{0}] UNICODE WARNING {1}'.format(thread, os.path.join(parent_path, file_name))\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n \n # index doc dict\n data = {\n 'name': file_name,\n 'extension': os.path.splitext(entry.name)[1][1:].lower(),\n 'parent_path': parent_path,\n 'size': fsize,\n 'size_du': fsize_du,\n 'owner': owner,\n 'group': group,\n 'mtime': datetime.utcfromtimestamp(int(f_stat.st_mtime)).isoformat(),\n 'atime': datetime.utcfromtimestamp(int(f_stat.st_atime)).isoformat(),\n 'ctime': datetime.utcfromtimestamp(int(f_stat.st_ctime)).isoformat(),\n 'nlink': f_stat.st_nlink,\n 'ino': str(f_stat.st_ino),\n 'type': 'file'\n }\n\n # check if using altscanner and if any additional meta data to add to data dict\n if options.altscanner:\n try:\n extrameta_dict = alt_scanner.add_meta(entry.path, f_stat)\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except Exception as e:\n logmsg = '[{0}] ALT SCANNER EXCEPTION {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n # check plugins for adding extra meta data to data dict\n if plugins_enabled and plugins_files:\n for plugin in plugins:\n try:\n # check if plugin is for file doc\n if plugin.for_type('file'):\n extrameta_dict = plugin.add_meta(entry.path, f_stat)\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except (RuntimeWarning, RuntimeError) as e:\n err_message = e.args[0]\n if e.__class__ == RuntimeWarning:\n logmsg = '[{0}] PLUGIN WARNING: {1}'.format(thread, err_message)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n else:\n logmsg = '[{0}] PLUGIN ERROR: {1}'.format(thread, err_message)\n logger.error(logmsg)\n if logtofile: logger_warn.error(logmsg)\n with 
crawl_thread_lock:\n warnings += 1\n extrameta_dict = e.args[1]\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except Exception as e:\n logmsg = '[{0}] PLUGIN EXCEPTION {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n # add file doc to docs list and upload to ES once it reaches certain size\n docs.append(data.copy())\n doc_count = len(docs)\n if doc_count >= es_chunksize:\n start_bulk_upload(thread, root, docs)\n tot_doc_count += doc_count\n docs.clear()\n\n else:\n f_skip_count += 1\n logger.debug('[{0}] skipping file {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping file {1}'.format(thread, entry.path))\n else:\n f_skip_count += 1\n logger.debug('[{0}] skipping file {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping file {1}'.format(thread, entry.path))\n else:\n f_skip_count += 1\n logger.debug('[{0}] skipping file {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping file {1}'.format(thread, entry.path))\n \n # if not excluding empty dirs is set or exclude empty dirs is set but there are files or \n # dirs in the current directory, index the dir\n if not exc_empty_dirs or (exc_empty_dirs and (files > 0 or dirs > 0)):\n # get owner and group names\n if IS_WIN:\n # for windows just set both owner and group to 0, this is what scandir returns for Windows\n # and there is no known fast way to get Windows file owner (pywin32 is slow)\n owner = d_stat.st_uid\n group = d_stat.st_gid\n else:\n owner, group = get_owner_group_names(d_stat.st_uid, d_stat.st_gid)\n \n # check for bad Unicode utf-8 characters\n try:\n file_name = get_dir_name(path)\n parent_path = get_parent_path(path)\n except UnicodeError:\n file_name = get_dir_name(path, ignore_errors=True)\n parent_path = get_parent_path(path, ignore_errors=True)\n logmsg = '[{0}] UNICODE WARNING {1}'.format(thread, os.path.join(parent_path, file_name))\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n \n # index doc dict\n data = {\n 'name': file_name,\n 'parent_path': parent_path,\n 'size': size,\n 'size_norecurs': size_norecurs,\n 'size_du': size_du,\n 'size_du_norecurs': size_du_norecurs,\n 'file_count': files,\n 'file_count_norecurs': files_norecurs, \n 'dir_count': dirs + 1,\n 'dir_count_norecurs': dirs_norecurs + 1,\n 'dir_depth': depth,\n 'mtime': datetime.utcfromtimestamp(int(d_stat.st_mtime)).isoformat(),\n 'atime': datetime.utcfromtimestamp(int(d_stat.st_atime)).isoformat(),\n 'ctime': datetime.utcfromtimestamp(int(d_stat.st_ctime)).isoformat(),\n 'nlink': d_stat.st_nlink,\n 'ino': str(d_stat.st_ino),\n 'owner': owner,\n 'group': group,\n 'type': 'directory'\n }\n\n # check if using altscanner and if any additional meta data to add to data dict\n if options.altscanner:\n try:\n extrameta_dict = alt_scanner.add_meta(path, d_stat)\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except Exception as e:\n logmsg = '[{0}] ALT SCANNER EXCEPTION {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n # check plugins for adding extra meta data to data dict\n if plugins_enabled and plugins_dirs:\n for plugin in plugins:\n # check if plugin is for directory doc\n try:\n if 
plugin.for_type('directory'):\n extrameta_dict = plugin.add_meta(path, d_stat)\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except (RuntimeWarning, RuntimeError) as e:\n err_message = e.args[0]\n if e.__class__ == RuntimeWarning:\n logmsg = '[{0}] PLUGIN WARNING: {1}'.format(thread, err_message)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n else:\n logmsg = '[{0}] PLUGIN ERROR: {1}'.format(thread, err_message)\n logger.error(logmsg)\n if logtofile: logger_warn.error(logmsg)\n with crawl_thread_lock:\n warnings += 1\n extrameta_dict = e.args[1]\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except Exception as e:\n logmsg = '[{0}] PLUGIN EXCEPTION: {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n \n if depth > 0:\n # add file doc to docs list and upload to ES once it reaches certain size\n docs.append(data.copy())\n doc_count = len(docs)\n if doc_count >= es_chunksize:\n start_bulk_upload(thread, root, docs)\n tot_doc_count += doc_count\n docs.clear()\n \n else:\n with crawl_thread_lock:\n sizes[root] = data.copy()\n else:\n d_skip_count += 1\n logger.debug('[{0}] skipping empty dir {1}'.format(thread, path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping empty dir {1}'.format(thread, path))\n if dirs > 0: dirs -= 1\n\n with crawl_thread_lock:\n dircount[root] += d_count - d_skip_count\n filecount[root] += f_count - f_skip_count\n skipfilecount[root] += f_skip_count\n skipdircount[root] += d_skip_count\n total_doc_count[root] += tot_doc_count\n inodecount[root] += d_count + f_count \n\n except OSError as e:\n logmsg = '[{0}] OS ERROR: {1}'.format(thread, e)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n except RuntimeError as e:\n logmsg = '[{0}] ALT SCANNER ERROR: {1}'.format(thread, e)\n logger.error(logmsg)\n if logtofile: logger_warn.error(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n \n return size, size_du, files, dirs", "def traverse(self):\r\n nodes_to_visit = queue.Queue()\r\n nodes_to_visit.put(self.__rootnode)\r\n while nodes_to_visit.empty() is False:\r\n current_node = nodes_to_visit.get()\r\n yield current_node\r\n for child in current_node.children:\r\n nodes_to_visit.put(child)", "def traverseTree(mdsnode,dead_branches=False,depth=float('Nan'),current_depth=0,noisy=False,strict=False,tags=False):\n tagdict={}\n if isinstance(mdsnode,mds.tree.Tree): \n mdsnode=mdsnode.getNode(\"\\\\TOP\")\n \n name = get_mds_shortname(mdsnode) \n me = Branch(mdsnode)#put node information here if you like\n if noisy: print (\" \"*current_depth + name)\n\n #Members are data/signals, put them directly the current Node object\n #if they are arrays\n if mdsnode.getNumMembers()>0:\n leaves=mdsnode.getMembers()\n for leaf in leaves:\n leafname=get_mds_shortname(leaf)\n leafshape=get_mds_shape(leaf)\n if dead_branches or not len(leafshape) ==0:\n if noisy: print (\" \"*(current_depth+1) + leafname +\": array%s\"%str(leafshape))\n setattr(me,leafname,Leaf(leaf,strict))\n tagdict[leafname]=getattr(me,leafname)\n else:\n if noisy: print(\" \"*(current_depth+1) + leafname)\n #Children contain no immediate data, just links to more nodes. 
If depth is\n #not beyond limit, go down these 'branches' and add contents to the current\n #Node object\n if not depth <= current_depth and mdsnode.getNumChildren()>0:\n branches = mdsnode.getChildren()\n for b in branches:\n subname,subnode,subtags=traverseTree(b, dead_branches,depth,current_depth+1,noisy,strict)\n if len(subnode.__getDescendants__())>0:\n setattr(me,subname,subnode)\n tagdict[subname]=getattr(me,subname)\n for k,v in subtags.items(): #merge tags in\n tagdict[k]=v\n \n if current_depth==0:#we are done, returning to user\n if tags: \n for tag,obj in tagdict.items():\n setattr(me,tag,obj)\n else:\n tagbranch=Branch(mdsnode)\n for tag,obj in tagdict.items():\n setattr(tagbranch,tag,obj)\n setattr(me,'tags',tagbranch) \n return me\n return (name, me,tagdict) #else, we are still recursing back down the tree", "def walktree (self, top = \".\", depthfirst = True):\n \n names = os.listdir(top)\n if not depthfirst:\n yield top, names\n for name in names:\n try:\n st = os.lstat(os.path.join(top, name))\n except os.error:\n continue\n if stat.S_ISDIR(st.st_mode):\n for (newtop, children) in self.walktree (os.path.join(top, name), depthfirst):\n #print 'Scanning ', newtop\n yield newtop, children\n if depthfirst:\n yield top, names", "def __iter__(self):\n return self._collect(self.root, '')", "def traverse(self):\n return self.root.traverse()", "def testBinarySearchTree():\n\n \"\"\"\n Example After Deletion\n 7\n / \\\n 1 4\n\n \"\"\"\n t = BinarySearchTree()\n t.insert(8)\n t.insert(3)\n t.insert(6)\n t.insert(1)\n t.insert(10)\n t.insert(14)\n t.insert(13)\n t.insert(4)\n t.insert(7)\n\n # Prints all the elements of the list in order traversal\n print(t.__str__())\n\n if t.getNode(6) is not None:\n print(\"The label 6 exists\")\n else:\n print(\"The label 6 doesn't exist\")\n\n if t.getNode(-1) is not None:\n print(\"The label -1 exists\")\n else:\n print(\"The label -1 doesn't exist\")\n\n if not t.empty():\n print((\"Max Value: \", t.getMax().getLabel()))\n print((\"Min Value: \", t.getMin().getLabel()))\n\n t.delete(13)\n t.delete(10)\n t.delete(8)\n t.delete(3)\n t.delete(6)\n t.delete(14)\n\n # Gets all the elements of the tree In pre order\n # And it prints them\n list = t.traversalTree(InPreOrder, t.root)\n for x in list:\n print(x)", "def __call__(self, node):\n if node.children:\n if len(node.children) == 1:\n if self.TagEqual(node.children[0], node):\n #print node.ToString()\n node.tag = self.Tag(node, node.children[0]);\n lst = node.children[0].children;\n node.children = lst;", "def __iter__(self):\n yield self\n if not self.is_leaf():\n yield from self.left_subtree\n yield from self.right_subtree", "def test_tree_mode4(self):\n xpb = XPathBuilder()\n xp_1 = xpb.foo\n xp_2 = xpb.baz\n xp_and = xp_1 & xp_2\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 2)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_and._children[1] is xp_2)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n self.assertTrue(xp_2._parent is xp_and)\n self.assertTrue(len(xp_2._children) == 0)\n xp_2.reparent(None)\n # check references after remove\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 1)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n # xp_2's references were changed\n self.assertTrue(xp_2._parent is None)\n self.assertTrue(len(xp_2._children) == 0)", "def walk(self):\n if 
self.left is not None:\n yield from self.left.walk()\n yield self.item\n if self.right is not None:\n yield from self.right.walk()", "def test_children_tree(depth_one_tree):\n assert str(depth_one_tree.root.children) == str([1, 2, 3, 4])", "def test_get_children():\n builder = TreeBuilder()\n builder.create_root(1)\n builder.add_child(7)\n builder.add_child(2, move=True)\n builder.add_child(13)\n t = builder.build()\n\n assert t[0].data == 7\n assert t[1].data == 2\n assert t[1][0].data == 13", "def _auxRefreshTree(self, tree_index):\n tree_item = self.treeItem(tree_index)\n logger.debug(\"_auxRefreshTree({}): {}{}\".format(\n tree_index, tree_item.obj_path,\n \"*\" if tree_item.children_fetched else \"\"))\n\n if tree_item.children_fetched:\n\n old_items = tree_item.child_items\n new_items = self._fetchObjectChildren(tree_item.obj,\n tree_item.obj_path)\n\n old_item_names = [(item.obj_name,\n item.is_attribute) for item in old_items]\n new_item_names = [(item.obj_name,\n item.is_attribute) for item in new_items]\n seqMatcher = SequenceMatcher(isjunk=None, a=old_item_names,\n b=new_item_names,\n autojunk=False)\n opcodes = seqMatcher.get_opcodes()\n\n logger.debug(\"(reversed) \"\n \"opcodes: {}\".format(list(reversed(opcodes))))\n\n for tag, i1, i2, j1, j2 in reversed(opcodes):\n\n if 1 or tag != 'equal':\n logger.debug(\" {:7s}, a[{}:{}] ({}), b[{}:{}] ({})\"\n .format(tag, i1, i2,\n old_item_names[i1:i2], j1, j2,\n new_item_names[j1:j2]))\n\n if tag == 'equal':\n # Only when node names are equal is _auxRefreshTree\n # called recursively.\n assert i2-i1 == j2-j1, (\"equal sanity \"\n \"check failed \"\n \"{} != {}\".format(i2-i1, j2-j1))\n for old_row, new_row in zip(range(i1, i2), range(j1, j2)):\n old_items[old_row].obj = new_items[new_row].obj\n child_index = self.index(old_row, 0, parent=tree_index)\n self._auxRefreshTree(child_index)\n\n elif tag == 'replace':\n # Explicitly remove the old item and insert the new.\n # The old item may have child nodes which indices must be\n # removed by Qt, otherwise it crashes.\n assert i2-i1 == j2-j1, (\"replace sanity \"\n \"check failed \"\n \"{} != {}\").format(i2-i1, j2-j1)\n\n # row number of first removed\n first = i1\n # row number of last element after insertion\n last = i1 + i2 - 1\n logger.debug(\" calling \"\n \"beginRemoveRows({}, {}, {})\".format(\n tree_index, first, last))\n self.beginRemoveRows(tree_index, first, last)\n del tree_item.child_items[i1:i2]\n self.endRemoveRows()\n\n # row number of first element after insertion\n first = i1\n # row number of last element after insertion\n last = i1 + j2 - j1 - 1\n logger.debug(\" calling \"\n \"beginInsertRows({}, {}, {})\".format(\n tree_index, first, last))\n self.beginInsertRows(tree_index, first, last)\n tree_item.insert_children(i1, new_items[j1:j2])\n self.endInsertRows()\n\n elif tag == 'delete':\n assert j1 == j2, (\"delete\"\n \" sanity check \"\n \"failed. {} != {}\".format(j1, j2))\n # row number of first that will be removed\n first = i1\n # row number of last element after insertion\n last = i1 + i2 - 1\n logger.debug(\" calling \"\n \"beginRemoveRows\"\n \"({}, {}, {})\".format(tree_index,\n first, last))\n self.beginRemoveRows(tree_index, first, last)\n del tree_item.child_items[i1:i2]\n self.endRemoveRows()\n\n elif tag == 'insert':\n assert i1 == i2, (\"insert \"\n \"sanity check \"\n \"failed. 
{} != {}\".format(i1, i2))\n # row number of first element after insertion\n first = i1\n # row number of last element after insertion\n last = i1 + j2 - j1 - 1\n logger.debug(\" \"\n \"calling beginInsertRows\"\n \"({}, {}, {})\".format(tree_index,\n first, last))\n self.beginInsertRows(tree_index, first, last)\n tree_item.insert_children(i1, new_items[j1:j2])\n self.endInsertRows()\n else:\n raise ValueError(\"Invalid tag: {}\".format(tag))", "def __iter__(self):\n\n yield from self._traverse_forward(self.root)", "def dfs_walk(node: ast.AST) -> Iterator[ast.AST]:\n stack = [node]\n while stack:\n node = stack.pop()\n stack.extend(reversed(list(ast.iter_child_nodes(node))))\n yield node", "def __getitem__(self, i: int) -> 'Tree':\n ...", "def walktree(classes, children, parent):\r\n results = []\r\n classes.sort(key=attrgetter('__module__', '__name__'))\r\n for c in classes:\r\n results.append((c, c.__bases__))\r\n if c in children:\r\n results.append(walktree(children[c], children, c))\r\n return results", "def process_tree_nodes(self):\n self.leaves, self.internal = set(), set()\n _is_cladogram = True\n for node in self.nodes:\n if not node._been_processed:\n if not node.name:\n node.name = node.id\n elif self._remove_name_quotes and (node.name[0] == node.name[-1] == \"'\" or node.name[0] == node.name[-1] == '\"'):\n node.name = node.name[1:-1].strip()\n if node.branch != '' and node.branch != None:\n node.branch = float(node.branch)\n _is_cladogram = False\n else:\n node.branch = 0.0\n if not node.children:\n self.leaves.add(node)\n else:\n self.internal.add(node)\n if not node._been_processed and node.support:\n try:\n node.support = float(node.support)\n if not node.support_type:\n node.support_type = self._support_label\n except ValueError:\n if not node.comment:\n node.comment = node.support\n node.support = None\n if self._is_cladogram == None:\n self._is_cladogram = _is_cladogram\n self.node_names = {}\n for node in self.nodes:\n if node != self.root:\n if self._is_cladogram:\n node.branch = self._cladogram_branch\n if node.name in self.node_names:\n i = 2\n name = '{}_{}'.format(node.name, i)\n while name in self.node_names:\n i += 1\n name = '{}_{}'.format(node.name, i)\n if verbose:\n print('Warning: non-unique node \"{}\" was renamed to \"{}\"'.format(node.name, name))\n node.name = name\n self.node_names[node.name] = node\n node._been_processed = True\n self.calculate_paths()", "def __iter__(self):\n if self.root:\n return self.root.inorder()", "def _preorder_traverse_to_list_helper(self, node, depth):\n\t\t#visit node\n\t\tl = []\n\t\tif (node):\n\t\t\tl.append(node.value())\n\t\telse:\n\t\t\tl.append(None)\n\n\t\t#anon function for this thing\n\t\tfakechild = lambda:self._preorder_traverse_to_list_helper(None, depth + 1)\n\n\t\t#call on children\n\t\tif (node):\n\t\t\tif (node.lchild()):\n\t\t\t\tl += self._preorder_traverse_to_list_helper(node.lchild(), depth + 1)\n\t\t\telse:\n\t\t\t\tif (depth < self._depth):\n\t\t\t\t\t#recurse with None for empty children (lchild)\n\t\t\t\t\tl += fakechild()\n\t\t\tif (node.rchild()):\n\t\t\t\tl += self._preorder_traverse_to_list_helper(node.rchild(), depth + 1)\n\t\t\telse:\n\t\t\t\tif (depth < self._depth):\n\t\t\t\t\t#recurse with None for empty children (rchild)\n\t\t\t\t\tl += fakechild()\n\t\telse:\n\t\t\tif (depth < self._depth):\n\t\t\t\t#recurse with None for empty children (lchild) and (rchild)\n\t\t\t\t#l += fakechild() #need to call twice?\n\t\t\t\tl += fakechild()\n\t\treturn l", "def 
recursively_compare_tree_against_html(self, func):\n def inner(obj, node):\n # invoke comparator function\n func(obj=obj, node=node)\n\n # filter\n child_nodes = self.get_children_of_node(node)\n\n # same number of object children and html child nodes\n self.assertEqual(len(obj.children), len(child_nodes))\n\n # loop over children and call recursive compare on them\n for (child_obj, child_node) in zip(obj.children, child_nodes):\n inner(obj=child_obj, node=child_node)\n\n # call inner() with root elements\n inner(obj=self.document.root, node=self.soup.body)", "def _initialize_trees(self):", "def _traverse_node_tree(self, cur_node, search_node_list):\n for _, sub_node in cur_node.get_children():\n sub_nodes = []\n self._traverse_node_tree(sub_node, sub_nodes)\n sub_node_dict = {\n 'name': sub_node.node_name,\n 'type': sub_node.node_type,\n 'is_dynamic_shape_node': sub_node.is_dynamic_shape_node,\n 'nodes': sub_nodes\n }\n search_node_list.append(sub_node_dict)", "def __next__(self):\r\n self.pointer += 1\r\n if self.pointer > self.root.size_tree:\r\n raise StopIteration\r\n\r\n return self.select(self.pointer)", "def _traverse_in_order_recursive(self, node, visit):\n # Traverse left subtree, if it exists\n if node is not None:\n self._traverse_in_order_recursive(node.left_child, visit)\n # Visit this node's data with given function\n visit(node.data)\n # Traverse right subtree, if it exists\n self._traverse_in_order_recursive(node.right_child, visit)", "def preorder_visit(t: Tree, act: Callable[[Tree], Any]) -> None:\n act(t)\n for child in t.children:\n preorder_visit(child, act)", "def _get_tree(root: spacy.tokens.Token, depth: int, token_filter: types.FunctionType) -> [spacy.tokens.Token]:\n if depth == 0:\n return [root] if token_filter(root) else []\n\n result = []\n # for tokens on the left of the root, whose head is root\n for child in filter(token_filter, root.lefts):\n result += SpacyEventExtractor._get_tree(child, depth - 1, token_filter)\n result.append(root)\n # for tokens on the right of the root, whose head is root\n for child in filter(token_filter, root.rights):\n result += SpacyEventExtractor._get_tree(child, depth - 1, token_filter)\n return result", "def ctxTraverse(*args, down: bool=True, left: bool=True, right: bool=True, up: bool=True,\n **kwargs)->None:\n pass", "def walk_copy(node, src):\n parent = node.parent\n children = node.children\n\n # position of node\n pos = ('root' if node.is_root() else 'basal' if parent.is_root()\n else 'derived')\n\n # whether tree is rooted\n root = node if pos == 'root' else node.parent if pos == 'basal' else None\n rooted = None if pos == 'derived' else (\n True if len(root.children) == 2 else False)\n\n if rooted:\n if pos == 'root':\n raise ValueError('Cannot walk from root of a rooted tree.')\n elif pos == 'basal':\n sibling = [x for x in node.siblings()][0]\n\n # direction of walking\n move = (('bottom' if src is sibling else 'top' if src in children\n else 'n/a') if rooted and pos == 'basal'\n else ('down' if src is parent else 'up' if src in children\n else 'n/a'))\n if move == 'n/a':\n raise ValueError('Source and node are not neighbors.')\n\n # create a new node\n res = TreeNode(node.name)\n\n # determine length of the new node\n res.length = (node.length if move == 'down'\n else src.length + node.length if move == 'bottom'\n else src.length) # up or top\n\n # determine support of the new node\n res.support = (node.support if move in ('down', 'bottom')\n else src.support)\n\n # append children except for src (if applies)\n 
res.extend([walk_copy(c, node) for c in children if c is not src])\n\n # append parent if walking up (except at root)\n if move == 'up' and pos != 'root':\n res.append(walk_copy(parent, node))\n\n # append sibling if walking from one basal node to another\n if move == 'top':\n res.append(walk_copy(sibling, node))\n\n return res", "def traverse_breadth_first(self, fn):\n queue = deque([self.root])\n while len(queue) > 0:\n node = queue.popleft()\n fn(node)\n queue.extend(node.children)", "def UCT(rootstate, itermax, verbose=False):\n\n rootnode = Node(state=rootstate)\n\n for i in range(itermax):\n node = rootnode\n state = rootstate.Clone()\n\n # Select\n while node.untriedMoves == [] and node.childNodes != []: # node is fully expanded and non-terminal\n node = node.UCTSelectChild()\n state.DoMove(node.move)\n\n # Expand\n expand = True\n while expand and node.untriedMoves != []: # if we can expand (i.e. state/node is non-terminal)\n m = random.choice(node.untriedMoves)\n # print(\"[Expand] Untried move %s, %s, %s\" % (m[0], m[1], m[2]))\n expand = not state.DoMove(m)\n node = node.AddChild(m, state) # add child and descend tree\n\n # Rollout - this can often be made orders of magnitude quicker using a state.GetRandomMove() function\n while state.GetMoves() != []: # while state is non-terminal\n state.DoMove(random.choice(state.GetMoves()))\n\n # Backpropagate\n while node != None: # backpropagate from the expanded node and work back to the root node\n node.Update(state.GetResult()) # state is terminal. Update node with result from POV of node.playerJustMoved\n node = node.parentNode\n\n # Output some information about the tree - can be omitted\n if (verbose):\n print(rootnode.TreeToString(0))\n else:\n print(rootnode.ChildrenToString())\n\n return sorted(rootnode.childNodes, key=lambda c: c.visits)[-1].move # return the move that was most visited", "def test_set_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n assert tree.root.left.value == 5\n assert tree.root.right.value == 100", "def test_tree_binary_tree() -> None:\n t = generate_binary_tree_resources(4, 3)\n field(t, (\"root\", \"ds\", \"f1\")).identity = \"email\"\n field(t, (\"root.0.1.0\", \"ds.0.1.0\", \"f1\")).identity = \"ssn\"\n field(t, (\"root.1.1\", \"ds.1.1\", \"f1\")).identity = \"user_id\"\n assert generate_traversal({\"email\": \"X\"}, *t)\n assert generate_traversal({\"ssn\": \"X\"}, *t)\n assert generate_traversal({\"user_id\": \"X\"}, *t)", "def walk_tree(visitor, data_structure):\n if isinstance(data_structure, dict):\n for key in data_structure.keys():\n data_structure[key] = walk_tree(visitor, data_structure[key])\n elif isinstance(data_structure, list):\n for i in xrange(len(data_structure)):\n data_structure[i] = walk_tree(visitor, data_structure[i])\n else:\n data_structure = visitor(data_structure)\n return data_structure", "def testInsertDeep(self):\n\n #insert\n for i in xrange(randint(50, 180)):\n self.s.insert(randint(-2147483648,2147483647), i)\n\n #walk through the tree\n self.assertIsNotNone(self.s._root)\n self.assertIsNone(self.s._root.parent)\n self.assertIsNotNone(self.s._root.left)\n self.assertIsNotNone(self.s._root.right)\n\n def traversalHelper(n):\n if not n:\n return\n self.assertTrue((n.parent.left is n) or (n.parent.right is n))\n traversalHelper(n.left)\n traversalHelper(n.right)\n\n traversalHelper(self.s._root.left)\n traversalHelper(self.s._root.right)", "def _traverse(self, word):\n node = self.root\n for i in (ord(x)-97 for x in word):\n if not 
node.data[i]: return None\n node = node.data[i]\n return node", "def _internal_build(self):\n self.nodes = self.__tree.Nodes()\n self.edges = self.__tree.Edges()\n self.augmentedEdges = {}\n for key, val in self.__tree.AugmentedEdges().items():\n self.augmentedEdges[key] = list(val)\n self.root = self.__tree.Root()\n\n seen = set()\n self.branches = set()\n\n # Find all of the branching nodes in the tree, degree > 1\n # That is, they appear in more than one edge\n for e1, e2 in self.edges:\n if e1 not in seen:\n seen.add(e1)\n else:\n self.branches.add(e1)\n\n if e2 not in seen:\n seen.add(e2)\n else:\n self.branches.add(e2)\n\n # The nodes that are not branches are leaves\n self.leaves = set(self.nodes.keys()) - self.branches\n self.leaves.remove(self.root)", "def traverse_tree(pid,nodes):\n\n for child in get_children(pid):\n nodes.update(traverse_tree(child,nodes))\n nodes.add(pid)\n\n return nodes", "def _walk(self, element):\n if not isinstance(element, dict) or len(element) != 1:\n raise TreeIntegrityError\n key, sublist = tuple(element.items())[0]\n if not isinstance(sublist, list):\n raise TreeIntegrityError\n yield key\n for sublist_element in sublist:\n for recursive_elem in self._walk(sublist_element):\n yield recursive_elem", "def by_level_traversal(self) -> Queue:\n # initialize Queue objects\n new_q = Queue()\n last_q = Queue()\n\n #binary search tree == empty\n if self.root is None:\n return last_q\n\n #root in enque.q\n new_q.enqueue(self.root)\n\n # iterate for processing\n while not new_q.is_empty():\n working_node = new_q.dequeue()\n if working_node is not None:\n last_q.enqueue(working_node)\n new_q.enqueue(working_node.left)\n new_q.enqueue(working_node.right)\n\n return last_q", "def _traverse(node):\n all_words = []\n if node.is_leaf:\n return node.actual_word\n for key, value in node.children.items():\n curr_word = Trie._traverse(value)\n all_words = all_words + curr_word\n return all_words", "def __init__(self):\n self._root = None\n self._count = 0", "def _apply_tree_policy(self, root, state):\n visit_path = [root]\n working_state = state.clone()\n current_node = root\n while not working_state.is_terminal() and current_node.explore_count > 0:\n if not current_node.children:\n # For a new node, initialize its state, then choose a child as normal.\n legal_actions = working_state.legal_actions()\n # Reduce bias from move generation order.\n self._random_state.shuffle(legal_actions)\n player_sign = -1 if working_state.current_player() != self.player else 1\n current_node.children = [SearchNode(action, player_sign)\n for action in legal_actions]\n\n if working_state.is_chance_node():\n # For chance nodes, rollout according to chance node's probability\n # distribution\n outcomes = working_state.chance_outcomes()\n action_list, prob_list = zip(*outcomes)\n action = self._random_state.choice(action_list, p=prob_list)\n chosen_child = next(c for c in current_node.children\n if c.action == action)\n else:\n # Otherwise choose node with largest UCT value\n chosen_child = max(\n current_node.children,\n key=lambda c: c.uct_value(current_node.explore_count, self.uct_c, # pylint: disable=g-long-lambda\n self.child_default_value))\n\n working_state.apply_action(chosen_child.action)\n current_node = chosen_child\n visit_path.append(current_node)\n\n return visit_path, working_state", "def walk(folder: str, filesystem: Filesystem, branch: str = 'all',\n leaf: str = 'all') -> Iterator[Tuple[str, str, str]]:\n for current_branch in filesystem.list_folders(folder):\n if branch not 
in ('all', current_branch):\n continue\n\n branch_folder = filesystem.join(folder, current_branch)\n for current_leaf in filesystem.list_folders(branch_folder):\n\n if leaf not in ('all', current_leaf):\n continue\n\n leaf_folder = filesystem.join(branch_folder, current_leaf)\n\n yield current_branch, current_leaf, leaf_folder", "def traverse(name, furtherPath):", "def walk_tree(top_most_path, callback):\n for file in os.listdir(top_most_path):\n pathname = os.path.join(top_most_path, file)\n mode = os.stat(pathname)[ST_MODE]\n if S_ISDIR(mode):\n # It's a directory, recurse into it\n walk_tree(pathname, callback)\n elif S_ISREG(mode):\n # It's a file, call the callback function\n callback(pathname)\n else:\n # Unknown file type, print a message\n print(\"Skipping %s\" % pathname)", "def walk_tree(self, path, topdown=True):\n if isinstance(path, File):\n # Called with File object as an argument\n root = path\n path = root.path\n else:\n root = File(path)\n\n files, dirs = [], []\n\n try:\n for item in os.listdir(path):\n file_path = os.path.join(path, item)\n\n if self.path_ignore and self.path_ignore.match(file_path):\n # Skip excluded paths\n lg.debug(\"Ignoring path %s\" % file_path)\n continue\n\n try:\n f_object = File(file_path, seen=root.already_seen)\n except UnsupportedFileType as e:\n lg.warn('%s ..skipping' % e)\n continue\n except OSError as e:\n if e.errno == errno.ENOENT:\n # File already removed, go on\n lg.debug('File already removed: %s' % e)\n continue\n elif e.errno in [errno.EPERM, errno.EACCES]:\n # Permission denied or operation not permitted, log error and go on\n lg.error(e)\n continue\n else:\n # Other errors should be fatal, but we don't want them to be\n # eg. corrupted file on GlusterFS may raise IOError, but we want to continue\n lg.exception(e)\n continue\n\n if f_object.directory is True:\n dirs.append(f_object)\n else:\n files.append(f_object)\n except OSError as e:\n # Exceptions that may come from os.listdir()\n if e.errno == errno.ENOENT:\n # Directory doesn't exist, go on\n pass\n elif e.errno in [errno.EPERM, errno.EACCES]:\n # Permission denied or operation not permitted, log error and go on\n lg.error(e)\n pass\n else:\n # Other errors should be fatal, but we don't want them to be\n # eg. 
corrupted file on GlusterFS may raise IOError, but we want to go on\n lg.exception(e)\n pass\n\n if topdown:\n yield root, dirs, files\n\n for item in dirs:\n for x in self.walk_tree(item):\n yield x\n\n if not topdown:\n yield root, dirs, files", "def iter_tree(self):\n yield self\n for c in self.children:\n for ci in c.iter_tree:\n yield ci", "def test_compiler_parse_tree(compiler, patch):\n patch.object(Compiler, 'subtree')\n tree = Tree('start', [Tree('command', ['token'])])\n compiler.parse_tree(tree)\n compiler.subtree.assert_called_with(Tree('command', ['token']),\n parent=None)", "def test_iter_children():\n builder = TreeBuilder()\n builder.create_root(0)\n\n data = list(range(2, 15, 3))\n for datum in data:\n builder.add_child(datum)\n t = builder.build()\n\n for i, child in enumerate(t):\n assert child.data == data[i]", "def walk_depth_first(\n root: DOMNode,\n filter_type: type[WalkType] | None = None,\n *,\n with_root: bool = True,\n) -> Iterable[DOMNode] | Iterable[WalkType]:\n from textual.dom import DOMNode\n\n stack: list[Iterator[DOMNode]] = [iter(root.children)]\n pop = stack.pop\n push = stack.append\n check_type = filter_type or DOMNode\n\n if with_root and isinstance(root, check_type):\n yield root\n while stack:\n node = next(stack[-1], None)\n if node is None:\n pop()\n else:\n if isinstance(node, check_type):\n yield node\n if node.children:\n push(iter(node.children))", "def tree_contains(T, x):", "def _refind_nodes(self, reSearchItems, root=None, sortByDepth=False):\n\n reListOfSearchItems = list(reSearchItems)\n\n if root == None:\n ReParent = reListOfSearchItems.pop(0)\n Out = [x for x in self.ParentMap.keys() if ReParent.match(x.tag)]\n\n else:\n Out = [root]\n\n\n while len(reListOfSearchItems) > 0:\n ReParent = reListOfSearchItems.pop(0)\n Out = [x for root in Out for x in root.iter() if ReParent.match(x.tag)]\n\n if sortByDepth == False: return Out\n\n TDict = dict((x, len(self.get_path_to_node(x))) for x in Out)\n return [o[0] for o in sorted(TDict.items(),key=lambda x:x[1])]", "def walk(self): # FileObj.walk\n yield self", "def test_tree_mode3(self):\n xpb = XPathBuilder()\n xp_1 = xpb.foo\n xp_2 = xpb.baz\n xp_and = xp_1 & xp_2\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 2)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_and._children[1] is xp_2)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n self.assertTrue(xp_2._parent is xp_and)\n self.assertTrue(len(xp_2._children) == 0)\n xp_and.remove_child(xp_2)\n # check references after remove\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 1)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n # xp_2's references were changed\n self.assertTrue(xp_2._parent is None)\n self.assertTrue(len(xp_2._children) == 0)", "def walk(top):\r\n yield top\r\n for name in os.listdir(top):\r\n name = os.path.join(top, name)\r\n if os.path.isdir(name) and not os.path.islink(name):\r\n for dir in walk(name):\r\n yield dir" ]
[ "0.6518105", "0.6321883", "0.6208348", "0.6140773", "0.6119242", "0.60756904", "0.5974666", "0.5938595", "0.5925983", "0.5857213", "0.58487475", "0.5806037", "0.5800146", "0.5788099", "0.5784158", "0.57694286", "0.57677877", "0.57413566", "0.5722563", "0.5715842", "0.56998444", "0.56946856", "0.56785583", "0.56704086", "0.56625193", "0.5660761", "0.56557494", "0.5643834", "0.56391716", "0.56254166", "0.5619228", "0.56175005", "0.56173563", "0.5615186", "0.560805", "0.5604464", "0.5591875", "0.5577241", "0.5577241", "0.55699235", "0.5566492", "0.5565722", "0.5553594", "0.554557", "0.5541481", "0.5538995", "0.5537276", "0.5536281", "0.5534386", "0.55248266", "0.55005664", "0.5472308", "0.5471461", "0.5466062", "0.546475", "0.5464711", "0.54615206", "0.5459285", "0.5453234", "0.5435923", "0.54339576", "0.54314965", "0.5431477", "0.54310036", "0.54265416", "0.54234403", "0.54199576", "0.54057497", "0.54050744", "0.5403995", "0.538918", "0.53874445", "0.5385987", "0.5381353", "0.537934", "0.53751874", "0.53739643", "0.53732264", "0.5358195", "0.5357889", "0.53518844", "0.53512454", "0.5349099", "0.5345819", "0.53404653", "0.5333872", "0.5332093", "0.5328094", "0.5327285", "0.53265077", "0.5322971", "0.532154", "0.5310718", "0.530949", "0.5306167", "0.52955925", "0.5294931", "0.52938247", "0.5292023", "0.52851856", "0.52778065" ]
0.0
-1
Return data (tuple of classes, params) for a given host.
def get_host_data(hostname, gettype='walk'): filteredNodes = Node.objects.filter(hostname=hostname) if (filteredNodes.count() == 1): node = filteredNodes[0] exclusions = get_exclusions(node) if gettype == 'work': (classes, params) = work_tree(node, exclusions=exclusions) return (classes, params) elif gettype == 'optwork': (classes, params) = optimized_work_tree(node, exclusions=exclusions) return (classes, params) elif gettype == 'classwork': (classes, params) = work_tree2(node, exclusions=exclusions) return (classes, params) elif gettype == 'walk': (classes, params) = walk_tree(node, exclusions=exclusions) return (classes, params) else: return ({}, {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_host_data(self):\n\n raise NotImplementedError", "def get_host_variables(self, host):\n vars = {}\n for i in self.parsers:\n vars.update(i.get_host_variables(host))\n return vars", "def loadAllHostinfo():\n hidata={}\n str=\"\"\n keytypes=loadHostinfoKeys()\n keylist=sorted(hostinfo.keys())\n keylist.remove('hostname')\n for k in keylist:\n \tstr+=\" -p %s \" % k\n f=os.popen('/app/hostinfo/bin/hostinfo --noheader --csv %s' % str)\n data=f.read()\n f.close()\n strfd=cStringIO.StringIO(data)\n reader=csv.reader(strfd)\n\n for line in reader:\n \thost=line.pop(0)\n\thidata[host]={}\n\tfor key in keylist:\n\t data=line.pop(0)\n\t if not data:\n\t \tcontinue\n\t if keytypes[key]=='list':\n\t\thidata[host][key]=data.split(',')\n\t else:\n\t\thidata[host][key]=data\n\n return hidata,keytypes", "def get_host_data_fields(self):\n\n raise NotImplementedError", "def host_info(self, host):\n\n endpoint = '/Domain/Host/Info'\n\n params = {\n 'Host' : host,\n }\n \n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n return parsed_response", "def getConfigs(self, host):\n raise \"not implemented\"", "def stats_get(self, host):\n\n s = self.get_stats(host, 'get')\n\n data = {\n 'missing_total': s['missing_total'],\n 'exists_total': s['exists_total'],\n 'current': s['current'],\n 'total': s['total']\n }\n\n return data", "def host(self, host):\n if host in self.hosts_:\n vals = defaultdict(list)\n for k, value in [(x.key.lower(), x.value) for x in self.lines_\n if x.host == host and x.key.lower() != \"host\"]:\n vals[k].append(value)\n flatten = lambda x: x[0] if len(x) == 1 else x\n return {k: flatten(v) for k, v in vals.items()}\n return {}", "def get_host_info(self):\n\n if len(self.index) == 0:\n # Need to load index from cache\n self.load_index_from_cache()\n\n if not self.args.host in self.index:\n # try updating the cache\n self.do_api_calls_update_cache()\n if not self.args.host in self.index:\n # host might not exist anymore\n return self.json_format_dict({}, True)\n\n node_id = self.index[self.args.host]\n print \"NODE ID %s\" % node_id\n print \"INDEX: %s\" % self.index\n\n node = self.get_node(node_id)\n node_vars = {}\n for direct_attr in [\n \"api_id\",\n \"datacenter_id\",\n \"label\",\n \"display_group\",\n \"create_dt\",\n \"total_hd\",\n \"total_xfer\",\n \"total_ram\",\n \"status\",\n \"alert_cpu_enabled\",\n \"alert_cpu_threshold\",\n \"alert_diskio_enabled\",\n \"alert_diskio_threshold\",\n \"alert_bwin_enabled\",\n \"alert_bwin_threshold\",\n \"alert_bwout_enabled\",\n \"alert_bwout_threshold\",\n \"alert_bwquota_enabled\",\n \"alert_bwquota_threshold\",\n \"backup_weekly_daily\",\n \"backup_window\",\n \"watchdog\"\n ]:\n node_vars[direct_attr] = getattr(node, direct_attr)\n\n node_vars[\"datacenter_city\"] = self.get_datacenter_city(node)\n node_vars[\"public_ip\"] = [addr.address for addr in node.ipaddresses if addr.is_public][0]\n\n return self.json_format_dict(node_vars, True)", "def get_defaultvalues(host):\n return get_obj_defaultvalues(OBJT_HOST, host)", "def get_host_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_HOST_INFO)", "def get_dataset(data_pars=None, task_type=\"train\", **kw):\n # log(data_pars)\n data_type = data_pars.get('type', 'ram')\n cols_ref = cols_ref_formodel\n\n if data_type == \"ram\":\n # cols_ref_formodel = ['cols_cross_input', 'cols_deep_input', 'cols_deep_input' ]\n ### dict colgroup ---> list of colname\n\n cols_type_received = 
data_pars.get('cols_model_type2', {} ) ##3 Sparse, Continuous\n\n if task_type == \"predict\":\n d = data_pars[task_type]\n Xtrain = d[\"X\"]\n Xtuple_train = get_dataset_tuple(Xtrain, cols_type_received, cols_ref)\n return Xtuple_train\n\n if task_type == \"eval\":\n d = data_pars[task_type]\n Xtrain, ytrain = d[\"X\"], d[\"y\"]\n Xtuple_train = get_dataset_tuple(Xtrain, cols_type_received, cols_ref)\n return Xtuple_train, ytrain\n\n if task_type == \"train\":\n d = data_pars[task_type]\n Xtrain, ytrain, Xtest, ytest = d[\"Xtrain\"], d[\"ytrain\"], d[\"Xtest\"], d[\"ytest\"]\n\n ### dict colgroup ---> list of df\n Xtuple_train = get_dataset_tuple(Xtrain, cols_type_received, cols_ref)\n Xtuple_test = get_dataset_tuple(Xtest, cols_type_received, cols_ref)\n log2(\"Xtuple_train\", Xtuple_train)\n\n return Xtuple_train, ytrain, Xtuple_test, ytest\n\n\n elif data_type == \"file\":\n raise Exception(f' {data_type} data_type Not implemented ')\n\n raise Exception(f' Requires Xtrain\", \"Xtest\", \"ytrain\", \"ytest\" ')", "def fetch_host_caps(self, host):\n e = host.executor()\n cmd_cpuinfo = (\n 'grep', 'vendor_id', '/proc/cpuinfo', '|',\n 'sort', '|',\n 'uniq', '|',\n 'cut', '-d:', '-f2',\n )\n with e.session() as ss:\n # Find vendor\n rc, out, err = ss.run_cmd(cmd_cpuinfo)\n vendor = out.strip()\n if rc or not vendor:\n raise CpuModelError(\"Can not resolve host's cpuinfo: %s\" % err)\n\n # List cpu models\n vds_caps = host.vds_client(cmd=\"Host.getCapabilities\")\n vds_caps = dict() if not vds_caps else vds_caps\n cpu_flags = vds_caps.get(\"cpuFlags\", \"\").split(\",\")\n models = [i for i in cpu_flags if \"model_\"in i]\n if not models:\n logger.warning(\"Can not resolve host's models: %s\", err)\n models = [\n MIN_MODEL.get(self._id_to_vendor(vendor))\n ]\n logger.warning(\n \"Setting minimal cpu model for %s: %s\", vendor, models[0])\n return {\n 'models': models,\n 'vendor': vendor,\n }", "def host(self, host):\n for p, c in self.configs_:\n if host in c.hosts_:\n return c.host(host)\n return {}", "def _get_vm_instance_data(self, services, deployment, deployed_app):\n internal_service, external_service = self._get_internal_external_services_set(\n services\n )\n\n data = [\n VmDetailsProperty(key=\"Image\", value=self._get_image(deployment)),\n VmDetailsProperty(\n key=\"Replicas\", value=self._get_replicas(deployment, deployed_app)\n ),\n VmDetailsProperty(\n key=\"Ready Replicas\", value=self._get_ready_replicas(deployment)\n ),\n VmDetailsProperty(\n key=\"Internal IP\", value=self.get_internal_ip(internal_service)\n ),\n VmDetailsProperty(\n key=\"Internal Ports\", value=self._get_service_ports(internal_service)\n ),\n VmDetailsProperty(\n key=\"External IP\", value=self.get_external_ip(external_service)\n ),\n VmDetailsProperty(\n key=\"External Ports\",\n value=self._get_external_service_ports(external_service),\n ),\n ]\n\n return data", "def dispatch_host(name, data):\n\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)", "def restructure_host_cpu_data(host):\n init_cpu_counts(host)\n host.sockets = len(host.nodes or [])\n host.hyperthreading = False\n host.physical_cores = 0\n if not host.cpus:\n return\n host.cpu_model = host.cpus[0].cpu_model\n cpu_list = sorted(host.cpus, key=_sort_by_coreid)\n for cpu in cpu_list:\n inode = pecan.request.dbapi.inode_get(inode_id=cpu.forinodeid)\n cpu.numa_node = inode.numa_node\n if cpu.thread == 0:\n host.physical_cores += 1\n elif cpu.thread > 0:\n host.hyperthreading = True\n function = 
cpu.allocated_function or get_default_function(host)\n host.cpu_functions[cpu.numa_node][function].append(int(cpu.cpu))\n host.cpu_lists[cpu.numa_node].append(int(cpu.cpu))", "def extract_device_information(self, host_dict):\n self.host_list = []\n if self.args.hostname is None:\n try:\n hosts_val = self.main_file[\"hosts\"]\n except KeyError as ex:\n self.logger.error(\n colorama.Fore.RED\n + \"\\nERROR occurred !! Hostname not given properly %s\" % str(ex),\n extra=self.log_detail,\n )\n # raise Exception(ex)\n except Exception as ex:\n self.logger.error(\n colorama.Fore.RED + \"\\nERROR occurred !! %s\" % str(ex),\n extra=self.log_detail,\n )\n # raise Exception(ex)\n else:\n # when group of devices are given, searching for include keyword in\n # hosts in main.yaml file\n self.get_hosts_list(hosts_val, host_dict)\n else:\n # login credentials are given from command line\n host_dict[\"0\"] = {\n \"device\": self.args.hostname,\n \"username\": self.args.login,\n \"passwd\": self.args.passwd,\n }\n self.host_list.append(self.args.hostname)", "def get_dataset(params):\r\n module_name, class_name = params.dataset.name.rsplit('.', 1)\r\n i = importlib.import_module(module_name)\r\n return getattr(i, class_name)", "def __getitem__(self, host):\n if IS_PY2:\n assert type(host) in (str, unicode), 'Wrong type for [host], should be a string [was {0}]'.format(\n type(host))\n else:\n assert type(host) is str, 'Wrong type for [host], should be a string [was {0}]'.format(type(host))\n return self._scan_result['scan'][host]", "def get_host(self, host):\n for droplet in self.do.droplets:\n if droplet[\"ip_address\"] == host:\n return {\"do_{}\".format(k): v for k, v in droplet.iteritems()}\n return {}", "def host(self, host: str, fields: str = None) -> dict:\n endpoint = f\"/api/host/{host}\" if host else \"/api/host/\"\n ret = self._request(\n endpoint=endpoint,\n params={\"fields\": fields} if fields else {},\n )\n return ret", "def multiple_device_details(\n self, hosts, config_data, pre_name, action, post_name):\n res_obj = []\n self.host_list = []\n host_dict={}\n\n first_entry = hosts[0]\n if 'include' in first_entry:\n devices_file_name = first_entry['include']\n if os.path.isfile(devices_file_name):\n lfile = devices_file_name\n else:\n lfile = os.path.join(\n expanduser(get_path(\n 'DEFAULT',\n 'test_file_path')),\n devices_file_name)\n login_file = open(lfile, 'r')\n dev_file = yaml.load(login_file)\n gp = first_entry.get('group', 'all')\n\n dgroup = [i.strip().lower() for i in gp.split(',')]\n for dgp in dev_file:\n if dgroup[0].lower() == 'all' or dgp.lower() in dgroup:\n for val in dev_file[dgp]:\n hostname = list(val)[0]\n self.log_detail = {'hostname': hostname}\n if val.get(hostname) is not None and hostname not in host_dict:\n host_dict[hostname] = deepcopy(val.get(hostname))\n self.host_list.append(hostname)\n else:\n for host in hosts:\n try:\n hostname = host['device']\n self.log_detail = {'hostname': hostname}\n except KeyError as ex:\n self.logger.error(\n colorama.Fore.RED +\n \"ERROR!! KeyError 'device' key not found\",\n extra=self.log_detail)\n except Exception as ex:\n self.logger.error(\n colorama.Fore.RED +\n \"ERROR!! 
%s\" %\n ex,\n extra=self.log_detail)\n else:\n if hostname not in host_dict:\n self.host_list.append(hostname)\n host_dict[hostname] = deepcopy(host)\n\n for (hostname, key_value) in iteritems(host_dict):\n username = key_value.get('username')\n password = key_value.get('passwd')\n key_value = self.get_values(key_value)\n t = Thread(\n target=self.connect,\n args=(\n hostname,\n username,\n password,\n pre_name,\n config_data,\n action,\n post_name),\n kwargs= key_value\n )\n t.start()\n t.join()\n if action == \"snap\":\n if not self.snap_q.empty():\n res_obj.append(self.snap_q.get())\n elif action in [\"snapcheck\", \"check\"]:\n if not self.q.empty():\n res_obj.append(self.q.get())\n else:\n res_obj.append(None)\n\n return res_obj", "def get_host_info(search_keyword, starbucks_data, city_info):\n host_data = []\n\n payload = {\n \"query_type\": \"RQBXY\",\n \"pagesize\": \"20\",\n \"pagenum\": '',\n \"qii\": \"true\",\n \"cluster_state\": \"5\",\n \"need_utd\": \"true\",\n \"utd_sceneid\": \"1000\",\n \"div\": \"PC1000\",\n \"addr_poi_merge\": \"true\",\n \"is_classify\": \"true\",\n \"zoom\": \"14\",\n \"longitude\": starbucks_data['longitude'],\n \"latitude\": starbucks_data['latitude'],\n \"range\": \"1000\",\n \"city\": city_info[1][0],\n \"keywords\": search_keyword,\n }\n\n for page_num in range(1, 3):\n payload['pagenum'] = page_num\n poi_list = request_amap_poi_info(payload, 'https://www.amap.com/place/' + starbucks_data['amap_key'])\n\n if not poi_list:\n print('request host list fail with %s' % page_num)\n continue\n\n for poi in poi_list:\n if not (poi.get('longitude', '') or poi.get('latitude', '') or starbucks_data['longitude'] or starbucks_data['latitude']):\n distance = None\n else:\n distance = geo_distance(poi.get('longitude', ''), poi.get('latitude', ''),starbucks_data['longitude'], starbucks_data['latitude'])\n\n data = {\n 'starbucks_key': starbucks_data['amap_key'],\n 'keyword': search_keyword,\n 'city': poi.get('cityname'),\n 'name': poi.get('name'),\n 'longitude': poi.get('longitude'),\n 'latitude': poi.get('latitude'),\n 'address': poi.get('address'),\n 'tel': poi.get('tel'),\n 'mean_price': '',\n 'distance': distance\n }\n domain_list = poi.get('domain_list')\n for domain in domain_list:\n if domain.get('name', '') == 'price':\n price_raw = domain.get('value', '')\n # price_raw = \"<font color='#90969a'>人均:</font><font color='#f84b57'>¥</font><font color='#f84b57'>114</font>\"\n try:\n data['mean_price'] = re.findall('<.*>人均:<.*>¥<.*>([0-9]+)</font>', price_raw)[0]\n except:\n data['mean_price'] = None\n break\n host_data.append(data)\n\n print('【%s】的【%s】的周边的【%s】菜系,第【%d】页爬取完毕' % (city_info[1], starbucks_data['name'], search_keyword, page_num))\n return host_data", "def stats_search(self, host):\n\n s = self.get_stats(host, 'search')\n\n data = {\n 'query_total': s['query_total'],\n 'fetch_time_in_millis': s['query_time_in_millis'],\n 'fetch_total': s['fetch_total'],\n 'query_time_in_millis': s['fetch_time_in_millis'],\n 'open_contexts': s['open_contexts'],\n 'fetch_current': s['fetch_current'],\n 'query_current': s['query_current']\n }\n\n return data", "def get_prepared_data(cls, ext_stations=None):\n ext_stations = ext_stations or StationDAO.get_all_with_prices()\n features = (cls.get_station_features(row) for row in ext_stations)\n classes = (cls.get_category(row) for row in ext_stations)\n return features, classes", "def _nodeinfo_endpoint(host):\n zkclient = context.GLOBAL.zk.conn\n nodeinfo_zk_path = '{}/{}'.format(z.ENDPOINTS, 'root')\n for node in 
zkclient.get_children(nodeinfo_zk_path):\n if 'nodeinfo' in node and host in node:\n data, _metadata = zkclient.get(\n '{}/{}'.format(nodeinfo_zk_path, node)\n )\n return data.decode().split(':')", "def getHostInfo():", "def get_services(host):\n services = query(\"$.host.'{host}'.service\", host=host)\n return services", "def get(self, host):\n return self.__locusts__[host]", "def _get_instance(self, meas_time, segment_value, last_meas_time):\n data = list()\n\n if self._process_type == 'soft_gen':\n if self._data['report_save_historical_instances_ind'] != 'Y':\n # for non historical reports take measurement time from saved dataset\n saved_dataset = self._jfile.get_current_stored_dataset()\n else:\n self._db.Query(\"\"\"SELECT report_data_set_instance_id\n FROM report_data_set_instance\n WHERE\n `element_id`= %s\n AND segment_value_id = %s\n AND measurement_time = %s\n LIMIT 0, 1\"\"\", (self._id, self._segment_value_id, meas_time))\n data_set_instance_id = self._db.record[0]['report_data_set_instance_id']\n saved_dataset = self._jfile.get_stored_dataset(data_set_instance_id)\n\n if saved_dataset:\n dataset = simplejson.loads(saved_dataset['instance'])\n data = self._outer_conn.parse_collected_data(dataset)\n \n else:\n if self._data['data_fetch_method'] == 'sql':\n sql = self._data['data_fetch_command']\n\n named_placeholders = list()\n last_meas_time_arg = dict()\n last_meas_time_arg['name'] = 'last_measurement_time'\n last_meas_time_arg['value'] = last_meas_time.strftime('%Y-%m-%d %H:%M:%S')\n last_meas_time_arg['type'] = 'DATE'\n named_placeholders.append(last_meas_time_arg)\n\n meas_time_arg = dict()\n meas_time_arg['name'] = 'measurement_time'\n meas_time_arg['value'] = meas_time.strftime('%Y-%m-%d %H:%M:%S')\n meas_time_arg['type'] = 'DATE'\n named_placeholders.append(meas_time_arg)\n\n # process segment data\n if segment_value:\n segment_arg = dict()\n segment_arg['value'] = ''\n segment_arg['type'] = ''\n segment_arg['name'] = self._segment['data_fetch_command_bind_parameter']\n if self._segment['partition_value_type'] == 'int':\n segment_arg['value'] = self._segment_value['value_int']\n segment_arg['type'] = 'INTEGER'\n elif self._segment['partition_value_type'] == 'varchar':\n segment_arg['value'] = self._segment_value['value_varchar']\n segment_arg['type'] = 'NVARCHAR'\n\n named_placeholders.append(segment_arg)\n\n data = self._outer_conn.query(sql, named_placeholders)\n self._json_fetched_data = self._outer_conn.get_json_result()\n\n elif self._data['data_fetch_method'] == 'web service':\n self._json_fetched_data = ''\n # if self._segment and self._segment_value:\n # if self._segment['partition_value_type'] == 'int':\n # subst = self._segment_value['value_int']\n # elif self._segment['partition_value_type'] == 'varchar':\n # subst = self._segment_value['value_varchar']\n # data_fetch_command_bind_parameter = self._segment['data_fetch_command_bind_parameter']\n # else:\n # data_fetch_command_bind_parameter = ''\n # subst = ''\n\n #data = self._outer_conn.query(last_meas_time, data_fetch_command_bind_parameter, subst, 'get_data', meas_time)\n\n self._json_fetched_data = self._outer_conn.get_json_result(meas_time)\n data = self._web_service_data[meas_time] \n \n \n return data", "def get_hostname_specificdata(host, index=None):\n connection, tablename = HomeNetwork.get_connection_info()\n query = 'SELECT * from {0} WHERE hostname=\"{1}\"'.format(\\\n tablename, host)\n output = pandas.read_sql_query(query, connection).to_json(orient='records')\n\n if index:\n for hostinfo in 
json.loads(output):\n if hostinfo[\"hostname\"] == host and index in hostinfo:\n return hostinfo[index]\n else:\n return json.loads(output)", "def backend_info_get(context, host):\n result = _backend_info_query(context, host)\n return result", "def fetch_host_feed(self, host, **args):\n return self.fetch(\"/url\", host=host, **args)", "def create_conn_data(device_data: Dict[str, Any]) -> Dict[str, Any]:\n result = {\n \"host\": device_data[\"host\"],\n \"username\": USERNAME,\n \"password\": PASSWORD,\n \"device_type\": PLATFORM,\n \"fast_cli\": True,\n }\n return result", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def _get_vms_on_host(self, host_ref):\n vm_data = []\n vm_ret = self._session._call_method(vutil,\n \"get_object_property\",\n host_ref,\n \"vm\")\n # if there are no VMs on the host, we don't need to look further\n if not vm_ret:\n return vm_data\n\n vm_mors = vm_ret.ManagedObjectReference\n result = self._session._call_method(vutil,\n \"get_properties_for_a_collection_of_objects\",\n \"VirtualMachine\", vm_mors,\n [\"config.instanceUuid\", \"runtime.powerState\",\n \"config.hardware.memoryMB\", \"config.managedBy\"])\n with vutil.WithRetrieval(self._session.vim, result) as objects:\n for obj 
in objects:\n vm_props = propset_dict(obj.propSet)\n # sometimes, the vCenter finds a file it thinks is a VM and it\n # doesn't even have a config attribute ... instead of crashing\n # with a KeyError, we assume this VM is not running and totally\n # doesn't matter as nova also will not be able to handle it\n if 'config.instanceUuid' not in vm_props:\n continue\n\n vm_data.append((\n vm_props['config.instanceUuid'],\n vm_props['config.hardware.memoryMB'],\n vm_props['runtime.powerState'],\n vm_props.get('config.managedBy'),\n vutil.get_moref_value(obj.obj)))\n return vm_data", "def get_host_config(address):\n\n db = connect_db.Pymysql()\n host_data = db.findall(\"host_ip\",where=\"ip\",val=address)\n if host_data:\n for i in host_data:\n port = i['port']\n hostname = i['hostname']\n tp_data = db.find_join(\"host_template\",\"host_ip\",where='ip',val=address)\n tp_name= tp_data['name']\n host_conf = eval(\"templates.\"+tp_name+\"()\")\n host_conf.hostname = hostname\n host_conf.port = port\n config_dict = {}\n\n config_dict['hostname'] = host_conf.hostname\n config_dict[\"port\"] = host_conf.port\n config_dict[\"status\"] = \"on\"\n for k,v in host_conf.services.items():\n config_dict[k]=[v.interval,v.plugin_name,0]\n\n return xpickle.dumps(config_dict)", "def get_host_vars(self, hostname, strict=False):\n _host = self.get_inv_host(hostname, strict=strict)\n if not _host:\n return {}\n return _host.get_vars()", "def get_connection_data() -> Dict[str, Any]:\n conn_info = {\n \"host\": os.environ[\"HOST\"],\n \"port\": os.environ[\"PORT\"]\n }\n return conn_info", "def stats_store(self, host):\n\n s = self.get_stats(host, 'store')\n\n data = {\n 'size_in_bytes': s['size_in_bytes'],\n 'throttle_time_in_millis': s['throttle_time_in_millis']\n }\n\n return data", "def cluster_health(self, host):\n\n h = self.call_to_cluster(host, '/_cluster/health')\n\n data = {\n 'number_of_nodes': h['number_of_nodes'],\n 'unassigned_shards': h['unassigned_shards'],\n 'timed_out': h['timed_out'],\n 'active_primary_shards': h['active_primary_shards'],\n 'relocating_shards': h['relocating_shards'],\n 'active_shards': h['active_shards'],\n 'initializing_shards': h['initializing_shards'],\n 'number_of_data_nodes': h['number_of_data_nodes']\n }\n\n return data", "def get_host_stats(self):\n status, data, errors, messages = self._make_get_request(CraftyAPIRoutes.HOST_STATS)\n \n if status == 200:\n return data\n elif status == 500:\n self._check_errors(errors, messages)", "def _make_data(cls, data: 'Data_ARP') -> 'dict[str, Any]': # type: ignore[override]\n return {\n 'htype': data.htype,\n 'ptype': data.ptype,\n 'hlen': data.hlen,\n 'plen': data.plen,\n 'oper': data.oper,\n 'sha': data.sha,\n 'spa': data.spa,\n 'tha': data.tha,\n 'tpa': data.tpa,\n 'payload': cls._make_payload(data),\n }", "def build_params(clsite, dt):\n hp = GaugeParams()\n lp = GaugeParams()\n if clsite is not None:\n with get_sqlalchemy_conn(\"coop\") as conn:\n df = pd.read_sql(\n \"SELECT year, high, low from alldata WHERE \"\n \"station = %s and sday = %s\",\n conn,\n params=(clsite, f\"{dt:%m%d}\"),\n index_col=\"year\",\n )\n hp.minval = df[\"high\"].min()\n hp.maxval = df[\"high\"].max()\n hp.avgval = df[\"high\"].mean()\n hp.stddev = df[\"high\"].std()\n hp.ptiles = df[\"high\"].quantile(np.arange(0.1, 0.91, 0.1)).to_list()\n lp.maxval = df[\"low\"].max()\n lp.minval = df[\"low\"].min()\n lp.avgval = df[\"low\"].mean()\n lp.stddev = df[\"low\"].std()\n lp.ptiles = df[\"low\"].quantile(np.arange(0.1, 0.91, 0.1)).to_list()\n\n return hp, 
lp", "def _get_data(self):\n c = Connector(self.host, self.username, self.password)\n return c.getLanDevices()", "def load(cls, host):\n\n return cls(host)", "def data(self):\n\t\treturn vars(self)", "def gather_res(outputs, target_device, dim=0):\n out = outputs[0]\n args = {field: Gather.apply(target_device, dim, *[getattr(o, field) for o in outputs]) for field, v in out.__dict__.items() if v is not None}\n return type(out)(**args)", "def _get_net_and_params(self, xgraph: XGraph, last_layers: List[str]):\n # TODO Remove hardcoding parameter retrieval \n\n net = []\n params = {}\n last_layer_cnt = 1\n last_layer_tops = set([])\n\n for X in xgraph.get_layers():\n\n if X.name in last_layer_tops:\n last_layer_tops = last_layer_tops.union(tuple(X.tops))\n continue\n\n if 'Convolution' in X.type or 'Conv2DTranspose' in X.type:\n if not isinstance(X.data, xlayer.ConvData):\n raise ValueError(\n \"Invalid convolution data type: {}, should be \"\n \" xlayer.ConvData\".format(type(X.data)))\n # OIHW\n params[X.name + '_kernel'] = X.data.weights\n params[X.name + '_biases'] = X.data.biases\n elif 'Dense' in X.type:\n if not isinstance(X.data, xlayer.ConvData):\n raise ValueError(\n \"Invalid inner product data type: {}, should be \"\n \" xlayer.ConvData\".format(type(X.data)))\n # OIHW\n params[X.name + '_weights'] = X.data.weights\n params[X.name + '_biases'] = X.data.biases\n elif 'BatchNorm' in X.type:\n if not isinstance(X.data, xlayer.BatchData):\n raise ValueError(\n \"Invalid batchnorm data type: {}, should be\"\n \" xlayer.BatchData\".format(type(X.data)))\n # channels\n params[X.name + '_mu'] = X.data.mu\n params[X.name + '_variance'] = X.data.sigma_square\n params[X.name + '_gamma'] = X.data.gamma\n params[X.name + '_beta'] = X.data.beta\n elif 'Scale' in X.type:\n if not isinstance(X.data, xlayer.ScaleData):\n raise ValueError(\n \"Invalid scale data type: {}, should be\"\n \" xlayer.ScaleData\".format(type(X.data)))\n # channels\n params[X.name + '_gamma'] = X.data.gamma\n params[X.name + '_beta'] = X.data.beta\n elif 'BiasAdd' in X.type:\n assert X.data is not None\n params[X.name + '_bias'] = X.data[0]\n elif 'Eltwise' in X.type:\n if X.data != []:\n params[X.name + '_beta'] = X.data[0]\n\n net.append(X)\n\n if last_layers is not None and X.name in last_layers:\n if last_layer_cnt == len(last_layers):\n break\n else:\n last_layer_cnt += 1\n last_layer_tops = last_layer_tops.union(tuple(X.tops))\n\n return net, params", "def get_data():\n pass", "def __init__(self, host_info):\n # A group name to group map. It represents the device tree.\n self._groups = self._BuildGroupTrees(host_info.device_infos)\n\n # Build device indexes for faster lookup.\n self._device_serial_index = {}\n self._run_target_index = {}\n for group in six.itervalues(self._groups):\n for d in self._ListGroupDevices(group):\n self._device_serial_index[d.device_serial] = d\n self._run_target_index.setdefault(\n d.run_target.name, {})[d.device_serial] = d", "def create_cpu_hostcall(host_calls):\n\n _OutfeedHostCall.validate(host_calls)\n ret = {}\n for name, host_call in host_calls.items():\n host_fn, tensors = host_call\n if isinstance(tensors, (tuple, list)):\n ret[name] = host_fn(*tensors)\n else:\n # Must be dict.\n try:\n ret[name] = host_fn(**tensors)\n except TypeError as e:\n logging.warning(\n 'Exception while calling %s: %s. 
It is likely the tensors '\n '(%s[1]) do not match the '\n 'function\\'s arguments', name, e, name)\n raise e\n return ret", "def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test", "def extended_parse(self):\n\t\t## Do the initial parsing\n\t\tself.parse()\n\n\t\t## First, cycle through the hosts, and append hostgroup information\n\t\tindex = 0\n\t\tfor host in self.data['all_host']:\n\t\t\tif host.has_key('register') and host['register'] == '0': continue\n\t\t\tif not host.has_key('host_name'): continue\n\t\t\tif not self.data['all_host'][index]['meta'].has_key('hostgroup_list'):\n\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'] = []\n\n\t\t\t## Append any hostgroups that are directly listed in the host definition\n\t\t\tif host.has_key('hostgroups'):\n\t\t\t\tfor hostgroup_name in self._get_list(host, 'hostgroups'):\n\t\t\t\t\tif not self.data['all_host'][index]['meta'].has_key('hostgroup_list'):\n\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'] = []\n\t\t\t\t\tif hostgroup_name not in self.data['all_host'][index]['meta']['hostgroup_list']:\n\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup_name)\n\n\t\t\t## Append any services which reference this host\n\t\t\tservice_list = []\n\t\t\tfor service in self.data['all_service']:\n\t\t\t\tif service.has_key('register') and service['register'] == '0': continue\n\t\t\t\tif not service.has_key('service_description'): continue\n\t\t\t\tif host['host_name'] in self._get_active_hosts(service):\n\t\t\t\t\tservice_list.append(service['service_description'])\n\t\t\tself.data['all_host'][index]['meta']['service_list'] = service_list\n\t\t\t\t\t\n\n\t\t\t## Increment count\n\t\t\tindex += 1\n\n\t\t## Loop through all hostgroups, appending them to their respective hosts\n\t\tfor hostgroup in self.data['all_hostgroup']:\n\n\t\t\tfor member in self._get_list(hostgroup,'members'):\n\t\t\t\tindex = 0\n\t\t\t\tfor host in self.data['all_host']:\n\t\t\t\t\tif not host.has_key('host_name'): continue\n\n\t\t\t\t\t## Skip members that do not match\n\t\t\t\t\tif host['host_name'] == member:\n\n\t\t\t\t\t\t## Create the meta var if it doesn' exist\n\t\t\t\t\t\tif not self.data['all_host'][index]['meta'].has_key('hostgroup_list'):\n\t\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'] = []\n\n\t\t\t\t\t\tif hostgroup['hostgroup_name'] not in self.data['all_host'][index]['meta']['hostgroup_list']:\n\t\t\t\t\t\t\tself.data['all_host'][index]['meta']['hostgroup_list'].append(hostgroup['hostgroup_name'])\n\n\t\t\t\t\t## Increment count\n\t\t\t\t\tindex += 1\n\n\t\t## Expand service membership\n\t\tindex = 0\n\t\tfor service in self.data['all_service']:\n\t\t\tservice_members = []\n\n\t\t\t## Find a list of hosts to negate from the final list\n\t\t\tself.data['all_service'][index]['meta']['service_members'] = self._get_active_hosts(service)\n\n\t\t\t## Increment count\n\t\t\tindex += 1", "def get_data_for_task_manager(data: dict) -> dict:\n if check_host(data['hostIp']):\n command = f\"ssh user@{data['hostIp']} -i ../id_rsa 'C:\\Setup\\{data['scriptName']}'\"\n dict_from_device = check_response_from_device(start_process_on_device(command))\n if dict_from_device[\"stringFromDevice\"] == \"correct\": \n dict_from_device[\"resultRequest\"] = True\n return dict_from_device\n return dict(resultRequest=False)", "def get_hosts_list(self, hosts_val, 
host_dict):\n first_entry = hosts_val[0]\n if \"include\" in first_entry: # check if hosts are group based\n devices_file_name = first_entry[\"include\"]\n if os.path.isfile(devices_file_name):\n lfile = devices_file_name\n else:\n lfile = os.path.join(\n expanduser(get_path(\"DEFAULT\", \"test_file_path\")), devices_file_name\n )\n login_file = open(lfile, \"r\")\n dev_file = yaml.load(login_file, Loader=yaml.FullLoader)\n gp = first_entry.get(\"group\", \"all\")\n\n dgroup = [i.strip().lower() for i in gp.split(\",\")]\n iter = 0 # initialize the counter from 0 to keep count of hosts\n for dgp in dev_file:\n if dgroup[0].lower() == \"all\" or dgp.lower() in dgroup:\n for val in dev_file[dgp]:\n hostname = list(val)[0]\n iter += 1\n if (\n val.get(hostname) is not None\n and hostname not in self.host_list\n ):\n self.host_list.append(hostname)\n host_dict[iter] = deepcopy(val.get(hostname))\n host_dict[iter][\"device\"] = hostname\n else:\n iter = -1 # iterator keeps count of number of hosts\n for host in hosts_val:\n iter += 1\n try:\n hostname = host[\"device\"]\n self.log_detail = {\"hostname\": hostname}\n except KeyError as ex:\n self.logger.error(\n colorama.Fore.RED + \"ERROR!! KeyError 'device' key not found\",\n extra=self.log_detail,\n )\n except Exception as ex:\n self.logger.error(\n colorama.Fore.RED + \"ERROR!! %s\" % ex, extra=self.log_detail\n )\n else:\n if hostname not in self.host_list:\n self.host_list.append(hostname)\n host_dict[iter] = deepcopy(host)", "def get_vidor_eval_data(\n scores,\n boxes,\n metadata,\n class_whitelist,\n verbose=False,\n video_idx_to_name=None,\n):\n\n out_scores = defaultdict(list)\n out_labels = defaultdict(list)\n out_boxes = defaultdict(list)\n count = 0\n for i in range(scores.shape[0]):\n video_idx = int(np.round(metadata[i][0]))\n sec = int(np.round(metadata[i][1]))\n\n video = video_idx_to_name[video_idx]\n\n key = video + \",\" + \"%04d\" % (sec)\n batch_box = boxes[i].tolist()\n # The first is batch idx.\n batch_box = [batch_box[j] for j in [0, 2, 1, 4, 3]]\n\n one_scores = scores[i].tolist()\n for cls_idx, score in enumerate(one_scores):\n if cls_idx + 1 in class_whitelist:\n out_scores[key].append(score)\n out_labels[key].append(cls_idx + 1)\n out_boxes[key].append(batch_box[1:])\n count += 1\n\n return out_boxes, out_labels, out_scores", "def qhost():\n command = '%s -xml -q' % QHOST_PATH\n result_xml = subprocess.check_output([command], env=ENV, shell=True)\n hosts_element = xml.etree.ElementTree.fromstring(result_xml)\n hosts = []\n for host_element in hosts_element:\n if host_element.get('name') == 'global':\n continue\n host = {\n 'name': host_element.get('name')\n }\n queues = {}\n for host_value in host_element:\n if host_value.tag == 'hostvalue':\n host[host_value.get('name')] = host_value.text\n elif host_value.tag == 'queue':\n queue_name = host_value.get('name')\n queue = {}\n for queue_value in host_value:\n queue[queue_value.get('name')] = queue_value.text\n queues[queue_name] = queue\n host['queues'] = queues\n hosts.append(host)\n return hosts", "def get_device_param(host_id):\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n device_list_param = []\n device_list_param = sqlalche_obj.session.query(\n Hosts.ip_address, Hosts.mac_address, Hosts.device_type_id, Hosts.config_profile_id).filter(Hosts.host_id == host_id).all()\n if device_list_param == None:\n device_list_param = []\n sqlalche_obj.sql_alchemy_db_connection_close()\n return device_list_param", "def get_host_interfaces(self, context, 
host_uuid):\n result = {}\n interfaces = self._get_cgtsclient().iinterface.list(host_uuid)\n for interface in interfaces:\n if interface.networktype != \"data\":\n continue\n providernets = interface.providernetworks\n result[interface.uuid] = {'uuid': interface.uuid,\n 'mtu': interface.imtu,\n 'vlans': '',\n 'network_type': interface.networktype,\n 'providernets': providernets}\n return result", "def get_hosts_info(self):\n result = []\n index = 0\n while index < self.host_numbers:\n host = self.get_generic_host_entry(index)\n result.append({\n 'ip': host['NewIPAddress'],\n 'name': host['NewHostName'],\n 'mac': host['NewMACAddress'],\n 'status': host['NewActive']})\n index += 1\n return result", "def get(self, **kwargs):\n if not any([i in kwargs for i in ('addr', 'address', 'host')]):\n raise TypeError('Expected addr, address, or host.')\n return self.dbget('host', kwargs)", "def extract_data(self):\n values = {}\n for injkey in self.data_sets.keys():\n values[injkey] = {}\n alldata = self.data_sets[injkey]\n paramkeys = alldata['params'].keys()\n for datakey in alldata.keys():\n if not datakey == 'params':\n values[injkey][datakey] = {}\n values[injkey][datakey]['metric_val'] = {}\n values[injkey][datakey]['metric_val']['vals'] = []\n for paramkey in paramkeys:\n values[injkey][datakey][paramkey] = {}\n values[injkey][datakey][paramkey]['vals'] = []\n trials = alldata[datakey]\n for trial_num in trials.keys():\n trial = trials[trial_num]\n values[injkey][datakey]['metric_val']['vals'] \\\n .append(trial['metric_val'])\n values[injkey][datakey]['metric_val']['type'] \\\n = trial['metric']\n values[injkey][datakey]['metric_val']['units'] \\\n = 'dimensionless'\n param_vals = trial['params']\n for param_name in param_vals.keys():\n val, units = self.parse_pint_string(\n pint_string=param_vals[param_name]\n )\n values[injkey][datakey][param_name]['vals'] \\\n .append(float(val))\n values[injkey][datakey][param_name]['units'] \\\n = units\n self.values = values", "def device_info(ipf: IPFabricClient, hostname: str, **api_options) -> Coroutine:\n filter_hostname = IPFabricClient.parse_filter(f\"hostname = {hostname}\")\n\n fut = asyncio.gather(\n ipf.fetch_devices(filters=filter_hostname, **api_options),\n ipf.fetch_device_parts(filters=filter_hostname, **api_options),\n fetch_device_interfaces(ipf, filters=filter_hostname, **api_options),\n fetch_device_vlans(ipf, filters=filter_hostname, **api_options),\n fetch_device_ipaddrs(ipf, filters=filter_hostname, **api_options),\n return_exceptions=True,\n )\n\n async def gather_result():\n res = await fut\n facts = res[0][0]\n return {\n \"hostname\": facts[\"hostname\"],\n \"facts\": facts,\n \"parts\": res[1],\n \"interfaces\": res[2],\n \"vlans\": res[3],\n \"ipaddrs\": res[4],\n }\n\n return gather_result()", "def get_host_config(self, hostid, **kwargs):\n return {}", "def create_host_requirement(self, host):\n required_image = host.get(\"image\") or self._get_image(host[\"os\"])\n return {\n \"name\": host[\"name\"],\n \"flavor\": self._get_flavor(host),\n \"image\": required_image,\n \"key_name\": self.config[\"keypair\"],\n \"network\": self._get_network_type(host),\n }", "def compute_node_get_by_host(context, host):\n session = get_session()\n with session.begin():\n service = session.query(models.Service).\\\n filter_by(host=host, binary=\"monitor-bmc\").first()\n node = session.query(models.ComputeNode).\\\n options(joinedload('service')).\\\n filter_by(deleted=False,service_id=service.id)\n return node.first()", "def run_serial(cls, 
hosts, command, user=None):\n results = {}\n for host in hosts:\n rcode, rout, rerr = cls.run(host, command, user)\n\n results[host] = (rcode, rout, rerr)\n\n return results", "def get_data(dataset):\n handle = dataset.open()\n data = dataset.get_data(handle, slice(0, dataset.num_examples))\n features = data[0]\n targets = data[1]\n dataset.close(handle)\n\n return features, targets", "def get_data(self, grp, class_type=\"NXdata\"):\n coll = [grp[name] for name in grp\n if isinstance(grp[name], h5py.Dataset) and\n self.get_attr(grp[name], \"NX_class\") == class_type]\n return coll", "def Params(cls):\n p = hyperparams.InstantiableParams(cls)\n p.Define('task_dict', None, 'dataset_name -> task params')\n p.Define('task_name', None, 'High level task name')\n p.Define('logdir', None, 'Log directory')\n p.Define('train_program', None, 'Train program params')\n p.Define('train_executions_per_eval', 1, '')\n p.Define('eval_programs', [], 'List of eval program params.')\n p.Define('num_splits_per_client', None, '')\n p.Define('dataset_names', [], 'List of all dataset names.')\n p.Define('emails', [], 'List of emails to send metrics.')\n p.Define('summary_exporter', None, 'The summary exporter Params.')\n p.Define('async_postprocess', True,\n 'whether to CPU postprocess asynchronously with TPU train')\n p.Define(\n 'checkpoint_to_load', None,\n 'If set, the program will initially load from this checkpoint, '\n 'ignoring train_dir. Typically used for oneoff decode.')\n\n # TODO(blee): Clean these up.\n p.Define('ml_perf', hyperparams.Params(), 'MlPerf configuration.')\n mlp = p.ml_perf\n mlp.Define('submission_metadata', None,\n 'A dictionary of static submission metadata')\n mlp.Define('benchmark_name', None, 'Benchmark name for compliance log.')\n mlp.Define('steps_per_epoch', None, 'Number of training steps per epoch.')\n mlp.Define('decoder_metric_name', None,\n 'Name of the decoder metric to report for compliance log.')\n mlp.Define('decoder_metric_success_threshold', None,\n 'Benchmark run must exceed this value to succeed.')\n mlp.Define('max_steps_to_train', None,\n 'Maximum number of steps to reach target accuracy')\n return p", "def get_host_info(self, args, get_all=False):\n return None", "def getAnsibleInfo(host):\n #First do a ping to get more results\n data = runAnsibleCommand(host.getID(), 'ping')\n if data[0]['status'] == 'UNREACHABLE!':\n return None\n #Get the actual data\n return runAnsibleCommand(host.getID(), 'setup')[0]['json']", "def host_list(self):\n try:\n scode, hosts = Rest.get('Host')\n except Exception as e:\n Console.error(e.message)\n return\n if len(hosts) == 0:\n print(\"No hosts exist\")\n return\n\n n = 1\n e = {}\n for host in hosts:\n d = {}\n d['Ip'] = str(host['Ip'])\n d['Name'] = str(host['Name'])\n d['Port'] = str(host['Port'])\n d['Swarmmode'] = str(host['Swarmmode'])\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Name', 'Port', 'Swarmmode'])))", "def process_param_infos(self, webhost_map):\n\n for path, param_node in self.param_infos.items():\n # find corresponding webhost_map entry by common path and\n # save it as \"cur_node\"\n cur_node = None\n for _, pages_node in webhost_map.items():\n for map_path, cur_node_tmp in pages_node.items():\n if map_path == path:\n cur_node = cur_node_tmp\n break\n if cur_node is not None:\n break\n\n # technically, cur_node should always be not 'None', b/c the path should\n # have been added by the call to 'process_crawled_urls'.\n # just in case it is 'None' skip the current param node to 
avoid errors\n if not cur_node is not None:\n continue\n\n # put the GET and POST parameters and cookies from the current param node\n # into the central web host map and avoid duplicates\n for ptype in (\"GET\", \"POST\", \"cookies\"):\n if ptype in param_node:\n if ptype in cur_node:\n cur_node[ptype] = list(set(list(param_node[ptype]) + cur_node[ptype]))\n else:\n cur_node[ptype] = list(set(param_node[ptype]))\n\n # unite instances of the current param node with the current web host map node\n if \"instances\" in param_node:\n # handle non-existent / empty instances node in aggregation webserver_map\n if \"instances\" not in cur_node:\n cur_node[\"instances\"] = []\n if not cur_node[\"instances\"]:\n cur_node[\"instances\"] = param_node[\"instances\"]\n continue\n\n for cur_instance in param_node[\"instances\"]:\n get_params = cur_instance.get(\"GET\", {})\n post_params = cur_instance.get(\"POST\", {})\n cookies = cur_instance.get(\"cookies\", {})\n\n # skip empty instances\n if (not get_params) and (not post_params) and (not cookies):\n continue\n if ((not any(val for val in get_params.values())) and\n (not any(val for val in post_params.values())) and\n (not any(val for val in cookies.values()))):\n continue\n\n # only add the current instance, if it is not a duplicate\n if not any((inst.get(\"GET\", {}) == get_params and\n inst.get(\"POST\", {}) == post_params and\n inst.get(\"cookies\", {}) == cookies)\n for inst in cur_node[\"instances\"]):\n cur_node[\"instances\"].append(cur_instance)", "def run(cls, host, command, user=None):\n '''\n if isinstance(hosts, str):\n ssh = cls._get_ssh_connection(hosts, user)\n\n\n results = {}\n for host in hosts:\n ssh = cls._get_ssh_connection(host, user)\n results[ssh] = \"result from %s on %s\" % (command, ssh)\n '''\n if not user:\n user = cls.user\n\n ctlpersist = ''\n if cls.use_controlpersist:\n ctlpersist = \" (cp)\"\n\n # output command\n cls.log.info(\"%s@%s%s: %s\" % (user, host, ctlpersist, command))\n # run the command\n ssh = cls._get_ssh_connection(host, user)\n if not ssh:\n cls.log.error(\"ERROR: No ssh connection\")\n return None\n\n p = ssh.popen(command)\n stdout, stderr = p.communicate()\n retcode = p.returncode\n\n # output command results\n identifier = \"%s@%s\" % (user, host)\n cls._log_results(identifier, retcode, stdout, stderr)\n\n return (retcode, stdout, stderr)", "def prepare_dataset(data, record_dp_func, use_tqdm=False):\n dataset = []\n prog_type_dict = {}\n\n data_keys = data.keys()\n if use_tqdm:\n data_keys = tqdm(data_keys)\n\n for program in data_keys:\n for clazz in data[program].values():\n for method_name, method in clazz.items():\n if method.get('return', None): # if the given method returns a value\n record_dp_func(dataset, prog_type_dict, program, method_name, method['return'])\n for param_name, param_hash in method['params'].items(): # for each parameter\n record_dp_func(dataset, prog_type_dict, program, param_name, param_hash)\n\n return dataset, prog_type_dict", "def get_nodes():\n\n host = str(request.args['host'])\n days = float(request.args['days'])\n\n to_time = int(time.time())\n to_day = int(time.strftime('%Y%m%d', time.gmtime(float(to_time))))\n from_time = to_time-int(days*24*60*60)\n from_day = int(time.strftime('%Y%m%d', time.gmtime(float(from_time))))\n day_in=''\n for x in range(from_day, to_day+1):\n day_in = day_in + ',' + str(x)\n day_in=re.sub(r\"^,\", \"\", day_in)\n day_in=re.sub(r\",$\", \"\", day_in)\n query = \"SELECT * FROM metrics WHERE host='\" + str(host) + \"' and date IN 
(\"\n query = query + str(day_in) + \") and time>=\" + str(int(int(from_time)*1000)) + \" and time<=\"\n query = query + str(int(int(to_time)*1000)) + \" ALLOW FILTERING\"\n rows = session.execute(query);\n reply={}\n last_value={}\n for r in rows:\n if str(r.host) not in reply:\n reply[r.host]={}\n last_value[r.host]={}\n if str(r.metric) not in reply[r.host]:\n reply[r.host][r.metric]=[]\n last_value[r.host][r.metric]=int(r.value)\n continue\n real_value = (r.value-last_value[r.host][r.metric])/60\n\tlast_value[r.host][r.metric]=int(r.value)\n reply[str(r.host)][r.metric].append({ 'value': int(real_value),\n 'time': str(r.time) })\n return json.dumps(reply)", "def get_bt_smarthub_data(self):\n import btsmarthub_devicelist\n\n data = btsmarthub_devicelist.get_devicelist(router_ip=self.host, only_active_devices=True)\n devices = {}\n for device in data:\n try:\n devices[device['UserHostName']] = {\n 'ip': device['IPAddress'],\n 'mac': device['PhysAddress'],\n 'host': device['UserHostName'],\n 'status': device['Active']\n }\n except (KeyError, 'no'):\n pass\n return devices", "def get_data(self):", "def create(cls, host, **kwargs):\n\n new = cls.default_create(host)\n for key, value in kwargs.items():\n setattr(new, key, value)\n\n return new", "def get_host_stats(self, refresh=False):", "def find_host_initiators_data(module, system, host, initiator_type):\n request = 'initiators?page=1&page_size=1000&host_id={0}'.format(host.id)\n #print(\"\\nrequest:\", request, \"initiator_type:\", initiator_type)\n get_initiators_result = system.api.get(request, check_version=False)\n result_code = get_initiators_result.status_code\n if result_code != 200:\n msg = 'get initiators REST call failed. code: {0}'.format(result_code)\n module.fail_json(msg=msg)\n\n # Only return initiators of the desired type.\n host_initiators_by_type = [initiator for initiator in get_initiators_result.get_result() \\\n if initiator['type'] == initiator_type]\n\n\n #print(\"host_initiators_by_type:\", host_initiators_by_type)\n #print()\n\n # Only include certain keys in the returned initiators\n if initiator_type == 'FC':\n include_key_list = ['address', 'address_long', 'host_id', 'port_key', 'targets', 'type']\n elif initiator_type == 'ISCSI':\n include_key_list = ['address', 'host_id', 'port_key', 'targets', 'type']\n else:\n msg = 'Cannot search for host initiator types other than FC and ISCSI'\n module.fail_json(msg=msg)\n host_initiators_by_type = edit_initiator_keys(host_initiators_by_type, include_key_list)\n\n return host_initiators_by_type", "def _get_host_properties(pulp_version):\n if pulp_version < Version('3'):\n return _get_v2_host_properties(pulp_version)\n return _get_v3_host_properties(pulp_version)", "def data(self, **kw):\n return dict(params=kw)", "def data(self, **kw):\n return dict(params=kw)", "def _get_td_bu_predictions_by_year(year_data, hosts,\n parasites, splitby, para_min, host_min):\n\n td_bu_predictions = {}\n year = year_data.year.unique()[0]\n\n for host in hosts:\n\n logging.info(\"Year {0}: Beginning host {1}\".format(year, host))\n td_bu_predictions[host] = {}\n\n sub_data = year_data[year_data.speciescode == host]\n\n for parasite in parasites:\n\n logging.info(\"Year {0}: Beginning parasite {1}\".format(year, parasite))\n\n # Get finite top-down model predictions\n top_down_pred_finite = \\\n agg.get_model_predictions_from_data(sub_data,\n parasite, splitby, para_min=para_min,\n host_min=host_min, model=\"top-down\", output=False,\n finite=True, heterogeneity=True)\n\n # Get infinite 
top-down model predictions\n top_down_pred_infinite = \\\n agg.get_model_predictions_from_data(sub_data,\n parasite, splitby, para_min=para_min,\n host_min=host_min, model=\"top-down\", output=False,\n finite=False, heterogeneity=True)\n\n\n td_bu_predictions[host][parasite] = {}\n\n # Save all results for each site in a pandas dataframe\n for site in top_down_pred_finite.viewkeys():\n\n td_bu_predictions[host][parasite][site] = \\\n pd.DataFrame(zip(\n top_down_pred_finite[site][2],\n top_down_pred_finite[site][1],\n top_down_pred_infinite[site][1],\n np.repeat(top_down_pred_finite[site][0], len(top_down_pred_finite[site][1])),\n np.repeat(top_down_pred_infinite[site][0], len(top_down_pred_finite[site][1]))),\n columns = ['observed',\n 'finite_nbd',\n 'nbd',\n 'finite_nbd_k',\n 'nbd_k'])\n\n\n logging.info(\"Completed year {0}\".format(year))\n return (year, td_bu_predictions)", "def _get_v3_host_properties(pulp_version):\n hostname = _get_hostname()\n api_role = _get_api_role(pulp_version)\n shell_role = _get_shell_role(hostname)\n return {\n 'hostname': hostname,\n 'roles': {\n 'api': api_role,\n 'pulp resource manager': {},\n 'pulp workers': {},\n 'redis': {},\n 'shell': shell_role,\n }\n }", "def get(self, host_name): # noqa\n\n response = get_host_membership(host_name)\n\n return response.__dict__, self.state_to_http[response.status]", "def _get_data(self):\n raise NotImplementedError()", "def metadata_get(node):\n\n metadata = dict()\n\n # get parameters common to all hosting providers or platforms\n params = ['hostname', 'domain', 'provider', 'role', 'repo']\n for item in params:\n metadata[item] = hiera_get('metadata:{0}'.format(item), 'fqdn={0}'.format(node))\n # logging.debug('metadata_get {0:<10} {1}'.format(item, metadata[item]))\n\n # build fqdn from hieradata\n metadata['fqdn'] = '{0}.{1}'.format(metadata['hostname'], metadata['domain'])\n\n # get parameters unique to a particular provider or platform\n if metadata['provider'] == 'aws':\n params = ['subnet', 'secgroup', 'keypair', 'ami', 'type', 'region']\n for item in params:\n metadata[item] = hiera_get('metadata:aws:{0}'.format(item), 'fqdn={0}'.format(node))\n # logging.debug('metadata_get {0:<10} {1}'.format(item, metadata[item]))\n\n return metadata", "def get_ports_services(host):\n services_per_host =[]\n for h in host:\n services = h.findAll(\"service\")\n for service in services:\n port_service = check_if_unicode(service['name'])\n # print port_service\n services_per_host.append(port_service)\n return services_per_host", "def collect_compute_info(self, ctxt, host_id, host_info):\n cctxt = self.client.prepare(server=DEFAULT_SERVER, timeout=RPC_TIMEOUT)\n cctxt.cast(ctxt, \"collect_compute_info\", host_id=host_id, host_info=host_info)", "def load_droplet_variables_for_host(self):\n host = int(self.args.host)\n droplet = self.manager.show_droplet(host)\n info = self.do_namespace(droplet)\n return {'droplet': info}", "def get_data(self):\n pass", "def get_data(self):\n pass", "def _basic_data_info(X, y):\n num_samples, num_feats = X.shape # start with X properties\n\n # Compute distribution\n classes, counts, percs = _class_distribution(y)\n num_classes = classes.size\n\n # Return data info dictionary\n output_dic = {\n \"Num_samples\": num_samples,\n \"Num_feats\": num_feats,\n \"Num_classes\": num_classes,\n \"classes\": classes,\n \"counts\": counts,\n \"percs\": percs\n }\n\n return output_dic", "def data(self) -> dict[str, Any]:\n raise NotImplementedError()", "def connect_with_host_data(self, host: Host):\n 
host_obj = self.content.load_host(host.instanceId)\n\n if host_obj.connectionString:\n print_light_grey('Found host data, trying to connect...')\n\n # Has a bounce host.\n if host_obj.connectionString.bounce_host:\n bounce_host = DiscoverHost(self.account_obj, bounce=True).get_bounce()\n\n if not DoConnectAndSave(host_obj, self.account_obj).bounce_regular_connect(bounce_host):\n sys.exit(0)\n else:\n if not DoConnectAndSave(host_obj, self.account_obj).regular_connect():\n sys.exit(0)\n\n print_orange('Found host data is obsolete, trying to find a new path...')\n\n raise HostNotFound" ]
[ "0.6268357", "0.5708121", "0.5704203", "0.5604825", "0.5533057", "0.5413477", "0.54038036", "0.53823394", "0.52971464", "0.5290275", "0.5289944", "0.52613753", "0.52593875", "0.5238726", "0.52148", "0.5184951", "0.5168947", "0.51548314", "0.51526666", "0.5109809", "0.5093862", "0.50382835", "0.5014954", "0.49998134", "0.4991427", "0.4991188", "0.49762976", "0.49720556", "0.4968718", "0.4963464", "0.49552602", "0.49387574", "0.49172834", "0.48960122", "0.48784944", "0.48736545", "0.4859125", "0.48582697", "0.4837807", "0.48333153", "0.48228517", "0.48140988", "0.48103395", "0.4787923", "0.4782888", "0.47547334", "0.4753311", "0.47474074", "0.4741776", "0.47413686", "0.4731514", "0.47256243", "0.4724915", "0.47243002", "0.47221804", "0.47174957", "0.46920347", "0.4690266", "0.46827275", "0.46793762", "0.46789318", "0.4678524", "0.46767148", "0.46684432", "0.46668187", "0.46546882", "0.46536943", "0.46514976", "0.4640548", "0.4638072", "0.46357217", "0.46338156", "0.46336165", "0.46296164", "0.46215075", "0.4615896", "0.46067795", "0.46048647", "0.46039802", "0.46005985", "0.45934492", "0.45917946", "0.4591078", "0.45862782", "0.45775995", "0.4574587", "0.4574587", "0.457296", "0.45709887", "0.45705736", "0.45495185", "0.45471954", "0.4542983", "0.45423418", "0.45363316", "0.45357147", "0.45357147", "0.45334002", "0.45318386", "0.45306256" ]
0.652414
0
Adds a node entry definition if there is no lower depth definition. Raises RuntimeError if the depth matches.
def add_entry(self, key, value, depth): current = self.entries.get(key, None) if current is None or current.depth > depth: self.entries[key] = NodeEntry(key, value, depth) elif current.depth == depth: raise RuntimeError('Collision [depth=%d] for entry [type=%s]: %s' % (depth, self.nodetype, key))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_entry(self, entry): # Hashmap.add_entry\n\n if entry.hexdigest in self.contentHash:\n self.contentHash[entry.hexdigest].append(entry)\n else:\n self.contentHash[entry.hexdigest] = [ entry ]\n\n if entry.depth < self.minDepth:\n self.minDepth = entry.depth", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def add_node(self, node):\r\n self.undeclared_nodes.append(node)", "def init_recursion_depth_entry(self):\n vcmd = (self.frame.register(self.validate_integer), '%P')\n # input validation clarification\n # https://stackoverflow.com/questions/4140437/interactively-validating-entry-widget-content-in-tkinter\n self.entries[\"ent_recursion_depth\"] = Entry(\n self.frame, width=2,\n validate='key', validatecommand=vcmd)\n self.labels[\"lbl_recursion_depth\"] = Label(\n self.frame, text=\"Recursion Depth (int)\")\n self.entries[\"ent_recursion_depth\"].grid(\n row=0, column=1, sticky=W, pady=(30, 0))\n self.labels[\"lbl_recursion_depth\"].grid(\n row=0, column=0, sticky=W, pady=(30, 0))", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError(\"root exists\")\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def _add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._root = self._Node(e)\n self._size = 1\n return self._root", "def _add(self, root, element, currentDepth):\n # When adding an element from the actual node, all elements less important\n # than the actual node are ALWAYS in the right branch, but the most importants\n # are on the left branch\n if root.data < element:\n if root.left == None:\n root.left = Node(element)\n if currentDepth > self.depth:\n self.depth = currentDepth\n return root.left\n else:\n # print \"Going to left branch at depth\", currentDepth\n return self._add(root.left, element, currentDepth + 1)\n else:\n if root.right == None:\n # print \"Adding new right leave\", element\n root.right = Node(element)\n if currentDepth > self.depth:\n self.depth = currentDepth\n return root.right\n else:\n # print \"Going to right branch at depth\", currentDepth\n return self._add(root.right, element, currentDepth + 1)", "def add_root(self, e):\n if self._root is not None:\n raise ValueError('Root exists')\n self._size = 1\n self._root = self._Node(e)\n return self._make_position(self._root)", "def add(self):\r\n value = int(self.value_entry.get())\r\n self.value_entry.delete(0, tk.END)\r\n self.value_entry.focus_force()\r\n\r\n self.root.add_node(value)\r\n self.draw_tree()", "def add_node(self, name):\n if not name in self._main_dictionary:\n self._main_dictionary[name] = set()", "def add_line_info(root_node):\n class AddLineNumbers(BottomUpVisitor):\n def __init__(self):\n BottomUpVisitor.__init__(self, strict_line_order=True, make_unique=True)\n def visit_one_node(self, node, lineno=None):\n# print(node, lineno, getattr(node, 'lineno', None))\n if not hasattr(node, 'lineno'):\n node.lineno = lineno\n else:\n if node.lineno != lineno:\n print(node, lineno, node.lineno)\n print(astor.dump(root_node))\n 
assert False\n BottomUpVisitor.visit_one_node(self, node, lineno)\n AddLineNumbers().visit(root_node)", "def add_entry(self, number: int, entry: Entry) -> None:\n raise NotImplementedError", "def add_edge(self, parent, child):\r\n if child not in self.undeclared_nodes:\r\n raise LookupError(\"Node does not exist in undeclared nodes\")\r\n tree_node_parent = self.find_node(parent)\r\n tree_node_child = TreeNode(child)\r\n tree_node_child.parent = tree_node_parent\r\n tree_node_parent.children.append(tree_node_child)\r\n self.undeclared_nodes.remove(child)", "def test_tree_two_nodes_left_has_depth_one(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1", "def add_node(self, node: Node) -> None:\n with scandir(node.path) as it:\n for entry in it:\n if entry.name.startswith('.') or entry.name.startswith('__'):\n continue\n if entry.is_dir():\n if len(node.children) > 50:\n pass\n else:\n node.children.append(Node(node, entry))\n else:\n node.files.append(entry)\n for child in node.children:\n self.add_node(child)\n if child.depth > self.depth:\n self.depth = child.depth", "def set_recursion_depth_entry(self, recursion_depth):\n self.entries[\"ent_recursion_depth\"].delete(0, END)\n self.entries[\"ent_recursion_depth\"].insert(\n 0, str(recursion_depth))", "def add_node(self, val):\n if val not in self:\n self.setdefault(val, {})", "def definition(self):\n\n if getattr(self, \"_definition_guard\", False):\n raise NodeDefinitionError(\"node definition has a circular dependency\")\n\n if not getattr(self, \"_traits_initialized_guard\", False):\n raise NodeDefinitionError(\"node is not yet fully initialized\")\n\n try:\n self._definition_guard = True\n\n nodes = []\n refs = []\n definitions = []\n\n def add_node(node):\n for ref, n in zip(refs, nodes):\n if node == n:\n return ref\n\n # get base definition\n d = node._base_definition\n\n if \"inputs\" in d:\n # sort and shallow copy\n d[\"inputs\"] = OrderedDict([(key, d[\"inputs\"][key]) for key in sorted(d[\"inputs\"].keys())])\n\n # replace nodes with references, adding nodes depth first\n for key, value in d[\"inputs\"].items():\n if isinstance(value, Node):\n d[\"inputs\"][key] = add_node(value)\n elif isinstance(value, (list, tuple, np.ndarray)):\n d[\"inputs\"][key] = [add_node(item) for item in value]\n elif isinstance(value, dict):\n d[\"inputs\"][key] = {k: add_node(v) for k, v in value.items()}\n else:\n raise TypeError(\"Invalid input '%s' of type '%s': %s\" % (key, type(value)))\n\n if \"attrs\" in d:\n # sort and shallow copy\n d[\"attrs\"] = OrderedDict([(key, d[\"attrs\"][key]) for key in sorted(d[\"attrs\"].keys())])\n\n # get base ref and then ensure it is unique\n ref = node.base_ref\n while ref in refs:\n if re.search(\"_[1-9][0-9]*$\", ref):\n ref, i = ref.rsplit(\"_\", 1)\n i = int(i)\n else:\n i = 0\n ref = \"%s_%d\" % (ref, i + 1)\n\n nodes.append(node)\n refs.append(ref)\n definitions.append(d)\n\n return ref\n\n # add top level node\n add_node(self)\n\n # finalize, verify serializable, and return\n definition = OrderedDict(zip(refs, definitions))\n definition[\"podpac_version\"] = podpac.__version__\n json.dumps(definition, cls=JSONEncoder)\n return definition\n\n finally:\n self._definition_guard = False", "def definition(self):\n\n if getattr(self, \"_definition_guard\", False):\n raise NodeDefinitionError(\"node definition has a circular dependency\")\n\n if not getattr(self, \"_traits_initialized_guard\", False):\n raise NodeDefinitionError(\"node is not yet fully initialized\")\n\n try:\n self._definition_guard = 
True\n\n nodes = []\n refs = []\n definitions = []\n\n def add_node(node):\n for ref, n in zip(refs, nodes):\n if node == n:\n return ref\n\n # get base definition\n d = node._base_definition\n\n if \"inputs\" in d:\n # sort and shallow copy\n d[\"inputs\"] = OrderedDict([(key, d[\"inputs\"][key]) for key in sorted(d[\"inputs\"].keys())])\n\n # replace nodes with references, adding nodes depth first\n for key, value in d[\"inputs\"].items():\n if isinstance(value, Node):\n d[\"inputs\"][key] = add_node(value)\n elif isinstance(value, (list, tuple, np.ndarray)):\n d[\"inputs\"][key] = [add_node(item) for item in value]\n elif isinstance(value, dict):\n d[\"inputs\"][key] = {k: add_node(v) for k, v in value.items()}\n else:\n raise TypeError(\"Invalid input '%s' of type '%s': %s\" % (key, type(value)))\n\n if \"attrs\" in d:\n # sort and shallow copy\n d[\"attrs\"] = OrderedDict([(key, d[\"attrs\"][key]) for key in sorted(d[\"attrs\"].keys())])\n\n # get base ref and then ensure it is unique\n ref = node.base_ref\n while ref in refs:\n if re.search(\"_[1-9][0-9]*$\", ref):\n ref, i = ref.rsplit(\"_\", 1)\n i = int(i)\n else:\n i = 0\n ref = \"%s_%d\" % (ref, i + 1)\n\n nodes.append(node)\n refs.append(ref)\n definitions.append(d)\n\n return ref\n\n # add top level node\n add_node(self)\n\n # finalize, verify serializable, and return\n definition = OrderedDict(zip(refs, definitions))\n definition[\"podpac_version\"] = podpac.__version__\n json.dumps(definition, cls=JSONEncoder)\n return definition\n\n finally:\n self._definition_guard = False", "def add_item_definition():\n nonlocal guid\n nonlocal guid_stack\n nonlocal tree\n\n current_leaf_add(guid, {}, tree, guid_stack)\n guid_stack.append(guid)\n guid += 1\n\n # Wrapping this current_leaf_add is defensive coding so we don't\n # crash on malformed glm files.\n if len(full_token) > 1:\n # Do we have a clock/object or else an embedded configuration\n # object?\n if len(full_token) < 4:\n # Add the item definition.\n current_leaf_add(full_token[0], full_token[-2], tree,\n guid_stack)\n elif len(full_token) == 4:\n # We likely have an embedded/nested object.\n current_leaf_add('omfEmbeddedConfigObject',\n full_token[0] + ' ' +\n list_to_string(full_token), tree,\n guid_stack)\n else:\n # Something is wrong.\n raise UserWarning('Malformed GridLAB-D model. 
Token: {}'\n .format(' '.join(full_token)))\n\n # All done.", "def add_entry(self, new_entry):\n existing_entry = self._entries.get(new_entry.key)\n if existing_entry is not None:\n existing_entry.add_menge(new_entry.get_menge())\n for occ in new_entry.occurrences:\n existing_entry.add_occurrence(occ)\n return existing_entry\n else:\n self._entries[new_entry.key] = new_entry\n self._order.append(new_entry.key)\n return None", "def add_node(self, new_node):\n current = self.root\n\n while True:\n\n if current is None:\n current = new_node\n return\n\n if new_node.data < current.data:\n current = current.left\n else:\n current = current.right", "def addTree(self, depth, fanout):\n isSwitch = depth > 0\n if isSwitch:\n node = self.addSwitch('s%s' % self.switchNum)\n self.switchNum += 1\n for _ in range(fanout):\n child = self.addTree(depth - 1, fanout)\n self.addLink(node, child)\n else:\n node = self.addHost('h%s' % self.hostNum)\n self.hostNum += 1\n return node", "def add_node(self, node):\n index = self._node_index.setdefault(node.ntype, dict())\n if node.ext_id not in index:\n index.setdefault(node.ext_id, node)\n self._type_list.setdefault(node.ntype, list()).append(node)", "def addroot(head, curchange):\n ellipsisroots[head].add(curchange)\n # Recursively split ellipsis heads with 3 roots by finding the\n # roots' youngest common descendant which is an elided merge commit.\n # That descendant takes 2 of the 3 roots as its own, and becomes a\n # root of the head.\n while len(ellipsisroots[head]) > 2:\n child, roots = splithead(head)\n splitroots(head, child, roots)\n head = child # Recurse in case we just added a 3rd root", "def add_node(self, n):\r\n keys = self.d.keys()\r\n #check for node in graph\r\n if n not in keys:\r\n self.d.update({str(n): set()})", "def add_node(self, node) -> None:\n\t\tnode.nested = True\n\t\tsuper(Node, self).add_node(node)", "def add_node(self, node) -> None:\n\t\tnode.nested = True\n\t\tsuper(__class__, self).add_node(node)", "def do_add_node(self, line=''):\n self.fibbing.add_node()", "def add_refdepth(self, d, symbol = None):\n if d is None: return # special case, for adding back initial ref_depth value, which can be None\n if self.ref_depth is None: # when ref_depth is still uninitialized, just use 'd'\n self.ref_depth = d\n else: # otherwise, use 'd' if it's lower than the current value\n self.ref_depth = min(self.ref_depth, d)\n #if DEBUG: print(' ', symbol or '', d, 'ref_depth =', self.ref_depth)", "def check_or_create_root(self):\n self.root = self.db_handler.get_or_create_indexed_node(\"root\", \n \"root_name\", \"ndn\", {\"component\":\"ndn\"})\n if not self.root:\n raise NoRootException(\"cannot locate root name (ndn)\")\n\n self.root.add_labels(LABEL_COMPONENT)", "def add_node(self, node):", "def _place_in_ancestor(self, node, active_node):\n parent = active_node\n while parent is not None:\n if parent.depth < node.depth:\n parent.add_child(node)\n \n return parent\n\n parent = parent.parent\n else:\n # This should never be reached because NemoRoot has a depth of -1\n raise NemoException('\\nIncorrect indentation\\n' + \\\n 'at:\\n\\t%s\\n' % active_node + \\\n 'Followed by:\\n\\t%s\\n' % node + \\\n 'Parent:\\n\\t%s' % parent )", "def __check_input__(self):\n # | - __check_input__\n tmp = set(self.tree_level_labels)\n input_diff = tmp.symmetric_difference(self.level_entries.keys())\n if not input_diff == set():\n undefined_labels = []\n for i in input_diff:\n undefined_labels.append(i)\n\n print(\"\\n\")\n message = \"Did not fill out level 
entries dict properly\" + \"\\n\"\n message += \"The following properties need to be defined\" + \"\\n\"\n message += str(undefined_labels)\n raise ValueError(message)\n # __|", "def add_left(self, n, e):\n if self.left(n) is not None:\n raise ValueError('Left child exists')\n newest = self._Node(e, n) # new node with n as parent\n n._left = newest\n self._size += 1\n return newest", "def depthFirstAddOne(node, notOkTree, isRoot=False):\n if not isRoot:\n e = notOkTree\n prev = e\n for label in node.heirarchy:\n label = cleanLabel(label)\n e = prev.find(label)\n if e is None:\n e = newElement(prev, label)\n prev = e\n addOne(e, 'transcripts')\n for c in node.children:\n depthFirstAddOne(c, notOkTree)", "def add_node(self, val):\n if val in self._g:\n raise ValueError('Node already exists.')\n self._g[val] = []", "def add_entry_unsafe ( self, hook ):\n if hook.event:\n self.get_subdir ( hook.event ) [hook.name] = hook\n else:\n raise AssertionError ( \"hook.event is not set.\" )\n return True", "def add_level_node(self, level, node):\n self.levels[level].append(node)", "def depth(self, create, depth, **kwargs): # pylint: disable=unused-argument\r\n # pylint: disable=no-member\r\n if depth == 0:\r\n self.load_item.side_effect = lambda x: LeafModuleFactory(descriptor_cls=HtmlDescriptor)\r\n else:\r\n self.load_item.side_effect = lambda x: ContainerModuleFactory(descriptor_cls=VerticalDescriptor, depth=depth - 1)", "def add_child(self, node):\n\n\t\tif Defaults == node.__class__:\n\t\t\tself.__defaults = node\n\t\telif Variables == node.__class__:\n\t\t\tself.__variables = node\n\t\telif Servers == node.__class__:\n\t\t\tself.__servers = node\n\t\telif FileSets == node.__class__:\n\t\t\tself.__filesets = node\n\t\telif Targets == node.__class__:\n\t\t\tself.__targets = node\n\t\telse:\n\t\t\traise DepFileParsingError()\n\n\t\treturn True", "def add(self, key):\n node, parent = Treap._find_node(key, self.root)\n if node:\n node.n += 1\n else:\n heap_id = self.rand.randrange(self.max_heap_id)\n node = Node(key, heap_id)\n if parent:\n node.parent = parent\n parent.child[node.key > parent.key] = node\n else:\n self.root = node\n\n self._prioritize(node)", "def _add_last_node(self):\n for entry in self._entry_nodes:\n last_node = CFGNode(CFGNodeType.END)\n last_node.set_func_owner(entry.get_func_name())\n self._add_last_node_visit(entry.get_func_first_node(), last_node, {})", "def add(self, node):\n if str(node.getPosition()) in self._history:\n # duplicate entry\n return\n self._history[str(node.getPosition())] = True\n self._insort(node)", "def check_add_child_node(data):\n\n # check nodeDisplay\n \"\"\"\n\n :rtype :\n \"\"\"\n if 'nodeDisplay' not in data:\n raise ValueError(\"No nodeDisplay in given node.\")\n\n # check nodeDescription\n if 'nodeDescription' not in data:\n raise ValueError(\"No nodeDescription in given node.\")\n\n # check nodeTags\n if 'nodeTags' not in data:\n data[\"nodeTags\"] = []\n\n # check nodeParents\n if 'nodeParents' not in data or len(data[\"nodeParents\"]) == 0:\n raise ValueError(\"No nodeParents in given node.\")\n else:\n parent = data[\"nodeParents\"][0]\n if '_id' not in parent:\n raise ValueError(\"Malformed node parent array: lack of parent node id \\\"_id\\\"\")\n else:\n parent_node = Nodes().retrieveById(parent[\"_id\"])\n if parent_node.status_code == 404:\n raise ValueError(\n \"Parent node information does not exist in database: parent _id=%s\" % parent[\"_id\"])\n else:\n return parent_node", "def _add_node(data, entry, flux, name=None, 
show_compound_img=False):\r\n\r\n entry_type = entry.type\r\n graphics = entry.graphics[0]\r\n\r\n node_name = name or graphics.name\r\n\r\n node_data = {'id': entry.id,\r\n 'name': node_name[:10] + '...',\r\n 'full_name': node_name,\r\n 'label': node_name,\r\n 'content': node_name,\r\n 'size': 5,\r\n 'x': graphics.x,\r\n 'y': graphics.y,\r\n 'flux': flux,\r\n 'cumflux': abs(flux)}\r\n\r\n # not applicable yet\r\n if entry_type == 'compound' and show_compound_img:\r\n node_data.update({\r\n 'type': 'rectangle',\r\n 'backgroundImage': ''.join([Kegg.BASE_URL, 'get/',\r\n entry.name[4:], '/image']),\r\n 'borderWidth': 0,\r\n\r\n })\r\n\r\n data['nodes'].append(node_data)", "def add_node(self, node, parent):\n if node not in self.map.edges:\n self.map.edges[node] = []\n if parent not in self.map.edges:\n self.map.edges[parent] = [node]\n else:\n self.map.edges[parent].append(node)", "def __post_init__(self) -> None:\n arity = self.root.arity\n length = len(self.children)\n if arity != length:\n raise ValueError(\n 'Incorrect number of child terms: '\n f'Expected {arity}, found {length}'\n )", "def _add_root(self, data):\n if self._root is not None:\n raise ValueError(\"Root exists\")\n self._size = 1\n self._root = self._Node(data)\n return self._make_position(self._root)", "def add_node(self, val):\n if val not in self:\n self.setdefault(val, [])", "def validate_graph(self, entry):\n check_fields(entry, ['name', 'nodes'])\n for node_name in entry['nodes']:\n node = entry['nodes'][node_name]\n if len(node) < 1 or 'components' not in node:\n raise exceptions.BadInputError(f\"invalid entry for {node_name}: {node}\")\n\n self.graphs.append(entry)", "def insert(self, key):\n if self.root is None:\n self.root = self.Node(key)\n else:\n self.root = self.root.insert(key)", "def _add_node(self, parent, model, relation, reverse, related_name,\n accessor_name, nullable, depth):\n # Reverse relationships\n if reverse and '+' in related_name:\n return\n\n node_hash = self._nodes.get(model, None)\n\n # don't add node if a path with a shorter depth exists. this is applied\n # after the correct join has been determined. generally if a route is\n # defined for relation, this will never be an issue since there would\n # only be one path available. 
if a route is not defined, the shorter\n # path will be found\n if not node_hash or node_hash['depth'] > depth:\n if node_hash:\n node_hash['parent'].remove_child(model)\n\n node = ModelTreeNode(model, parent, relation, reverse,\n related_name, accessor_name, nullable, depth)\n\n self._nodes[model] = {\n 'parent': parent,\n 'depth': depth,\n 'node': node,\n }\n\n node = self._find_relations(node, depth)\n parent.children.append(node)", "def _validate_enamldef(self, node, lexer):\n ident_names = set()\n\n def check_id(name, node):\n if name in ident_names:\n msg = \"redeclaration of identifier '%s'\"\n msg += \" (this will be an error in Enaml version 1.0)\"\n syntax_warning(msg % name, FakeToken(lexer, node.lineno))\n ident_names.add(name)\n\n # validate the identifiers\n ChildDef = enaml_ast.ChildDef\n TemplateInst = enaml_ast.TemplateInst\n stack = list(reversed(node.body))\n while stack:\n node = stack.pop()\n if isinstance(node, ChildDef):\n if node.identifier:\n check_id(node.identifier, node)\n stack.extend(reversed(node.body))\n elif isinstance(node, TemplateInst):\n idents = node.identifiers\n if idents is not None:\n for name in idents.names:\n check_id(name, idents)\n if idents.starname:\n check_id(idents.starname, idents)", "def addEntryPoint(self, address: ghidra.program.model.address.Address) -> None:\n ...", "def create_hierarchy(self):\n\t\tif self.level is not None:\n\t\t\treturn\n\t\t\n\t\tself.size = 0\n\t\tsubtype = self.subtype.type\n\t\tif subtype.level is None:\n\t\t\tif self.subtype.size == 0:\n\t\t\t\traise ParserException(\"Loop in the definition of '%s' and '%s' detected!\" % (self.name, self.subtype.name))\n\t\t\tsubtype.create_hierarchy()\n\t\t\n\t\tself.level = subtype.level + 1\n\t\tself.size = subtype.size", "def add_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n je.Editor(self.session, self.source.tbl, self.source)", "def addtree(self, dct) -> None:\n namelst = dct['name'].split('\\\\')\n # print('nlst {}'.format(namelst))\n n_n = self\n for curname in namelst:\n nextlevel = n_n.child_dct.get(curname, None)\n if nextlevel is None:\n nextlevel = n_n.child_dct[curname] = LocNode(curname)\n n_n = nextlevel\n n_n.setval(dct)", "def insert_definition(self, definition):\r\n self.definitions.insert(definition)", "def add_node(self, node):\n if node not in self.nodes:\n self.nodes.append(node)", "def addLevel(self):\n pass", "def insert(self, path_step, handler):\n if path_step not in self.children:\n self.children[path_step] = RouteTrieNode(handler)", "def add_node(self, n):\n self.node_dict.setdefault(n, OrderedDict())", "def add_function_entry(self, name=None):\n return self._build_op('function_entry', [], name=name)", "def insert(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n if self.contains(val):\n raise ValueError('Node already in tree.')\n new_node = Node(val)\n if self._size == 0:\n self._root = new_node\n self._max_depth = 1\n self._rbal = 1\n self._lbal = 1\n else:\n current_depth = 1\n current_node = self._root\n while val is not current_node._data:\n current_depth += 1\n if val < current_node._data:\n if current_node._lkid:\n current_node = current_node._lkid\n else:\n current_node._lkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n elif val > current_node._data:\n if current_node._rkid:\n current_node = current_node._rkid\n else:\n current_node._rkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n self._size += 1", "def 
depth(self, create, depth, **kwargs): # pylint: disable=unused-argument\r\n # pylint: disable=no-member\r\n if depth == 0:\r\n self.get_module.side_effect = lambda x: LeafModuleFactory(descriptor_cls=HtmlDescriptor)\r\n else:\r\n self.get_module.side_effect = lambda x: ContainerModuleFactory(descriptor_cls=VerticalDescriptor, depth=depth - 1)", "def add_node (self, node):\n raise NotImplementedError", "def from_definition(cls, definition):\n\n if \"podpac_version\" in definition and definition[\"podpac_version\"] != podpac.__version__:\n warnings.warn(\n \"node definition version mismatch \"\n \"(this node was created with podpac version '%s', \"\n \"but your current podpac version is '%s')\" % (definition[\"podpac_version\"], podpac.__version__)\n )\n\n if len(definition) == 0:\n raise ValueError(\"Invalid definition: definition cannot be empty.\")\n\n # parse node definitions in order\n nodes = OrderedDict()\n for name, d in definition.items():\n if name == \"podpac_version\":\n continue\n\n if \"node\" not in d:\n raise ValueError(\"Invalid definition for node '%s': 'node' property required\" % name)\n\n # get node class\n module_root = d.get(\"plugin\", \"podpac\")\n node_string = \"%s.%s\" % (module_root, d[\"node\"])\n module_name, node_name = node_string.rsplit(\".\", 1)\n try:\n module = importlib.import_module(module_name)\n except ImportError:\n raise ValueError(\"Invalid definition for node '%s': no module found '%s'\" % (name, module_name))\n try:\n node_class = getattr(module, node_name)\n except AttributeError:\n raise ValueError(\n \"Invalid definition for node '%s': class '%s' not found in module '%s'\"\n % (name, node_name, module_name)\n )\n\n # parse and configure kwargs\n kwargs = {}\n for k, v in d.get(\"attrs\", {}).items():\n kwargs[k] = v\n\n for k, v in d.get(\"inputs\", {}).items():\n kwargs[k] = _lookup_input(nodes, name, v)\n\n for k, v in d.get(\"lookup_attrs\", {}).items():\n kwargs[k] = _lookup_attr(nodes, name, v)\n\n if \"style\" in d:\n kwargs[\"style\"] = Style.from_definition(d[\"style\"])\n\n for k in d:\n if k not in [\"node\", \"inputs\", \"attrs\", \"lookup_attrs\", \"plugin\", \"style\"]:\n raise ValueError(\"Invalid definition for node '%s': unexpected property '%s'\" % (name, k))\n\n nodes[name] = node_class(**kwargs)\n\n return list(nodes.values())[-1]", "def check_for_root(self):\n if self.root is None:\n raise ValueError(\"root is NoneType\")", "def test_tree_two_nodes_right(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1", "def add_node(self, node):\n temp = self.head.post\n self.head.post = node\n node.pre = self.head\n node.post = temp\n temp.pre = node", "def _add_left(self, p, e):\n node = self._validate(p)\n if node._left is not None:\n raise ValueError('Left child exists')\n self._size += 1\n node._left = self._Node(e, node) # node is its parent\n return self._make_position(node._left)", "def add_ancestry_checker(name, function):\n ancestry_checker_registry[name] = AncestryChecker(function)", "def add_node(self, node):\n frame = self.stack[-1]\n curr_node, index, line = frame\n variants = self.get_variants()\n # adding to the end of the variant\n if len(line) == index + 1:\n line.append(node)\n # adding new variant\n elif variants:\n # check that node doesn't exist yet\n for variant in variants:\n if len(variant) and variant[0] == node:\n raise CursorError(\"Node already exists.\")\n variants.append([node])\n # forking the simple variant\n else:\n if line[index +1] == node:\n raise CursorError(\"Node already exists.\")\n variants = 
[]\n variants.append(line[index + 1:])\n variants.append([node])\n while len(line) > index + 1:\n line.pop()\n line.append(variants)", "def bounded_insert(self, time, tailnumber):\n if self.root is None: \n node = self.insert(time, tailnumber)\n return node\n\n if self.simple is False: \n conflict = self.find_conflict(time)\n if conflict is not None: \n new_time = conflict.key + self.wait_time\n self.bounded_insert(new_time, tailnumber)\n else: \n node = self.insert(time, tailnumber)\n return node \n else: \n conflict = self.find_conflict(time)\n if conflict is None: \n node = self.insert(time, tailnumber)", "def insert(self,node,key):\n position=self.find(node,key)\n if position.key==key:\n print(\"node already present\")\n elif position.key>key:\n n=Node(key)\n position.setLeftChild(n)\n n.setParent(position)\n print(n.getParent())\n else:\n n=Node(key)\n position.setRightChild(n)\n n.setParent(position)", "def make_root(sv, lig, fun, argtext):\r\n if not fun in sv.Object: # new function or dict\r\n nod=add_object(sv, fun) # create node for function root\r\n nod.isdefined=True # flag object as defined\r\n nod.isfunction=True # flag object as function\r\n nod.nature=Lst[:]\r\n\r\n # examine arguments to identify user-defined function or dict\r\n if detect_user_function(argtext):\r\n nod.isuserfunc=True # a user-defined function\r\n else: \r\n nod.isdict=True # a new dict\r\n\r\n # store arguments (first argument for a dict)\r\n nod.arguments=[argtext] \r\n \r\n else: # fun already exists\r\n # add arguments for a dict, not for a user-defined function \r\n nod=sv.Object[fun]\r\n if nod.isuserfunc:\r\n print(\"\\n\", Err_redef_name) # *** Error: Node is already defined *** \r\n print(lig)\r\n raise ReferenceError\r\n nod.arguments+=[argtext] # add arguments to existing dict\r", "def set_root(self):\n try:\n _check_call(_LIB.TreeliteTreeBuilderSetRootNode(\n self.tree.handle,\n ctypes.c_int(self.node_key)))\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node must be inserted before it can be a root')", "def add(self, value):\n try:\n if not self.root:\n self.root = Node(value)\n else:\n node = self.root\n while node:\n if node.value > value:\n if not node.left:\n node.left = Node(value)\n break\n node = node.left\n else:\n if not node.right:\n node.right = Node(value)\n break\n node = node.right\n except:\n print(\"something went wrong please try again\")", "def addChild(node):", "def _expand_unknown(self):\n# global unknown\n self.tree.item('unknown', open=False, \\\n values=[self._count_children('unknown'), ''])", "def test_tree_with_one_leaf_node_left_of_right_depth(balanced_3_nodes):\n balanced_3_nodes.insert(13)\n assert balanced_3_nodes.depth() == 2", "def insert(self, path_parts, handler):\n current_node = self.root\n for part in path_parts: # Traverse to the deepest node in the path\n current_node = current_node.children[part] # defaultdict auto adds it if it doesn't already exist\n current_node.handler = handler # Assign the leaf a handler", "def make_defined(sv, lines):\r\n for num, lig in enumerate(lines): # browse program lines \r\n if not lig.startswith(When) and not lig.startswith(Col): # neither condition nor value -> defined name\r\n\r\n # remove equivalence codes but keep track of them (not yet exploited)\r\n equiv=None \r\n if lig.startswith(Equal+Special): \r\n lig=lig[len(Equal+Special):] # remove equivalence code \r\n equiv=lig # flag object for equivalence\r\n lines[num]=lig # store abridged line\r\n\r\n # 
detect duplicate names \r\n if lig in sv.Object: \r\n print(\"\\n\", Err_redef_name) # *** Error: Node is already defined *** \r\n print(lig)\r\n raise ReferenceError\r\n \r\n # create nodes while preserving the order \r\n nod=add_object(sv, lig) # create object\r\n nod.isdefined=True # flag object as defined\r\n nod.equivalent=equiv # equivalence flag\r\n\r\n # detect functions and create root node. Save arguments\r\n here, argtext, last=findblock(lig) # find bracketed expression\r\n if argtext: # create root for function or dict\r\n make_root(sv, lig, lig[:here], argtext.strip(Space))\r\n\r\n # verify syntax\r\n verify_name_syntax(sv, lig, here, argtext, last)", "def __addToLevel(self, head, value):\n\n #if DEBUG: print('\\t__addToLevel({})'.format(value))\n\n cur = head\n \n if cur.next == None:\n output = self.__insert(cur,value)\n return output\n \n #cur = cur.next\n\n while cur:\n if cur.next == None or \\\n cur.val == value or\\\n cur.next.val > value:\n output = self.__insert(cur,value)\n #output = cur\n break\n cur = cur.next\n return output", "def test_add_znode(self):\n z = self.test_start_empty()\n self.test_start_one_value(z)", "def _add_left(self, p, e):\n node = self._validate(p)\n if node._left is not None:\n raise ValueError('Left child exists')\n self._size += 1\n node._left = self._Node(e, node) # node is its parent\n return self._make_position(node._left)", "def find_or_create(self, h, **kwargs):\n curr = self.root\n h_len, root_history_len = len(h), len(self.root.h)\n\n for step in range(root_history_len, h_len):\n curr = curr.get_child(h[step])\n if curr is None:\n return self.add(h, **kwargs)\n return curr", "def test_insert_no_parent(tree):\n with pytest.raises(ValueError):\n assert tree.insert(1)", "def delete_node_at_beginning(self):\n\t\tif self.root is None:\n\t\t\traise EmptyRootException(\"ERROR: No node available in list. 
Please insert node in list.\")\n\t\tcurrent_node = self.root\n\t\tself.root = current_node.next\n\t\tself.root.prev = None\n\t\tself.display_nodes()", "def insert(self, key, value=None):\n if key in self.nodes:\n return None\n else:\n new_node = Node(key, value)\n (self.nodes)[key] = new_node \n current = self.root\n last = current\n\n if current is None:\n self.root = self.nodes[key]\n self.root.height = 0\n return new_node\n\n while (current is not None):\n if new_node.key > current.key:\n last = current\n current = current.right\n if (current != None and current.left == None) or (current == self.root):\n current.height += 1\n else:\n last = current\n current = current.left\n if (current != None and current.left == None) or (current == self.root):\n current.height += 1\n\n if new_node.key > last.key:\n last.right = new_node\n new_node.parent = last\n else:\n last.left = new_node\n new_node.parent = last\n\n self.root.height = self.get_height_tree()\n return new_node", "def hasnode(self, uid):\n\n raise NotImplementedError", "def add_node_field(self,name,data,on_exists='fail'):\n if name in np.dtype(self.node_dtype).names:\n if on_exists == 'fail':\n raise GridException(\"Node field %s already exists\"%name)\n elif on_exists == 'pass':\n return\n elif on_exists == 'overwrite':\n self.nodes[name] = data\n else:\n self.nodes=recarray_add_fields(self.nodes,\n [(name,data)])\n self.node_dtype=self.nodes.dtype", "def insert(self, key, val=None):\n self.root = self._insert(self.root, key, val) # Returns root of resulting tree after insertion - update it\n self.n += 1", "def _insert_in_tree(self, k: str, current_node: str) -> int:\n dist_current_node = self.distance_function(\n self.hash_dict[k], self.dict_all[current_node].node_value\n )\n condition_insert_current_node_child = (\n not self.dict_all[current_node].children\n ) or (\n dist_current_node not in list(self.dict_all[current_node].children.values())\n )\n if condition_insert_current_node_child:\n self.dict_all[current_node].children[k] = dist_current_node\n self.dict_all[k] = BkTreeNode(\n k, self.hash_dict[k], parent_name=current_node\n )\n else:\n for i, val in self.dict_all[current_node].children.items():\n if val == dist_current_node:\n node_to_add_to = i\n break\n self._insert_in_tree(k, node_to_add_to)\n return 0", "def insert_node(self, data):\n\t\tif self.root is None:\n\t\t\tself.root = Node(data)\n\t\telse:\n\t\t\tcurrent_node = self.root\n\t\t\twhile current_node.next is not None:\n\t\t\t\tcurrent_node = current_node.next\n\t\t\tcurrent_node.next = Node(data, current_node)", "def testAppendDuplicateDecision(self):\n def append():\n self.node.append_child(self.color_decisions[0])\n\n append()\n\n self.assertEqual(\n [self.color_decisions[0]],\n self.node.color_decisions\n )\n\n append()\n\n self.assertEqual(\n [self.color_decisions[0]],\n self.node.color_decisions\n )\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n append\n )", "def insert(self, key, val):\n if self.root is None:\n self.root = self.Node(key, val)\n else:\n self.root.insert(key, val)" ]
[ "0.55656815", "0.5325866", "0.5322028", "0.52112687", "0.5193852", "0.5172305", "0.5172305", "0.51599175", "0.5064102", "0.49744448", "0.4960249", "0.49437156", "0.49024606", "0.48839802", "0.48804682", "0.4869079", "0.48573893", "0.48446208", "0.48397067", "0.48307618", "0.48307618", "0.48272246", "0.48117214", "0.4761298", "0.475108", "0.47383538", "0.47268257", "0.4723984", "0.47208777", "0.470479", "0.4696681", "0.4695395", "0.46753067", "0.4675088", "0.4662464", "0.46521318", "0.46365553", "0.46324438", "0.4629889", "0.46182495", "0.4614272", "0.4595318", "0.45871502", "0.45862728", "0.45787802", "0.45673868", "0.45667896", "0.4554823", "0.45513812", "0.45369452", "0.45355535", "0.45354313", "0.452937", "0.452826", "0.45177704", "0.45105758", "0.45055833", "0.44992524", "0.44966003", "0.44945747", "0.44918263", "0.44914678", "0.44836324", "0.4477104", "0.44717", "0.44663012", "0.44631064", "0.44609916", "0.44607875", "0.44574758", "0.4457128", "0.44489777", "0.4445995", "0.44385973", "0.44365272", "0.44321546", "0.44292483", "0.44218853", "0.441968", "0.4419419", "0.44116935", "0.44020364", "0.43964234", "0.43940368", "0.43926457", "0.43891197", "0.43787518", "0.4378591", "0.4378331", "0.43754914", "0.43651235", "0.43633434", "0.4361508", "0.4360727", "0.43563053", "0.43556133", "0.4353324", "0.43493733", "0.43472606", "0.43403503" ]
0.6691731
0
Adds all the entries in objs at the current depth.
def add_entries(self, objs, keyname, valuename, depth):
    add_entry = self.add_entry
    for obj in objs:
        key = getattr(obj, keyname, None)
        if key is None:
            continue
        value = getattr(obj, valuename, None)
        add_entry(key, value, depth)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addAll(self, objs):\n self.getSession().add_all(objs)\n self.commit() # paranoially\n return objs", "def add(self, fetchables, depth=1):\n if fetchables:\n if isinstance(fetchables, collections.Sequence):\n for fetchable in fetchables:\n self.add(fetchable, depth)\n else:\n log.debug(\"Adding to queue: %s (depth=%s)\", fetchables, depth)\n self.q.append((fetchables, depth))", "def populate_objects(self):\n\t\t\n\t\t# Don't populate if already done\n\t\tif self.objects:\n\t\t\treturn\n\t\t\n\t\tself.object_dirs = []\n\t\tdir_regex = re.compile(\"^[0-9a-f]{2}$\")\n\t\tfile_regex = re.compile(\"^[0-9a-f]{38}$\")\n\t\t\n\t\t# Get list of object dirs\n\t\tfor o_dir in os.listdir(self.objects_root):\n\t\t\to_dir_path = os.path.join(self.objects_root, o_dir)\n\t\t\tif re.match(dir_regex, o_dir) and os.path.isdir(o_dir_path):\n\t\t\t\t# Looks like an object dir so far\n\t\t\t\tself.object_dirs.append((o_dir, o_dir_path))\n\t\t\n\t\t# Get list of object files\n\t\tfor o_dir, o_dir_path in self.object_dirs:\n\t\t\tfor o_file in os.listdir(o_dir_path):\n\t\t\t\to_file_path = os.path.join(o_dir_path, o_file)\n\t\t\t\tif re.match(file_regex, o_file) and os.path.isfile(o_file_path):\n\t\t\t\t\t# Looks like an object file so far\n\t\t\t\t\tself.objects.append(\n\t\t\t\t\t\tGitLooseObject(\n\t\t\t\t\t\t\tid = o_dir + o_file,\n\t\t\t\t\t\t\tpath = o_file_path\n\t\t\t\t\t\t)\n\t\t\t\t\t)", "def add_all(self, objects):\n self.lock.acquire()\n self.__Session.add_all(objects)\n self.__Session.commit()\n self.lock.release()", "def addObjects(self):\n\n self.root = self.addRoot()\n vTemp = transform.getOffsetPosition(self.root, [0, 1, 0])\n self.top_loc = self.addLoc(\"top\", self.root, vTemp)\n centers = [self.root, self.top_loc]\n self.dispcrv = self.addDispCurve(\"crv\", centers)\n\n vTemp = transform.getOffsetPosition(self.root, [0, -1, 0])\n self.bottom_loc = self.addLoc(\"bottom\", self.root, vTemp)\n centers = [self.root, self.bottom_loc]\n self.dispcrv = self.addDispCurve(\"crv\", centers)\n\n vTemp = transform.getOffsetPosition(self.root, [1, 0, 0])\n self.ext_loc = self.addLoc(\"ext\", self.root, vTemp)\n centers = [self.root, self.ext_loc]\n self.dispcrv = self.addDispCurve(\"crv\", centers)\n\n vTemp = transform.getOffsetPosition(self.root, [-1, 0, 0])\n self.int_loc = self.addLoc(\"int\", self.root, vTemp)\n centers = [self.root, self.int_loc]\n self.dispcrv = self.addDispCurve(\"crv\", centers)", "def _add_all_to_tree(elms, trie):\n for elm in elms:\n tokens = tokenize(elm.name)\n for token in tokens:\n trie.add(token, elm)", "def _iter_add(self, root):\n stack = [root]\n while stack:\n nodes = stack.pop()\n for node in nodes:\n if node in self._members:\n continue\n self._members.add(node)\n\n if isinstance(node, tf.Tensor):\n stack.append((node.op,))\n elif isinstance(node, tf.Operation):\n stack.append(node.inputs)", "def add_scene_objects(self, obj_tid_catids):\n self._scene_objects.extend(obj_tid_catids)\n # for oid, scene_object in scene.objects.items():\n # if scene_object.label in ('book', 'wall', 'floor'):\n # self._ignored_cats.add(scene_object.label)\n # continue\n # try:\n # cat = TRANSLATIONS_CATEGORIES[scene_object.label]\n # except KeyError:\n # cat = scene_object.label\n #\n # try:\n # cat_id = CATEGORIES[cat]\n # self._scene_objects.append((scene_object, idx_t, cat_id))\n # except KeyError:\n # self._ignored_cats.add(cat)", "def addObjectsToGroup(self):\n\t\tmc.delete( self.objects, ch = True )\n\t\tmc.parent( self.objects, self.grp.name )\n\t\tmc.makeIdentity( self.objects, 
apply=True,t=1,r=1,s=1,n=2)\n\t\t#self.lockObjects()", "def addToTree(self, name, path, objtype, objs):\n\t\titem = None\n\t\timageSize = (16, 16)\n\t\til = wx.ImageList(imageSize[0], imageSize[1])\n\t\tfolderIndex = il.Add(wx.ArtProvider_GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, imageSize))\n\t\tfolderOpenIndex = il.Add(wx.ArtProvider_GetBitmap(wx.ART_FILE_OPEN, wx.ART_OTHER, imageSize))\n\t\tfileIndex = il.Add(wx.ArtProvider_GetBitmap(wx.ART_REPORT_VIEW, wx.ART_OTHER, imageSize))\n\n\t\t#if objtype in [\"lif\", \"lei\", \"txt\", \"ome.tif\"]:\n\t\t#\tpath = path + name\n\t\t\n\t\tfor i in range(0, len(objs)):\n\t\t\tif not path in self.items:\n\t\t\t\tself.items[path] = 1\n\t\t\telse:\n\t\t\t\tself.items[path] += 1\n\t\t\n\t\tfor i in objs:\n\t\t\tself.dataUnitToPath[i] = path\n\n\t\tif objtype == \"lsm\":\n\t\t\tif not self.lsmfiles:\n\t\t\t\tself.lsmfiles = self.tree.AppendItem(self.root, \"LSM files\")\n\t\t\t\tself.tree.SetPyData(self.lsmfiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.lsmfiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.lsmfiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\titem = self.lsmfiles\n\t\t\tself.tree.Expand(item)\n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\t\t\tself.tree.SetPyData(item, \"2\") \n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\n\t\telif objtype in [\"txt\",\"lei\"]:\n\t\t\tif not self.leicafiles:\n\t\t\t\tself.leicafiles = self.tree.AppendItem(self.root, \"Leica files\")\n\t\t\t\tself.tree.SetPyData(self.leicafiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.leicafiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.leicafiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded) \n\n\t\t\titem = self.leicafiles\n\t\t\tself.tree.Expand(item)\n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\t\t\t\n\t\t\tself.tree.SetPyData(item, \"2\")\n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\t\n\t\telif objtype == \"oif\":\n\t\t\tif not self.oiffiles:\n\t\t\t\tself.oiffiles = self.tree.AppendItem(self.root, \"Olympus files\")\n\t\t\t\tself.tree.SetPyData(self.oiffiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.oiffiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.oiffiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\titem = self.oiffiles\n\t\t\tself.tree.Expand(item)\n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\t\t\tself.tree.SetPyData(item, \"2\")\n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\t\n\t\telif objtype == \"pic\":\n\t\t\tif not self.bioradfiles:\n\t\t\t\tself.bioradfiles = self.tree.AppendItem(self.root, \"BioRad files\")\n\t\t\t\tself.tree.SetPyData(self.bioradfiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.bioradfiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.bioradfiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\titem = self.bioradfiles\n\t\t\tself.tree.Expand(item)\n\t\t\n\t\telif objtype == \"hdr\":\n\t\t\tif not self.interfilefiles:\n\t\t\t\tself.interfilefiles = self.tree.AppendItem(self.root, \"Interfile files\")\n\t\t\t\tself.tree.SetPyData(self.interfilefiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.interfilefiles, folderIndex, which = 
wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.interfilefiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\titem = self.interfilefiles\n\t\t\tself.tree.Expand(item)\n\t\t\t\n\t\telif objtype == \"bxd\":\n\t\t\tif not self.bxdfiles:\n\t\t\t\tself.bxdfiles = self.tree.AppendItem(self.root, \"BioImageXD files\")\n\t\t\t\tself.tree.SetPyData(self.bxdfiles, \"1\") \n\t\t\t\tself.tree.SetItemImage(self.bxdfiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.bxdfiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\t\titem = self.bxdfiles\n\t\t\tself.tree.Expand(item) \n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\n\t\t\tself.tree.SetPyData(item, \"2\") \n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\telif objtype == \"bxc\":\n\t\t\tif not self.bxdfiles:\n\t\t\t\tself.bxdfiles = self.tree.AppendItem(self.root, \"BioImageXD files\")\n\t\t\t\tself.tree.SetPyData(self.bxdfiles, \"1\") \n\t\t\t\tself.tree.SetItemImage(self.bxdfiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.bxdfiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\t\titem = self.bxdfiles\n\t\t\tself.tree.Expand(item)\n\t\t\t\n\t\telif objtype == \"lif\":\n\t\t\tif not self.liffiles:\n\t\t\t\tself.liffiles = self.tree.AppendItem(self.root, \"LIF files\")\n\t\t\t\tself.tree.SetPyData(self.liffiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.liffiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.liffiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\t\titem = self.liffiles\n\t\t\tself.tree.Expand(item)\n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\t\t\tself.tree.SetPyData(item, \"2\")\n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\t\t\t\n\t\telif objtype in [\"mrc\",\"st\"]:\n\t\t\tif not self.mrcfiles:\n\t\t\t\tself.mrcfiles = self.tree.AppendItem(self.root, \"MRC files\")\n\t\t\t\tself.tree.SetPyData(self.mrcfiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.mrcfiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.mrcfiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\t\titem = self.mrcfiles\n\t\t\tself.tree.Expand(item)\n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\t\t\tself.tree.SetPyData(item, \"2\")\n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\telif objtype == \"ome.tif\":\n\t\t\tif not self.ometiffiles:\n\t\t\t\tself.ometiffiles = self.tree.AppendItem(self.root, \"OME-TIFF files\")\n\t\t\t\tself.tree.SetPyData(self.ometiffiles, \"1\")\n\t\t\t\tself.tree.SetItemImage(self.ometiffiles, folderIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t\tself.tree.SetItemImage(self.ometiffiles, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\t\titem = self.ometiffiles\n\t\t\tself.tree.Expand(item)\n\t\t\titem = self.tree.AppendItem(item, name)\n\t\t\tself.tree.Expand(item)\n\t\t\tself.tree.SetPyData(item, \"2\")\n\t\t\tself.tree.SetItemImage(item, folderOpenIndex, which = wx.TreeItemIcon_Expanded)\n\n\t\tself.tree.Expand(item)\n\t\tselected = 0\n\t\tfor obj in objs:\n\t\t\tadded = self.tree.AppendItem(item, obj.getName())\n\t\t\t\t\n\t\t\tresampledims = obj.dataSource.getResampleDimensions()\n\t\t\tif resampledims and resampledims != (0, 0, 0):\n\t\t\t\tself.markRed([added], 
\"*\")\n\t\t\tself.tree.SetPyData(added, obj) \n\t\t\tself.tree.SetItemImage(added, fileIndex, which = wx.TreeItemIcon_Normal)\n\t\t\t#self.tree.SetItemImage(added,folderOpenIndex,which=wx.TreeItemIcon_Expanded)\n\t\t\tself.tree.EnsureVisible(added)\n\t\t\tself.dataUnitItems.append(added)\n\t\t\t\n\t\t\tif len(self.items.keys()) == 1 and not selected:\n\t\t\t\tself.tree.UnselectAll()\n\t\t\t\tself.tree.SelectItem(added, 1)\n\t\t\t\tselected = 1\n\t\t\t\tlib.messenger.send(None, \"tree_selection_changed\", obj)\n\t\t\t\n\t\tself.tree.Expand(self.root)\n\t\tconf = Configuration.getConfiguration()\n\t\tlst = self.items.keys()\n\t\tconf.setConfigItem(\"FileList\", \"General\", lst)\n\t\tconf.writeSettings()", "def add_objects(self, objects):\n requests = []\n for obj in objects:\n requests.append({\"action\": \"addObject\", \"body\": obj})\n request = {\"requests\": requests}\n return self.batch(request)", "def update(self):\n for object in reversed(self.addList):\n self.objects.append(object)\n self.addList.remove(object)\n\n for object in reversed(self.removeList):\n self.objects.remove(object)\n self.removeList.remove(object)\n\n self.objects = sorted(self.objects,key=priority)\n\n for object in self.objects:\n object.update()", "def _build_tree(self, root, obj):\n\n if obj is None:\n return\n\n for attr_name in obj.__class__.__ordered__:\n if attr_name.startswith('_'):\n continue\n\n attr = getattr(obj.__class__, attr_name)\n\n if isinstance(attr, XmlElementProperty):\n element = root.add_child(attr.name)\n self._build_tree(element, getattr(obj, attr_name))\n elif isinstance(attr, XmlAttributeProperty):\n value = getattr(obj, attr_name)\n if value is not None:\n root.add_attribute(attr.name, value)", "def _add_objects(self, object_list):\n\n object_types = set([t for _, t in object_list])\n if not object_types.issubset(self.types):\n # for debugging\n s = \"The types found in the problem file must be a subset of the types listed in the domain file\\n\"\n s += \"Domain types: %s\" % str(self.types) + \"\\n\"\n s += \"Problem types: %s\" % str(object_types)\n raise ValueError(s)\n\n for obj, t in object_list:\n self.objects.add(obj)\n\n if t not in self.type_to_obj:\n self.type_to_obj[t] = set([])\n self.type_to_obj[t].add(obj)\n\n self.obj_to_type[obj] = set([])\n k = t\n while k in self.parent_types:\n self.obj_to_type[obj].add(k)\n k = self.parent_types[k]", "def addtree(self, dct) -> None:\n namelst = dct['name'].split('\\\\')\n # print('nlst {}'.format(namelst))\n n_n = self\n for curname in namelst:\n nextlevel = n_n.child_dct.get(curname, None)\n if nextlevel is None:\n nextlevel = n_n.child_dct[curname] = LocNode(curname)\n n_n = nextlevel\n n_n.setval(dct)", "def union(self, *objects):\n roots = [self[x] for x in objects]\n # Find the heaviest root according to its weight.\n heaviest = max(roots, key=lambda r: self.weights[r])\n for r in roots:\n if r != heaviest:\n self.weights[heaviest] += self.weights[r]\n self.parents[r] = heaviest", "def fill_octree(self):\n if len(self.children) <= 0:\n self.generate_octants()\n for point in self.points:\n self.append_point(point)\n self.points = np.array([])", "def add_children(self, children: dict) -> None:\n for child in children:\n self.children[child.move] = child", "def __iadd__(self, obj):\n if not vedo.utils.is_sequence(obj):\n obj = [obj]\n for a in obj:\n if a:\n self.AddPart(a)\n return self", "def add_entries(self, *entries: Entry):\n for entry in entries:\n self.add_entry(entry)", "def get_children(obj):\n ret = obj.to_dict()\n if 
obj.children.all():\n ret.__setitem__('children',[get_children(j) for j in obj.children.all()])\n return ret", "def addAll(self, *args):\n pass", "def extend(self, objects: Iterable[Any]) -> None:\n from ..pane import panel\n new_objects = list(self)\n new_objects.extend(list(map(panel, objects)))\n self.objects = new_objects", "def _populate_terms(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n # Make parents and relationships references to the actual GO terms.\n for rec in self.values():\n # Given parent GO IDs, set parent GO Term objects\n rec.parents = set([self[goid] for goid in rec._parents])\n\n # For each parent GO Term object, add it's child GO Term to the children data member\n for parent_rec in rec.parents:\n parent_rec.children.add(rec)\n\n if has_relationship:\n self._populate_relationships(rec)", "def append(self, subnodes):\n if not hasattr(subnodes, \"__iter__\"):\n subnodes = [subnodes]\n\n for subnode in subnodes:\n try:\n if not issubclass(type(subnode), pyfdt.FdtNop):\n index = self.index(subnode.name)\n item = self.pop(index)\n else:\n item = None\n except ValueError:\n item = None\n\n if isinstance(item, pyfdt.FdtNode) and isinstance(\n subnode, pyfdt.FdtNode\n ):\n item.merge(subnode)\n subnode = item\n\n super().append(subnode)", "def expand(obj):\r\n if isinstance(obj, list):\r\n for i,o in enumerate(obj):\r\n obj[i] = expand(o)\r\n elif isinstance(obj, dict):\r\n if 'paging' in obj:\r\n current = obj\r\n i = 0\r\n while 'next' in current['paging']:\r\n i += 1\r\n logger.info('...{}'.format(i))\r\n current = GraphQuery.request_until_success(\r\n current['paging']['next']\r\n )\r\n obj['data'].extend(current['data'])\r\n return obj", "def add(self, item):\r\n self.root = self.recurse_add(self.root, item)", "def add_object(self, obj):\n\t\tself.objects.append(obj)", "def addAll(self,*args, **kwargs):\n pass", "def add_ents(self, ents: Iterable['Entity']) -> None:\n ents = list(ents)\n self.entities.extend(ents)\n for item in ents:\n self.by_class[item['classname'].casefold()].add(item)\n self.by_target[item['targetname', ''].casefold() or None].add(item)\n if 'nodeid' in item:\n try:\n node_id = int(item['nodeid'])\n except (TypeError, ValueError):\n pass\n else:\n item['nodeid'] = str(self.node_id.get_id(node_id))", "def add(self, *objects):\n for obj in objects:\n if isinstance(obj, Resource):\n self.resources.append(obj)\n if self.name == '':\n self.name = self.resources[0].name\n elif isinstance(obj, Context):\n self.contexts.add(obj)\n elif isinstance(obj, Summary):\n self.summary = obj\n elif isinstance(obj, Results):\n self.results = obj\n else:\n raise TypeError('cannot add type {0} to check'.format(\n type(obj)), obj)\n return self", "def merge(self, list):\n for n in list:\n self.add_child(n)", "def add_objects(self,\n objects: list) -> None:\n\n for obj in objects:\n if obj[\"type\"] not in SUPPORTED_OBJECTTYPES_FOR_ADDING:\n raise ValueError(f\"object has unknown type {obj['type']}.\"\n f\"Currently only 'wel' is supported.\")\n\n if obj[\"type\"] == \"wel\":\n lay = obj[\"position\"][\"lay\"][\"result\"]\n row = obj[\"position\"][\"row\"][\"result\"]\n col = obj[\"position\"][\"col\"][\"result\"]\n pumping_rates = [obj[\"flux\"][flux_period][\"result\"] for flux_period in obj[\"flux\"]]\n\n self.add_well(lay=lay, row=row, col=col, pumping_rates=pumping_rates)", "def add_operations_from(self, obj):\n\n for name in dir(obj):\n op = getattr(obj, name)\n if isinstance(op, Operation):\n 
self.add_operation(op)", "def _populate_attributes(self, obj, traverse_list=True):\n for key, value in obj.__dict__.items():\n if isinstance(value, dict):\n obj.__dict__[key] = self._reconstruct_object(value)\n elif isinstance(value, list):\n obj.__dict__[key] = [self._reconstruct_object(details) for details in value]\n if traverse_list:\n # Iterate through each season in the list of seasons\n for season in obj.__dict__[key]:\n self._populate_attributes(season, traverse_list=False)", "def add_sources(self, obj):\n page = self.context['request'].GET.get('page', 1)\n per_page = settings.PAGINATE_VIEW_FEATURE\n if self.context.get('include_child_pages'):\n # Paginate the full descendant tree\n child_queryset = self.get_all_descendants(obj, per_page)\n paginated_child_features = Paginator(child_queryset, per_page)\n obj.page_child_features = paginated_child_features.page(page)\n obj.child_features = obj.page_child_features.object_list\n else:\n # Jut the row-level descendants, but un-paginated\n child_queryset = self.get_row_descendants(obj)\n obj.child_features = list(child_queryset.all())\n\n # Load the remaining related instances\n reference_pks = set(obj.references.values_list('id', flat=True))\n support_pks = set(obj.supports.values_list('id', flat=True))\n for feature in obj.child_features:\n reference_pks.update(\n feature.references.values_list('id', flat=True))\n support_pks.update(feature.supports.values_list('id', flat=True))\n\n obj.all_references = list(CachedQueryset(\n Cache(), Reference.objects.all(), sorted(reference_pks)))\n obj.all_supports = list(CachedQueryset(\n Cache(), Support.objects.all(), sorted(support_pks)))\n\n section_pks = set()\n for reference in obj.all_references:\n section_pks.add(reference.section.pk)\n obj.all_sections = list(CachedQueryset(\n Cache(), Section.objects.all(), sorted(section_pks)))\n\n specification_pks = set()\n for section in obj.all_sections:\n specification_pks.add(section.specification.pk)\n obj.all_specs = list(CachedQueryset(\n Cache(), Specification.objects.all(), sorted(specification_pks)))\n\n maturity_pks = set()\n for specification in obj.all_specs:\n maturity_pks.add(specification.maturity.pk)\n obj.all_maturities = list(CachedQueryset(\n Cache(), Maturity.objects.all(), sorted(maturity_pks)))\n\n version_pks = set()\n for support in obj.all_supports:\n version_pks.add(support.version.pk)\n obj.all_versions = list(CachedQueryset(\n Cache(), Version.objects.all(), sorted(version_pks)))\n\n browser_pks = set()\n for version in obj.all_versions:\n browser_pks.add(version.browser.pk)\n obj.all_browsers = list(CachedQueryset(\n Cache(), Browser.objects.all(), sorted(browser_pks)))", "def addChildren(self, values):\r\n for i, value in enumerate(values):\r\n newScope = copy(self.scope)\r\n newScope.index = i\r\n setattr(newScope, self.entryName, value)\r\n self.componentsLoader.loadAll(self.tokens, scope=newScope, onto=self.widget)", "def add_objects_from_layer(self, layer):\n\n objects = layer.get_allowed_geometry()\n\n typ_plural = layer.path[1]\n typ_sofi = gs.plural_to_sofi[typ_plural]\n\n for obj in objects:\n\n # !! 
REFACTOR TO CALL PROGRAMATICALLY -> ELIMINATE CONDITIONALS !!\n\n if typ_plural in gs.point_elements:\n\n self.add_node(obj, typ_sofi, layer)\n\n if typ_plural in gs.line_elements:\n\n self.add_line_element(obj, typ_sofi, layer)\n\n if typ_plural in gs.spring_elements:\n\n self.add_spring_sn(obj, typ_sofi, layer) \n\n if typ_plural in gs.area_elements:\n\n self.add_area_element(obj, typ_sofi, layer) \n\n return self", "def deep_append_tags(self, tags):\n\n self.append_tags(tags)\n for child in self.all_children():\n child.append_tags(tags)", "def _addChildren(self, pid, chunks):\n if chunks[pid].type in [0, -1]:\n self._addEntity(pid, chunks)\n else:\n self._addPredicate(pid, chunks)", "def sync_all_children_to_redis(self):\n conn = get_redis_connection()\n key = GoogleMapsAddressComponent.get_redis_all_children_key(self.pk)\n # First, we make sure the key gets destroyed if it exists\n conn.delete(key)\n # Now we add the keys of the children to the list\n children = self.get_all_children_seq()\n for child in children:\n conn.lpush(key, child.pk)", "def append(self, tree):\n self.insert(len(self), tree)", "def loadObjectNodes(self):\n #print \"Trying to dynamically load objects from storage\"\n for name, module in self.object_modules.iteritems():\n #print \"Loading object names for object type: \" + name\n object_dir = os.path.join(self.save_location, name)\n #grab the object names from the filenames and use them to populate\n # the lists of objects\n if os.path.exists(object_dir) and os.listdir(object_dir) != []:\n self.objects[name] = [game_objects.ObjectUtilities.ObjectNode(self, partition(filename, '.')[0], module) for filename in os.listdir(object_dir)]\n self.objects[name].sort()\n\t else:\n\t\tself.objects[name] = []\n #print \"Object list:\"\n #for o in self.objects[name]:\n # print o\n #alert listeners to happy initialization\n self.sendODBEvent(ODBInitialize())", "def add(self, obj: model.IdentifiableArtefact):\n for field, field_info in direct_fields(self.__class__).items():\n # NB for some reason mypy complains here, but not in __contains__(), below\n if isinstance(\n obj, get_args(field_info.outer_type_)[1], # type: ignore [attr-defined]\n ):\n getattr(self, field)[obj.id] = obj\n return\n raise TypeError(type(obj))", "def add(obj):", "def SaveObjects(self):\n print \"Saving objects!\"\n for type, module in self.object_modules.iteritems():\n print \"Saving objects of type: %s\" % type\n for node in self.objects[type]:\n if node.modified:\n print \"\\tSaving %s - %s\" % (type, node.name)\n node.SaveObject()", "def add(self, obj: T) -> None:\n self._items.append(obj)\n self._size += 1", "def extend(self, panes: Iterable[Any]) -> None:\n new_objects, new_names = self._to_objects_and_names(panes)\n objects = list(self)\n objects.extend(new_objects)\n self._names.extend(new_names)\n self.objects = objects", "def update(self):\n map(lambda x: x.update(), self._children.values())", "def save_all(self, objects):\n self.session.add_all(objects)\n self.session.commit()", "def add_children(self, *args):\r\n self.children.extend(args)\r\n return self", "def postOrderMerge(self):\n\n seen = {}\n roots = self.roots()\n\n # Run the merge starting at each root. 
Note that seen\n # is presisted throughout.\n for root in roots:\n self.postOrderMergeHelper(root, seen)\n\n # Clean out any dead nodes\n self.clean()", "def add_to_space(self, *objects):\n for obj in objects:\n self.space.add(obj)\n if isinstance(obj, pm.Body):\n self.bodies.append(obj)\n elif isinstance(obj, pm.Shape):\n self.shapes.append(obj)\n elif isinstance(obj, pm.Constraint):\n pass\n else:\n raise TypeError(\n f\"don't know how to handle object '{obj}' of type \"\n f\"'{type(obj)}' in class '{type(self)}'\")", "def push_all(self, contacts):\n for ell in contacts:\n self.push(ell)", "def populate_cmdsets(job, cmdsets, depth):\n if len(cmdsets) < depth:\n cmdsets.append(set())\n cmdsets[depth-1].add(job.command)\n if len(job.dependencies) == 0:\n return cmdsets\n for j in job.dependencies:\n cmdsets = populate_cmdsets(j, cmdsets, depth+1)\n return cmdsets", "def add_sorted(self, val):\n if self.root is None:\n self.root = TreeNode(val)\n else:\n self._add_sorted(val, self.root)", "def merge_trees(data, new_data):\n for key, val in new_data.items():\n if isinstance(val, dict):\n if key not in data:\n data[key] = new_data[key]\n else:\n merge_trees(data[key], new_data[key])\n\n else:\n if key not in data:\n data[key] = val\n else:\n data[key] = data[key] + val", "def _fetchObjectChildren(self, obj, obj_path):\n obj_children = []\n path_strings = []\n tree_items = []\n\n is_attr_list = [False] * len(obj_children)\n\n # Object attributes\n # Needed to handle errors while getting object's attributes\n # Related with spyder-ide/spyder#6728 and spyder-ide/spyder#9959\n for attr_name in dir(obj):\n try:\n attr_value = getattr(obj, attr_name)\n obj_children.append((attr_name, attr_value))\n path_strings.append('{}.{}'.format(obj_path, attr_name)\n if obj_path else attr_name)\n is_attr_list.append(True)\n except Exception:\n # Attribute could not be get\n pass\n assert len(obj_children) == len(path_strings), \"sanity check\"\n\n for item, path_str, is_attr in zip(obj_children, path_strings,\n is_attr_list):\n name, child_obj = item\n tree_items.append(TreeItem(child_obj, name, path_str, is_attr))\n\n return tree_items", "def add(self, game_obj):\r\n self.game_objects_for_adding.append(game_obj)", "def add(self, obj):\n self._pkcache[obj.pk] = obj\n for ctype in obj._content_types:\n self._typecache[ctype][obj.pk] = True", "def _set_level_depth(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n\n def _init_level(rec):\n if rec.level is None:\n if rec.parents:\n rec.level = min(_init_level(rec) for rec in rec.parents) + 1\n else:\n rec.level = 0\n return rec.level\n\n def _init_depth(rec):\n if rec.depth is None:\n if rec.parents:\n rec.depth = max(_init_depth(rec) for rec in rec.parents) + 1\n else:\n rec.depth = 0\n return rec.depth\n\n def _init_reldepth(rec):\n if not hasattr(rec, 'reldepth'):\n up_terms = rec.get_goterms_upper()\n if up_terms:\n rec.reldepth = max(_init_reldepth(rec) for rec in up_terms) + 1\n else:\n rec.reldepth = 0\n return rec.reldepth\n\n for rec in self.values():\n\n # Add invert relationships\n if has_relationship:\n if rec.depth is None:\n _init_reldepth(rec)\n\n # print(\"BBBBBBBBBBB1\", rec.id, rec.relationship)\n #for (typedef, terms) in rec.relationship.items():\n # invert_typedef = self.typedefs[typedef].inverse_of\n # # print(\"BBBBBBBBBBB2 {} ({}) ({}) ({})\".format(\n # # rec.id, rec.relationship, typedef, invert_typedef))\n # if invert_typedef:\n # # Add inverted relationship\n # for term in terms:\n # if 
not hasattr(term, 'relationship'):\n # term.relationship = defaultdict(set)\n # term.relationship[invert_typedef].add(rec)\n # print(\"BBBBBBBBBBB3\", rec.id, rec.relationship)\n\n if rec.level is None:\n _init_level(rec)\n\n if rec.depth is None:\n _init_depth(rec)", "def merge_object(self, obj):\n for key, value in obj.lines.items():\n if key not in self.lines:\n self.lines[key] = value\n self.lines[key] = self.lines[key] + value", "def copy_children(self):\n\n # Create a group\n self.fileh.create_group('/', 'agroup')\n # Create several objects there\n for i in range(10):\n # Create a new array\n self.fileh.create_array('/agroup', 'array' + str(i), self.a1)\n # Excercise copy_children\n for i in range(self.nobjects):\n # Create another group for destination\n self.fileh.create_group('/', 'anothergroup' + str(i))\n # Copy children from /agroup to /anothergroup+i\n self.fileh.copy_children('/agroup', '/anothergroup' + str(i))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print((\"Time for Undo, Redo (copy_children):\", undo, \"s, \",\n redo, \"s\"))", "def append( self, obj ):\n self[obj.getType()] = obj\n obj.setParent( self.parent )\n return obj", "def _place_objs(self, (screen_width, screen_height)):\n for x_pos in xrange(0, screen_width, self.itter_width):\n self.objects.put(Grass((x_pos, 0), self.width, self.height))", "def all_objects(self):\n objs = OrderedDict()\n objs[self.name] = self\n for o in self._sub_objs:\n if not o.enabled:\n continue\n objs.update(o.all_objects())\n return objs", "def add_children(self, *args):\r\n self._children.extend(args)\r\n return self", "def _setup_children(self):\n\n for i in xrange(self._nhalos):\n self._halos[i+1].properties['children'] = []\n\n for i in xrange(self._nhalos):\n host = self._halos[i+1].properties.get('hostHalo', -2)\n if host > -1:\n try:\n self._halos[host+1].properties['children'].append(i+1)\n except KeyError:\n pass", "def get_objects(dirname):\n\n objects = os.listdir(dirname)\n temp_map = []\n\n for obj in objects:\n\n fpath = os.path.join(dirname, obj)\n\n if fpath[0:2] == \"./\":\n fpath = fpath[2:]\n\n # if the object is a file, store it as a file\n if os.path.isfile(fpath):\n\n temp_map.append({\"name\": fpath,\n \"is_file\": True,\n \"children\": []})\n\n # else, assume the object is a directory\n else:\n\n children_map = get_objects(fpath)\n temp_map.append({\"name\": fpath,\n \"is_file\": False,\n \"children\": children_map})\n\n return temp_map", "def append(self, obj: Any) -> None:\n from ..pane import panel\n new_objects = list(self)\n new_objects.append(panel(obj))\n self.objects = new_objects", "def add_object(self, obj):\n self._objects.append(obj)", "def _class_list(parent, section, objects, refs):\n\n sec = etree.SubElement(parent, section, count=str(len(objects)))\n\n for cls, objs in _class_count(objects):\n obj = etree.SubElement(sec, \"Object\", type=cls, count=str(len(objs)))\n if refs:\n _class_list(obj, \"Referrers\", gc.get_referrers(*objs), False)", "def _addVisitor(self, type_obj, visitor_list):\n for v in visitor_list:\n type_obj.addVisitor(v)", "def make_tree(self, l):\n\t\tfor el in 
l:\n\t\t\tself.insert(el)", "def _setup_children(self):\n\n for i in xrange(self._nhalos):\n self._halos[i + 1].properties['children'] = []\n\n for i in xrange(self._nhalos):\n host = self._halos[i + 1].properties.get('hostHalo', -2)\n if host > -1:\n try:\n self._halos[host + 1].properties['children'].append(i + 1)\n except KeyError:\n pass", "def add(self, obj):\n self.objects.append(obj)\n if obj.gravity == 0:\n obj.gravity = self.gravity\n if obj.gravity_z == 0:\n obj.gravity_z = self.gravity_z", "def add_tags_recursive(self, tags2add: List[str]) -> None:\n self.tags += tags2add\n for data in self._child_data.values():\n data.add_tags_recursive(tags2add)", "def add_children(self, dep_var, parents):\n for parent in parents:\n if dep_var not in self.variables[parent].children:\n self.variables[parent].children.append(dep_var)", "def increment_depth(self):\r\n self.depth = self.depth + 1", "def append(self,object):\n if isinstance(object,Portfolio):\n self.portfolios_set.append(object)\n for fund in object.funds:\n self.funds_set.append(fund)\n else:\n self.funds_set.append(object)", "def _refresh_registry(cls) -> None:\n cls.objects_dict.clear()\n\n # Add new object instances to the registry.\n for name, clazz in inspect.getmembers(\n objects, predicate=inspect.isclass):\n if name == 'BaseObject':\n continue\n\n ancestor_names = [\n base_class.__name__ for base_class in inspect.getmro(clazz)]\n\n assert 'BaseObject' in ancestor_names\n cls.objects_dict[clazz.__name__] = clazz", "def add(self, *paths):\n\n for path in paths:\n self._add_one(path)", "def push(self,obj):\n self.head = Node(obj,0,self.head)", "def append(self, obj):\r\n self.record_count += 1\r\n \r\n if type(obj) == dict:\r\n self._probe_record(obj)\r\n else:\r\n self._probe_row(obj)", "def add_all(self, *values):\n for value in values:\n self.add(value)", "def add(args):\n files = []\n for path in args.files:\n if os.path.isdir(path):\n ft = filetree(path)\n files.extend(ft.filelist())\n else:\n files.append(path)\n for path in files:\n path = os.path.normpath(os.path.relpath(path, args.base))\n if path not in args.cache:\n args.cache.append(path)\n args.update = True\n return", "def merge_objects(self, mujoco_objects):\n self.n_objects = len(mujoco_objects)\n self.mujoco_objects = mujoco_objects\n self.objects = [] # xml manifestation\n self.max_horizontal_radius = 0\n for obj_name, obj_mjcf in mujoco_objects.items():\n self.merge_asset(obj_mjcf)\n # Load object\n obj = obj_mjcf.get_collision(name=obj_name, site=True)\n obj.append(new_joint(name=obj_name, type=\"free\", damping=\"0.0005\"))\n self.objects.append(obj)\n self.worldbody.append(obj)\n\n self.max_horizontal_radius = max(\n self.max_horizontal_radius, obj_mjcf.get_horizontal_radius()\n )", "def make_recursive(obj):\n if isinstance(obj, list):\n for i, l in enumerate(obj):\n obj[i] = AttrDict.make_recursive(l)\n elif isinstance(obj, dict):\n for k, v in obj.items():\n obj[k] = AttrDict.make_recursive(v)\n return AttrDict(obj)\n return obj", "def append(self, obj):\r\n raise NotImplementedError", "def get_nested_structs(*objs):\n\n deps = []\n for obj in objs:\n if isinstance(obj, schema.File):\n deps.extend(get_nested_structs(*obj.struct_list))\n deps.extend(get_nested_structs(*obj.typespace_list))\n deps.extend(get_nested_structs(*obj.trait_list))\n elif isinstance(obj, schema.SchemaObjectList):\n deps.extend(get_nested_structs(*obj))\n elif isinstance(obj, schema.Schema):\n deps.extend(get_nested_structs(*obj.vendor_list))\n elif isinstance(obj, 
schema.Vendor):\n deps.extend(get_nested_structs(*obj.struct_list))\n deps.extend(get_nested_structs(*obj.typespace_list))\n deps.extend(get_nested_structs(*obj.trait_list))\n elif isinstance(obj, schema.Trait):\n deps.extend(get_nested_structs(*obj.struct_list))\n elif isinstance(obj, schema.Typespace):\n deps.extend(get_nested_structs(*obj.struct_list))\n elif isinstance(obj, schema.Struct):\n deps.append(obj)\n return deps", "def reindexObject( self, object, idxs=[], recursive=None ):\n obs = [ object ]\n if recursive:\n obs.extend( getContainedObjects(object) )\n for ob in obs:\n #portal_log( self, 'CatalogTool', 'reindexObject', 'path', ( ob.physical_path(), idxs ) )\n _CatalogTool.reindexObject( self, self.wrapOb(ob), idxs )", "def add_node(self, node: Node) -> None:\n with scandir(node.path) as it:\n for entry in it:\n if entry.name.startswith('.') or entry.name.startswith('__'):\n continue\n if entry.is_dir():\n if len(node.children) > 50:\n pass\n else:\n node.children.append(Node(node, entry))\n else:\n node.files.append(entry)\n for child in node.children:\n self.add_node(child)\n if child.depth > self.depth:\n self.depth = child.depth", "def entries(self, data, depth=0):\n return self._entries(self.map(data))", "def _add_rooms(self):\r\n rooms = self.model.get_all_rooms()\r\n\r\n for room in rooms:\r\n self._add_room(room)", "def find_nearby_nodes_bf_graph(self, objs, dep_limit = 2, output = {}, obj_limit = 100):\n\t\tself.layers+=1\n\t\tif self.layers > dep_limit or len(objs) == 0:\n\t\t\tself.root_logger.info('LAYER ' + str(self.layers - 1) + ' DONE\\n')\n\t\t\tif len(objs) == 0:\n\t\t\t\tself.root_logger.info('ALL CONNECTED OBJECTS FOUND')\n\t\t\telse:\n\t\t\t\tself.layers -= 1\n\t\t\treturn output\n\t\t\n\t\tif (self.layers == 1):\n\t\t\toutput = {}\n\t\t\ti = self.get_node_info(objs[0].split()[1], objs[0].split()[0])\n\t\t\toutput[0] = {'pointers_from': [], 'type': objs[0].split()[0], 'id': objs[0].split()[1], 'name': i[0], 'status': i[1], 'deleted': i[2], 'type_full': i[3]}\n\t\t\tself.existing_nodes[objs[0]] = 0\n\t\telse:\n\t\t\tself.root_logger.info('LAYER ' + str(self.layers - 1) + ' DONE. 
SEARCHING LAYER ' + str(self.layers) + '...\\n')\n\t\tworking_objects = []\n\t\tfor obj in objs:\n\t\t\tcurrent = self.existing_nodes[obj]\n\t\t\tsuccess_counter = len(output) - 1 - current # used to keep track of next available index\t\n\t\t\tparts = obj.split() # each obj in the list stored as \"objecttype objectid\" (delimited by a space)\n\t\t\tfor obj_type in self.pointers_to[obj.split()[0]]: # adding parent nodes that the current object points to\n\t\t\t\tif obj_type == 'order_' or obj_type == 'user_': #since ids stored under user_id and order_id fields\n\t\t\t\t\tsql_query = \"SELECT obj->>'\" + obj_type + \"id' FROM \" + parts[0] + \" WHERE obj->>'id'='\" + parts[1] + \"'\"\n\t\t\t\telse:\n\t\t\t\t\tsql_query = \"SELECT obj->>'\" + obj_type + \"_id' FROM \" + parts[0] + \" WHERE obj->>'id'='\" + parts[1] + \"'\"\n\n\t\t\t\tself.queries[sql_query] = True\n\t\t\t\tself.cur.execute(sql_query)\n\n\t\t\t\tresult = self.cur.fetchall()\n\t\t\t\t\n\n\t\t\t\tif len(result) != 0 and result[0][0] != None and (obj_type + \" \" + result[0][0]) in self.existing_nodes:\n\t\t\t\t\tif self.existing_nodes[obj] not in output[self.existing_nodes[obj_type + \" \" + result[0][0]]]['pointers_from']:\n\t\t\t\t\t\toutput[self.existing_nodes[obj_type + \" \" + result[0][0]]]['pointers_from'].append(self.existing_nodes[obj])\n\t\t\t\telif len(result) != 0 and result[0][0] != None:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tinfo = self.get_node_info(result[0][0], obj_type, pointer = obj)\n\t\t\t\t\t\tworking_objects.append(obj_type + \" \" + result[0][0])\n\t\t\t\t\t\tsuccess_counter += 1\n\t\t\t\t\t\tself.existing_nodes[working_objects[-1]] = current + success_counter\n\t\t\t\t\t\tif (current + success_counter) in output:\n\t\t\t\t\t\t\toutput[current + success_counter]['pointers_from'].append(self.existing_nodes[obj])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\toutput[current + success_counter] = {'pointers_from': [self.existing_nodes[obj]], 'id': result[0][0], 'type': obj_type, 'name': info[0], 'status': info[1], 'deleted': info[2], 'type_full': info[3]}\n\n\t\t\t\t\t\tif len(self.existing_nodes) >= obj_limit:\n\t\t\t\t\t\t\tself.root_logger.info(\"OBJECT LIMIT REACHED\")\n\t\t\t\t\t\t\treturn output\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tpass\n\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif obj_type == 'user_' or obj_type == 'order_':\n\t\t\t\t\t\t\tsql_query = \"SELECT obj->>'\" + obj_type + \"ids' FROM \" + parts[0] + \" WHERE obj->>'id'='\" + parts[1] + \"'\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsql_query = \"SELECT obj->>'\" + obj_type + \"_ids' FROM \" + parts[0] + \" WHERE obj->>'id'='\" + parts[1] + \"'\"\n\t\t\t\t\t\tself.cur.execute(sql_query)\n\t\t\t\t\t\t\n\t\t\t\t\t\tresults = json.loads(self.cur.fetchall()[0][0]).keys()\n\t\t\t\t\t\tself.queries[sql_query] = True\n\t\t\t\t\t\tfor r in results:\n\t\t\t\t\t\t\tif (obj_type + \" \" + r) in self.existing_nodes:\n\t\t\t\t\t\t\t\tif self.existing_nodes[obj] not in output[self.existing_nodes[obj_type + \" \" + r]]['pointers_from']:\n\t\t\t\t\t\t\t\t\toutput[self.existing_nodes[obj_type + \" \" + r]]['pointers_from'].append(self.existing_nodes[obj])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tinformation = self.get_node_info(r, obj_type, pointer = obj)\n\t\t\t\t\t\t\t\tworking_objects.append(obj_type + \" \" + r)\n\t\t\t\t\t\t\t\tsuccess_counter += 1\n\t\t\t\t\t\t\t\tself.existing_nodes[working_objects[-1]] = current + success_counter\n\t\t\t\t\t\t\t\tif (current + success_counter) in output:\n\t\t\t\t\t\t\t\t\toutput[self.existing_nodes[obj_type + \" \" + 
r]]['pointers_from'].append(self.existing_nodes[obj])\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\toutput[current + success_counter] = {'pointers_from': [self.existing_nodes[obj]], 'id': r, 'type': obj_type, 'name': information[0], 'status': information[1], 'deleted': information[2], 'type_full': information[3]}\n\t\t\t\t\t\t\t\tif len(self.existing_nodes) >= obj_limit:\n\t\t\t\t\t\t\t\t\tself.root_logger.info(\"OBJECT LIMIT REACHED\")\n\t\t\t\t\t\t\t\t\treturn output\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tpass\n\t\t\ttry: #adding child nodes that point to the object\n\t\t\t\tfor obj_type in self.pointed_to_by[obj.split()[0]]:\n\t\t\t\t\tif parts[0] == 'user_' or parts[0] == 'order_':\n\t\t\t\t\t\tsql_query = \"SELECT obj->>'id', obj->>'name', obj->>'status', obj->>'deleted', obj->>'type_full' FROM \" + obj_type + \" WHERE obj->>'\" + parts[0] + \"id'='\" + parts[1] + \"'\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tsql_query = \"SELECT obj->>'id', obj->>'name', obj->>'status', obj->>'deleted', obj->>'type_full' FROM \" + obj_type + \" WHERE obj->>'\" + parts[0] + \"_id'='\" + parts[1] + \"'\"\n\t\t\t\t\t\n\t\t\t\t\tself.cur.execute(sql_query)\n\t\t\t\t\tresults = self.cur.fetchall()\n\t\t\t\t\tself.queries[sql_query] = True\n\n\t\t\t\t\tfor r in results:\n\t\t\t\t\t\tif r[0] != None:\n\t\t\t\t\t\t\tif (obj_type + \" \" + r[0]) in self.existing_nodes:\n\t\t\t\t\t\t\t\tif self.existing_nodes[obj_type + \" \" + r[0]] not in output[current]['pointers_from']:\n\t\t\t\t\t\t\t\t\toutput[current]['pointers_from'].append(self.existing_nodes[obj_type + \" \" + r[0]])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tworking_objects.append(obj_type + \" \" + r[0])\n\t\t\t\t\t\t\t\tsuccess_counter += 1\n\t\t\t\t\t\t\t\tself.existing_nodes[working_objects[-1]] = current + success_counter\n\t\t\t\t\t\t\t\toutput[current]['pointers_from'].append(current + success_counter)\n\t\t\t\t\t\t\t\tif (current + success_counter) not in output:\n\t\t\t\t\t\t\t\t\toutput[current + success_counter] = {'pointers_from': [], 'id': r[0], 'type': obj_type, 'name': r[1], 'status': r[2], 'deleted': r[3], 'type_full': r[4]}\n\t\t\t\t\t\t\t\tif len(self.existing_nodes) >= obj_limit:\n\t\t\t\t\t\t\t\t\tself.root_logger.info(\"OBJECT LIMIT REACHED\")\n\t\t\t\t\t\t\t\t\treturn output\n\n\t\t\texcept Exception as e:\n\t\t\t\tpass\n\n\t\treturn self.find_nearby_nodes_bf_graph(working_objects, dep_limit, output, obj_limit)", "def add(self, obj: object) -> None:\n self._contains.append(obj)", "def create_object(self):\n i = 0\n for i in range(0, self.objects_numbers):\n self.list_objects.insert(i, Obj(self, i))", "def push(self, obj):\n pass", "def load_recursive_state_dict(x, obj):\n if hasattr(obj, 'load_state_dict'):\n obj.load_state_dict(x)\n if isinstance(x, (tuple, list)):\n for xx, oo in zip(x, obj):\n load_recursive_state_dict(xx, oo)\n if isinstance(x, dict):\n for k in objs.keys():\n load_recursive_state_dict(xx[k], oo[k])", "def traverse(self, recursive=False):\n out = []\n for i in range(len(self.keys)):\n if recursive == True and self.refs[i] != None:\n out.extend(self.refs[i].traverse(recursive=True))\n out.append[self.values[i]]\n if recursive == True:\n out.extend(self.refs[i+1].traverse(recursive=True))\n return out" ]
[ "0.61764467", "0.60474694", "0.57324225", "0.56951404", "0.55937594", "0.55879956", "0.558788", "0.54297394", "0.5391662", "0.53283435", "0.53044546", "0.52996117", "0.5273503", "0.5259931", "0.52462256", "0.5240757", "0.52057797", "0.51749694", "0.5160668", "0.5155546", "0.5145461", "0.510169", "0.5095795", "0.5082552", "0.5081789", "0.5071895", "0.5062649", "0.5053789", "0.5050436", "0.4997458", "0.49870348", "0.49843118", "0.49834138", "0.49679562", "0.49227992", "0.4922334", "0.4915983", "0.49096182", "0.4908442", "0.4905806", "0.4899865", "0.48854363", "0.48815277", "0.48694733", "0.48646522", "0.48508516", "0.48475778", "0.48461157", "0.484522", "0.48434693", "0.484113", "0.48393366", "0.48390132", "0.48271486", "0.4825601", "0.48228985", "0.48135108", "0.48077482", "0.48069602", "0.4802225", "0.47996798", "0.47940728", "0.47784066", "0.47756186", "0.4773948", "0.4770314", "0.47658736", "0.47585946", "0.47536635", "0.47506773", "0.47490752", "0.47467935", "0.4741791", "0.47404894", "0.47317046", "0.4727154", "0.47195953", "0.4706162", "0.4705516", "0.47023305", "0.46993312", "0.46971962", "0.46962547", "0.46909252", "0.46908966", "0.46810976", "0.4674463", "0.4671212", "0.46633407", "0.4663196", "0.4662346", "0.465981", "0.4656733", "0.46553877", "0.46553788", "0.4641541", "0.46415365", "0.46391717", "0.4610883", "0.4606166" ]
0.76109475
0
Returns the entries as a key => value dict.
def as_dict(self): return dict((key, value) for key, value, depth in self.entries.itervalues())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n d = {}\n i = 0\n for entry in self.entries:\n d[i] = {}\n attributes = self.get_attribute_list()\n print (attributes)\n for data in attributes:\n d[i][data] = entry.__getattribute__(data)\n i = i + 1\n return d", "def GetMap(entries):\n map = {}\n for entry in entries:\n map[entry['key']] = entry['value']\n return map", "def getitems(self):\n return {k:self.get(k) for k in self.keys}", "def asDictionary (self) -> Dictionary:\n\n Logging.trace(\">>\")\n result = dict(self._keyToStringValueMap)\n Logging.trace(\"<<: %r\", result)\n return result", "def to_dict(self):\n return {key: getattr(self, key) for key in self.keys}", "def asPyDict(self):\n fieldDict = dict()\n for kvp in self.keyvaluepair_set.all():\n fieldDict[kvp.key] = kvp.value\n return fieldDict", "def items(self):\n return ((key, value) for (key, value) in zip(self.__keys, self.__vals))", "def _as_dict(self):\n return dict(self.items())", "def to_dict(self) -> Dict[str, Any]:\n\n data = self._entry.to_dict()\n del data[\"item-hash\"]\n data[\"item\"] = [self._blob.to_dict()]\n\n return data", "def items(self):\n return [(kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all()]", "def _tuples_to_dict(self, tuples):\n d = {}\n for key, value in tuples:\n d[key] = value\n return d", "def entry_dict(cls, feed_entry):\n return {\n 'id': feed_entry['id'],\n 'link': feed_entry['link'],\n 'published': pd.to_datetime(feed_entry['published']),\n 'title': feed_entry['title'],\n }", "def get_dict(self):\n return {key: value for key, value in zip(self._words, self._vecs)}", "def as_dict(self):\n for k, v in zip(self._input_names, self._flattened_inputs):\n yield k, v", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def as_dict(self):\n return dict(self.items())", "def lstToDict(key, value):\n return dict(zip(key, value))", "def dict() -> Dict[str, Pin]:", "def to_dict(self):\r\n try:\r\n # Create the dictionary, converting each attribute to a\r\n # string.\r\n dict_entry = {}\r\n dict_entry[\"id\"] = str(self.id)\r\n dict_entry[\"title\"] = str(self.title)\r\n dict_entry[\"date\"] = str(self.date)\r\n dict_entry[\"time\"] = str(self.time)\r\n dict_entry[\"datetime\"] = str(self.datetime)\r\n dict_entry[\"duration\"] = str(self.duration)\r\n dict_entry[\"notes\"] = str(self.notes)\r\n dict_entry[\"recurring\"] = str(self.recurring)\r\n dict_entry[\"rec_interval\"] = (\r\n io_utils.build_dict_string(self.rec_interval))\r\n dict_entry[\"rec_total\"] = str(self.rec_total)\r\n dict_entry[\"rec_child_seq\"] = str(self.rec_child_seq)\r\n dict_entry[\"rec_parent\"] = str(self.rec_parent)\r\n dict_entry[\"info\"] = io_utils.build_dict_string(self.info)\r\n return dict_entry\r\n except Exception as err:\r\n _z_exc(\"logentry.py/to_dict\", err)\r\n # end try\r", "def entries(self):\n if self.preload_metadata and not self._entries:\n self._entries = dict((self._decode_name(entry.key), entry)\n for entry in self.bucket.list())\n return self._entries", "def asdict():\n pass", "def _to_dict_tree(self):\n return DictTree(self.entries)", "def items(self):\n return list(zip(self.keys(), self.values()))", "def items(self):\n return list(zip(self.keys(), self.values()))", "def key_dict_values (self):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT note_index \"\r\n +\"FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n 
return self.key_dict.values()", "def _dictfetchall(self):\n return [dict(zip([col[0] for col in self.cursor.description], row)) \\\n for row in self.cursor.fetchall()]", "def make_dict(keys, values):\n\n return dict(zip(keys, values))", "def dictionary(self):\n data = {}\n for i, col in enumerate(self.columns):\n key = col.get_display_tag(sort_attributes=True)\n if key and (not key in data) and (i < len(self.values)):\n data[key] = self.values[i]\n return data", "def from_thread_result_to_dictionary(returned_result):\n keys = []\n values = []\n\n for returned_result_item in returned_result:\n keys.append(returned_result_item[0])\n values.append(returned_result_item[1])\n\n dictionary = dict(zip(keys, values))\n return dictionary", "def value_map(self):\n return {attr: val for attr, val in zip(self.__slots__, self._values(to_str=True))}", "def as_dict(self) -> Dict[str, Any]:\n return {\n column_title: cell.get_value()\n for column_title, cell in self.column_title_to_cell.items()\n }", "def values(self) -> Dict[str, Any]:\n return self.dict.copy()", "def to_dict(self) -> dict:", "def GetDict(self):\n # CL actions are be stored in self._cl_action_list instead of\n # in self._metadata_dict['cl_actions'], because _cl_action_list\n # is potentially a multiprocess.lis. So, _cl_action_list needs to\n # be copied into a normal list.\n temp = self._metadata_dict.copy()\n temp['cl_actions'] = list(self._cl_action_list)\n\n # Similarly, the per-board dicts are stored in a flattened form in\n # _per_board_dict. Un-flatten into nested dict.\n per_board_dict = {}\n for k, v in self._per_board_dict.items():\n board, key = k.split(':')\n board_dict = per_board_dict.setdefault(board, {})\n if key:\n board_dict[key] = v\n\n temp['board-metadata'] = per_board_dict\n return temp", "def asStringMap (self) -> StringMap:\n\n Logging.trace(\">>\")\n result = dict(self._keyToValueMap)\n Logging.trace(\"<<: %r\", result)\n return result", "def get_values_from_dict(self, values):\n return dict()", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def items(self):\n return [(k, self[k]) for k in self.keys()]", "def __create_level_entries_dict__(self,\n tree_level_labels,\n tree_level_values,\n ):\n # | - create_level_entries_dict\n level_entries_dict = {}\n for index, variable in enumerate(tree_level_labels):\n level_entries_dict[variable] = tree_level_values[index]\n\n return(level_entries_dict)\n # __|", "def items(self):\n return [(key, self[key]) for key in self.keys()]", "def items(self):\n return [(key, self[key]) for key in self.keys()]", "def tag_dict_values (self):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT keyword \"\r\n +\"FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n return self.tag_dict.values()", "def dict(self):\r\n d = {\r\n \"key\": self.field,\r\n \"value_count\": self.value_count,\r\n \"record_count\": self.record_count,\r\n \"value_ratio\": self.value_ratio,\r\n \"storage_types\": list(self.storage_types),\r\n \"null_count\": self.null_count,\r\n \"null_value_ratio\": self.null_value_ratio,\r\n \"null_record_ratio\": self.null_record_ratio,\r\n \"empty_string_count\": self.empty_string_count,\r\n \"unique_storage_type\": self.unique_storage_type\r\n }\r\n\r\n if self.distinct_overflow:\r\n d[\"distinct_overflow\"] = self.distinct_overflow,\r\n 
d[\"distinct_values\"] = []\r\n else:\r\n d[\"distinct_values\"] = list(self.distinct_values)\r\n\r\n return d", "def to_dict(self):\n data = {}\n for key, value in self.__dict__.items():\n try:\n data[key] = value.to_dict()\n except AttributeError:\n data[key] = value\n return data", "def get_key_values(self):\n return self.key_values", "def to_dict(self) -> List[Dict[str, Any]]:\n return [x.to_dict() for x in self.inputs]", "def as_dict(self, ordered=False):\n if ordered:\n return OrderedDict(zip(self.keys, self.values))\n return dict(zip(self.keys, self.values))", "def iteritems(self):\n return iter((kvp.key, kvp.value) for kvp in self.keyvaluepair_set.all())", "def asdict(self) -> dict[str, Any]:\n return {\n w.name: getattr(w, \"value\", None)\n for w in self._list\n if w.name and not w.gui_only\n }", "def as_dict(self) -> Dict[str, Any]:\n return {\n column_title: cell.value\n for column_title, cell in self.column_title_to_cell.items()\n }", "def _as_dict(self):\n local = dict((key, value) for key, value in self)\n joined = dict([(k, v) for k, v in self.__dict__.items() if not k[0] == '_'])\n local.update(joined)\n return local", "def to_dictionary(self):\n return {'pubkey': self.pubkey.to_dictionary(), 'T': self.T,\n 'C': self.C.to_dictionary(), 'D': self.D.to_dictionary(), 'sigma': self.sigma.to_dictionary()}", "def convert(data):\n return {k: [d[k] for d in data] for k in data[0].keys()}", "def to_dict(cls):\n return dict((item.name, item.number) for item in iter(cls))", "def values(self):\n return [self[name] for name in self.keys()]", "def as_dict(self):\r\n return {self.words[i]: self.vectors[i] for i in range(self.n)}", "def entries():\n\n\treturn [entry.value for entry in db.session.query(Entry).all()]", "def as_dict(self):\n d = {}\n for name, competition, sid in self.get_queryset().values_list('name', 'competition', 'id'):\n d[(name, competition)] = sid\n return d", "def resultsToArray(self):\n data = {}\n for item in self.data_array:\n data[item[0]] = [item[1]]\n return data", "def as_dict(self):\n return {k: v for k, v in vars(self).items()}", "def list(self):\n return {\n k: json.loads(v)\n for k, v in iteritems(self._db.hgetall(self.index))\n }", "def row_to_dict(keys):\n return lambda row: dict(izip(keys, row))", "def values(self) -> Dict[str, Any]:\n pass", "def values(self):\n return [self[k] for k in self.keys()]", "def prettify_invites(self):\n return {k: dict(v) if isinstance(v, defaultdict) else v for (k, v) in self.cached_invites.items()}", "def to_dict(self):", "def _as_dict(self):\r\n local = dict((key, value) for key, value in self)\r\n joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)\r\n if not k[0] == '_'])\r\n local.update(joined)\r\n return local", "def as_dict(self):\n return dict(self.as_OD())", "def dict_values(self):\n return self.__dict__", "def to_dictionary(self):\n dict_contents = [\"id\", \"size\", \"x\", \"y\"]\n new_dict = {}\n for key in dict_contents:\n new_dict[key] = getattr(self, key)\n return new_dict", "def valuemap(self):\n\n values = self.qualifiers.get('values')\n valuemap = self.qualifiers.get('valuemap')\n\n final_mapping = {}\n if values and valuemap:\n raw_mapping = dict(zip(valuemap, values))\n final_mapping = {}\n for raw_key, raw_value in raw_mapping.items():\n final_mapping[raw_key.strip()] = raw_value.strip()\n return final_mapping", "def as_dict(self):\n return {key: value for key,value in self.address.items()}", "def convertToDict(self): \n out = dict()\n out[\"Value\"] = self.value \n out[\"Odds\"] = 
self.odds \n out[\"Path\"] = self.path\n out[\"Curated\"] = self.curated \n out[\"Edit Distance\"] = self.edit_distance \n out[\"Edit Distance Stem\"] = self.edit_distance_stem\n out[\"Source ID\"] = self.source_id\n out[\"Match\"] = self.match \n out[\"Offset Start\"] = self.offset_start \n out[\"Offset End\"] = self.offset_end\n return out", "def to_dict(self, deep=True):\n return {var: data for var, data in self.items(deep)}", "def pairs_as_dict(pairs: Iterable[Tuple[Any, Any]]) -> Dict[Any, List[Any]]:\n d = defaultdict(list)\n for p in pairs:\n d[p[0]].append(p[1])\n return d", "def hashMap(self,arr):\r\n n = len(arr)\r\n dict1 = {}\r\n i = 1\r\n for i in range(n): \r\n if(i > 0): \r\n key=arr[i]\r\n value=arr[0]\r\n dict1[key] = value\r\n return dict1", "def items(self):\n return zip(self._keys, self._values)", "def get_attributes(self):\n \n retdict = {}\n retdict['s'] = str(self.s)\n if self.t != None:\n retdict['t'] = str(self.t)\n retdict['a'] = str(self.a)\n retdict['b'] = str(self.b)\n retdict['c'] = str(self.c)\n retdict['d'] = str(self.d)\n return retdict", "def items(self):\n acc = []\n for k in self.keys():\n pm = self._maps[k]\n acc.append((k,pm))\n return acc", "def get_many(self, keys: Iterable, version: Optional[int] = None) -> Dict[str, Any]:\n d = {}\n for k in keys:\n val = self.get(k, version=version)\n if val is not None:\n d[k] = val\n return d", "def todict(self):\n d = dict()\n for k in self:\n v = dict.__getitem__(self, k)\n if isinstance(v, udict):\n v = v.todict()\n d[k] = v\n return d", "def convertToDict(self): \n out = dict() \n out[\"Title\"] = self.title \n editions = []\n for edition in self.editions.values(): \n editions.append(edition.convertToDict(withTitle = False))\n out[\"Editions\"] = editions\n return out", "def make_dict(unused_s, unused_l, toks):\n result = {}\n key_value_pairs = chunks(toks, 2)\n for key_value_pair in key_value_pairs:\n result[key_value_pair[0]] = key_value_pair[1]\n return result", "def get_entries(order):\n users_entries = {}\n for item in order.items.all():\n entries_per_order = []\n entries = Entries.objects.filter(orderItem=item.id)\n for ent in entries:\n entries_per_order.append(ent.ticket_number)\n n_order = {\n item.id: entries_per_order\n }\n users_entries.update(n_order)\n return users_entries", "def as_dict(self):\n rv = {\n 'id': self.id,\n 'name': self.name,\n 'contributes': self.contributes,\n 'hint': self.hint,\n 'values': [],\n }\n for value in self.values:\n if isinstance(value, GroupingComponent):\n rv['values'].append(value.as_dict())\n else:\n # this basically assumes that a value is only a primitive\n # and never an object or list. 
This should be okay\n # because we verify this.\n rv['values'].append(value)\n return rv", "def to_listing_dict(self) -> dict:\n data = super().to_listing_dict()\n return data", "def values(self):\n return [self[key] for key in self.keys()]", "def values(self):\n return [self[key] for key in self.keys()]", "def values(self):\n return [self[key] for key in self.keys()]", "def return_as_dictionary(self):\n out_put_dict = {}\n out_put_dict['productCode'] = self.product_code\n out_put_dict['description'] = self.description\n out_put_dict['marketPrice'] = self.market_price\n out_put_dict['rentalPrice'] = self.rental_price\n\n return out_put_dict", "def as_dict(self):\n return dict(self._d)", "def read_keyValues():\n # Create the list of CIs from our data\n ci = db.session.query(CI).order_by(CI.id).all()\n app.logger.debug(pformat(ci))\n # Serialize the data for the response\n ci_schema = CISchema(many=True)\n data = ci_schema.dump(ci)\n keyValues = []\n for d in data:\n keyValuePair = {}\n keyValuePair[\"key\"] = d.get(\"id\")\n keyValuePair[\"value\"] = d.get(\"value\")\n keyValues.append(keyValuePair)\n print(keyValues)\n return keyValues", "def dicts(self, value=None):\n if value is None:\n return [dict(zip(self.keys, line)) for line in self.data]\n return [dict(zip(self.keys, line)) for line in self.data if value in line]", "def metadata(self):\n self.data_as_dict = {}\n for ele in self.data:\n self.data_as_dict[ele.name] = ele.value\n return self.data_as_dict", "def as_dict(self):\n d = {\n 'name': self.name,\n 'description': self.description,\n 'reset': self.reset,\n 'width': self.width,\n 'lsb': self.lsb,\n 'access': self.access,\n 'hardware': self.hardware,\n 'enums': [enum.as_dict() for enum in self.enums]\n }\n d.update(self.etc)\n return d", "def to_python(self):\r\n mapping = {}\r\n for row in self.rows:\r\n mapping[row[0]] = _format_python_value(row[1])\r\n return mapping", "def values(self) -> Dict[str, Any]:\n all_values = {}\n for name in self.names():\n idx = self.hyperparams[name][1]\n hp_type = self.hyperparams[name][0]\n if hp_type == 'object':\n all_values[name] = self.hyperparams[name][2][idx]\n else:\n all_values[name] = idx\n\n return all_values", "def to_dict_query(self) -> list:\n return [row.to_dict() for row in self.all()]", "def lists(self):\n return dict.items(self)", "def hash_entries(entries):\n d = dict()\n for e in entries:\n uri = e[\"uri\"]\n domain = re.match(\"^/view\\d*/(.*)$\", uri).group(1)\n if domain:\n visitor_id = e[\"visitor_id\"]\n if d.has_key(domain):\n store_page_entries = d[domain]\n store_page_entries.append(visitor_id)\n else:\n d[domain] = [visitor_id]\n print \"Retrieved {0} unique domains.\".format(len(d))\n return d" ]
[ "0.7613547", "0.7377054", "0.67987955", "0.63767034", "0.6352516", "0.6342387", "0.63202107", "0.6266719", "0.62579256", "0.6247313", "0.623333", "0.6207049", "0.62052894", "0.61866677", "0.61613494", "0.61424756", "0.613092", "0.61234504", "0.6120027", "0.6097869", "0.60933405", "0.6086907", "0.6079275", "0.6079275", "0.6063583", "0.6059403", "0.6043224", "0.60295224", "0.60128665", "0.59941995", "0.59821993", "0.5968159", "0.59581333", "0.59544605", "0.59512913", "0.5943126", "0.5933928", "0.59280246", "0.5926065", "0.5915037", "0.5915037", "0.5890634", "0.588684", "0.58823997", "0.58778995", "0.58771956", "0.58722275", "0.586911", "0.5867071", "0.58670235", "0.58642614", "0.5860705", "0.5860362", "0.58572567", "0.5851346", "0.58510196", "0.58472073", "0.58444625", "0.5841231", "0.5838496", "0.5834974", "0.5832081", "0.58315784", "0.582132", "0.5820823", "0.5818756", "0.5817994", "0.58169436", "0.5816026", "0.5806632", "0.58059245", "0.5805226", "0.58031213", "0.5801629", "0.5799083", "0.5789787", "0.57897013", "0.57796866", "0.5778975", "0.57694924", "0.57686824", "0.57651585", "0.5761838", "0.57615584", "0.5761423", "0.5755515", "0.5750983", "0.5750983", "0.5750983", "0.5745134", "0.5742267", "0.5736512", "0.5734631", "0.5734275", "0.57309884", "0.5720787", "0.57158124", "0.5713441", "0.5712022", "0.57049704" ]
0.73771703
1
This appears to be totally unused, and leftover from testing. It may, in fact, be better/more efficient than the used walk_tree() method.
def work_tree2(obj, **kwargs): if 'exclusions' in kwargs: exclusions = kwargs['exclusions'] else: exclusions = Exclusions([], [], []) #groups_done = {} classes = NodeResults(nodetype='classes') params = NodeResults(nodetype='params') if hasattr(obj, 'hostname') and not hasattr(obj, 'name'): obj.name = obj.hostname to_index = [(obj, 1)] # loop opts index_pop = to_index.pop index_extend = to_index.extend egroups, eclasses, eparams = exclusions add_classes = classes.add_entries add_params = params.add_entries while to_index: (obj, depth) = index_pop() #objname = obj.name #if objname in groups_done and groups_done[objname] <= depth: #continue try: objclasses = obj.classes.exclude(classname__in=eclasses) add_classes(objclasses, "classname", "classparams", depth) objparams = obj.parameters.exclude(paramkey__in=eparams) add_params(objparams, "paramkey", "paramvalue", depth) except RuntimeError, e: return ("Fail", "Fail") # or just let it bubble up to the caller #groups_done[objname] = depth depth += 1 children = [(group, depth) for group in obj.groups.exclude(name__in=egroups)] index_extend(children) return classes.as_dict(), params.as_dict() # or (classes.entries, params.entries)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def traverse_tree(file, tree):\n\n\tfor node in tree.get_children():\n\t\tpass", "def __walk_tree(self):\n for root, dirnames, files in os.walk(self.path, topdown=True):\n self.dirCount += 1\n # Create a tuple with the file size, the file name and the files inode (for tracking hard links).\n files = [\n (os.lstat(os.path.join(root, fi)).st_size, os.path.join(root, fi), os.lstat(os.path.join(root, fi)).st_ino) for fi\n in files if (os.lstat(os.path.join(root, fi)).st_size > self.size)]\n self.fileList.extend(files)\n if len(self.excludeList) > 0:\n dirnames[:] = [dir for dir in dirnames if dir not in self.excludeList]\n if not self.cross_mount_points:\n dirnames[:] = [dir for dir in dirnames if not os.path.ismount(os.path.join(root, dir))]", "def _traverse_tree(self):\n if not self.children:\n yield self\n for child in self.children:\n yield from child._traverse_tree()", "def test_render_tree(self) -> None:\n\n def get_children(node):\n return node.children\n\n node, expect, withtags = self.tree_case_1()\n actual = render_tree(node, get_children)\n assert expect == actual, (expect, actual)\n\n node, expect, withtags = self.tree_case_2()\n actual = render_tree(node, get_children, 1)\n assert expect == actual, (expect, actual)\n\n # Ensure that we can call render_tree on the same Node\n # again. This wasn't possible in version 2.4.1 and earlier\n # due to a bug in render_tree (visited was set to {} as default\n # parameter)\n actual = render_tree(node, get_children, 1)\n assert expect == actual, (expect, actual)", "def walk(node):\r\n from collections import deque\r\n todo = deque([node])\r\n while todo:\r\n node = todo.popleft()\r\n todo.extend(iter_child_nodes(node))\r\n yield node", "def walk(self):\n pass", "def test_Tree():", "def walk(d):\n for parent, key, leaf in _walk({}, None, d):\n yield (d, parent, key, leaf)", "def improve_tree(tree, freq_dict):\n # todo", "def traverse_depth_first(self, fn):\n queue = deque([self.root])\n while len(queue) > 0:\n node = queue.popleft()\n fn(node)\n queue.extendleft(reversed(node.children))", "def walkTree(self):\n if self.parentId:\n print self.parentId, self.id, self.value\n for child in self.children.itervalues():\n child.walkTree()", "def test_print_tree(self) -> None:\n\n def get_children(node):\n return node.children\n\n save_stdout = sys.stdout\n\n try:\n node, expect, withtags = self.tree_case_1()\n\n IOStream = io.StringIO\n sys.stdout = IOStream()\n print_tree(node, get_children)\n actual = sys.stdout.getvalue()\n assert expect == actual, (expect, actual)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, showtags=1)\n actual = sys.stdout.getvalue()\n assert withtags == actual, (withtags, actual)\n\n # Test that explicitly setting prune to zero works\n # the same as the default (see above)\n node, expect, withtags = self.tree_case_2(prune=0)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, 0)\n actual = sys.stdout.getvalue()\n assert expect == actual, (expect, actual)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, 0, showtags=1)\n actual = sys.stdout.getvalue()\n assert withtags == actual, (withtags, actual)\n\n # Test output with prune=1\n node, expect, withtags = self.tree_case_2(prune=1)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, 1)\n actual = sys.stdout.getvalue()\n assert expect == actual, (expect, actual)\n\n # Ensure that we can call print_tree on the same Node\n # again. 
This wasn't possible in version 2.4.1 and earlier\n # due to a bug in print_tree (visited was set to {} as default\n # parameter)\n sys.stdout = IOStream()\n print_tree(node, get_children, 1)\n actual = sys.stdout.getvalue()\n assert expect == actual, (expect, actual)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, 1, showtags=1)\n actual = sys.stdout.getvalue()\n assert withtags == actual, (withtags, actual)\n finally:\n sys.stdout = save_stdout", "def traverse_tree(self, root):\n\n\t\tself.pre_stage()\n\t\troot.visit(self)\n\t\tself.post_stage()", "def traverse(self):\n if self.root is None:\n return ''\n return self.root.traverse()", "def binary_search_tree_run():\n\n # no need for Tree object as the Tree itself is a concept; its made of connected nodes\n # nodes are the object; connections are self contained\n\n def binary_insert(root, node):\n if root is None:\n root = node\n else:\n if root.data > node.data:\n if root.l_child is None:\n root.l_child = node\n else:\n binary_insert(root.l_child, node)\n else:\n if root.r_child is None:\n root.r_child = node\n else:\n binary_insert(root.r_child, node)\n\n def in_order_print(root):\n if not root:\n return\n in_order_print(root.l_child)\n print(root.data)\n in_order_print(root.r_child)", "def file_walker(root,**kwargs):\n\n # Get our keyword argunents, and do some initialization.\n max_depth=kwargs.get('depth',None)\n if max_depth==None:\n max_depth=sys.maxsize # I don't think we'll hit this limit in practice.\n follow_links=kwargs.get('follow_links',True)\n prune=compile_filename_patterns(kwargs.get('prune',[]))\n ignore=compile_filename_patterns(kwargs.get('ignore',[]))\n report_dirs=kwargs.get('report_dirs',False)\n if report_dirs not in (False,True,'first','last'):\n raise ValueError(\"report_dirs=%r is not one of False, True, 'first', or 'last'.\"%(report_dirs,))\n stack=[(0,root)] # Prime our stack with root (at depth 0).\n been_there=set([os.path.abspath(os.path.realpath(root))])\n dir_stack=[] # Stack of paths we're yielding after exhausting those directories.\n\n while stack:\n depth,path=stack.pop()\n if report_dirs in (True,'first'):\n yield path+os.sep\n elif report_dirs=='last':\n dir_stack.append(path+os.sep)\n flist=os.listdir(path)\n flist.sort()\n dlist=[]\n # First, let the caller iterate over these filenames.\n for fn in flist:\n p=os.path.join(path,fn)\n if os.path.isdir(p):\n # Just add this to this path's list of directories for now.\n dlist.insert(0,fn)\n continue\n pat,mat=first_match(fn,ignore)\n if not pat:\n yield p\n # Don't dig deeper than we've been told to.\n if depth<max_depth:\n # Now, let's deal with the directories we found.\n for fn in dlist:\n p=os.path.join(path,fn)\n # We might need to stack this path for our fake recursion.\n if os.path.islink(p) and not follow_links:\n # Nope. We're not following symlinks.\n continue\n rp=os.path.abspath(os.path.realpath(p))\n if rp in been_there:\n # Nope. We've already seen this path (and possibly processed it).\n continue\n m=None\n pat,mat=first_match(fn,prune)\n if pat:\n # Nope. This directory matches one of the prune patterns.\n continue\n # We have a keeper! 
Record the path and push it onto the stack.\n been_there.add(rp)\n stack.append((depth+1,p))\n while dir_stack:\n yield dir_stack.pop()", "def walk(node):\n\n traversed_nodes.append(node)\n \n # Do something with node value...\n print node.value\n\n # Recurse on each child node\n for child_node in node.child_nodes:\n if child_node not in traversed_nodes:\n walk(child_node)", "def test_list_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n assert tree.root.left.value == 5\n assert tree.root.right.value == 100", "def visit(self):\n self.tree = self.recursive_visit(self.tree)\n # assert self.current_line == self.tree.absolute_bounding_box.bottom_right.line", "def tree_probe(self, **kwargs):\n\n def nextSpinner(b_cursorToNextLine):\n \"\"\"Provide a rotating spinner to indicate activity by using a closure.\n\n Returns:\n inner : inner function\n \"\"\"\n spinner = '\\\\|/-'\n pos = 0\n def inner(b_cursorToNextLine):\n nonlocal pos, spinner\n if pos>=len(spinner): pos = 0\n if self.toConsole():\n self.dp.qprint('Probing filesystem... {}'.format(spinner[pos]), end = '')\n if not b_cursorToNextLine:\n self.dp.qprint('\\r', end = '', syslog = self.args['syslog'])\n else:\n self.dp.qprint('\\n', end = '', syslog = self.args['syslog'])\n pos += 1\n return inner\n return inner\n\n def path_shorten(str_path, length = 80) -> str:\n \"\"\"Shorten a Path string\n\n Returns:\n string : a shortened path\n \"\"\"\n if length < 0:\n length = os.get_terminal_size().columns + length\n if len(str_path) > length:\n l_parts = list(pathlib.PurePath(str_path).parts)\n l_copy = l_parts.copy()\n max = len(l_parts)\n offset = -1\n center = max // 2\n while len(str_path) > length:\n offset += 1\n l_shorten = [i % (max + 1) for i in range( center - offset,\n center + offset + 1)]\n for prt in l_shorten: l_copy[prt] = '...'\n str_path = str(pathlib.PurePath(*l_copy))\n return str_path\n\n def elements_flash(l_el, debugLevel):\n \"\"\"\n Flash elements in the passed list at the debugLevel\n \"\"\"\n if self.toConsole():\n for el in l_el:\n self.dp.qprint('%s (%d)\\033[K\\r' % \\\n (path_shorten(el, - len(str(len(l_el))) - 4), len(l_el)),\n level = debugLevel,\n end = '',\n syslog = self.args['syslog'])\n\n\n str_topDir = \".\"\n l_dirs = []\n l_files = []\n b_status = False\n str_path = ''\n l_dirsHere = []\n l_filesHere = []\n b_cursorToNextLine = False\n\n for k, v in kwargs.items():\n if k == 'root': str_topDir = v\n\n if int(self.verbosityLevel) >= 2:\n b_cursorToNextLine = True\n spinner = nextSpinner(b_cursorToNextLine)\n index:int = 0\n for root, dirs, files in pftree.walklevel(str_topDir,\n self.maxdepth,\n followlinks = self.b_followLinks):\n b_status = True\n if self.verbosityLevel >= 2: spinner(b_cursorToNextLine)\n str_path = root.split(os.sep)\n l_dirs.append(root)\n if self.verbosityLevel >= 2: elements_flash(l_dirs, 2)\n if index:\n l_filesHere = [root + '/' + y for y in files]\n else:\n l_filesHere = [root + '/' + y for y in dirs]\n if len(self.str_inputFile):\n l_hit = [s for s in l_filesHere if self.str_inputFile in s]\n if l_hit:\n l_filesHere = l_hit\n else:\n l_filesHere = []\n l_files.append(l_filesHere)\n if self.verbosityLevel >= 3: elements_flash(l_filesHere, 3)\n if self.toConsole() and self.verbosityLevel >=2:\n self.dp.qprint(\"\\033[A\" * 1,\n end = '',\n syslog = self.args['syslog'],\n level = 2 )\n index += 1\n if self.toConsole() and self.verbosityLevel >= 2:\n self.dp.qprint('Probing complete! 
', level = 1)\n return {\n 'status': b_status,\n 'l_dir': l_dirs,\n 'l_files': l_files\n }", "def _get_internals(tree):\r\n y = tree.yea\r\n n = tree.nay\r\n a = tree.abstain\r\n if (y.is_leaf == False):\r\n internal_nodes.append(y)\r\n _get_internals(y)\r\n if (n.is_leaf == False):\r\n internal_nodes.append(n)\r\n _get_internals(n)\r\n if (a.is_leaf == False):\r\n internal_nodes.append(a)\r\n _get_internals(a)\r\n return", "async def leaf_it(d):\n async for _parent, _key, leaf in _walk({}, None, d):\n yield leaf", "def walk_tree(top):\n nodes = [top]\n for dirpath, dirnames, filenames in os.walk(top):\n for dirname in dirnames:\n nodes.append(os.path.join(dirpath, dirname))\n for filename in filenames:\n nodes.append(os.path.join(dirpath, filename))\n\n return nodes", "def test_tree_mode2(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar\n xp.tree_mode(True, xp)\n bar = xpb.bar\n bar.tree_mode(True, xp)\n baz = xpb.baz\n baz.tree_mode(True, xp)\n foo_bar = xpb.foo.bar\n foo_bar.tree_mode(True, xp)\n self.assertTrue(xp.is_tree_mode())\n l = [bar, foo_bar, xp, baz]\n self.assertTrue(xp in l)\n l.remove(xp)\n self.assertTrue(len(l) == 3)\n self.assertFalse(xp in l)\n xp.tree_mode(False, xp)\n self.assertFalse(xp.is_tree_mode())", "def traverse_tree(tree, thisFolder, path, submission):\n\n # Get files directly underneath this folder.\n blobs = tree.blobs\n thisFolderName = tree.name\n\n # Add this folder to the path.\n path = os.path.join(path, thisFolderName)\n print(path)\n\n for blob in blobs:\n filepath = os.path.join(path, blob.name)\n add_source_file(blob.name, thisFolder, filepath, submission)\n\n # Get folders directly underneath this folder.\n folders = tree.trees\n for folder in folders:\n srcFolderObj = add_source_folder(folder.name, thisFolder)[0]\n traverse_tree(folder, srcFolderObj, path, submission)\n\n return", "def _traverse_uast(self, root, word2ind, dok_mat):\n stack = [root]\n new_stack = []\n\n while stack:\n for node in stack:\n children = self._process_node(node, word2ind, dok_mat)\n new_stack.extend(children)\n stack = new_stack\n new_stack = []", "def __call__(self, node):\n if not node.children: return;\n ochildren = node.children;\n for n in ochildren:\n mark = self.ProperContainsMarker(n);\n if mark: raise ValueError(\"not implemented\");", "def _find_one_tree(tree: dict,\n func: Callable,\n args: Tuple,\n kwargs: Mapping,\n ) -> Union[dict, None]:\n frontier = []\n explored = set()\n for uid, item in tree.items():\n frontier.append((uid, item))\n while frontier:\n uid, item = frontier.pop()\n explored.add(uid)\n if func(item, *args, **kwargs):\n return item\n if \"children\" in item:\n for child_uid, child_item in item[\"children\"].items():\n if child_uid not in explored:\n frontier.append((child_uid, child_item))", "def in_order_traverse(root):\n stack = deque([root])\n visited = set()\n while stack:\n node = stack.pop()\n if node is None:\n continue\n if node.index in visited:\n print(node.index, end=' ')\n continue\n visited.add(node.index)\n stack.append(node.right)\n stack.append(node)\n stack.append(node.left)", "def walk_tree(tree,\n leaf_func=lambda x: None,\n pre_nonleaf_func=lambda x: None,\n post_nonleaf_func=lambda x: None):\n tree = deepcopy(tree)\n\n def walk(node):\n # Depth First Traversal of an NLTK Tree.\n if is_leaf_node(node):\n leaf_func(node)\n else:\n pre_nonleaf_func(node)\n if len(node) > 0:\n for child in node:\n walk(child)\n post_nonleaf_func(node)\n\n walk(tree)\n return tree", "def apply(self, tree):\n raise NotImplementedError()", 
"def depth_first_search(self):\r\n queue = [self.root]\r\n ordered = []\r\n while queue:\r\n node = queue.pop()\r\n ordered.append(node)\r\n queue.extend(node.children)\r\n \r\n while ordered:\r\n yield ordered.pop()", "def trie_walk_yielding(root, yieldfunc, seen=[], preceder=[], level=1, level_keys=[]):\n level_keys.append(list(root.keys()))\n subtrees = [root.get(k) for k in root.keys()]\n # yield subtrees\n for i, subtree in enumerate(subtrees):\n sk = list(root.keys())[i]\n seen.append(sk)\n if subtree == {None: None}:\n # the subtree is a leaf\n yield from yieldfunc(preceder, seen, level)\n gone = seen.pop() # leaf will not be remembered (after being shown)\n if i == len(subtrees) - 1:\n popped = seen.pop()\n preceder.pop()\n level_keys.pop()\n level -= 1\n if i == len(subtrees) - 1:\n if level_keys[len(preceder)][0] is None:\n while (\n level_keys[len(preceder)][0] is None\n and popped == level_keys[len(preceder)][-1]\n ):\n popped = seen.pop()\n preceder.pop()\n level_keys.pop()\n level -= 1\n elif popped == level_keys[len(preceder)][-1]:\n while popped == level_keys[len(preceder)][-1]:\n popped = seen.pop()\n preceder.pop()\n level_keys.pop()\n level -= 1\n continue\n elif subtree is None:\n # the 'subtree' is a 'null child' indicating the parent is 'also a leaf'\n popped = seen.pop() # leaf will not be remembered (nor shown at all)\n yield from yieldfunc(preceder, seen, level)\n continue\n subtree_keys = list(subtree.keys())\n preceder.append(sk)\n yield from trie_walk_yielding(\n subtree, yieldfunc, seen, preceder, level + 1, level_keys\n )", "def _walk(self):\n while self._slice:\n new_slice = []\n for element in self._slice:\n if not isinstance(element, dict) or len(element) != 1:\n raise TreeIntegrityError\n key, sublist = tuple(element.items())[0]\n if not isinstance(sublist, list):\n raise TreeIntegrityError\n yield key\n new_slice.extend(sublist)\n self._slice = new_slice", "def walk(self):\n current = self\n yield current\n while current.parent:\n current = current.parent\n yield current", "def walk(self):\n yield self\n for child in self.children:\n for descendant in child.walk():\n yield descendant", "def traverse(tree):\n nonlocal result\n\n symbol, children, *_ = tree\n\n if children:\n for c in children:\n if c[0].startswith(\"<\"):\n if not c[0].startswith(symbol_name[:-1]):\n if next_leaf(c):\n result += c[0].replace(\"<\", \"\").replace(\">\", \": \") + next_leaf_content(c) + \"\\n\"\n else:\n result += c[0].replace(\"<\", \"\").replace(\">\", \"\") + \" {\" + \"\\n\"\n traverse(c)\n result += \"}\" + \"\\n\"\n else:\n traverse(c) # do not update anything, just traverse", "def visit(self, node):", "def visit(self, node):", "def tree(self) -> None:\n tree = Tree(self.root.path)\n self.root.walk_dir(tree)", "def test_scan_recursive(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount + 1)", "def inOrderTreeWalk(node: TreeNode, node_flat: TreeNode):\n if node is not None:\n node_flat.right = TreeNode(node.val)\n node_flat = node_flat.right\n node_flat = inOrderTreeWalk(node.left, node_flat)\n node_flat = inOrderTreeWalk(node.right, node_flat)\n return node_flat", "def _find_all_tree(tree: dict,\n func: Callable,\n args: Tuple,\n kwargs: Mapping\n ) -> Union[Sequence[dict], None]:\n frontier = []\n explored = set()\n found = []\n for uid, item in tree.items():\n frontier.append((uid, item))\n while frontier:\n uid, item = frontier.pop()\n explored.add(uid)\n if func(item, *args, **kwargs):\n found.append(item)\n if \"children\" in item:\n for 
child_uid, child_item in item[\"children\"].items():\n if child_uid not in explored:\n frontier.append((child_uid, child_item))\n return found", "def filetree(self) -> P:\n ...", "def get_tree_size(thread, root, top, path, docs, sizes, inodes, depth=0, maxdepth=999):\n global filecount\n global skipfilecount\n global inodecount\n global dircount\n global skipdircount\n global total_doc_count\n global warnings\n\n size = 0\n size_du = 0\n dirs = 0\n files = 0\n f_count = 0\n d_count = 0\n f_skip_count = 0\n d_skip_count = 0\n tot_doc_count = 0\n parent_path = None\n size_norecurs = 0\n size_du_norecurs = 0\n files_norecurs = 0\n dirs_norecurs = 0\n \n # use alt scanner\n # try to get stat info for dir path\n if options.altscanner:\n try:\n d_stat = alt_scanner.stat(path)\n except RuntimeError as e:\n logmsg = '[{0}] ALT SCANNER ERROR: {1}'.format(thread, e)\n logger.error(logmsg)\n if logtofile: logger_warn.error(logmsg)\n with crawl_thread_lock:\n warnings += 1\n return 0, 0, 0, 0\n except Exception as e:\n logmsg = '[{0}] ALT SCANNER EXCEPTION: {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n return 0, 0, 0, 0\n else:\n # try to get os stat info for dir path\n try:\n d_stat = os.stat(path)\n except OSError as e:\n logmsg = '[{0}] OS ERROR: {1}'.format(thread, e)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n return 0, 0, 0, 0\n \n # restore times (atime/mtime)\n if restore_times:\n res, err = set_times(path, d_stat.st_atime, d_stat.st_mtime)\n if not res:\n logmsg = 'OS ERROR setting file times for {0} (error {1})'.format(path, err)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n\n # scan directory\n try:\n logger.debug('[{0}] Scanning path {1}...'.format(thread, path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] Scanning path {1}...'.format(thread, path))\n for entry in os.scandir(path):\n logger.debug('[{0}] Scanning dir entry {1}...'.format(thread, entry.path))\n if options.vverbose:\n logger.info('[{0}] Scanning dir entry {1}...'.format(thread, entry.path)) \n \n if entry.is_symlink():\n logger.debug('[{0}] skipping symlink {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping symlink {1}'.format(thread, entry.path))\n pass\n elif entry.is_dir():\n d_count += 1\n if not dir_excluded(entry.path):\n dirs += 1\n dirs_norecurs += 1\n if maxdepth > 0:\n if depth < maxdepth:\n # recurse into subdir\n if not quit:\n s, sdu, fc, dc = get_tree_size(thread, root, top, entry.path, docs, sizes, inodes, depth+1, maxdepth)\n size += s\n size_du += sdu\n files += fc\n dirs += dc\n else:\n logger.debug('[{0}] not descending {1}, maxdepth {2} reached'.format(\n thread, entry.path, maxdepth))\n if options.verbose or options.vverbose:\n logger.info('[{0}] not descending {1}, maxdepth {2} reached'.format(\n thread, entry.path, maxdepth))\n else:\n logger.debug('[{0}] skipping dir {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping dir {1}'.format(thread, entry.path))\n d_skip_count += 1\n else:\n f_count += 1\n if not file_excluded(entry.name):\n f_stat = entry.stat()\n # restore times (atime/mtime)\n if restore_times and not options.altscanner:\n ret = set_times(entry.path, f_stat.st_atime, f_stat.st_mtime)\n if not ret:\n with crawl_thread_lock:\n warnings += 
1\n\n fsize = f_stat.st_size\n # calculate allocated file size (du size)\n if IS_WIN:\n fsize_du = fsize\n elif options.altscanner:\n fsize_du = f_stat.st_sizedu\n else:\n fsize_du = f_stat.st_blocks * blocksize\n # set fsize_du to 0 if inode in inodes list (hardlink)\n if f_stat.st_ino in inodes:\n fsize_du = 0\n # add inode to inodes list if hardlink count > 1\n elif f_stat.st_nlink > 1:\n with crawl_thread_lock:\n inodes.append(f_stat.st_ino)\n fmtime_sec = time.time() - f_stat.st_mtime\n fctime_sec = time.time() - f_stat.st_ctime\n fatime_sec = time.time() - f_stat.st_atime\n\n if not exc_empty_files or (exc_empty_files and fsize > 0):\n if fsize >= minfilesize and \\\n fmtime_sec > minmtime and \\\n fmtime_sec < maxmtime and \\\n fctime_sec > minctime and \\\n fctime_sec < maxctime and \\\n fatime_sec > minatime and \\\n fatime_sec < maxatime:\n size += fsize\n size_norecurs += fsize\n size_du += fsize_du\n size_du_norecurs += fsize_du\n files += 1\n files_norecurs += 1\n # get owner and group names\n if IS_WIN:\n # for windows just set both owner and group to 0, this is what scandir returns for Windows\n # and there is no known fast way to get Windows file owner (pywin32 is slow)\n owner = f_stat.st_uid\n group = f_stat.st_gid\n else:\n owner, group = get_owner_group_names(f_stat.st_uid, f_stat.st_gid)\n \n # check for bad Unicode utf-8 characters\n try:\n if parent_path is None:\n parent_path = get_parent_path(entry.path)\n file_name = get_file_name(entry.name)\n except UnicodeError:\n if parent_path is None:\n parent_path = get_parent_path(entry.path, ignore_errors=True)\n file_name = get_file_name(entry.name, ignore_errors=True)\n logmsg = '[{0}] UNICODE WARNING {1}'.format(thread, os.path.join(parent_path, file_name))\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n \n # index doc dict\n data = {\n 'name': file_name,\n 'extension': os.path.splitext(entry.name)[1][1:].lower(),\n 'parent_path': parent_path,\n 'size': fsize,\n 'size_du': fsize_du,\n 'owner': owner,\n 'group': group,\n 'mtime': datetime.utcfromtimestamp(int(f_stat.st_mtime)).isoformat(),\n 'atime': datetime.utcfromtimestamp(int(f_stat.st_atime)).isoformat(),\n 'ctime': datetime.utcfromtimestamp(int(f_stat.st_ctime)).isoformat(),\n 'nlink': f_stat.st_nlink,\n 'ino': str(f_stat.st_ino),\n 'type': 'file'\n }\n\n # check if using altscanner and if any additional meta data to add to data dict\n if options.altscanner:\n try:\n extrameta_dict = alt_scanner.add_meta(entry.path, f_stat)\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except Exception as e:\n logmsg = '[{0}] ALT SCANNER EXCEPTION {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n # check plugins for adding extra meta data to data dict\n if plugins_enabled and plugins_files:\n for plugin in plugins:\n try:\n # check if plugin is for file doc\n if plugin.for_type('file'):\n extrameta_dict = plugin.add_meta(entry.path, f_stat)\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except (RuntimeWarning, RuntimeError) as e:\n err_message = e.args[0]\n if e.__class__ == RuntimeWarning:\n logmsg = '[{0}] PLUGIN WARNING: {1}'.format(thread, err_message)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n else:\n logmsg = '[{0}] PLUGIN ERROR: {1}'.format(thread, err_message)\n logger.error(logmsg)\n if logtofile: logger_warn.error(logmsg)\n with 
crawl_thread_lock:\n warnings += 1\n extrameta_dict = e.args[1]\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except Exception as e:\n logmsg = '[{0}] PLUGIN EXCEPTION {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n # add file doc to docs list and upload to ES once it reaches certain size\n docs.append(data.copy())\n doc_count = len(docs)\n if doc_count >= es_chunksize:\n start_bulk_upload(thread, root, docs)\n tot_doc_count += doc_count\n docs.clear()\n\n else:\n f_skip_count += 1\n logger.debug('[{0}] skipping file {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping file {1}'.format(thread, entry.path))\n else:\n f_skip_count += 1\n logger.debug('[{0}] skipping file {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping file {1}'.format(thread, entry.path))\n else:\n f_skip_count += 1\n logger.debug('[{0}] skipping file {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping file {1}'.format(thread, entry.path))\n \n # if not excluding empty dirs is set or exclude empty dirs is set but there are files or \n # dirs in the current directory, index the dir\n if not exc_empty_dirs or (exc_empty_dirs and (files > 0 or dirs > 0)):\n # get owner and group names\n if IS_WIN:\n # for windows just set both owner and group to 0, this is what scandir returns for Windows\n # and there is no known fast way to get Windows file owner (pywin32 is slow)\n owner = d_stat.st_uid\n group = d_stat.st_gid\n else:\n owner, group = get_owner_group_names(d_stat.st_uid, d_stat.st_gid)\n \n # check for bad Unicode utf-8 characters\n try:\n file_name = get_dir_name(path)\n parent_path = get_parent_path(path)\n except UnicodeError:\n file_name = get_dir_name(path, ignore_errors=True)\n parent_path = get_parent_path(path, ignore_errors=True)\n logmsg = '[{0}] UNICODE WARNING {1}'.format(thread, os.path.join(parent_path, file_name))\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n \n # index doc dict\n data = {\n 'name': file_name,\n 'parent_path': parent_path,\n 'size': size,\n 'size_norecurs': size_norecurs,\n 'size_du': size_du,\n 'size_du_norecurs': size_du_norecurs,\n 'file_count': files,\n 'file_count_norecurs': files_norecurs, \n 'dir_count': dirs + 1,\n 'dir_count_norecurs': dirs_norecurs + 1,\n 'dir_depth': depth,\n 'mtime': datetime.utcfromtimestamp(int(d_stat.st_mtime)).isoformat(),\n 'atime': datetime.utcfromtimestamp(int(d_stat.st_atime)).isoformat(),\n 'ctime': datetime.utcfromtimestamp(int(d_stat.st_ctime)).isoformat(),\n 'nlink': d_stat.st_nlink,\n 'ino': str(d_stat.st_ino),\n 'owner': owner,\n 'group': group,\n 'type': 'directory'\n }\n\n # check if using altscanner and if any additional meta data to add to data dict\n if options.altscanner:\n try:\n extrameta_dict = alt_scanner.add_meta(path, d_stat)\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except Exception as e:\n logmsg = '[{0}] ALT SCANNER EXCEPTION {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n # check plugins for adding extra meta data to data dict\n if plugins_enabled and plugins_dirs:\n for plugin in plugins:\n # check if plugin is for directory doc\n try:\n if 
plugin.for_type('directory'):\n extrameta_dict = plugin.add_meta(path, d_stat)\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except (RuntimeWarning, RuntimeError) as e:\n err_message = e.args[0]\n if e.__class__ == RuntimeWarning:\n logmsg = '[{0}] PLUGIN WARNING: {1}'.format(thread, err_message)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n else:\n logmsg = '[{0}] PLUGIN ERROR: {1}'.format(thread, err_message)\n logger.error(logmsg)\n if logtofile: logger_warn.error(logmsg)\n with crawl_thread_lock:\n warnings += 1\n extrameta_dict = e.args[1]\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except Exception as e:\n logmsg = '[{0}] PLUGIN EXCEPTION: {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n \n if depth > 0:\n # add file doc to docs list and upload to ES once it reaches certain size\n docs.append(data.copy())\n doc_count = len(docs)\n if doc_count >= es_chunksize:\n start_bulk_upload(thread, root, docs)\n tot_doc_count += doc_count\n docs.clear()\n \n else:\n with crawl_thread_lock:\n sizes[root] = data.copy()\n else:\n d_skip_count += 1\n logger.debug('[{0}] skipping empty dir {1}'.format(thread, path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping empty dir {1}'.format(thread, path))\n if dirs > 0: dirs -= 1\n\n with crawl_thread_lock:\n dircount[root] += d_count - d_skip_count\n filecount[root] += f_count - f_skip_count\n skipfilecount[root] += f_skip_count\n skipdircount[root] += d_skip_count\n total_doc_count[root] += tot_doc_count\n inodecount[root] += d_count + f_count \n\n except OSError as e:\n logmsg = '[{0}] OS ERROR: {1}'.format(thread, e)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n except RuntimeError as e:\n logmsg = '[{0}] ALT SCANNER ERROR: {1}'.format(thread, e)\n logger.error(logmsg)\n if logtofile: logger_warn.error(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n \n return size, size_du, files, dirs", "def traverse(self):\r\n nodes_to_visit = queue.Queue()\r\n nodes_to_visit.put(self.__rootnode)\r\n while nodes_to_visit.empty() is False:\r\n current_node = nodes_to_visit.get()\r\n yield current_node\r\n for child in current_node.children:\r\n nodes_to_visit.put(child)", "def traverseTree(mdsnode,dead_branches=False,depth=float('Nan'),current_depth=0,noisy=False,strict=False,tags=False):\n tagdict={}\n if isinstance(mdsnode,mds.tree.Tree): \n mdsnode=mdsnode.getNode(\"\\\\TOP\")\n \n name = get_mds_shortname(mdsnode) \n me = Branch(mdsnode)#put node information here if you like\n if noisy: print (\" \"*current_depth + name)\n\n #Members are data/signals, put them directly the current Node object\n #if they are arrays\n if mdsnode.getNumMembers()>0:\n leaves=mdsnode.getMembers()\n for leaf in leaves:\n leafname=get_mds_shortname(leaf)\n leafshape=get_mds_shape(leaf)\n if dead_branches or not len(leafshape) ==0:\n if noisy: print (\" \"*(current_depth+1) + leafname +\": array%s\"%str(leafshape))\n setattr(me,leafname,Leaf(leaf,strict))\n tagdict[leafname]=getattr(me,leafname)\n else:\n if noisy: print(\" \"*(current_depth+1) + leafname)\n #Children contain no immediate data, just links to more nodes. 
If depth is\n #not beyond limit, go down these 'branches' and add contents to the current\n #Node object\n if not depth <= current_depth and mdsnode.getNumChildren()>0:\n branches = mdsnode.getChildren()\n for b in branches:\n subname,subnode,subtags=traverseTree(b, dead_branches,depth,current_depth+1,noisy,strict)\n if len(subnode.__getDescendants__())>0:\n setattr(me,subname,subnode)\n tagdict[subname]=getattr(me,subname)\n for k,v in subtags.items(): #merge tags in\n tagdict[k]=v\n \n if current_depth==0:#we are done, returning to user\n if tags: \n for tag,obj in tagdict.items():\n setattr(me,tag,obj)\n else:\n tagbranch=Branch(mdsnode)\n for tag,obj in tagdict.items():\n setattr(tagbranch,tag,obj)\n setattr(me,'tags',tagbranch) \n return me\n return (name, me,tagdict) #else, we are still recursing back down the tree", "def walktree (self, top = \".\", depthfirst = True):\n \n names = os.listdir(top)\n if not depthfirst:\n yield top, names\n for name in names:\n try:\n st = os.lstat(os.path.join(top, name))\n except os.error:\n continue\n if stat.S_ISDIR(st.st_mode):\n for (newtop, children) in self.walktree (os.path.join(top, name), depthfirst):\n #print 'Scanning ', newtop\n yield newtop, children\n if depthfirst:\n yield top, names", "def __iter__(self):\n return self._collect(self.root, '')", "def traverse(self):\n return self.root.traverse()", "def testBinarySearchTree():\n\n \"\"\"\n Example After Deletion\n 7\n / \\\n 1 4\n\n \"\"\"\n t = BinarySearchTree()\n t.insert(8)\n t.insert(3)\n t.insert(6)\n t.insert(1)\n t.insert(10)\n t.insert(14)\n t.insert(13)\n t.insert(4)\n t.insert(7)\n\n # Prints all the elements of the list in order traversal\n print(t.__str__())\n\n if t.getNode(6) is not None:\n print(\"The label 6 exists\")\n else:\n print(\"The label 6 doesn't exist\")\n\n if t.getNode(-1) is not None:\n print(\"The label -1 exists\")\n else:\n print(\"The label -1 doesn't exist\")\n\n if not t.empty():\n print((\"Max Value: \", t.getMax().getLabel()))\n print((\"Min Value: \", t.getMin().getLabel()))\n\n t.delete(13)\n t.delete(10)\n t.delete(8)\n t.delete(3)\n t.delete(6)\n t.delete(14)\n\n # Gets all the elements of the tree In pre order\n # And it prints them\n list = t.traversalTree(InPreOrder, t.root)\n for x in list:\n print(x)", "def __call__(self, node):\n if node.children:\n if len(node.children) == 1:\n if self.TagEqual(node.children[0], node):\n #print node.ToString()\n node.tag = self.Tag(node, node.children[0]);\n lst = node.children[0].children;\n node.children = lst;", "def __iter__(self):\n yield self\n if not self.is_leaf():\n yield from self.left_subtree\n yield from self.right_subtree", "def test_tree_mode4(self):\n xpb = XPathBuilder()\n xp_1 = xpb.foo\n xp_2 = xpb.baz\n xp_and = xp_1 & xp_2\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 2)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_and._children[1] is xp_2)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n self.assertTrue(xp_2._parent is xp_and)\n self.assertTrue(len(xp_2._children) == 0)\n xp_2.reparent(None)\n # check references after remove\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 1)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n # xp_2's references were changed\n self.assertTrue(xp_2._parent is None)\n self.assertTrue(len(xp_2._children) == 0)", "def 
test_children_tree(depth_one_tree):\n assert str(depth_one_tree.root.children) == str([1, 2, 3, 4])", "def walk(self):\n if self.left is not None:\n yield from self.left.walk()\n yield self.item\n if self.right is not None:\n yield from self.right.walk()", "def test_get_children():\n builder = TreeBuilder()\n builder.create_root(1)\n builder.add_child(7)\n builder.add_child(2, move=True)\n builder.add_child(13)\n t = builder.build()\n\n assert t[0].data == 7\n assert t[1].data == 2\n assert t[1][0].data == 13", "def _auxRefreshTree(self, tree_index):\n tree_item = self.treeItem(tree_index)\n logger.debug(\"_auxRefreshTree({}): {}{}\".format(\n tree_index, tree_item.obj_path,\n \"*\" if tree_item.children_fetched else \"\"))\n\n if tree_item.children_fetched:\n\n old_items = tree_item.child_items\n new_items = self._fetchObjectChildren(tree_item.obj,\n tree_item.obj_path)\n\n old_item_names = [(item.obj_name,\n item.is_attribute) for item in old_items]\n new_item_names = [(item.obj_name,\n item.is_attribute) for item in new_items]\n seqMatcher = SequenceMatcher(isjunk=None, a=old_item_names,\n b=new_item_names,\n autojunk=False)\n opcodes = seqMatcher.get_opcodes()\n\n logger.debug(\"(reversed) \"\n \"opcodes: {}\".format(list(reversed(opcodes))))\n\n for tag, i1, i2, j1, j2 in reversed(opcodes):\n\n if 1 or tag != 'equal':\n logger.debug(\" {:7s}, a[{}:{}] ({}), b[{}:{}] ({})\"\n .format(tag, i1, i2,\n old_item_names[i1:i2], j1, j2,\n new_item_names[j1:j2]))\n\n if tag == 'equal':\n # Only when node names are equal is _auxRefreshTree\n # called recursively.\n assert i2-i1 == j2-j1, (\"equal sanity \"\n \"check failed \"\n \"{} != {}\".format(i2-i1, j2-j1))\n for old_row, new_row in zip(range(i1, i2), range(j1, j2)):\n old_items[old_row].obj = new_items[new_row].obj\n child_index = self.index(old_row, 0, parent=tree_index)\n self._auxRefreshTree(child_index)\n\n elif tag == 'replace':\n # Explicitly remove the old item and insert the new.\n # The old item may have child nodes which indices must be\n # removed by Qt, otherwise it crashes.\n assert i2-i1 == j2-j1, (\"replace sanity \"\n \"check failed \"\n \"{} != {}\").format(i2-i1, j2-j1)\n\n # row number of first removed\n first = i1\n # row number of last element after insertion\n last = i1 + i2 - 1\n logger.debug(\" calling \"\n \"beginRemoveRows({}, {}, {})\".format(\n tree_index, first, last))\n self.beginRemoveRows(tree_index, first, last)\n del tree_item.child_items[i1:i2]\n self.endRemoveRows()\n\n # row number of first element after insertion\n first = i1\n # row number of last element after insertion\n last = i1 + j2 - j1 - 1\n logger.debug(\" calling \"\n \"beginInsertRows({}, {}, {})\".format(\n tree_index, first, last))\n self.beginInsertRows(tree_index, first, last)\n tree_item.insert_children(i1, new_items[j1:j2])\n self.endInsertRows()\n\n elif tag == 'delete':\n assert j1 == j2, (\"delete\"\n \" sanity check \"\n \"failed. {} != {}\".format(j1, j2))\n # row number of first that will be removed\n first = i1\n # row number of last element after insertion\n last = i1 + i2 - 1\n logger.debug(\" calling \"\n \"beginRemoveRows\"\n \"({}, {}, {})\".format(tree_index,\n first, last))\n self.beginRemoveRows(tree_index, first, last)\n del tree_item.child_items[i1:i2]\n self.endRemoveRows()\n\n elif tag == 'insert':\n assert i1 == i2, (\"insert \"\n \"sanity check \"\n \"failed. 
{} != {}\".format(i1, i2))\n # row number of first element after insertion\n first = i1\n # row number of last element after insertion\n last = i1 + j2 - j1 - 1\n logger.debug(\" \"\n \"calling beginInsertRows\"\n \"({}, {}, {})\".format(tree_index,\n first, last))\n self.beginInsertRows(tree_index, first, last)\n tree_item.insert_children(i1, new_items[j1:j2])\n self.endInsertRows()\n else:\n raise ValueError(\"Invalid tag: {}\".format(tag))", "def __iter__(self):\n\n yield from self._traverse_forward(self.root)", "def dfs_walk(node: ast.AST) -> Iterator[ast.AST]:\n stack = [node]\n while stack:\n node = stack.pop()\n stack.extend(reversed(list(ast.iter_child_nodes(node))))\n yield node", "def __getitem__(self, i: int) -> 'Tree':\n ...", "def process_tree_nodes(self):\n self.leaves, self.internal = set(), set()\n _is_cladogram = True\n for node in self.nodes:\n if not node._been_processed:\n if not node.name:\n node.name = node.id\n elif self._remove_name_quotes and (node.name[0] == node.name[-1] == \"'\" or node.name[0] == node.name[-1] == '\"'):\n node.name = node.name[1:-1].strip()\n if node.branch != '' and node.branch != None:\n node.branch = float(node.branch)\n _is_cladogram = False\n else:\n node.branch = 0.0\n if not node.children:\n self.leaves.add(node)\n else:\n self.internal.add(node)\n if not node._been_processed and node.support:\n try:\n node.support = float(node.support)\n if not node.support_type:\n node.support_type = self._support_label\n except ValueError:\n if not node.comment:\n node.comment = node.support\n node.support = None\n if self._is_cladogram == None:\n self._is_cladogram = _is_cladogram\n self.node_names = {}\n for node in self.nodes:\n if node != self.root:\n if self._is_cladogram:\n node.branch = self._cladogram_branch\n if node.name in self.node_names:\n i = 2\n name = '{}_{}'.format(node.name, i)\n while name in self.node_names:\n i += 1\n name = '{}_{}'.format(node.name, i)\n if verbose:\n print('Warning: non-unique node \"{}\" was renamed to \"{}\"'.format(node.name, name))\n node.name = name\n self.node_names[node.name] = node\n node._been_processed = True\n self.calculate_paths()", "def __iter__(self):\n if self.root:\n return self.root.inorder()", "def walktree(classes, children, parent):\r\n results = []\r\n classes.sort(key=attrgetter('__module__', '__name__'))\r\n for c in classes:\r\n results.append((c, c.__bases__))\r\n if c in children:\r\n results.append(walktree(children[c], children, c))\r\n return results", "def _preorder_traverse_to_list_helper(self, node, depth):\n\t\t#visit node\n\t\tl = []\n\t\tif (node):\n\t\t\tl.append(node.value())\n\t\telse:\n\t\t\tl.append(None)\n\n\t\t#anon function for this thing\n\t\tfakechild = lambda:self._preorder_traverse_to_list_helper(None, depth + 1)\n\n\t\t#call on children\n\t\tif (node):\n\t\t\tif (node.lchild()):\n\t\t\t\tl += self._preorder_traverse_to_list_helper(node.lchild(), depth + 1)\n\t\t\telse:\n\t\t\t\tif (depth < self._depth):\n\t\t\t\t\t#recurse with None for empty children (lchild)\n\t\t\t\t\tl += fakechild()\n\t\t\tif (node.rchild()):\n\t\t\t\tl += self._preorder_traverse_to_list_helper(node.rchild(), depth + 1)\n\t\t\telse:\n\t\t\t\tif (depth < self._depth):\n\t\t\t\t\t#recurse with None for empty children (rchild)\n\t\t\t\t\tl += fakechild()\n\t\telse:\n\t\t\tif (depth < self._depth):\n\t\t\t\t#recurse with None for empty children (lchild) and (rchild)\n\t\t\t\t#l += fakechild() #need to call twice?\n\t\t\t\tl += fakechild()\n\t\treturn l", "def 
recursively_compare_tree_against_html(self, func):\n def inner(obj, node):\n # invoke comparator function\n func(obj=obj, node=node)\n\n # filter\n child_nodes = self.get_children_of_node(node)\n\n # same number of object children and html child nodes\n self.assertEqual(len(obj.children), len(child_nodes))\n\n # loop over children and call recursive compare on them\n for (child_obj, child_node) in zip(obj.children, child_nodes):\n inner(obj=child_obj, node=child_node)\n\n # call inner() with root elements\n inner(obj=self.document.root, node=self.soup.body)", "def _initialize_trees(self):", "def __next__(self):\r\n self.pointer += 1\r\n if self.pointer > self.root.size_tree:\r\n raise StopIteration\r\n\r\n return self.select(self.pointer)", "def _traverse_node_tree(self, cur_node, search_node_list):\n for _, sub_node in cur_node.get_children():\n sub_nodes = []\n self._traverse_node_tree(sub_node, sub_nodes)\n sub_node_dict = {\n 'name': sub_node.node_name,\n 'type': sub_node.node_type,\n 'is_dynamic_shape_node': sub_node.is_dynamic_shape_node,\n 'nodes': sub_nodes\n }\n search_node_list.append(sub_node_dict)", "def _traverse_in_order_recursive(self, node, visit):\n # Traverse left subtree, if it exists\n if node is not None:\n self._traverse_in_order_recursive(node.left_child, visit)\n # Visit this node's data with given function\n visit(node.data)\n # Traverse right subtree, if it exists\n self._traverse_in_order_recursive(node.right_child, visit)", "def preorder_visit(t: Tree, act: Callable[[Tree], Any]) -> None:\n act(t)\n for child in t.children:\n preorder_visit(child, act)", "def _get_tree(root: spacy.tokens.Token, depth: int, token_filter: types.FunctionType) -> [spacy.tokens.Token]:\n if depth == 0:\n return [root] if token_filter(root) else []\n\n result = []\n # for tokens on the left of the root, whose head is root\n for child in filter(token_filter, root.lefts):\n result += SpacyEventExtractor._get_tree(child, depth - 1, token_filter)\n result.append(root)\n # for tokens on the right of the root, whose head is root\n for child in filter(token_filter, root.rights):\n result += SpacyEventExtractor._get_tree(child, depth - 1, token_filter)\n return result", "def ctxTraverse(*args, down: bool=True, left: bool=True, right: bool=True, up: bool=True,\n **kwargs)->None:\n pass", "def walk_copy(node, src):\n parent = node.parent\n children = node.children\n\n # position of node\n pos = ('root' if node.is_root() else 'basal' if parent.is_root()\n else 'derived')\n\n # whether tree is rooted\n root = node if pos == 'root' else node.parent if pos == 'basal' else None\n rooted = None if pos == 'derived' else (\n True if len(root.children) == 2 else False)\n\n if rooted:\n if pos == 'root':\n raise ValueError('Cannot walk from root of a rooted tree.')\n elif pos == 'basal':\n sibling = [x for x in node.siblings()][0]\n\n # direction of walking\n move = (('bottom' if src is sibling else 'top' if src in children\n else 'n/a') if rooted and pos == 'basal'\n else ('down' if src is parent else 'up' if src in children\n else 'n/a'))\n if move == 'n/a':\n raise ValueError('Source and node are not neighbors.')\n\n # create a new node\n res = TreeNode(node.name)\n\n # determine length of the new node\n res.length = (node.length if move == 'down'\n else src.length + node.length if move == 'bottom'\n else src.length) # up or top\n\n # determine support of the new node\n res.support = (node.support if move in ('down', 'bottom')\n else src.support)\n\n # append children except for src (if applies)\n 
res.extend([walk_copy(c, node) for c in children if c is not src])\n\n # append parent if walking up (except at root)\n if move == 'up' and pos != 'root':\n res.append(walk_copy(parent, node))\n\n # append sibling if walking from one basal node to another\n if move == 'top':\n res.append(walk_copy(sibling, node))\n\n return res", "def traverse_breadth_first(self, fn):\n queue = deque([self.root])\n while len(queue) > 0:\n node = queue.popleft()\n fn(node)\n queue.extend(node.children)", "def test_set_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n assert tree.root.left.value == 5\n assert tree.root.right.value == 100", "def UCT(rootstate, itermax, verbose=False):\n\n rootnode = Node(state=rootstate)\n\n for i in range(itermax):\n node = rootnode\n state = rootstate.Clone()\n\n # Select\n while node.untriedMoves == [] and node.childNodes != []: # node is fully expanded and non-terminal\n node = node.UCTSelectChild()\n state.DoMove(node.move)\n\n # Expand\n expand = True\n while expand and node.untriedMoves != []: # if we can expand (i.e. state/node is non-terminal)\n m = random.choice(node.untriedMoves)\n # print(\"[Expand] Untried move %s, %s, %s\" % (m[0], m[1], m[2]))\n expand = not state.DoMove(m)\n node = node.AddChild(m, state) # add child and descend tree\n\n # Rollout - this can often be made orders of magnitude quicker using a state.GetRandomMove() function\n while state.GetMoves() != []: # while state is non-terminal\n state.DoMove(random.choice(state.GetMoves()))\n\n # Backpropagate\n while node != None: # backpropagate from the expanded node and work back to the root node\n node.Update(state.GetResult()) # state is terminal. Update node with result from POV of node.playerJustMoved\n node = node.parentNode\n\n # Output some information about the tree - can be omitted\n if (verbose):\n print(rootnode.TreeToString(0))\n else:\n print(rootnode.ChildrenToString())\n\n return sorted(rootnode.childNodes, key=lambda c: c.visits)[-1].move # return the move that was most visited", "def test_tree_binary_tree() -> None:\n t = generate_binary_tree_resources(4, 3)\n field(t, (\"root\", \"ds\", \"f1\")).identity = \"email\"\n field(t, (\"root.0.1.0\", \"ds.0.1.0\", \"f1\")).identity = \"ssn\"\n field(t, (\"root.1.1\", \"ds.1.1\", \"f1\")).identity = \"user_id\"\n assert generate_traversal({\"email\": \"X\"}, *t)\n assert generate_traversal({\"ssn\": \"X\"}, *t)\n assert generate_traversal({\"user_id\": \"X\"}, *t)", "def testInsertDeep(self):\n\n #insert\n for i in xrange(randint(50, 180)):\n self.s.insert(randint(-2147483648,2147483647), i)\n\n #walk through the tree\n self.assertIsNotNone(self.s._root)\n self.assertIsNone(self.s._root.parent)\n self.assertIsNotNone(self.s._root.left)\n self.assertIsNotNone(self.s._root.right)\n\n def traversalHelper(n):\n if not n:\n return\n self.assertTrue((n.parent.left is n) or (n.parent.right is n))\n traversalHelper(n.left)\n traversalHelper(n.right)\n\n traversalHelper(self.s._root.left)\n traversalHelper(self.s._root.right)", "def walk_tree(visitor, data_structure):\n if isinstance(data_structure, dict):\n for key in data_structure.keys():\n data_structure[key] = walk_tree(visitor, data_structure[key])\n elif isinstance(data_structure, list):\n for i in xrange(len(data_structure)):\n data_structure[i] = walk_tree(visitor, data_structure[i])\n else:\n data_structure = visitor(data_structure)\n return data_structure", "def _internal_build(self):\n self.nodes = self.__tree.Nodes()\n self.edges = self.__tree.Edges()\n 
self.augmentedEdges = {}\n for key, val in self.__tree.AugmentedEdges().items():\n self.augmentedEdges[key] = list(val)\n self.root = self.__tree.Root()\n\n seen = set()\n self.branches = set()\n\n # Find all of the branching nodes in the tree, degree > 1\n # That is, they appear in more than one edge\n for e1, e2 in self.edges:\n if e1 not in seen:\n seen.add(e1)\n else:\n self.branches.add(e1)\n\n if e2 not in seen:\n seen.add(e2)\n else:\n self.branches.add(e2)\n\n # The nodes that are not branches are leaves\n self.leaves = set(self.nodes.keys()) - self.branches\n self.leaves.remove(self.root)", "def _traverse(self, word):\n node = self.root\n for i in (ord(x)-97 for x in word):\n if not node.data[i]: return None\n node = node.data[i]\n return node", "def traverse_tree(pid,nodes):\n\n for child in get_children(pid):\n nodes.update(traverse_tree(child,nodes))\n nodes.add(pid)\n\n return nodes", "def _walk(self, element):\n if not isinstance(element, dict) or len(element) != 1:\n raise TreeIntegrityError\n key, sublist = tuple(element.items())[0]\n if not isinstance(sublist, list):\n raise TreeIntegrityError\n yield key\n for sublist_element in sublist:\n for recursive_elem in self._walk(sublist_element):\n yield recursive_elem", "def by_level_traversal(self) -> Queue:\n # initialize Queue objects\n new_q = Queue()\n last_q = Queue()\n\n #binary search tree == empty\n if self.root is None:\n return last_q\n\n #root in enque.q\n new_q.enqueue(self.root)\n\n # iterate for processing\n while not new_q.is_empty():\n working_node = new_q.dequeue()\n if working_node is not None:\n last_q.enqueue(working_node)\n new_q.enqueue(working_node.left)\n new_q.enqueue(working_node.right)\n\n return last_q", "def __init__(self):\n self._root = None\n self._count = 0", "def _traverse(node):\n all_words = []\n if node.is_leaf:\n return node.actual_word\n for key, value in node.children.items():\n curr_word = Trie._traverse(value)\n all_words = all_words + curr_word\n return all_words", "def traverse(name, furtherPath):", "def _apply_tree_policy(self, root, state):\n visit_path = [root]\n working_state = state.clone()\n current_node = root\n while not working_state.is_terminal() and current_node.explore_count > 0:\n if not current_node.children:\n # For a new node, initialize its state, then choose a child as normal.\n legal_actions = working_state.legal_actions()\n # Reduce bias from move generation order.\n self._random_state.shuffle(legal_actions)\n player_sign = -1 if working_state.current_player() != self.player else 1\n current_node.children = [SearchNode(action, player_sign)\n for action in legal_actions]\n\n if working_state.is_chance_node():\n # For chance nodes, rollout according to chance node's probability\n # distribution\n outcomes = working_state.chance_outcomes()\n action_list, prob_list = zip(*outcomes)\n action = self._random_state.choice(action_list, p=prob_list)\n chosen_child = next(c for c in current_node.children\n if c.action == action)\n else:\n # Otherwise choose node with largest UCT value\n chosen_child = max(\n current_node.children,\n key=lambda c: c.uct_value(current_node.explore_count, self.uct_c, # pylint: disable=g-long-lambda\n self.child_default_value))\n\n working_state.apply_action(chosen_child.action)\n current_node = chosen_child\n visit_path.append(current_node)\n\n return visit_path, working_state", "def walk(folder: str, filesystem: Filesystem, branch: str = 'all',\n leaf: str = 'all') -> Iterator[Tuple[str, str, str]]:\n for current_branch in 
filesystem.list_folders(folder):\n if branch not in ('all', current_branch):\n continue\n\n branch_folder = filesystem.join(folder, current_branch)\n for current_leaf in filesystem.list_folders(branch_folder):\n\n if leaf not in ('all', current_leaf):\n continue\n\n leaf_folder = filesystem.join(branch_folder, current_leaf)\n\n yield current_branch, current_leaf, leaf_folder", "def walk_tree(top_most_path, callback):\n for file in os.listdir(top_most_path):\n pathname = os.path.join(top_most_path, file)\n mode = os.stat(pathname)[ST_MODE]\n if S_ISDIR(mode):\n # It's a directory, recurse into it\n walk_tree(pathname, callback)\n elif S_ISREG(mode):\n # It's a file, call the callback function\n callback(pathname)\n else:\n # Unknown file type, print a message\n print(\"Skipping %s\" % pathname)", "def walk_tree(self, path, topdown=True):\n if isinstance(path, File):\n # Called with File object as an argument\n root = path\n path = root.path\n else:\n root = File(path)\n\n files, dirs = [], []\n\n try:\n for item in os.listdir(path):\n file_path = os.path.join(path, item)\n\n if self.path_ignore and self.path_ignore.match(file_path):\n # Skip excluded paths\n lg.debug(\"Ignoring path %s\" % file_path)\n continue\n\n try:\n f_object = File(file_path, seen=root.already_seen)\n except UnsupportedFileType as e:\n lg.warn('%s ..skipping' % e)\n continue\n except OSError as e:\n if e.errno == errno.ENOENT:\n # File already removed, go on\n lg.debug('File already removed: %s' % e)\n continue\n elif e.errno in [errno.EPERM, errno.EACCES]:\n # Permission denied or operation not permitted, log error and go on\n lg.error(e)\n continue\n else:\n # Other errors should be fatal, but we don't want them to be\n # eg. corrupted file on GlusterFS may raise IOError, but we want to continue\n lg.exception(e)\n continue\n\n if f_object.directory is True:\n dirs.append(f_object)\n else:\n files.append(f_object)\n except OSError as e:\n # Exceptions that may come from os.listdir()\n if e.errno == errno.ENOENT:\n # Directory doesn't exist, go on\n pass\n elif e.errno in [errno.EPERM, errno.EACCES]:\n # Permission denied or operation not permitted, log error and go on\n lg.error(e)\n pass\n else:\n # Other errors should be fatal, but we don't want them to be\n # eg. 
corrupted file on GlusterFS may raise IOError, but we want to go on\n lg.exception(e)\n pass\n\n if topdown:\n yield root, dirs, files\n\n for item in dirs:\n for x in self.walk_tree(item):\n yield x\n\n if not topdown:\n yield root, dirs, files", "def iter_tree(self):\n yield self\n for c in self.children:\n for ci in c.iter_tree:\n yield ci", "def test_compiler_parse_tree(compiler, patch):\n patch.object(Compiler, 'subtree')\n tree = Tree('start', [Tree('command', ['token'])])\n compiler.parse_tree(tree)\n compiler.subtree.assert_called_with(Tree('command', ['token']),\n parent=None)", "def test_iter_children():\n builder = TreeBuilder()\n builder.create_root(0)\n\n data = list(range(2, 15, 3))\n for datum in data:\n builder.add_child(datum)\n t = builder.build()\n\n for i, child in enumerate(t):\n assert child.data == data[i]", "def tree_contains(T, x):", "def walk_depth_first(\n root: DOMNode,\n filter_type: type[WalkType] | None = None,\n *,\n with_root: bool = True,\n) -> Iterable[DOMNode] | Iterable[WalkType]:\n from textual.dom import DOMNode\n\n stack: list[Iterator[DOMNode]] = [iter(root.children)]\n pop = stack.pop\n push = stack.append\n check_type = filter_type or DOMNode\n\n if with_root and isinstance(root, check_type):\n yield root\n while stack:\n node = next(stack[-1], None)\n if node is None:\n pop()\n else:\n if isinstance(node, check_type):\n yield node\n if node.children:\n push(iter(node.children))", "def walk(self): # FileObj.walk\n yield self", "def _refind_nodes(self, reSearchItems, root=None, sortByDepth=False):\n\n reListOfSearchItems = list(reSearchItems)\n\n if root == None:\n ReParent = reListOfSearchItems.pop(0)\n Out = [x for x in self.ParentMap.keys() if ReParent.match(x.tag)]\n\n else:\n Out = [root]\n\n\n while len(reListOfSearchItems) > 0:\n ReParent = reListOfSearchItems.pop(0)\n Out = [x for root in Out for x in root.iter() if ReParent.match(x.tag)]\n\n if sortByDepth == False: return Out\n\n TDict = dict((x, len(self.get_path_to_node(x))) for x in Out)\n return [o[0] for o in sorted(TDict.items(),key=lambda x:x[1])]", "def test_tree_mode3(self):\n xpb = XPathBuilder()\n xp_1 = xpb.foo\n xp_2 = xpb.baz\n xp_and = xp_1 & xp_2\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 2)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_and._children[1] is xp_2)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n self.assertTrue(xp_2._parent is xp_and)\n self.assertTrue(len(xp_2._children) == 0)\n xp_and.remove_child(xp_2)\n # check references after remove\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 1)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n # xp_2's references were changed\n self.assertTrue(xp_2._parent is None)\n self.assertTrue(len(xp_2._children) == 0)", "def walk(top):\r\n yield top\r\n for name in os.listdir(top):\r\n name = os.path.join(top, name)\r\n if os.path.isdir(name) and not os.path.islink(name):\r\n for dir in walk(name):\r\n yield dir" ]
[ "0.65168923", "0.63223314", "0.6207538", "0.61407065", "0.611664", "0.60764337", "0.59763837", "0.5936731", "0.5925743", "0.58564585", "0.5848043", "0.5805785", "0.5799455", "0.57884616", "0.57827556", "0.5768611", "0.57664794", "0.5741668", "0.57231015", "0.5716858", "0.5700536", "0.56934994", "0.5677115", "0.567237", "0.5662042", "0.5659794", "0.565569", "0.56438816", "0.56379914", "0.5622852", "0.5619647", "0.5616344", "0.5615245", "0.5613537", "0.5607153", "0.5603311", "0.5589889", "0.5578151", "0.5578151", "0.556977", "0.55661464", "0.5564616", "0.5553314", "0.5547305", "0.55421776", "0.5537485", "0.55371946", "0.55353653", "0.55353636", "0.55247176", "0.5500838", "0.5472716", "0.54713076", "0.5467392", "0.54656386", "0.54637814", "0.5461484", "0.54595774", "0.5453421", "0.5434643", "0.5434624", "0.54315656", "0.54311335", "0.54307675", "0.5424555", "0.5422889", "0.5421278", "0.540559", "0.54034054", "0.5401714", "0.5387913", "0.5385452", "0.53842825", "0.5380299", "0.53780323", "0.53752416", "0.5374967", "0.5373122", "0.535785", "0.5355389", "0.5351927", "0.53507066", "0.53472394", "0.5344092", "0.5340627", "0.53344864", "0.5332424", "0.5327277", "0.53271407", "0.53254294", "0.53223217", "0.53216946", "0.5310156", "0.53092945", "0.530639", "0.52944976", "0.5294121", "0.5292105", "0.52920467", "0.5286328", "0.5276838" ]
0.0
-1
This appears to be totally unused, and leftover from testing. It may, in fact, be better/more efficient than the used walk_tree() method.
def optimized_work_tree(obj, **kwargs):
    exclusions = kwargs.get('exclusions', {"groups": [], "classes": [], "params": []})
    groups_done = {}
    classes = {"depths": {}, "content": {}}
    params = {"depths": {}, "content": {}}
    if hasattr(obj, 'hostname') and not hasattr(obj, 'name'):
        obj.name = obj.hostname
    to_index = [(obj, 1)]
    index_pop = to_index.pop
    index_extend = to_index.extend
    while to_index:
        (obj, depth) = index_pop()
        objname = obj.name
        if objname in groups_done and groups_done[objname] <= depth:
            continue
        objclasses = obj.classes.exclude(classname__in=exclusions['classes'])
        updated_classes = optimized_update_values(objclasses, "classname", "classparams", depth=depth, results=classes)
        objparams = obj.parameters.exclude(paramkey__in=exclusions['params'])
        updated_params = optimized_update_values(objparams, "paramkey", "paramvalue", depth=depth, results=params)
        if not updated_classes or not updated_params:
            return ("Fail", "Fail")
        groups_done[objname] = depth
        depth += 1
        children = ((group, depth) for group in obj.groups.exclude(name__in=exclusions['groups']))
        index_extend(children)
    params['content']['done_count'] = len(groups_done)
    return (classes["content"], params["content"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def traverse_tree(file, tree):\n\n\tfor node in tree.get_children():\n\t\tpass", "def __walk_tree(self):\n for root, dirnames, files in os.walk(self.path, topdown=True):\n self.dirCount += 1\n # Create a tuple with the file size, the file name and the files inode (for tracking hard links).\n files = [\n (os.lstat(os.path.join(root, fi)).st_size, os.path.join(root, fi), os.lstat(os.path.join(root, fi)).st_ino) for fi\n in files if (os.lstat(os.path.join(root, fi)).st_size > self.size)]\n self.fileList.extend(files)\n if len(self.excludeList) > 0:\n dirnames[:] = [dir for dir in dirnames if dir not in self.excludeList]\n if not self.cross_mount_points:\n dirnames[:] = [dir for dir in dirnames if not os.path.ismount(os.path.join(root, dir))]", "def _traverse_tree(self):\n if not self.children:\n yield self\n for child in self.children:\n yield from child._traverse_tree()", "def test_render_tree(self) -> None:\n\n def get_children(node):\n return node.children\n\n node, expect, withtags = self.tree_case_1()\n actual = render_tree(node, get_children)\n assert expect == actual, (expect, actual)\n\n node, expect, withtags = self.tree_case_2()\n actual = render_tree(node, get_children, 1)\n assert expect == actual, (expect, actual)\n\n # Ensure that we can call render_tree on the same Node\n # again. This wasn't possible in version 2.4.1 and earlier\n # due to a bug in render_tree (visited was set to {} as default\n # parameter)\n actual = render_tree(node, get_children, 1)\n assert expect == actual, (expect, actual)", "def walk(node):\r\n from collections import deque\r\n todo = deque([node])\r\n while todo:\r\n node = todo.popleft()\r\n todo.extend(iter_child_nodes(node))\r\n yield node", "def walk(self):\n pass", "def test_Tree():", "def walk(d):\n for parent, key, leaf in _walk({}, None, d):\n yield (d, parent, key, leaf)", "def improve_tree(tree, freq_dict):\n # todo", "def traverse_depth_first(self, fn):\n queue = deque([self.root])\n while len(queue) > 0:\n node = queue.popleft()\n fn(node)\n queue.extendleft(reversed(node.children))", "def walkTree(self):\n if self.parentId:\n print self.parentId, self.id, self.value\n for child in self.children.itervalues():\n child.walkTree()", "def test_print_tree(self) -> None:\n\n def get_children(node):\n return node.children\n\n save_stdout = sys.stdout\n\n try:\n node, expect, withtags = self.tree_case_1()\n\n IOStream = io.StringIO\n sys.stdout = IOStream()\n print_tree(node, get_children)\n actual = sys.stdout.getvalue()\n assert expect == actual, (expect, actual)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, showtags=1)\n actual = sys.stdout.getvalue()\n assert withtags == actual, (withtags, actual)\n\n # Test that explicitly setting prune to zero works\n # the same as the default (see above)\n node, expect, withtags = self.tree_case_2(prune=0)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, 0)\n actual = sys.stdout.getvalue()\n assert expect == actual, (expect, actual)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, 0, showtags=1)\n actual = sys.stdout.getvalue()\n assert withtags == actual, (withtags, actual)\n\n # Test output with prune=1\n node, expect, withtags = self.tree_case_2(prune=1)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, 1)\n actual = sys.stdout.getvalue()\n assert expect == actual, (expect, actual)\n\n # Ensure that we can call print_tree on the same Node\n # again. 
This wasn't possible in version 2.4.1 and earlier\n # due to a bug in print_tree (visited was set to {} as default\n # parameter)\n sys.stdout = IOStream()\n print_tree(node, get_children, 1)\n actual = sys.stdout.getvalue()\n assert expect == actual, (expect, actual)\n\n sys.stdout = IOStream()\n print_tree(node, get_children, 1, showtags=1)\n actual = sys.stdout.getvalue()\n assert withtags == actual, (withtags, actual)\n finally:\n sys.stdout = save_stdout", "def traverse_tree(self, root):\n\n\t\tself.pre_stage()\n\t\troot.visit(self)\n\t\tself.post_stage()", "def traverse(self):\n if self.root is None:\n return ''\n return self.root.traverse()", "def binary_search_tree_run():\n\n # no need for Tree object as the Tree itself is a concept; its made of connected nodes\n # nodes are the object; connections are self contained\n\n def binary_insert(root, node):\n if root is None:\n root = node\n else:\n if root.data > node.data:\n if root.l_child is None:\n root.l_child = node\n else:\n binary_insert(root.l_child, node)\n else:\n if root.r_child is None:\n root.r_child = node\n else:\n binary_insert(root.r_child, node)\n\n def in_order_print(root):\n if not root:\n return\n in_order_print(root.l_child)\n print(root.data)\n in_order_print(root.r_child)", "def file_walker(root,**kwargs):\n\n # Get our keyword argunents, and do some initialization.\n max_depth=kwargs.get('depth',None)\n if max_depth==None:\n max_depth=sys.maxsize # I don't think we'll hit this limit in practice.\n follow_links=kwargs.get('follow_links',True)\n prune=compile_filename_patterns(kwargs.get('prune',[]))\n ignore=compile_filename_patterns(kwargs.get('ignore',[]))\n report_dirs=kwargs.get('report_dirs',False)\n if report_dirs not in (False,True,'first','last'):\n raise ValueError(\"report_dirs=%r is not one of False, True, 'first', or 'last'.\"%(report_dirs,))\n stack=[(0,root)] # Prime our stack with root (at depth 0).\n been_there=set([os.path.abspath(os.path.realpath(root))])\n dir_stack=[] # Stack of paths we're yielding after exhausting those directories.\n\n while stack:\n depth,path=stack.pop()\n if report_dirs in (True,'first'):\n yield path+os.sep\n elif report_dirs=='last':\n dir_stack.append(path+os.sep)\n flist=os.listdir(path)\n flist.sort()\n dlist=[]\n # First, let the caller iterate over these filenames.\n for fn in flist:\n p=os.path.join(path,fn)\n if os.path.isdir(p):\n # Just add this to this path's list of directories for now.\n dlist.insert(0,fn)\n continue\n pat,mat=first_match(fn,ignore)\n if not pat:\n yield p\n # Don't dig deeper than we've been told to.\n if depth<max_depth:\n # Now, let's deal with the directories we found.\n for fn in dlist:\n p=os.path.join(path,fn)\n # We might need to stack this path for our fake recursion.\n if os.path.islink(p) and not follow_links:\n # Nope. We're not following symlinks.\n continue\n rp=os.path.abspath(os.path.realpath(p))\n if rp in been_there:\n # Nope. We've already seen this path (and possibly processed it).\n continue\n m=None\n pat,mat=first_match(fn,prune)\n if pat:\n # Nope. This directory matches one of the prune patterns.\n continue\n # We have a keeper! 
Record the path and push it onto the stack.\n been_there.add(rp)\n stack.append((depth+1,p))\n while dir_stack:\n yield dir_stack.pop()", "def walk(node):\n\n traversed_nodes.append(node)\n \n # Do something with node value...\n print node.value\n\n # Recurse on each child node\n for child_node in node.child_nodes:\n if child_node not in traversed_nodes:\n walk(child_node)", "def test_list_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n assert tree.root.left.value == 5\n assert tree.root.right.value == 100", "def visit(self):\n self.tree = self.recursive_visit(self.tree)\n # assert self.current_line == self.tree.absolute_bounding_box.bottom_right.line", "def tree_probe(self, **kwargs):\n\n def nextSpinner(b_cursorToNextLine):\n \"\"\"Provide a rotating spinner to indicate activity by using a closure.\n\n Returns:\n inner : inner function\n \"\"\"\n spinner = '\\\\|/-'\n pos = 0\n def inner(b_cursorToNextLine):\n nonlocal pos, spinner\n if pos>=len(spinner): pos = 0\n if self.toConsole():\n self.dp.qprint('Probing filesystem... {}'.format(spinner[pos]), end = '')\n if not b_cursorToNextLine:\n self.dp.qprint('\\r', end = '', syslog = self.args['syslog'])\n else:\n self.dp.qprint('\\n', end = '', syslog = self.args['syslog'])\n pos += 1\n return inner\n return inner\n\n def path_shorten(str_path, length = 80) -> str:\n \"\"\"Shorten a Path string\n\n Returns:\n string : a shortened path\n \"\"\"\n if length < 0:\n length = os.get_terminal_size().columns + length\n if len(str_path) > length:\n l_parts = list(pathlib.PurePath(str_path).parts)\n l_copy = l_parts.copy()\n max = len(l_parts)\n offset = -1\n center = max // 2\n while len(str_path) > length:\n offset += 1\n l_shorten = [i % (max + 1) for i in range( center - offset,\n center + offset + 1)]\n for prt in l_shorten: l_copy[prt] = '...'\n str_path = str(pathlib.PurePath(*l_copy))\n return str_path\n\n def elements_flash(l_el, debugLevel):\n \"\"\"\n Flash elements in the passed list at the debugLevel\n \"\"\"\n if self.toConsole():\n for el in l_el:\n self.dp.qprint('%s (%d)\\033[K\\r' % \\\n (path_shorten(el, - len(str(len(l_el))) - 4), len(l_el)),\n level = debugLevel,\n end = '',\n syslog = self.args['syslog'])\n\n\n str_topDir = \".\"\n l_dirs = []\n l_files = []\n b_status = False\n str_path = ''\n l_dirsHere = []\n l_filesHere = []\n b_cursorToNextLine = False\n\n for k, v in kwargs.items():\n if k == 'root': str_topDir = v\n\n if int(self.verbosityLevel) >= 2:\n b_cursorToNextLine = True\n spinner = nextSpinner(b_cursorToNextLine)\n index:int = 0\n for root, dirs, files in pftree.walklevel(str_topDir,\n self.maxdepth,\n followlinks = self.b_followLinks):\n b_status = True\n if self.verbosityLevel >= 2: spinner(b_cursorToNextLine)\n str_path = root.split(os.sep)\n l_dirs.append(root)\n if self.verbosityLevel >= 2: elements_flash(l_dirs, 2)\n if index:\n l_filesHere = [root + '/' + y for y in files]\n else:\n l_filesHere = [root + '/' + y for y in dirs]\n if len(self.str_inputFile):\n l_hit = [s for s in l_filesHere if self.str_inputFile in s]\n if l_hit:\n l_filesHere = l_hit\n else:\n l_filesHere = []\n l_files.append(l_filesHere)\n if self.verbosityLevel >= 3: elements_flash(l_filesHere, 3)\n if self.toConsole() and self.verbosityLevel >=2:\n self.dp.qprint(\"\\033[A\" * 1,\n end = '',\n syslog = self.args['syslog'],\n level = 2 )\n index += 1\n if self.toConsole() and self.verbosityLevel >= 2:\n self.dp.qprint('Probing complete! 
', level = 1)\n return {\n 'status': b_status,\n 'l_dir': l_dirs,\n 'l_files': l_files\n }", "def _get_internals(tree):\r\n y = tree.yea\r\n n = tree.nay\r\n a = tree.abstain\r\n if (y.is_leaf == False):\r\n internal_nodes.append(y)\r\n _get_internals(y)\r\n if (n.is_leaf == False):\r\n internal_nodes.append(n)\r\n _get_internals(n)\r\n if (a.is_leaf == False):\r\n internal_nodes.append(a)\r\n _get_internals(a)\r\n return", "async def leaf_it(d):\n async for _parent, _key, leaf in _walk({}, None, d):\n yield leaf", "def walk_tree(top):\n nodes = [top]\n for dirpath, dirnames, filenames in os.walk(top):\n for dirname in dirnames:\n nodes.append(os.path.join(dirpath, dirname))\n for filename in filenames:\n nodes.append(os.path.join(dirpath, filename))\n\n return nodes", "def test_tree_mode2(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar\n xp.tree_mode(True, xp)\n bar = xpb.bar\n bar.tree_mode(True, xp)\n baz = xpb.baz\n baz.tree_mode(True, xp)\n foo_bar = xpb.foo.bar\n foo_bar.tree_mode(True, xp)\n self.assertTrue(xp.is_tree_mode())\n l = [bar, foo_bar, xp, baz]\n self.assertTrue(xp in l)\n l.remove(xp)\n self.assertTrue(len(l) == 3)\n self.assertFalse(xp in l)\n xp.tree_mode(False, xp)\n self.assertFalse(xp.is_tree_mode())", "def traverse_tree(tree, thisFolder, path, submission):\n\n # Get files directly underneath this folder.\n blobs = tree.blobs\n thisFolderName = tree.name\n\n # Add this folder to the path.\n path = os.path.join(path, thisFolderName)\n print(path)\n\n for blob in blobs:\n filepath = os.path.join(path, blob.name)\n add_source_file(blob.name, thisFolder, filepath, submission)\n\n # Get folders directly underneath this folder.\n folders = tree.trees\n for folder in folders:\n srcFolderObj = add_source_folder(folder.name, thisFolder)[0]\n traverse_tree(folder, srcFolderObj, path, submission)\n\n return", "def _traverse_uast(self, root, word2ind, dok_mat):\n stack = [root]\n new_stack = []\n\n while stack:\n for node in stack:\n children = self._process_node(node, word2ind, dok_mat)\n new_stack.extend(children)\n stack = new_stack\n new_stack = []", "def __call__(self, node):\n if not node.children: return;\n ochildren = node.children;\n for n in ochildren:\n mark = self.ProperContainsMarker(n);\n if mark: raise ValueError(\"not implemented\");", "def _find_one_tree(tree: dict,\n func: Callable,\n args: Tuple,\n kwargs: Mapping,\n ) -> Union[dict, None]:\n frontier = []\n explored = set()\n for uid, item in tree.items():\n frontier.append((uid, item))\n while frontier:\n uid, item = frontier.pop()\n explored.add(uid)\n if func(item, *args, **kwargs):\n return item\n if \"children\" in item:\n for child_uid, child_item in item[\"children\"].items():\n if child_uid not in explored:\n frontier.append((child_uid, child_item))", "def in_order_traverse(root):\n stack = deque([root])\n visited = set()\n while stack:\n node = stack.pop()\n if node is None:\n continue\n if node.index in visited:\n print(node.index, end=' ')\n continue\n visited.add(node.index)\n stack.append(node.right)\n stack.append(node)\n stack.append(node.left)", "def walk_tree(tree,\n leaf_func=lambda x: None,\n pre_nonleaf_func=lambda x: None,\n post_nonleaf_func=lambda x: None):\n tree = deepcopy(tree)\n\n def walk(node):\n # Depth First Traversal of an NLTK Tree.\n if is_leaf_node(node):\n leaf_func(node)\n else:\n pre_nonleaf_func(node)\n if len(node) > 0:\n for child in node:\n walk(child)\n post_nonleaf_func(node)\n\n walk(tree)\n return tree", "def apply(self, tree):\n raise NotImplementedError()", 
"def depth_first_search(self):\r\n queue = [self.root]\r\n ordered = []\r\n while queue:\r\n node = queue.pop()\r\n ordered.append(node)\r\n queue.extend(node.children)\r\n \r\n while ordered:\r\n yield ordered.pop()", "def trie_walk_yielding(root, yieldfunc, seen=[], preceder=[], level=1, level_keys=[]):\n level_keys.append(list(root.keys()))\n subtrees = [root.get(k) for k in root.keys()]\n # yield subtrees\n for i, subtree in enumerate(subtrees):\n sk = list(root.keys())[i]\n seen.append(sk)\n if subtree == {None: None}:\n # the subtree is a leaf\n yield from yieldfunc(preceder, seen, level)\n gone = seen.pop() # leaf will not be remembered (after being shown)\n if i == len(subtrees) - 1:\n popped = seen.pop()\n preceder.pop()\n level_keys.pop()\n level -= 1\n if i == len(subtrees) - 1:\n if level_keys[len(preceder)][0] is None:\n while (\n level_keys[len(preceder)][0] is None\n and popped == level_keys[len(preceder)][-1]\n ):\n popped = seen.pop()\n preceder.pop()\n level_keys.pop()\n level -= 1\n elif popped == level_keys[len(preceder)][-1]:\n while popped == level_keys[len(preceder)][-1]:\n popped = seen.pop()\n preceder.pop()\n level_keys.pop()\n level -= 1\n continue\n elif subtree is None:\n # the 'subtree' is a 'null child' indicating the parent is 'also a leaf'\n popped = seen.pop() # leaf will not be remembered (nor shown at all)\n yield from yieldfunc(preceder, seen, level)\n continue\n subtree_keys = list(subtree.keys())\n preceder.append(sk)\n yield from trie_walk_yielding(\n subtree, yieldfunc, seen, preceder, level + 1, level_keys\n )", "def _walk(self):\n while self._slice:\n new_slice = []\n for element in self._slice:\n if not isinstance(element, dict) or len(element) != 1:\n raise TreeIntegrityError\n key, sublist = tuple(element.items())[0]\n if not isinstance(sublist, list):\n raise TreeIntegrityError\n yield key\n new_slice.extend(sublist)\n self._slice = new_slice", "def walk(self):\n current = self\n yield current\n while current.parent:\n current = current.parent\n yield current", "def walk(self):\n yield self\n for child in self.children:\n for descendant in child.walk():\n yield descendant", "def traverse(tree):\n nonlocal result\n\n symbol, children, *_ = tree\n\n if children:\n for c in children:\n if c[0].startswith(\"<\"):\n if not c[0].startswith(symbol_name[:-1]):\n if next_leaf(c):\n result += c[0].replace(\"<\", \"\").replace(\">\", \": \") + next_leaf_content(c) + \"\\n\"\n else:\n result += c[0].replace(\"<\", \"\").replace(\">\", \"\") + \" {\" + \"\\n\"\n traverse(c)\n result += \"}\" + \"\\n\"\n else:\n traverse(c) # do not update anything, just traverse", "def visit(self, node):", "def visit(self, node):", "def tree(self) -> None:\n tree = Tree(self.root.path)\n self.root.walk_dir(tree)", "def test_scan_recursive(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount + 1)", "def inOrderTreeWalk(node: TreeNode, node_flat: TreeNode):\n if node is not None:\n node_flat.right = TreeNode(node.val)\n node_flat = node_flat.right\n node_flat = inOrderTreeWalk(node.left, node_flat)\n node_flat = inOrderTreeWalk(node.right, node_flat)\n return node_flat", "def _find_all_tree(tree: dict,\n func: Callable,\n args: Tuple,\n kwargs: Mapping\n ) -> Union[Sequence[dict], None]:\n frontier = []\n explored = set()\n found = []\n for uid, item in tree.items():\n frontier.append((uid, item))\n while frontier:\n uid, item = frontier.pop()\n explored.add(uid)\n if func(item, *args, **kwargs):\n found.append(item)\n if \"children\" in item:\n for 
child_uid, child_item in item[\"children\"].items():\n if child_uid not in explored:\n frontier.append((child_uid, child_item))\n return found", "def filetree(self) -> P:\n ...", "def get_tree_size(thread, root, top, path, docs, sizes, inodes, depth=0, maxdepth=999):\n global filecount\n global skipfilecount\n global inodecount\n global dircount\n global skipdircount\n global total_doc_count\n global warnings\n\n size = 0\n size_du = 0\n dirs = 0\n files = 0\n f_count = 0\n d_count = 0\n f_skip_count = 0\n d_skip_count = 0\n tot_doc_count = 0\n parent_path = None\n size_norecurs = 0\n size_du_norecurs = 0\n files_norecurs = 0\n dirs_norecurs = 0\n \n # use alt scanner\n # try to get stat info for dir path\n if options.altscanner:\n try:\n d_stat = alt_scanner.stat(path)\n except RuntimeError as e:\n logmsg = '[{0}] ALT SCANNER ERROR: {1}'.format(thread, e)\n logger.error(logmsg)\n if logtofile: logger_warn.error(logmsg)\n with crawl_thread_lock:\n warnings += 1\n return 0, 0, 0, 0\n except Exception as e:\n logmsg = '[{0}] ALT SCANNER EXCEPTION: {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n return 0, 0, 0, 0\n else:\n # try to get os stat info for dir path\n try:\n d_stat = os.stat(path)\n except OSError as e:\n logmsg = '[{0}] OS ERROR: {1}'.format(thread, e)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n return 0, 0, 0, 0\n \n # restore times (atime/mtime)\n if restore_times:\n res, err = set_times(path, d_stat.st_atime, d_stat.st_mtime)\n if not res:\n logmsg = 'OS ERROR setting file times for {0} (error {1})'.format(path, err)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n\n # scan directory\n try:\n logger.debug('[{0}] Scanning path {1}...'.format(thread, path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] Scanning path {1}...'.format(thread, path))\n for entry in os.scandir(path):\n logger.debug('[{0}] Scanning dir entry {1}...'.format(thread, entry.path))\n if options.vverbose:\n logger.info('[{0}] Scanning dir entry {1}...'.format(thread, entry.path)) \n \n if entry.is_symlink():\n logger.debug('[{0}] skipping symlink {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping symlink {1}'.format(thread, entry.path))\n pass\n elif entry.is_dir():\n d_count += 1\n if not dir_excluded(entry.path):\n dirs += 1\n dirs_norecurs += 1\n if maxdepth > 0:\n if depth < maxdepth:\n # recurse into subdir\n if not quit:\n s, sdu, fc, dc = get_tree_size(thread, root, top, entry.path, docs, sizes, inodes, depth+1, maxdepth)\n size += s\n size_du += sdu\n files += fc\n dirs += dc\n else:\n logger.debug('[{0}] not descending {1}, maxdepth {2} reached'.format(\n thread, entry.path, maxdepth))\n if options.verbose or options.vverbose:\n logger.info('[{0}] not descending {1}, maxdepth {2} reached'.format(\n thread, entry.path, maxdepth))\n else:\n logger.debug('[{0}] skipping dir {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping dir {1}'.format(thread, entry.path))\n d_skip_count += 1\n else:\n f_count += 1\n if not file_excluded(entry.name):\n f_stat = entry.stat()\n # restore times (atime/mtime)\n if restore_times and not options.altscanner:\n ret = set_times(entry.path, f_stat.st_atime, f_stat.st_mtime)\n if not ret:\n with crawl_thread_lock:\n warnings += 
1\n\n fsize = f_stat.st_size\n # calculate allocated file size (du size)\n if IS_WIN:\n fsize_du = fsize\n elif options.altscanner:\n fsize_du = f_stat.st_sizedu\n else:\n fsize_du = f_stat.st_blocks * blocksize\n # set fsize_du to 0 if inode in inodes list (hardlink)\n if f_stat.st_ino in inodes:\n fsize_du = 0\n # add inode to inodes list if hardlink count > 1\n elif f_stat.st_nlink > 1:\n with crawl_thread_lock:\n inodes.append(f_stat.st_ino)\n fmtime_sec = time.time() - f_stat.st_mtime\n fctime_sec = time.time() - f_stat.st_ctime\n fatime_sec = time.time() - f_stat.st_atime\n\n if not exc_empty_files or (exc_empty_files and fsize > 0):\n if fsize >= minfilesize and \\\n fmtime_sec > minmtime and \\\n fmtime_sec < maxmtime and \\\n fctime_sec > minctime and \\\n fctime_sec < maxctime and \\\n fatime_sec > minatime and \\\n fatime_sec < maxatime:\n size += fsize\n size_norecurs += fsize\n size_du += fsize_du\n size_du_norecurs += fsize_du\n files += 1\n files_norecurs += 1\n # get owner and group names\n if IS_WIN:\n # for windows just set both owner and group to 0, this is what scandir returns for Windows\n # and there is no known fast way to get Windows file owner (pywin32 is slow)\n owner = f_stat.st_uid\n group = f_stat.st_gid\n else:\n owner, group = get_owner_group_names(f_stat.st_uid, f_stat.st_gid)\n \n # check for bad Unicode utf-8 characters\n try:\n if parent_path is None:\n parent_path = get_parent_path(entry.path)\n file_name = get_file_name(entry.name)\n except UnicodeError:\n if parent_path is None:\n parent_path = get_parent_path(entry.path, ignore_errors=True)\n file_name = get_file_name(entry.name, ignore_errors=True)\n logmsg = '[{0}] UNICODE WARNING {1}'.format(thread, os.path.join(parent_path, file_name))\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n \n # index doc dict\n data = {\n 'name': file_name,\n 'extension': os.path.splitext(entry.name)[1][1:].lower(),\n 'parent_path': parent_path,\n 'size': fsize,\n 'size_du': fsize_du,\n 'owner': owner,\n 'group': group,\n 'mtime': datetime.utcfromtimestamp(int(f_stat.st_mtime)).isoformat(),\n 'atime': datetime.utcfromtimestamp(int(f_stat.st_atime)).isoformat(),\n 'ctime': datetime.utcfromtimestamp(int(f_stat.st_ctime)).isoformat(),\n 'nlink': f_stat.st_nlink,\n 'ino': str(f_stat.st_ino),\n 'type': 'file'\n }\n\n # check if using altscanner and if any additional meta data to add to data dict\n if options.altscanner:\n try:\n extrameta_dict = alt_scanner.add_meta(entry.path, f_stat)\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except Exception as e:\n logmsg = '[{0}] ALT SCANNER EXCEPTION {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n # check plugins for adding extra meta data to data dict\n if plugins_enabled and plugins_files:\n for plugin in plugins:\n try:\n # check if plugin is for file doc\n if plugin.for_type('file'):\n extrameta_dict = plugin.add_meta(entry.path, f_stat)\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except (RuntimeWarning, RuntimeError) as e:\n err_message = e.args[0]\n if e.__class__ == RuntimeWarning:\n logmsg = '[{0}] PLUGIN WARNING: {1}'.format(thread, err_message)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n else:\n logmsg = '[{0}] PLUGIN ERROR: {1}'.format(thread, err_message)\n logger.error(logmsg)\n if logtofile: logger_warn.error(logmsg)\n with 
crawl_thread_lock:\n warnings += 1\n extrameta_dict = e.args[1]\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except Exception as e:\n logmsg = '[{0}] PLUGIN EXCEPTION {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n # add file doc to docs list and upload to ES once it reaches certain size\n docs.append(data.copy())\n doc_count = len(docs)\n if doc_count >= es_chunksize:\n start_bulk_upload(thread, root, docs)\n tot_doc_count += doc_count\n docs.clear()\n\n else:\n f_skip_count += 1\n logger.debug('[{0}] skipping file {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping file {1}'.format(thread, entry.path))\n else:\n f_skip_count += 1\n logger.debug('[{0}] skipping file {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping file {1}'.format(thread, entry.path))\n else:\n f_skip_count += 1\n logger.debug('[{0}] skipping file {1}'.format(thread, entry.path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping file {1}'.format(thread, entry.path))\n \n # if not excluding empty dirs is set or exclude empty dirs is set but there are files or \n # dirs in the current directory, index the dir\n if not exc_empty_dirs or (exc_empty_dirs and (files > 0 or dirs > 0)):\n # get owner and group names\n if IS_WIN:\n # for windows just set both owner and group to 0, this is what scandir returns for Windows\n # and there is no known fast way to get Windows file owner (pywin32 is slow)\n owner = d_stat.st_uid\n group = d_stat.st_gid\n else:\n owner, group = get_owner_group_names(d_stat.st_uid, d_stat.st_gid)\n \n # check for bad Unicode utf-8 characters\n try:\n file_name = get_dir_name(path)\n parent_path = get_parent_path(path)\n except UnicodeError:\n file_name = get_dir_name(path, ignore_errors=True)\n parent_path = get_parent_path(path, ignore_errors=True)\n logmsg = '[{0}] UNICODE WARNING {1}'.format(thread, os.path.join(parent_path, file_name))\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n \n # index doc dict\n data = {\n 'name': file_name,\n 'parent_path': parent_path,\n 'size': size,\n 'size_norecurs': size_norecurs,\n 'size_du': size_du,\n 'size_du_norecurs': size_du_norecurs,\n 'file_count': files,\n 'file_count_norecurs': files_norecurs, \n 'dir_count': dirs + 1,\n 'dir_count_norecurs': dirs_norecurs + 1,\n 'dir_depth': depth,\n 'mtime': datetime.utcfromtimestamp(int(d_stat.st_mtime)).isoformat(),\n 'atime': datetime.utcfromtimestamp(int(d_stat.st_atime)).isoformat(),\n 'ctime': datetime.utcfromtimestamp(int(d_stat.st_ctime)).isoformat(),\n 'nlink': d_stat.st_nlink,\n 'ino': str(d_stat.st_ino),\n 'owner': owner,\n 'group': group,\n 'type': 'directory'\n }\n\n # check if using altscanner and if any additional meta data to add to data dict\n if options.altscanner:\n try:\n extrameta_dict = alt_scanner.add_meta(path, d_stat)\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except Exception as e:\n logmsg = '[{0}] ALT SCANNER EXCEPTION {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n # check plugins for adding extra meta data to data dict\n if plugins_enabled and plugins_dirs:\n for plugin in plugins:\n # check if plugin is for directory doc\n try:\n if 
plugin.for_type('directory'):\n extrameta_dict = plugin.add_meta(path, d_stat)\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except (RuntimeWarning, RuntimeError) as e:\n err_message = e.args[0]\n if e.__class__ == RuntimeWarning:\n logmsg = '[{0}] PLUGIN WARNING: {1}'.format(thread, err_message)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n else:\n logmsg = '[{0}] PLUGIN ERROR: {1}'.format(thread, err_message)\n logger.error(logmsg)\n if logtofile: logger_warn.error(logmsg)\n with crawl_thread_lock:\n warnings += 1\n extrameta_dict = e.args[1]\n if extrameta_dict is not None:\n data.update(extrameta_dict)\n except Exception as e:\n logmsg = '[{0}] PLUGIN EXCEPTION: {1}'.format(thread, e)\n logger.exception(logmsg)\n if logtofile: logger_warn.exception(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n \n if depth > 0:\n # add file doc to docs list and upload to ES once it reaches certain size\n docs.append(data.copy())\n doc_count = len(docs)\n if doc_count >= es_chunksize:\n start_bulk_upload(thread, root, docs)\n tot_doc_count += doc_count\n docs.clear()\n \n else:\n with crawl_thread_lock:\n sizes[root] = data.copy()\n else:\n d_skip_count += 1\n logger.debug('[{0}] skipping empty dir {1}'.format(thread, path))\n if options.verbose or options.vverbose:\n logger.info('[{0}] skipping empty dir {1}'.format(thread, path))\n if dirs > 0: dirs -= 1\n\n with crawl_thread_lock:\n dircount[root] += d_count - d_skip_count\n filecount[root] += f_count - f_skip_count\n skipfilecount[root] += f_skip_count\n skipdircount[root] += d_skip_count\n total_doc_count[root] += tot_doc_count\n inodecount[root] += d_count + f_count \n\n except OSError as e:\n logmsg = '[{0}] OS ERROR: {1}'.format(thread, e)\n logger.warning(logmsg)\n if logtofile: logger_warn.warning(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n except RuntimeError as e:\n logmsg = '[{0}] ALT SCANNER ERROR: {1}'.format(thread, e)\n logger.error(logmsg)\n if logtofile: logger_warn.error(logmsg)\n with crawl_thread_lock:\n warnings += 1\n pass\n \n return size, size_du, files, dirs", "def traverseTree(mdsnode,dead_branches=False,depth=float('Nan'),current_depth=0,noisy=False,strict=False,tags=False):\n tagdict={}\n if isinstance(mdsnode,mds.tree.Tree): \n mdsnode=mdsnode.getNode(\"\\\\TOP\")\n \n name = get_mds_shortname(mdsnode) \n me = Branch(mdsnode)#put node information here if you like\n if noisy: print (\" \"*current_depth + name)\n\n #Members are data/signals, put them directly the current Node object\n #if they are arrays\n if mdsnode.getNumMembers()>0:\n leaves=mdsnode.getMembers()\n for leaf in leaves:\n leafname=get_mds_shortname(leaf)\n leafshape=get_mds_shape(leaf)\n if dead_branches or not len(leafshape) ==0:\n if noisy: print (\" \"*(current_depth+1) + leafname +\": array%s\"%str(leafshape))\n setattr(me,leafname,Leaf(leaf,strict))\n tagdict[leafname]=getattr(me,leafname)\n else:\n if noisy: print(\" \"*(current_depth+1) + leafname)\n #Children contain no immediate data, just links to more nodes. 
If depth is\n #not beyond limit, go down these 'branches' and add contents to the current\n #Node object\n if not depth <= current_depth and mdsnode.getNumChildren()>0:\n branches = mdsnode.getChildren()\n for b in branches:\n subname,subnode,subtags=traverseTree(b, dead_branches,depth,current_depth+1,noisy,strict)\n if len(subnode.__getDescendants__())>0:\n setattr(me,subname,subnode)\n tagdict[subname]=getattr(me,subname)\n for k,v in subtags.items(): #merge tags in\n tagdict[k]=v\n \n if current_depth==0:#we are done, returning to user\n if tags: \n for tag,obj in tagdict.items():\n setattr(me,tag,obj)\n else:\n tagbranch=Branch(mdsnode)\n for tag,obj in tagdict.items():\n setattr(tagbranch,tag,obj)\n setattr(me,'tags',tagbranch) \n return me\n return (name, me,tagdict) #else, we are still recursing back down the tree", "def traverse(self):\r\n nodes_to_visit = queue.Queue()\r\n nodes_to_visit.put(self.__rootnode)\r\n while nodes_to_visit.empty() is False:\r\n current_node = nodes_to_visit.get()\r\n yield current_node\r\n for child in current_node.children:\r\n nodes_to_visit.put(child)", "def walktree (self, top = \".\", depthfirst = True):\n \n names = os.listdir(top)\n if not depthfirst:\n yield top, names\n for name in names:\n try:\n st = os.lstat(os.path.join(top, name))\n except os.error:\n continue\n if stat.S_ISDIR(st.st_mode):\n for (newtop, children) in self.walktree (os.path.join(top, name), depthfirst):\n #print 'Scanning ', newtop\n yield newtop, children\n if depthfirst:\n yield top, names", "def __iter__(self):\n return self._collect(self.root, '')", "def traverse(self):\n return self.root.traverse()", "def testBinarySearchTree():\n\n \"\"\"\n Example After Deletion\n 7\n / \\\n 1 4\n\n \"\"\"\n t = BinarySearchTree()\n t.insert(8)\n t.insert(3)\n t.insert(6)\n t.insert(1)\n t.insert(10)\n t.insert(14)\n t.insert(13)\n t.insert(4)\n t.insert(7)\n\n # Prints all the elements of the list in order traversal\n print(t.__str__())\n\n if t.getNode(6) is not None:\n print(\"The label 6 exists\")\n else:\n print(\"The label 6 doesn't exist\")\n\n if t.getNode(-1) is not None:\n print(\"The label -1 exists\")\n else:\n print(\"The label -1 doesn't exist\")\n\n if not t.empty():\n print((\"Max Value: \", t.getMax().getLabel()))\n print((\"Min Value: \", t.getMin().getLabel()))\n\n t.delete(13)\n t.delete(10)\n t.delete(8)\n t.delete(3)\n t.delete(6)\n t.delete(14)\n\n # Gets all the elements of the tree In pre order\n # And it prints them\n list = t.traversalTree(InPreOrder, t.root)\n for x in list:\n print(x)", "def __call__(self, node):\n if node.children:\n if len(node.children) == 1:\n if self.TagEqual(node.children[0], node):\n #print node.ToString()\n node.tag = self.Tag(node, node.children[0]);\n lst = node.children[0].children;\n node.children = lst;", "def __iter__(self):\n yield self\n if not self.is_leaf():\n yield from self.left_subtree\n yield from self.right_subtree", "def test_tree_mode4(self):\n xpb = XPathBuilder()\n xp_1 = xpb.foo\n xp_2 = xpb.baz\n xp_and = xp_1 & xp_2\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 2)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_and._children[1] is xp_2)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n self.assertTrue(xp_2._parent is xp_and)\n self.assertTrue(len(xp_2._children) == 0)\n xp_2.reparent(None)\n # check references after remove\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 1)\n 
self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n # xp_2's references were changed\n self.assertTrue(xp_2._parent is None)\n self.assertTrue(len(xp_2._children) == 0)", "def test_children_tree(depth_one_tree):\n assert str(depth_one_tree.root.children) == str([1, 2, 3, 4])", "def walk(self):\n if self.left is not None:\n yield from self.left.walk()\n yield self.item\n if self.right is not None:\n yield from self.right.walk()", "def test_get_children():\n builder = TreeBuilder()\n builder.create_root(1)\n builder.add_child(7)\n builder.add_child(2, move=True)\n builder.add_child(13)\n t = builder.build()\n\n assert t[0].data == 7\n assert t[1].data == 2\n assert t[1][0].data == 13", "def _auxRefreshTree(self, tree_index):\n tree_item = self.treeItem(tree_index)\n logger.debug(\"_auxRefreshTree({}): {}{}\".format(\n tree_index, tree_item.obj_path,\n \"*\" if tree_item.children_fetched else \"\"))\n\n if tree_item.children_fetched:\n\n old_items = tree_item.child_items\n new_items = self._fetchObjectChildren(tree_item.obj,\n tree_item.obj_path)\n\n old_item_names = [(item.obj_name,\n item.is_attribute) for item in old_items]\n new_item_names = [(item.obj_name,\n item.is_attribute) for item in new_items]\n seqMatcher = SequenceMatcher(isjunk=None, a=old_item_names,\n b=new_item_names,\n autojunk=False)\n opcodes = seqMatcher.get_opcodes()\n\n logger.debug(\"(reversed) \"\n \"opcodes: {}\".format(list(reversed(opcodes))))\n\n for tag, i1, i2, j1, j2 in reversed(opcodes):\n\n if 1 or tag != 'equal':\n logger.debug(\" {:7s}, a[{}:{}] ({}), b[{}:{}] ({})\"\n .format(tag, i1, i2,\n old_item_names[i1:i2], j1, j2,\n new_item_names[j1:j2]))\n\n if tag == 'equal':\n # Only when node names are equal is _auxRefreshTree\n # called recursively.\n assert i2-i1 == j2-j1, (\"equal sanity \"\n \"check failed \"\n \"{} != {}\".format(i2-i1, j2-j1))\n for old_row, new_row in zip(range(i1, i2), range(j1, j2)):\n old_items[old_row].obj = new_items[new_row].obj\n child_index = self.index(old_row, 0, parent=tree_index)\n self._auxRefreshTree(child_index)\n\n elif tag == 'replace':\n # Explicitly remove the old item and insert the new.\n # The old item may have child nodes which indices must be\n # removed by Qt, otherwise it crashes.\n assert i2-i1 == j2-j1, (\"replace sanity \"\n \"check failed \"\n \"{} != {}\").format(i2-i1, j2-j1)\n\n # row number of first removed\n first = i1\n # row number of last element after insertion\n last = i1 + i2 - 1\n logger.debug(\" calling \"\n \"beginRemoveRows({}, {}, {})\".format(\n tree_index, first, last))\n self.beginRemoveRows(tree_index, first, last)\n del tree_item.child_items[i1:i2]\n self.endRemoveRows()\n\n # row number of first element after insertion\n first = i1\n # row number of last element after insertion\n last = i1 + j2 - j1 - 1\n logger.debug(\" calling \"\n \"beginInsertRows({}, {}, {})\".format(\n tree_index, first, last))\n self.beginInsertRows(tree_index, first, last)\n tree_item.insert_children(i1, new_items[j1:j2])\n self.endInsertRows()\n\n elif tag == 'delete':\n assert j1 == j2, (\"delete\"\n \" sanity check \"\n \"failed. 
{} != {}\".format(j1, j2))\n # row number of first that will be removed\n first = i1\n # row number of last element after insertion\n last = i1 + i2 - 1\n logger.debug(\" calling \"\n \"beginRemoveRows\"\n \"({}, {}, {})\".format(tree_index,\n first, last))\n self.beginRemoveRows(tree_index, first, last)\n del tree_item.child_items[i1:i2]\n self.endRemoveRows()\n\n elif tag == 'insert':\n assert i1 == i2, (\"insert \"\n \"sanity check \"\n \"failed. {} != {}\".format(i1, i2))\n # row number of first element after insertion\n first = i1\n # row number of last element after insertion\n last = i1 + j2 - j1 - 1\n logger.debug(\" \"\n \"calling beginInsertRows\"\n \"({}, {}, {})\".format(tree_index,\n first, last))\n self.beginInsertRows(tree_index, first, last)\n tree_item.insert_children(i1, new_items[j1:j2])\n self.endInsertRows()\n else:\n raise ValueError(\"Invalid tag: {}\".format(tag))", "def __iter__(self):\n\n yield from self._traverse_forward(self.root)", "def __getitem__(self, i: int) -> 'Tree':\n ...", "def dfs_walk(node: ast.AST) -> Iterator[ast.AST]:\n stack = [node]\n while stack:\n node = stack.pop()\n stack.extend(reversed(list(ast.iter_child_nodes(node))))\n yield node", "def process_tree_nodes(self):\n self.leaves, self.internal = set(), set()\n _is_cladogram = True\n for node in self.nodes:\n if not node._been_processed:\n if not node.name:\n node.name = node.id\n elif self._remove_name_quotes and (node.name[0] == node.name[-1] == \"'\" or node.name[0] == node.name[-1] == '\"'):\n node.name = node.name[1:-1].strip()\n if node.branch != '' and node.branch != None:\n node.branch = float(node.branch)\n _is_cladogram = False\n else:\n node.branch = 0.0\n if not node.children:\n self.leaves.add(node)\n else:\n self.internal.add(node)\n if not node._been_processed and node.support:\n try:\n node.support = float(node.support)\n if not node.support_type:\n node.support_type = self._support_label\n except ValueError:\n if not node.comment:\n node.comment = node.support\n node.support = None\n if self._is_cladogram == None:\n self._is_cladogram = _is_cladogram\n self.node_names = {}\n for node in self.nodes:\n if node != self.root:\n if self._is_cladogram:\n node.branch = self._cladogram_branch\n if node.name in self.node_names:\n i = 2\n name = '{}_{}'.format(node.name, i)\n while name in self.node_names:\n i += 1\n name = '{}_{}'.format(node.name, i)\n if verbose:\n print('Warning: non-unique node \"{}\" was renamed to \"{}\"'.format(node.name, name))\n node.name = name\n self.node_names[node.name] = node\n node._been_processed = True\n self.calculate_paths()", "def __iter__(self):\n if self.root:\n return self.root.inorder()", "def walktree(classes, children, parent):\r\n results = []\r\n classes.sort(key=attrgetter('__module__', '__name__'))\r\n for c in classes:\r\n results.append((c, c.__bases__))\r\n if c in children:\r\n results.append(walktree(children[c], children, c))\r\n return results", "def _preorder_traverse_to_list_helper(self, node, depth):\n\t\t#visit node\n\t\tl = []\n\t\tif (node):\n\t\t\tl.append(node.value())\n\t\telse:\n\t\t\tl.append(None)\n\n\t\t#anon function for this thing\n\t\tfakechild = lambda:self._preorder_traverse_to_list_helper(None, depth + 1)\n\n\t\t#call on children\n\t\tif (node):\n\t\t\tif (node.lchild()):\n\t\t\t\tl += self._preorder_traverse_to_list_helper(node.lchild(), depth + 1)\n\t\t\telse:\n\t\t\t\tif (depth < self._depth):\n\t\t\t\t\t#recurse with None for empty children (lchild)\n\t\t\t\t\tl += fakechild()\n\t\t\tif 
(node.rchild()):\n\t\t\t\tl += self._preorder_traverse_to_list_helper(node.rchild(), depth + 1)\n\t\t\telse:\n\t\t\t\tif (depth < self._depth):\n\t\t\t\t\t#recurse with None for empty children (rchild)\n\t\t\t\t\tl += fakechild()\n\t\telse:\n\t\t\tif (depth < self._depth):\n\t\t\t\t#recurse with None for empty children (lchild) and (rchild)\n\t\t\t\t#l += fakechild() #need to call twice?\n\t\t\t\tl += fakechild()\n\t\treturn l", "def recursively_compare_tree_against_html(self, func):\n def inner(obj, node):\n # invoke comparator function\n func(obj=obj, node=node)\n\n # filter\n child_nodes = self.get_children_of_node(node)\n\n # same number of object children and html child nodes\n self.assertEqual(len(obj.children), len(child_nodes))\n\n # loop over children and call recursive compare on them\n for (child_obj, child_node) in zip(obj.children, child_nodes):\n inner(obj=child_obj, node=child_node)\n\n # call inner() with root elements\n inner(obj=self.document.root, node=self.soup.body)", "def _initialize_trees(self):", "def _traverse_node_tree(self, cur_node, search_node_list):\n for _, sub_node in cur_node.get_children():\n sub_nodes = []\n self._traverse_node_tree(sub_node, sub_nodes)\n sub_node_dict = {\n 'name': sub_node.node_name,\n 'type': sub_node.node_type,\n 'is_dynamic_shape_node': sub_node.is_dynamic_shape_node,\n 'nodes': sub_nodes\n }\n search_node_list.append(sub_node_dict)", "def __next__(self):\r\n self.pointer += 1\r\n if self.pointer > self.root.size_tree:\r\n raise StopIteration\r\n\r\n return self.select(self.pointer)", "def _traverse_in_order_recursive(self, node, visit):\n # Traverse left subtree, if it exists\n if node is not None:\n self._traverse_in_order_recursive(node.left_child, visit)\n # Visit this node's data with given function\n visit(node.data)\n # Traverse right subtree, if it exists\n self._traverse_in_order_recursive(node.right_child, visit)", "def preorder_visit(t: Tree, act: Callable[[Tree], Any]) -> None:\n act(t)\n for child in t.children:\n preorder_visit(child, act)", "def _get_tree(root: spacy.tokens.Token, depth: int, token_filter: types.FunctionType) -> [spacy.tokens.Token]:\n if depth == 0:\n return [root] if token_filter(root) else []\n\n result = []\n # for tokens on the left of the root, whose head is root\n for child in filter(token_filter, root.lefts):\n result += SpacyEventExtractor._get_tree(child, depth - 1, token_filter)\n result.append(root)\n # for tokens on the right of the root, whose head is root\n for child in filter(token_filter, root.rights):\n result += SpacyEventExtractor._get_tree(child, depth - 1, token_filter)\n return result", "def ctxTraverse(*args, down: bool=True, left: bool=True, right: bool=True, up: bool=True,\n **kwargs)->None:\n pass", "def walk_copy(node, src):\n parent = node.parent\n children = node.children\n\n # position of node\n pos = ('root' if node.is_root() else 'basal' if parent.is_root()\n else 'derived')\n\n # whether tree is rooted\n root = node if pos == 'root' else node.parent if pos == 'basal' else None\n rooted = None if pos == 'derived' else (\n True if len(root.children) == 2 else False)\n\n if rooted:\n if pos == 'root':\n raise ValueError('Cannot walk from root of a rooted tree.')\n elif pos == 'basal':\n sibling = [x for x in node.siblings()][0]\n\n # direction of walking\n move = (('bottom' if src is sibling else 'top' if src in children\n else 'n/a') if rooted and pos == 'basal'\n else ('down' if src is parent else 'up' if src in children\n else 'n/a'))\n if move == 'n/a':\n raise 
ValueError('Source and node are not neighbors.')\n\n # create a new node\n res = TreeNode(node.name)\n\n # determine length of the new node\n res.length = (node.length if move == 'down'\n else src.length + node.length if move == 'bottom'\n else src.length) # up or top\n\n # determine support of the new node\n res.support = (node.support if move in ('down', 'bottom')\n else src.support)\n\n # append children except for src (if applies)\n res.extend([walk_copy(c, node) for c in children if c is not src])\n\n # append parent if walking up (except at root)\n if move == 'up' and pos != 'root':\n res.append(walk_copy(parent, node))\n\n # append sibling if walking from one basal node to another\n if move == 'top':\n res.append(walk_copy(sibling, node))\n\n return res", "def traverse_breadth_first(self, fn):\n queue = deque([self.root])\n while len(queue) > 0:\n node = queue.popleft()\n fn(node)\n queue.extend(node.children)", "def test_set_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n assert tree.root.left.value == 5\n assert tree.root.right.value == 100", "def UCT(rootstate, itermax, verbose=False):\n\n rootnode = Node(state=rootstate)\n\n for i in range(itermax):\n node = rootnode\n state = rootstate.Clone()\n\n # Select\n while node.untriedMoves == [] and node.childNodes != []: # node is fully expanded and non-terminal\n node = node.UCTSelectChild()\n state.DoMove(node.move)\n\n # Expand\n expand = True\n while expand and node.untriedMoves != []: # if we can expand (i.e. state/node is non-terminal)\n m = random.choice(node.untriedMoves)\n # print(\"[Expand] Untried move %s, %s, %s\" % (m[0], m[1], m[2]))\n expand = not state.DoMove(m)\n node = node.AddChild(m, state) # add child and descend tree\n\n # Rollout - this can often be made orders of magnitude quicker using a state.GetRandomMove() function\n while state.GetMoves() != []: # while state is non-terminal\n state.DoMove(random.choice(state.GetMoves()))\n\n # Backpropagate\n while node != None: # backpropagate from the expanded node and work back to the root node\n node.Update(state.GetResult()) # state is terminal. 
Update node with result from POV of node.playerJustMoved\n node = node.parentNode\n\n # Output some information about the tree - can be omitted\n if (verbose):\n print(rootnode.TreeToString(0))\n else:\n print(rootnode.ChildrenToString())\n\n return sorted(rootnode.childNodes, key=lambda c: c.visits)[-1].move # return the move that was most visited", "def test_tree_binary_tree() -> None:\n t = generate_binary_tree_resources(4, 3)\n field(t, (\"root\", \"ds\", \"f1\")).identity = \"email\"\n field(t, (\"root.0.1.0\", \"ds.0.1.0\", \"f1\")).identity = \"ssn\"\n field(t, (\"root.1.1\", \"ds.1.1\", \"f1\")).identity = \"user_id\"\n assert generate_traversal({\"email\": \"X\"}, *t)\n assert generate_traversal({\"ssn\": \"X\"}, *t)\n assert generate_traversal({\"user_id\": \"X\"}, *t)", "def testInsertDeep(self):\n\n #insert\n for i in xrange(randint(50, 180)):\n self.s.insert(randint(-2147483648,2147483647), i)\n\n #walk through the tree\n self.assertIsNotNone(self.s._root)\n self.assertIsNone(self.s._root.parent)\n self.assertIsNotNone(self.s._root.left)\n self.assertIsNotNone(self.s._root.right)\n\n def traversalHelper(n):\n if not n:\n return\n self.assertTrue((n.parent.left is n) or (n.parent.right is n))\n traversalHelper(n.left)\n traversalHelper(n.right)\n\n traversalHelper(self.s._root.left)\n traversalHelper(self.s._root.right)", "def walk_tree(visitor, data_structure):\n if isinstance(data_structure, dict):\n for key in data_structure.keys():\n data_structure[key] = walk_tree(visitor, data_structure[key])\n elif isinstance(data_structure, list):\n for i in xrange(len(data_structure)):\n data_structure[i] = walk_tree(visitor, data_structure[i])\n else:\n data_structure = visitor(data_structure)\n return data_structure", "def _internal_build(self):\n self.nodes = self.__tree.Nodes()\n self.edges = self.__tree.Edges()\n self.augmentedEdges = {}\n for key, val in self.__tree.AugmentedEdges().items():\n self.augmentedEdges[key] = list(val)\n self.root = self.__tree.Root()\n\n seen = set()\n self.branches = set()\n\n # Find all of the branching nodes in the tree, degree > 1\n # That is, they appear in more than one edge\n for e1, e2 in self.edges:\n if e1 not in seen:\n seen.add(e1)\n else:\n self.branches.add(e1)\n\n if e2 not in seen:\n seen.add(e2)\n else:\n self.branches.add(e2)\n\n # The nodes that are not branches are leaves\n self.leaves = set(self.nodes.keys()) - self.branches\n self.leaves.remove(self.root)", "def _traverse(self, word):\n node = self.root\n for i in (ord(x)-97 for x in word):\n if not node.data[i]: return None\n node = node.data[i]\n return node", "def traverse_tree(pid,nodes):\n\n for child in get_children(pid):\n nodes.update(traverse_tree(child,nodes))\n nodes.add(pid)\n\n return nodes", "def _walk(self, element):\n if not isinstance(element, dict) or len(element) != 1:\n raise TreeIntegrityError\n key, sublist = tuple(element.items())[0]\n if not isinstance(sublist, list):\n raise TreeIntegrityError\n yield key\n for sublist_element in sublist:\n for recursive_elem in self._walk(sublist_element):\n yield recursive_elem", "def by_level_traversal(self) -> Queue:\n # initialize Queue objects\n new_q = Queue()\n last_q = Queue()\n\n #binary search tree == empty\n if self.root is None:\n return last_q\n\n #root in enque.q\n new_q.enqueue(self.root)\n\n # iterate for processing\n while not new_q.is_empty():\n working_node = new_q.dequeue()\n if working_node is not None:\n last_q.enqueue(working_node)\n new_q.enqueue(working_node.left)\n 
new_q.enqueue(working_node.right)\n\n return last_q", "def __init__(self):\n self._root = None\n self._count = 0", "def _traverse(node):\n all_words = []\n if node.is_leaf:\n return node.actual_word\n for key, value in node.children.items():\n curr_word = Trie._traverse(value)\n all_words = all_words + curr_word\n return all_words", "def traverse(name, furtherPath):", "def _apply_tree_policy(self, root, state):\n visit_path = [root]\n working_state = state.clone()\n current_node = root\n while not working_state.is_terminal() and current_node.explore_count > 0:\n if not current_node.children:\n # For a new node, initialize its state, then choose a child as normal.\n legal_actions = working_state.legal_actions()\n # Reduce bias from move generation order.\n self._random_state.shuffle(legal_actions)\n player_sign = -1 if working_state.current_player() != self.player else 1\n current_node.children = [SearchNode(action, player_sign)\n for action in legal_actions]\n\n if working_state.is_chance_node():\n # For chance nodes, rollout according to chance node's probability\n # distribution\n outcomes = working_state.chance_outcomes()\n action_list, prob_list = zip(*outcomes)\n action = self._random_state.choice(action_list, p=prob_list)\n chosen_child = next(c for c in current_node.children\n if c.action == action)\n else:\n # Otherwise choose node with largest UCT value\n chosen_child = max(\n current_node.children,\n key=lambda c: c.uct_value(current_node.explore_count, self.uct_c, # pylint: disable=g-long-lambda\n self.child_default_value))\n\n working_state.apply_action(chosen_child.action)\n current_node = chosen_child\n visit_path.append(current_node)\n\n return visit_path, working_state", "def walk(folder: str, filesystem: Filesystem, branch: str = 'all',\n leaf: str = 'all') -> Iterator[Tuple[str, str, str]]:\n for current_branch in filesystem.list_folders(folder):\n if branch not in ('all', current_branch):\n continue\n\n branch_folder = filesystem.join(folder, current_branch)\n for current_leaf in filesystem.list_folders(branch_folder):\n\n if leaf not in ('all', current_leaf):\n continue\n\n leaf_folder = filesystem.join(branch_folder, current_leaf)\n\n yield current_branch, current_leaf, leaf_folder", "def walk_tree(top_most_path, callback):\n for file in os.listdir(top_most_path):\n pathname = os.path.join(top_most_path, file)\n mode = os.stat(pathname)[ST_MODE]\n if S_ISDIR(mode):\n # It's a directory, recurse into it\n walk_tree(pathname, callback)\n elif S_ISREG(mode):\n # It's a file, call the callback function\n callback(pathname)\n else:\n # Unknown file type, print a message\n print(\"Skipping %s\" % pathname)", "def walk_tree(self, path, topdown=True):\n if isinstance(path, File):\n # Called with File object as an argument\n root = path\n path = root.path\n else:\n root = File(path)\n\n files, dirs = [], []\n\n try:\n for item in os.listdir(path):\n file_path = os.path.join(path, item)\n\n if self.path_ignore and self.path_ignore.match(file_path):\n # Skip excluded paths\n lg.debug(\"Ignoring path %s\" % file_path)\n continue\n\n try:\n f_object = File(file_path, seen=root.already_seen)\n except UnsupportedFileType as e:\n lg.warn('%s ..skipping' % e)\n continue\n except OSError as e:\n if e.errno == errno.ENOENT:\n # File already removed, go on\n lg.debug('File already removed: %s' % e)\n continue\n elif e.errno in [errno.EPERM, errno.EACCES]:\n # Permission denied or operation not permitted, log error and go on\n lg.error(e)\n continue\n else:\n # Other errors should be 
fatal, but we don't want them to be\n # eg. corrupted file on GlusterFS may raise IOError, but we want to continue\n lg.exception(e)\n continue\n\n if f_object.directory is True:\n dirs.append(f_object)\n else:\n files.append(f_object)\n except OSError as e:\n # Exceptions that may come from os.listdir()\n if e.errno == errno.ENOENT:\n # Directory doesn't exist, go on\n pass\n elif e.errno in [errno.EPERM, errno.EACCES]:\n # Permission denied or operation not permitted, log error and go on\n lg.error(e)\n pass\n else:\n # Other errors should be fatal, but we don't want them to be\n # eg. corrupted file on GlusterFS may raise IOError, but we want to go on\n lg.exception(e)\n pass\n\n if topdown:\n yield root, dirs, files\n\n for item in dirs:\n for x in self.walk_tree(item):\n yield x\n\n if not topdown:\n yield root, dirs, files", "def iter_tree(self):\n yield self\n for c in self.children:\n for ci in c.iter_tree:\n yield ci", "def test_compiler_parse_tree(compiler, patch):\n patch.object(Compiler, 'subtree')\n tree = Tree('start', [Tree('command', ['token'])])\n compiler.parse_tree(tree)\n compiler.subtree.assert_called_with(Tree('command', ['token']),\n parent=None)", "def test_iter_children():\n builder = TreeBuilder()\n builder.create_root(0)\n\n data = list(range(2, 15, 3))\n for datum in data:\n builder.add_child(datum)\n t = builder.build()\n\n for i, child in enumerate(t):\n assert child.data == data[i]", "def tree_contains(T, x):", "def _refind_nodes(self, reSearchItems, root=None, sortByDepth=False):\n\n reListOfSearchItems = list(reSearchItems)\n\n if root == None:\n ReParent = reListOfSearchItems.pop(0)\n Out = [x for x in self.ParentMap.keys() if ReParent.match(x.tag)]\n\n else:\n Out = [root]\n\n\n while len(reListOfSearchItems) > 0:\n ReParent = reListOfSearchItems.pop(0)\n Out = [x for root in Out for x in root.iter() if ReParent.match(x.tag)]\n\n if sortByDepth == False: return Out\n\n TDict = dict((x, len(self.get_path_to_node(x))) for x in Out)\n return [o[0] for o in sorted(TDict.items(),key=lambda x:x[1])]", "def walk_depth_first(\n root: DOMNode,\n filter_type: type[WalkType] | None = None,\n *,\n with_root: bool = True,\n) -> Iterable[DOMNode] | Iterable[WalkType]:\n from textual.dom import DOMNode\n\n stack: list[Iterator[DOMNode]] = [iter(root.children)]\n pop = stack.pop\n push = stack.append\n check_type = filter_type or DOMNode\n\n if with_root and isinstance(root, check_type):\n yield root\n while stack:\n node = next(stack[-1], None)\n if node is None:\n pop()\n else:\n if isinstance(node, check_type):\n yield node\n if node.children:\n push(iter(node.children))", "def walk(self): # FileObj.walk\n yield self", "def test_tree_mode3(self):\n xpb = XPathBuilder()\n xp_1 = xpb.foo\n xp_2 = xpb.baz\n xp_and = xp_1 & xp_2\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 2)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_and._children[1] is xp_2)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n self.assertTrue(xp_2._parent is xp_and)\n self.assertTrue(len(xp_2._children) == 0)\n xp_and.remove_child(xp_2)\n # check references after remove\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 1)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n # xp_2's references were changed\n self.assertTrue(xp_2._parent is None)\n self.assertTrue(len(xp_2._children) == 0)", 
"def walk(top):\r\n yield top\r\n for name in os.listdir(top):\r\n name = os.path.join(top, name)\r\n if os.path.isdir(name) and not os.path.islink(name):\r\n for dir in walk(name):\r\n yield dir" ]
[ "0.6517461", "0.63216114", "0.620697", "0.61414737", "0.6116617", "0.60739696", "0.5977533", "0.5937574", "0.5928211", "0.58566964", "0.5848424", "0.58056533", "0.579866", "0.578824", "0.5784192", "0.5767787", "0.5766229", "0.57428783", "0.5723398", "0.5716726", "0.5701137", "0.56942636", "0.56776077", "0.5673125", "0.5662559", "0.56596893", "0.5656663", "0.5646444", "0.5638685", "0.5622813", "0.5619955", "0.5617013", "0.56154543", "0.56141734", "0.5606344", "0.56023836", "0.5590529", "0.55792636", "0.55792636", "0.55700904", "0.5565304", "0.55651337", "0.55552524", "0.5549124", "0.55420995", "0.5537793", "0.55367315", "0.5535416", "0.5535339", "0.552452", "0.55029476", "0.5474194", "0.5471379", "0.5467809", "0.5466679", "0.54627234", "0.54624975", "0.546016", "0.5452563", "0.5437124", "0.54343975", "0.54323286", "0.5431723", "0.5431335", "0.5424943", "0.5423955", "0.54219705", "0.5404825", "0.5404808", "0.5402469", "0.538809", "0.5386275", "0.53840363", "0.5381442", "0.5378778", "0.53764325", "0.53751", "0.537474", "0.53580725", "0.53562725", "0.5352689", "0.53514373", "0.5348595", "0.5345038", "0.53411186", "0.5334703", "0.53327715", "0.5327821", "0.5327138", "0.5325911", "0.53225005", "0.5321627", "0.5310697", "0.53085583", "0.5307117", "0.52972287", "0.52939224", "0.5293656", "0.5290415", "0.5286821", "0.5276516" ]
0.0
-1
Create a WSGI application factory.
def create_wsgi_factory(mounts_factories):
    def create_wsgi(app, **kwargs):
        mounts = {
            mount: factory(**kwargs)
            for mount, factory in mounts_factories.items()
        }
        return DispatcherMiddleware(app.wsgi_app, mounts)
    return create_wsgi
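A minimal usage sketch for the factory above, assuming Werkzeug's DispatcherMiddleware (imported from werkzeug.middleware.dispatcher on Werkzeug >= 0.15; older releases expose it in werkzeug.wsgi); the make_api sub-application factory and the /api mount point are illustrative names, not taken from the source.

from flask import Flask
from werkzeug.middleware.dispatcher import DispatcherMiddleware


def make_api(**kwargs):
    # Hypothetical sub-application factory; any callable returning a WSGI app works.
    api = Flask("api")

    @api.route("/ping")
    def ping():
        return "pong"

    return api


app = Flask(__name__)
# Requests under /api are dispatched to the mounted sub-app; all other paths
# stay with the main application's original wsgi_app.
app.wsgi_app = create_wsgi_factory({"/api": make_api})(app)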
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_app():\n app = Flask(__name__)\n app.config.from_object('app.configs.config')\n app.config.from_object('app.configs.settings')\n return app", "def create_app():\n from server.web import create_app\n # If we do a static javascript app via flask, add it here\n # from server.web import create_app as create_web_app\n return create_app()", "def app():\n return create_app()", "def app_factory():\n app = web.Application()\n app.add_routes([\n web.get('/ping', handle_ping),\n ])\n return app", "def create_app(settings_override=None):\n app = factory.create_app(__name__, __path__, settings_override)\n\n Bootstrap(app)\n admin.init_app(app)\n filters.init_app(app)\n Sentry(app)\n\n if not app.debug:\n for e in (404, 500):\n app.errorhandler(e)(handle_error)\n\n return app", "def app():\n app = create_app()\n return app", "def make_app(*args, **kwargs):\n app = Flask(*args, **kwargs)\n Roots(app)\n return app", "def create_app(env=\"production\"):\n app = Flask(__name__, static_url_path=\"/\")\n config_app(app, env=env)\n\n with app.app_context():\n Moment(app)\n init_db(app)\n enable_parser(app)\n register_route(app)\n register_blue(app)\n init_logger(app)\n init_scheduler(app)\n return app", "def create_app():\n app = Flask(__name__)\n\n app.config.from_pyfile('../settings.py')\n\n app.register_blueprint(layout_bp, url_prefix='/layouts')\n app.register_blueprint(sheet_bp, url_prefix='/sheets')\n app.register_blueprint(user_bp, url_prefix='/users')\n\n db.init_app(app)\n ma.init_app(app)\n migrate.init_app(app)\n login_manager.init_app(app)\n\n return app", "def create_app() -> Flask:\r\n app = Flask(__name__.split('.')[0])\r\n init_config(app)\r\n app.register_blueprint(observer)\r\n app.teardown_appcontext(close_db)\r\n app.cli.add_command(init_db)\r\n\r\n return app", "def app_factory(global_conf, load_app_kwds={}, **kwargs):\n # Create the Galaxy application unless passed in\n kwargs = load_app_properties(\n kwds=kwargs,\n **load_app_kwds\n )\n if 'app' in kwargs:\n app = kwargs.pop('app')\n else:\n from galaxy.webapps.coralsnp_reports.app import UniverseApplication\n app = UniverseApplication(global_conf=global_conf, **kwargs)\n atexit.register(app.shutdown)\n # Create the universe WSGI application\n webapp = CoralSNPReportsWebApplication(app, session_cookie='galaxycoralsnpreportssession', name=\"coralsnp_reports\")\n add_ui_controllers(webapp, app)\n # These two routes handle our simple needs at the moment\n webapp.add_route('/{controller}/{action}', controller=\"root\", action='index')\n webapp.add_route('/{action}', controller='root', action='index')\n webapp.finalize_config()\n # Wrap the webapp in some useful middleware\n if kwargs.get('middleware', True):\n webapp = wrap_in_middleware(webapp, global_conf, app.application_stack, **kwargs)\n if asbool(kwargs.get('static_enabled', True)):\n webapp = wrap_if_allowed(webapp, app.application_stack, wrap_in_static,\n args=(global_conf,),\n kwargs=kwargs)\n # Close any pooled database connections before forking\n try:\n galaxy.model.corals.mapping.metadata.bind.dispose()\n except Exception:\n log.exception(\"Unable to dispose of pooled coralsnp_reports model database connections.\")\n # Return\n return webapp", "def create_app():\n app = Flask(\n __name__,\n instance_relative_config=False,\n )\n app.config.from_object('config.Config')\n\n with app.app_context():\n # CORS\n CORS(app)\n\n # JWT & BCRYPT\n from .utils.auth import init_auth\n init_auth(app)\n\n # DB\n from .utils.db import db\n db.init_app(app)\n\n # Mail\n from 
.utils.mail.service import mail\n mail.init_app(app)\n app.extensions['mail'].debug = 0 # No logging\n\n # Jobs\n from .utils.scheduler import start_jobs\n start_jobs(app)\n\n # Import routes\n from .routes import (\n admin, users, files,\n suprema,\n b_locals, b_federals)\n\n app.register_blueprint(admin.bp)\n app.register_blueprint(users.bp)\n app.register_blueprint(files.bp)\n app.register_blueprint(suprema.bp)\n app.register_blueprint(b_locals.bp)\n app.register_blueprint(b_federals.bp)\n\n return app", "def create_app(config_name):\n app = Flask(__name__)\n # create app instance\n app.config.from_object(config_by_name[config_name])\n flask_bcrypt.init_app(app)\n\n CORS(app)\n\n routes.init_routes(app)\n\n return app", "def create_app(config=Config):\r\n # Initialise app and configuration\r\n app = Flask(__name__)\r\n app.config.from_object(config)\r\n\r\n\r\n # Initialise flask plugins\r\n db.init_app(app)\r\n api.init_app(app)\r\n ma.init_app(app)\r\n login.init_app(app)\r\n migrate.init_app(app, db)\r\n register_api(api)\r\n\r\n\r\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n\n # Initialize Plugins\n bootstrap = Bootstrap(app) # noqa: F841\n\n with app.app_context():\n # Include our Routes\n from . import routes # noqa: F401\n\n # # Register Blueprints\n # app.register_blueprint(auth.auth_bp)\n # app.register_blueprint(admin.admin_bp)\n\n return app", "def create_app():\n app = Flask(__name__)\n\n # Load application settings\n settings = os.environ.get(\"FLASK_SETTINGS\", SETTINGS)\n if settings is not None:\n c = Config(settings)\n print(c)\n app.config.update(c.get_map('flask'))\n\n from users.views import user\n # Register the blueprints to app\n app.register_blueprint(user)\n\n db.init_app(app)\n\n return app", "def create_app():\n\n # Create app\n app = Flask(__name__)\n app.config.from_object(\"nextbus.config.Config\")\n\n app.logger = logger.app_logger\n # Load logging configuration and log initial configuration\n logger.load_config(app)\n\n # Initialise SQLAlchemy and Migrate in app\n db.init_app(app)\n migrate.init_app(app, db)\n\n # Adding app, db and model objects to flask shell\n from nextbus import models\n app.shell_context_processor(\n lambda: {\"app\": app, \"db\": db, \"models\": models}\n )\n\n from nextbus.converters import add_converters\n add_converters(app)\n\n from nextbus.views import page\n from nextbus.resources import api\n app.register_blueprint(page)\n app.register_blueprint(api)\n\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n db.init_app(app)\n flask_bcrypt.init_app(app)\n jwt.init_app(app)\n\n with app.app_context():\n # Import Blueprints\n from .routes.users_route import users_bp\n from .routes.messages_route import messages_bp\n\n # REGISTER ROUTES\n app.register_blueprint(users_bp, url_prefix=\"/users\")\n app.register_blueprint(messages_bp, url_prefix=\"/messages\")\n\n\n return app", "def create():\n\n return App()", "def create_app(config='dev'):\n if config == 'dev':\n from .conf.config import DevelopmentConfig as dev_config\n app = configure_app(Flask(__name__), dev_config)\n else:\n from .conf.config import ProdConfig\n app = configure_app(Flask(__name__), ProdConfig)\n\n # setup flask blueprints\n configure_blueprints(app)\n\n return app", "def create_app(settings_override: Optional[object] = None):\n cwd = os.path.dirname(os.path.abspath(__file__))\n package_path = 
[cwd]\n\n app = factory.create_app(\n __name__,\n package_path,\n settings_override,\n )\n setup_jinja_env(app)\n\n # Register custom error handlers\n if not app.debug:\n for e in [500, 404]:\n app.errorhandler(e)(handle_error)\n\n return app", "def create_app():\n\n config = config_by_name[os.getenv('APP_SETTINGS', 'dev')]\n flask_app = Flask(__name__, static_folder=None, instance_relative_config=True)\n flask_app.config.from_object(config)\n with flask_app.app_context():\n app_manager = Manager(flask_app)\n\n from app.models import db, ma\n db.init_app(flask_app)\n Migrate(flask_app, db)\n app_manager.add_command('db', MigrateCommand)\n ma.init_app(flask_app)\n\n from app.service import mail\n mail.init_app(flask_app)\n\n from app.api import blueprint_api\n flask_app.register_blueprint(blueprint_api)\n\n json_logging.ENABLE_JSON_LOGGING = True\n json_logging.COMPONENT_NAME = 'MS-Auth'\n json_logging.COMPONENT_ID = 1\n json_logging.init(framework_name='flask')\n json_logging.init_request_instrument(flask_app)\n\n return flask_app, app_manager", "def create_app(config: dict) -> Flask:\n for key, value in config.items():\n app.config[key] = value\n db.init_app(app)\n ma.init_app(app)\n app.app_context().push()\n return app", "def create_app(configobj=ProdConfig):\n\n app = Flask(__name__)\n app.config.from_object(configobj)\n configure_blueprints(app)\n configure_extensions(app)\n configure_callbacks(app)\n configure_filters(app)\n configure_error_handlers(app)\n return app", "def create_app(app=None):\n #\n # Either use the existing flask provided as an argument or initialize\n # a brand new flask application.\n #\n return app or create_flask_app()", "def create_app(config='dev'):\n config_object = {'dev': DevConfig, 'test': TestConfig}[config]\n\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n if app.config.get('PROFILE'):\n from werkzeug.contrib.profiler import ProfilerMiddleware\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])\n\n configure_log(app)\n configure_database(app)\n configure_json(app)\n configure_converters(app)\n\n register_extensions(app)\n register_blueprints(app)\n\n log.info(\"%s loaded with %s configuration\", bright(\"ups\"), bright(config))\n\n return app", "def FactoryFactory():\n return tornado.web.Application([(r'/factory', FactoryHandler)])", "def create_app():\n logging.basicConfig(\n level=REANA_LOG_LEVEL,\n format=REANA_LOG_FORMAT\n )\n app = Flask(__name__)\n app.config.from_object('reana_server.config')\n app.secret_key = \"hyper secret key\"\n\n # Register API routes\n from .rest import ping, secrets, users, workflows # noqa\n app.register_blueprint(ping.blueprint, url_prefix='/api')\n app.register_blueprint(workflows.blueprint, url_prefix='/api')\n app.register_blueprint(users.blueprint, url_prefix='/api')\n app.register_blueprint(secrets.blueprint, url_prefix='/api')\n\n app.session = Session\n CORS(app)\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n db.init_app(app)\n ma.init_app(app)\n migrate = Migrate(app, db)\n\n with app.app_context():\n from . 
import routes\n\n # Create tables for our models\n db.create_all()\n app.logger.info(\"application started\")\n\n return app", "def create_app(app_name=PKG_NAME, config=None, **kwargs):\n app = Flask(app_name, static_url_path='/flask-static')\n\n # Update the app configuration.\n app.config.from_object(config)\n\n # Supress flask_sqlalchemy warning.\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n # For CSRF and flash\n app.secret_key = \"42d2a9e832245e0e56bb929d46393c4a467322cc21b53bc61a181004\"\n\n if kwargs.get(\"celery\"):\n init_celery(kwargs.get(\"celery\"), app)\n\n initialize_app(app)\n\n return app", "def create_app(config_name):\n\n app = Flask(__name__)\n app.config.from_object(config[config_name])\n\n bootstrap.init_app(app)\n\n from .main import main\n app.register_blueprint(main)\n\n return app", "def create_app(mode=os.environ.get('FLASK_MODE', 'app.config.Development')):\n app = APIFlask(__name__)\n # add configurations\n app_config = config.get(mode)\n app.config.from_object(app_config)\n app_config().init_app(app)\n\n # initialize all extensions\n init_extensions(app)\n\n # register blueprints\n # add blueprint registration statements here\n from app.users import users\n app.register_blueprint(users)\n\n # register error handlers\n app.register_error_handler(400, bad_request)\n app.register_error_handler(Forbidden, forbidden)\n app.register_error_handler(404, not_found)\n app.register_error_handler(405, method_not_supported)\n app.register_error_handler(APIException, conflict)\n\n return app", "def create_app(config_class):\n # create a Flask application instance\n app = Flask(__name__)\n\n # load configs\n app.config.from_object(config_class)\n\n register_extensions(app)\n register_blueprints(app)\n register_error_handlers(app)\n register_shell_context(app)\n register_middleware(app)\n\n return app", "def make_app(conf=None):\n if not conf:\n conf = 'development'\n app = create_app(cm.get(conf))\n return app", "def create_app(settings_override=None):\n\n app = Flask(__name__, instance_relative_config=True)\n\n app.config.from_object('config.settings')\n app.config.from_pyfile('settings.py', silent=True)\n\n if settings_override:\n app.config.update(settings_override)\n\n app.register_blueprint(course)\n app.register_blueprint(user)\n\n extensions(app)\n\n return app", "def create_app(config='config.py'):\n app = Flask(__name__, static_folder=None)\n app.config.from_pyfile(config)\n\n # Initialize extensions/add-ons/plugins.\n mongo.init_app(app)\n login_manager.init_app(app)\n\n for blueprint in all_blueprints:\n import_module(blueprint.import_name)\n app.register_blueprint(blueprint)\n\n return app", "def create_app():\n app = FastAPI()\n configure_rest_server(app=app, router_configs=WEB_SERVICES_ROUTER_CONFIGS, db_configs=DB_CONFIGS)\n return app", "def api_factory(global_config, **local_conf):\n\treturn make_app(blueprints.api_server, settings.ProductionConfig)", "def create_app(conf: Type[Config]):\n app = Flask(__name__)\n app.config.from_object(conf)\n configure_logger(conf)\n register_pc_blueprints(app)\n register_extensions(app)\n return app", "def make_server() -> Flask:\n app: Flask = Flask(__name__)\n return app", "def create_app(testing=False, cli=False):\n app = Flask(__name__)\n app.config.from_object(\"flask_cli.config\")\n\n if testing is True:\n app.config[\"TESTING\"] = True\n\n configure_extensions(app, cli)\n configure_apispec(app)\n register_blueprints(app)\n return app", "def make_app() -> Flask:\n logger.info('creating flask 
application')\n app = Flask(\n 'pasta',\n static_url_path='/static',\n static_folder='./static',\n template_folder='./views')\n config.flask.SECRET_KEY = os.urandom(32)\n config.flask.SERVER_NAME = None\n app.config.from_mapping(config.flask)\n return app", "def create_app(register_blueprints=True):\n app = Flask(__name__, instance_relative_config=True)\n\n app.config.from_object('app.default_config') # default config\n # app.config.from_pyfile('application.cfg.py') # server config file, do not include in versioning\n\n db.init_app(app)\n api = Api(app)\n api.add_resource(UserList, '/api/users')\n\n if register_blueprints:\n register_blueprints_on_app(app)\n\n return app", "def wsgi_app_factory(global_config, **local_config):\n dn = name = '${namespace}${ndot}${nested_namespace}${nsdot}${project_name}'\n wconf = global_config.copy()\n wconf.update(**local_config)\n debug = False\n if global_config.get('debug', 'False').lower() == 'true':\n debug = True\n wconf['pyramid.debug_authorization'] = 'true'\n wconf['pyramid.debug_notfound'] = 'true'\n wconf['pyramid.reload_templates'] = 'true'\n wconf['zcmls' ] = utils.splitstrip(wconf['zcmls'])\n if not wconf['zcmls']:\n wconf['zcmls'] = []\n wconf['zcmls'].insert(0, 'configure.zcml')\n for i, zcml in enumerate(wconf['zcmls']):\n if os.path.sep in zcml:\n zcml = os.path.abspath(zcml)\n else:\n zcml = pkg_resources.resource_filename(dn, zcml)\n wconf['zcmls'][i] = zcml\n\n config = Configurator(settings=wconf)\n \\# activate if you want to enable global components\n \\# globalreg = getGlobalSiteManager()\n \\# config = Configurator(registry=globalreg)\n \\# config.setup_registry(settings=wconf)\n \\# config.include('pyramid_zcml')\n\n config.hook_zca()\n for z in wconf['zcmls']:\n config.load_zcml(z)\n app = config.make_wsgi_app()\n def webbuilder_app(environ, start_response):\n req = Request(environ)\n try:\n resp = req.get_response(app)\n return resp(environ, start_response)\n except Exception, e:\n if not debug:\n return exc.HTTPServerError(str(e))(environ, start_response)\n else:\n raise\n return webbuilder_app", "def create_app(config_name):\n\n app = Flask(__name__)\n app.config.from_object(config_by_name[config_name])\n CORS(app)\n mongo.init_app(app)\n app.register_blueprint(check_bp)\n\n return app", "def create_app():\n app = Flask(__name__)\n app.config.from_pyfile('config.py')\n\n login_manager.init_app(app) # initialize flask_login with our app\n # redirect route when @login_required fails\n login_manager.login_view = 'routes.signin'\n db.init_app(app)\n\n from .routes import routes\n app.register_blueprint(routes)\n\n return app", "def create_app(config):\n\n # Initialize app. 
Flatten config_obj to dictionary (resolve properties).\n app = Flask(__name__)\n config_dict = dict(\n [(k, getattr(config, k)) for k in dir(config) if\n not k.startswith('_')])\n\n app.config.update(config_dict)\n\n for bp in all_blueprints:\n import_module(bp.import_name)\n app.register_blueprint(bp)\n\n pipeline.set_enforce_auth(False)\n\n # Return the application instance.\n return app", "def create_app(config_log=True, register=True):\n config = os.environ.get(ENV_CONFIG_MODULE)\n if not config:\n raise ValueError('no config found')\n return create_app_by_config(conf=config, config_log=config_log, register=register)", "def create_app():\n app = Flask(__name__)\n app.register_blueprint(playlists)\n app.register_blueprint(comments)\n return app", "def create_app(name, path, settings_override=None,\n register_security_blueprint=True):\n\n app = Flask(name, instance_relative_config=True)\n app.config.from_object(\"linkedlist.config\") # public config\n app.config.from_pyfile(\"config.py\", silent=True) # instance config\n app.config.from_object(settings_override) # argument override\n\n # patch in envvar config\n environ_config_override = find_environ_config_vars()\n for key, value in environ_config_override.iteritems():\n app.config[key] = value\n\n db.init_app(app)\n security.init_app(app, SQLAlchemyUserDatastore(db, User, Role),\n register_blueprint=register_security_blueprint)\n register_blueprints(app, name, path)\n\n # create database tables\n with app.app_context():\n db.create_all()\n\n return app", "def create_app() -> Flask:\n\n flask_app = Flask('extraction_api', template_folder='./template')\n flask_app.secret_key = \"super secret key\"\n # import blueprints\n flask_app.register_blueprint(extraction_app)\n\n return flask_app", "def create_app(config=DevConfig, **kwargs):\n app = Flask(__name__, **kwargs)\n app.config.from_object(config)\n\n # flask-restplus seem to use standard json lib and not the flask one\n # so we patch it here so it can handle UUIDs\n JSONEncoder.default = JSONEncoder_newdefault\n\n extensions.init_app(app)\n modules.init_app(app)\n\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.register_blueprint(auth_bp, url_prefix='/auth')\n app.register_blueprint(errors_bp, url_prefix='/error')\n app.config.from_object('config.Config')\n\n db.init_app(app)\n store.bind(db)\n login_manager.init_app(app)\n Session(app)\n captcha = FlaskSessionCaptcha(app)\n captcha.init_app(app)\n\n\n with app.app_context():\n from . 
import routes # Import routes\n db.create_all() # Create sql tables for our data models\n\n return app", "def create_app(app_name: str):\n\n app = Flask(app_name)\n app.json_encoder = CustomJSONEncoder\n\n app.config.update({\n 'SQLALCHEMY_DATABASE_URI': build_db_uri(),\n 'SQLALCHEMY_TRACK_MODIFICATIONS': os.environ.get('SQLALCHEMY_TRACK_MODIFICATIONS', False),\n 'APP_CONFIG': {\n 'HOSTNAME': os.environ.get('HOSTNAME', ''),\n 'GREETING': os.environ.get('GREETING', 'Hello'),\n }\n })\n\n db.init_app(app)\n api = Api(app)\n\n with app.app_context():\n api.add_resource(Index, '/')\n api.add_resource(Config, '/config')\n api.add_resource(StudentMany, '/student')\n api.add_resource(StudentOne, '/student/<int:student_id>')\n return app", "def create_app(env='dev', services=dict()):\n\n # Create the flask app\n app = Flask(__name__)\n\n # Do everything in the app context\n with app.app_context():\n\n from flask import current_app, g\n g._env = env\n\n # Load the config\n current_app.config.from_object('service.config.config_%s.Config' % env)\n\n # Configure S3\n s3 = FlaskS3(app)\n g._s3 = s3\n\n # Load all further resources and services\n from . import views\n\n # Resources\n views.load_views()\n\n # Services\n from .aws import configure_aws\n configure_aws()\n\n # Load debug toolbar if enabled\n dev_toolbar = None\n if app.config.get('DEBUG_TOOLBAR', True):\n dev_toolbar = DebugToolbarExtension(app)\n g._dev_toolbar = dev_toolbar\n\n # Configure bootstrap\n Bootstrap(app)\n\n return app", "def create_app(settings_override=None):\n app = Flask(__name__, instance_relative_config=True)\n\n app.config.from_object('config.settings')\n app.config.from_pyfile('settings.py', silent=True)\n\n # set only during the testing\n if settings_override:\n app.config.update(settings_override)\n\n app.register_blueprint(page)\n extentions_init(app)\n\n return app", "def make_app(global_conf, **app_conf):\n app = RestishApp(root.Root())\n app = repoze.who.config.make_middleware_with_config(app, global_conf, app_conf['repoze.who.ini'])\n app = setup_environ(app, global_conf, app_conf)\n # General \"middleware\".\n app = flash.flash_middleware_factory(app)\n app = cookies.cookies_middleware_factory(app)\n return app", "def create_app():\n app = web.Application(handlers=[\n (r'/', FilesetHandler),\n ])\n return app", "def create_app(config_object=Config):\n app = Flask(__name__.split('.')[0], static_folder='../client/build/static', template_folder=\"../client/build\")\n\n app.url_map.strict_slashes = False\n app.config.from_object(config_object)\n db.init_app(app)\n cache.init_app(app)\n register_blueprints(app)\n register_error_handler(app)\n \n\n return app", "def create_app():\n app = Flask(__name__)\n\n # Used by Flask to secure data\n app.config['SECRET_KEY'] = 'super-secret-secure-key'\n # Path to save the Database\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'\n\n # Initialize the Database\n db.init_app(app)\n\n # Set up login manager\n from source.models import manage_login\n manage_login(app)\n\n # Blueprint for auth routes\n from source.auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint)\n\n # Blueprint for non-auth routes\n from source.main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n\n return app", "def create_app_by_config(conf=None, config_log=True, register=True):\n # check instance path\n instance_path = os.environ.get(ENV_INSTANCE_PATH) or None\n # create app\n app = Flask(__name__, instance_path=instance_path)\n # ensure the 
instance folder exists\n if app.instance_path:\n try:\n os.makedirs(app.instance_path, exist_ok=True)\n except OSError:\n pass\n # configure app\n if conf:\n app.config.from_object(conf)\n # config logger\n if config_log:\n config_logger(app)\n # register blueprints\n if register:\n register_blueprints(app)\n return app", "def create_app(test_config=None):\n app = Flask(__name__)\n\n # apply the blueprints to the app\n from app import common\n\n app.register_blueprint(common.bp)\n\n # default url for site\n app.add_url_rule(\"/\", endpoint=\"index\")\n\n return app", "def make_app():\n return tornado.web.Application([\n tornado.web.URLSpec(r\"/ws/\", WebSocket, name=\"websocket\"),\n tornado.web.URLSpec(r\"/\", StartPage, name='index'),\n (r\"/static/\", tornado.web.StaticFileHandler,\n dict(path=SETTINGS['static_path'])),\n ], **SETTINGS)", "def create_app():\n app = Flask(__name__)\n\n\n @app.route('/')\n def barebones():\n return 'the barebones'\n\n return app", "def create_app():\n # Creates flask object with directory for to serve static files\n app = Flask(__name__, static_url_path=C.STATIC_FILE_PATH)\n\n # Enabling CORS for the application\n CORS(app)\n\n app.debug = True\n # Registering books controller\n from app.controllers.books import mod\n app.register_blueprint(mod)\n\n # Test Route\n @app.route('/hello')\n def hello_world():\n return 'Hello World!'\n\n # Index route - serves index.html\n @mod.route('/')\n def main():\n return mod.send_static_file(\"index.html\")\n\n # serve routes from index by default\n app.add_url_rule('/', endpoint='index')\n\n return app", "def create_app(config: str) -> Flask:\n api = FlaskApp(__name__, specification_dir=Path() / \"swagger\")\n api.add_api(\"swagger.yml\")\n\n # Get `Flask` object\n app = api.app\n\n app.config.from_object(config)\n app.register_blueprint(site.mod)\n\n db.init_app(app)\n\n return app", "def create_app(config_name=\"development\"):\n # return app with config file on config folder\n app = Flask(__name__)\n\n # get default settings for app\n app.config.from_object(\"app_name.settings\")\n\n # load according config object\n app.config.from_object(app_config.config[config_name])\n\n # run classmethod to init app with Flask-DotEnv\n app_config.config[config_name].init_app(app)\n\n # register blueprints\n app.register_blueprint(api_mod, url_prefix=\"/api\")\n app.register_blueprint(mock_module, url_prefix=\"/api\")\n app.register_blueprint(support_ticket_module, url_prefix=\"/api\")\n \n # enable cors\n CORS(app)\n\n with app.app_context():\n # if config_name != \"testing\":\n # init db instance\n db.init_app(app)\n\n # migrate for Flask-Migrate\n migrate.init_app(app, db)\n\n return app", "def create_app(test_config=None):\n\n app = Flask(__name__, instance_relative_config=True)\n\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n if test_config is None:\n app.config.from_pyfile('config.py', silent=False)\n else:\n app.config.from_mapping(test_config)\n\n # Instantiate the Database.\n db.init_app(app=app)\n migrate = Migrate(app=app, db=db)\n\n # Initialize WTForms to handle JSON data.\n wtforms_json.init()\n\n # Routing starts from here.\n app.add_url_rule(\"/\", view_func=Home.as_view(\"home\"))\n app.register_blueprint(api)\n\n return app", "def _create_app_instance(script_info):\n return create_app()", "def create_app(self):\n\n app = create_app()\n app.config.from_object('project.config.TestingConfig')\n return app", "def create_app():\n\n app = FastAPI()\n add_root_route(app)\n\n return app", "def 
create_app(config_name, log_level=logging.INFO):\n app = Flask(__name__)\n app.config.from_object(config[config_name])\n\n bootstrap.init_app(app)\n mail.init_app(app)\n moment.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n\n Markdown(app)\n\n redis_store.init_app(app)\n\n handler = RotatingFileHandler('flask.log', maxBytes=10000, backupCount=1)\n handler.setLevel(log_level)\n app.logger.addHandler(handler)\n\n #attach routes and custom error pages here\n\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n\n from .api import api as api_blueprint\n app.register_blueprint(api_blueprint)\n\n return app", "def create_app(settings_override=None):\n app = Flask(__name__, static_folder='static')\n\n params = {\n 'DEBUG': True,\n 'WEBPACK_MANIFEST_PATH': '../build/manifest.json'\n }\n\n app.config.update(params)\n\n if settings_override:\n app.config.update(settings_override)\n\n webpack.init_app(app)\n\n return app", "def create_app(settings_override=None):\n try:\n # initialize app & define config scopes\n app = Flask(__name__, instance_relative_config=True)\n app.logger.info(\"Initializing app\")\n\n # useful when hooking up the api to a front end or custom ajax later\n app.logger.info(\"Adding CORS\")\n CORS(app)\n\n app.logger.info(\"Loading settings from config file\")\n app.config.from_object('config.settings')\n app.config.from_pyfile('settings.py', silent=True)\n\n if settings_override:\n app.config.update(settings_override)\n\n # add extensions\n app.logger.info(\"Loading extensions\")\n extensions(app)\n\n # make api\n app.logger.info(\"Loading restful interface\")\n api = Api(app)\n\n # create tables\n @app.before_first_request\n def create_tables():\n app.logger.info(\"Creating database tables\")\n db.create_all()\n\n # add routes, '/' first is best practice\n app.logger.info(\"Loading restful routes\")\n api.add_resource(Success, app.config['ROUTE_SUCCESS'])\n\n # user routes\n api.add_resource(UserRegister, app.config['ROUTE_USER_REGISTER'])\n api.add_resource(User, app.config['ROUTE_USER'])\n\n # menu routes\n api.add_resource(MenuAdd, app.config['ROUTE_MENU'])\n api.add_resource(MenuItem, app.config['ROUTE_MENU_ITEM'])\n api.add_resource(MenuList, app.config['ROUTE_MENU_LIST'])\n\n # order routes\n api.add_resource(OrderAdd, app.config['ROUTE_ORDER'])\n api.add_resource(OrderList, app.config['ROUTE_ORDER_LIST'])\n api.add_resource(OrderItem, app.config['ROUTE_ORDER_ITEM'])\n\n app.logger.info(\"API ready\")\n return app\n\n # base exception to catch everything\n except BaseException:\n app.logger.error(app_error(nondict=True))\n return app_error()", "def generate(self) -> Flask:\n app = Flask(self.name, *self.args, **self.kwargs)\n app = self.setup_app_config(app)\n app = self.add_app_headers(app)\n app = self.add_xsrf_error_handler(app)\n\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n # app.config.from_object('config.Config')\n file_path = os.path.abspath(os.getcwd())+\"/mpulse.db\"\n app.config.from_mapping(\n SECRET_KEY='dev',\n SQLALCHEMY_DATABASE_URI = 'sqlite:///'+file_path,\n SCHEMA=os.path.join(os.path.dirname(__file__), 'schema.sql'),\n SQLALCHEMY_TRACK_MODIFICATIONS = False,\n JSON_SORT_KEYS=False\n )\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n \n # init database\n db.init_app(app)\n \n with app.app_context():\n \n # Create tables if they don't exist\n db.create_all() \n \n # Include our api Routes for members\n 
from . import members\n # Register Blueprints\n app.register_blueprint(members.bp)\n\n return app", "def api_dev_factory(global_config, **local_conf):\n\treturn make_app(blueprints.api_server, settings.DevelopmentConfig)", "def create_app(config_overrides={}):\n # create app; load config\n app = Flask(__name__)\n app.config.from_object('config')\n app.config.update(**config_overrides)\n\n db.init_app(app)\n\n # flask-debug-toolbar\n DebugToolbarExtension(app)\n\n if app.config.get('DEBUG_TOOLBAR'):\n toolbar = DebugToolbarExtension(app)\n\n # error page handlers\n @app.errorhandler(404)\n def not_found(error):\n return render_template('404.html'), 404\n\n @app.errorhandler(500)\n def server_error(error):\n return render_template('500.html'), 500\n\n # register blueprints\n from app.main.views import mod as main_module\n\n app.register_blueprint(main_module)\n\n # load models\n from app.players.models import Player\n\n return app", "def _build_flask_app(self, name):\n app = Flask(name)\n app.add_url_rule('/ping', 'healthcheck', self._healthcheck)\n app.add_url_rule('/invocations', 'invoke', self._invoke, methods=[\"POST\"])\n app.register_error_handler(Exception, self._default_error_handler)\n return app", "def create_embedded():\n from .server import create_application\n return create_application()", "def create_app(**config_overrides):\n # we want to modify the global app, not a local copy\n global app\n global eventum\n\n app = Flask(__name__)\n\n # Load config then apply overrides\n app.config.from_object('config.flask_config')\n app.config.update(config_overrides)\n\n # Initialize assets\n assets = Environment(app)\n register_scss(assets)\n\n # Eventum\n eventum = Eventum(app)\n\n # Blueprints\n register_blueprints()\n\n return app", "def create_app():\n app = Flask(__name__)\n\n @app.route('/')\n def root():\n \"\"\"Base view.\"\"\"\n return 'TODO - part 2 and beyond!'\n\n return app", "def create_app(self):\n app.config.from_object('config.TestingConfig')\n return app", "def create_app(config_object):\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n # add blueprint\n from app.api import api_bp\n app.register_blueprint(api_bp, url_prefix='/api/v1/')\n\n # add redis client\n from app.redis_init import redis_client\n redis_client.init_app(app)\n\n # add prometheus middleware\n from app.prometheus_middleware import setup_metrics\n setup_metrics(app)\n\n return app", "def create_app(config='catalog.config.ProductionConfig'):\n # Create app\n app = Flask(__name__)\n app.config.from_object(config)\n\n # Register blueprints\n reg_bps(app)\n\n # Import models (for migration purposes)\n from . 
import Category, Item, AppUser # noqa: F401\n\n # Initialize extensions\n db.init_app(app)\n migrate.init_app(app, db)\n\n return app", "def create_app(config_class=DevConfig):\n\n app = Flask(__name__)\n app.config.from_object(config_class)\n\n # Register Blueprints\n from routes import bp_main\n app.register_blueprint(bp_main)\n\n return app", "def create_app(config_class=flaskblog_cf.Config):\n app = flask.Flask(__name__)\n app.config.from_object(flaskblog_cf.Config)\n\n db.init_app(app)\n bcrypt.init_app(app)\n login_manager.init_app(app)\n mail.init_app(app)\n\n import flaskblog.controller.user_controller as flaskblog_user_ctrl\n import flaskblog.controller.general_controller as flaskblog_general_ctrl\n import flaskblog.controller.posts_controller as flaskblog_post_ctrl\n import flaskblog.controller.error_pages_controller as flaskblog_error_ctrl\n\n app.register_blueprint(flaskblog_user_ctrl.users)\n app.register_blueprint(flaskblog_post_ctrl.posts)\n app.register_blueprint(flaskblog_general_ctrl.main)\n app.register_blueprint(flaskblog_error_ctrl.errors)\n\n return app", "def create_app():\n app = Flask(__name__)\n\n settings = {\n 'DEBUG': True,\n 'WEBPACK_MANIFEST_PATH': './build/manifest.json',\n 'SECRET_KEY': 'the quick brown fox jumps over the lazy dog',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:///db.sqlite',\n 'SQLALCHEMY_COMMIT_ON_TEARDOWN': True,\n 'UPLOAD_FOLDER':UPLOAD_FOLDER,\n 'ALLOWED_EXTENSIONS': set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif','zip','log'])\n\n }\n\n app.config.update(settings)\n CORS(app)\n webpack.init_app(app)\n\n return app", "def create_app(config_name):\n app = FlaskAPI(__name__)\n app.config.from_object(app_config[config_name])\n app.url_map.strict_slashes = False\n\n return app", "def create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n register_extensions(app)\n\n from main import main as main_blueprint\n\n app.register_blueprint(main_blueprint, url_prefix='/')\n\n from preview import preview as preview_blueprint\n\n app.register_blueprint(preview_blueprint, url_prefix='/preview')\n\n return app", "def create_app(config_filename=None, config_object=None):\n app = Flask(__name__)\n\n app.config.from_object('psephology.config.default')\n if config_filename is not None:\n app.config.from_pyfile(config_filename)\n if config_object is not None:\n app.config.from_object(config_object)\n\n db.init_app(app)\n migrate.init_app(app, db, render_as_batch=True)\n\n app.register_blueprint(ui)\n app.register_blueprint(api, url_prefix='/api')\n app.cli.add_command(cli)\n\n # Things which should only be present in DEBUG-enabled apps\n app.debug = app.config.get('DEBUG', False)\n if app.debug:\n from flask_debugtoolbar import DebugToolbarExtension\n toolbar = DebugToolbarExtension()\n toolbar.init_app(app)\n\n return app", "def make_app():\n app = flask.Flask('sahara.api')\n\n @app.route('/', methods=['GET'])\n def version_list():\n context.set_ctx(None)\n return api_utils.render({\n \"versions\": [\n {\"id\": \"v1.0\", \"status\": \"CURRENT\"}\n ]\n })\n\n @app.teardown_request\n def teardown_request(_ex=None):\n context.set_ctx(None)\n\n app.register_blueprint(api_v10.rest, url_prefix='/v1.0')\n app.register_blueprint(api_v10.rest, url_prefix='/v1.1')\n app.register_blueprint(api_v11.rest, url_prefix='/v1.1')\n\n def make_json_error(ex):\n status_code = (ex.code\n if isinstance(ex, werkzeug_exceptions.HTTPException)\n else 500)\n description = (ex.description\n if isinstance(ex, 
werkzeug_exceptions.HTTPException)\n else str(ex))\n return api_utils.render({'error': status_code,\n 'error_message': description},\n status=status_code)\n\n for code in six.iterkeys(werkzeug_exceptions.default_exceptions):\n app.error_handler_spec[None][code] = make_json_error\n\n if CONF.debug and not CONF.log_exchange:\n LOG.debug('Logging of request/response exchange could be enabled using'\n ' flag --log-exchange')\n\n if CONF.log_exchange:\n app.wsgi_app = log_exchange.LogExchange.factory(CONF)(app.wsgi_app)\n\n app.wsgi_app = auth_valid.wrap(app.wsgi_app)\n app.wsgi_app = acl.wrap(app.wsgi_app)\n\n return app", "def create_app(debug=False):\n app = factory.create_app(__name__, __path__, debug)\n\n # set the default json encoder\n app.json_encoder = JSONDatetimeEncoder\n\n # set the error handlers\n app.errorhandler(ApiError)(handle_api_exception)\n app.errorhandler(AuthenticationError)(handle_403)\n app.errorhandler(404)(handle_404)\n\n @app.route('/')\n def version():\n return jsonify({'version': '0.1'})\n\n return app", "def create_app(dictionary_with_strategies):\n\n app = Flask(__name__, static_url_path='',\n static_folder='../dist',\n template_folder='../dist')\n\n @app.route('/')\n def home():\n return redirect(url_for('static', filename='index.html'))\n\n app.url_map.strict_slashes = False\n app.config['Strategies'] = dictionary_with_strategies\n register_blueprints(app, \"/api\")\n\n return app", "def create_app() -> Flask:\n app = Flask('preview')\n app.json_encoder = PreviewEncoder\n app.config.from_pyfile('config.py')\n\n Base(app)\n auth.Auth(app)\n\n # Set up the API.\n app.register_blueprint(routes.api)\n register_error_handlers(app)\n\n # Add WSGI middlewares.\n middleware = [request_logs.ClassicLogsMiddleware,\n auth.middleware.AuthMiddleware]\n if app.config['VAULT_ENABLED']:\n middleware.insert(0, vault.middleware.VaultMiddleware)\n wrap(app, middleware)\n\n # Make sure that we have all of the secrets that we need to run.\n if app.config['VAULT_ENABLED']:\n app.middlewares['VaultMiddleware'].update_secrets({})\n\n # Initialize upstream services.\n PreviewStore.init_app(app)\n if app.config['WAIT_FOR_SERVICES']:\n with app.app_context(): # type: ignore\n PreviewStore.current_session().initialize()\n return app", "def create_app(test_config=None):\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_mapping(\n SECRET_KEY=os.environ.get('FLASK_SECRET_KEY', 'dev'),\n # SQLALCHEMY_DATABASE_URI='sqlite:////' + os.path.join(app.instance_path, 'app.sqlite'),\n SQLALCHEMY_DATABASE_URI=os.environ.get('FLASK_SQLALCHEMY_DATABASE_URI'),\n SQLALCHEMY_TRACK_MODIFICATIONS=False,\n )\n\n if test_config is None:\n app.config.from_pyfile('config.py', silent=True)\n else:\n app.config.from_mapping(test_config)\n\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n # Set custom json encoder\n app.json_encoder = JSONEncoder\n\n # SQLAlchemy\n from tuinbouwer_server_api.models import db, migrate\n db.init_app(app)\n migrate.init_app(app, db)\n\n # Apscheduler\n from tuinbouwer_server_api.scheduler import scheduler, start_jobs\n scheduler.init_app(app)\n scheduler.start()\n start_jobs()\n \n # CORS\n CORS(app, resources={r'/*': {'origins': '*'}})\n\n # Website\n app.register_blueprint(website.frontend.blueprint)\n \n # API\n app.register_blueprint(api.sensor.blueprint)\n app.register_blueprint(api.frontend.blueprint)\n\n\n return app", "def create_request_app(settings):\n application = Flask(__name__)\n application.config.from_object(settings)\n\n 
db.init_app(application)\n\n application.register_blueprint(app_views)\n application.register_blueprint(api_views)\n\n logging_handler = RotatingFileHandler(\"{0}/app.log\".format(settings.BASE_DIR), maxBytes=10000, backupCount=1)\n logging_handler.setLevel(logging.INFO)\n application.logger.addHandler(logging_handler)\n with application.app_context():\n db.create_all()\n return application", "def create_app(config=None, app_name=\"todo-list\", blueprints=None):\n app = Flask(\n app_name,\n static_folder=os.path.join(os.path.dirname(__file__), '..', 'static'),\n template_folder='templates',\n )\n\n #app.config.from_object('project.config')\n app.config.from_pyfile('default.cfg', silent=False)\n if config:\n app.config.from_pyfile(config, silent=True)\n\n if blueprints is None:\n blueprints = BLUEPRINTS\n\n blueprints_fabrics(app, blueprints)\n extensions_fabrics(app)\n\n auth_util.init_auth_callbacks()\n\n return app", "def create_app(settings_override=None):\r\n app = Flask(__name__, instance_relative_config=True)\r\n\r\n app.config.from_object('config.settings')\r\n app.config.from_pyfile('settings.py', silent=True)\r\n\r\n if settings_override:\r\n app.config.update(settings_override)\r\n\r\n extensions(app)\r\n\r\n @app.before_first_request\r\n def init_db():\r\n session['email'] = None\r\n Database()\r\n\r\n @app.route('/')\r\n def home_page():\r\n form = LoginForm()\r\n return render_template('index.html', form=form)\r\n\r\n app.register_blueprint(users, url_prefix='/user')\r\n app.register_blueprint(admin, url_prefix='/admin')\r\n return app", "def create_app(config_mapping=None):\n logging.basicConfig(level=REANA_LOG_LEVEL, format=REANA_LOG_FORMAT)\n app = Flask(__name__)\n app.config.from_object(\"reana_workflow_controller.config\")\n if config_mapping:\n app.config.from_mapping(config_mapping)\n\n app.secret_key = \"super secret key\"\n # Register API routes\n from reana_workflow_controller.rest import (\n workflows_session,\n workflows_status,\n workflows_workspace,\n workflows,\n ) # noqa\n\n app.register_blueprint(workflows_session.blueprint, url_prefix=\"/api\")\n app.register_blueprint(workflows.blueprint, url_prefix=\"/api\")\n app.register_blueprint(workflows_status.blueprint, url_prefix=\"/api\")\n app.register_blueprint(workflows_workspace.blueprint, url_prefix=\"/api\")\n\n app.register_error_handler(UnprocessableEntity, handle_args_validation_error)\n\n app.session = Session\n return app" ]
[ "0.7593015", "0.75498307", "0.75089604", "0.7489719", "0.7488525", "0.74390584", "0.7435584", "0.7419176", "0.74065423", "0.7343458", "0.7322757", "0.730045", "0.72561944", "0.7253028", "0.72448945", "0.72247684", "0.719974", "0.7180518", "0.71766984", "0.7159296", "0.7157641", "0.7130562", "0.7127395", "0.7095701", "0.7090583", "0.7069225", "0.70653874", "0.7063539", "0.7062695", "0.70396245", "0.70360416", "0.70284396", "0.7023732", "0.70163727", "0.70091105", "0.7006", "0.6999505", "0.69961387", "0.69874436", "0.6985553", "0.6971582", "0.6969792", "0.69676435", "0.6967242", "0.69562256", "0.69495547", "0.69413316", "0.6934626", "0.6922469", "0.6918926", "0.6913815", "0.69123024", "0.69101137", "0.69016594", "0.689872", "0.6897176", "0.6895415", "0.6876932", "0.6842608", "0.6839823", "0.683328", "0.6831572", "0.6826215", "0.6825787", "0.6801277", "0.6785006", "0.677861", "0.6778204", "0.6774169", "0.67701626", "0.67602384", "0.67570096", "0.67540604", "0.6744149", "0.67378783", "0.6729011", "0.67247677", "0.67159826", "0.6715788", "0.6703768", "0.6700192", "0.66995347", "0.66911274", "0.6689997", "0.66826", "0.6676778", "0.6675388", "0.6674919", "0.6674232", "0.6674211", "0.66661", "0.6659851", "0.6651818", "0.6645535", "0.6644723", "0.66403127", "0.6635906", "0.66278064", "0.6614391", "0.6601535" ]
0.6932318
48
Fix Flask environment according to ``XForwarded_`` headers.
def wsgi_proxyfix(factory=None): def create_wsgi(app, **kwargs): wsgi_app = factory(app, **kwargs) if factory else app.wsgi_app num_proxies = app.config.get("WSGI_PROXIES") proxy_config = app.config.get("PROXYFIX_CONFIG") if proxy_config and not WERKZEUG_GTE_014: return ProxyFix(wsgi_app, **proxy_config) elif num_proxies: warnings.warn( "The WSGI_PROXIES configuration is deprecated and " "it will be removed, use PROXYFIX_CONFIG instead", PendingDeprecationWarning, ) if WERKZEUG_GTE_014: return ProxyFix(wsgi_app, num_proxies=num_proxies) else: return ProxyFix(wsgi_app, x_for=num_proxies) return wsgi_app return create_wsgi
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixProxiedRequest( self, REQUEST ):\n # mod_proxy: X-Forwarded-Server\n # mod_accel: X-Host, X-Real-IP, X-URI, X-Method\n server = REQUEST.get('SERVER_URL')\n real_host = REQUEST.get('HTTP_X_FORWARDED_SERVER') or REQUEST.get('HTTP_X_HOST')\n real_addr = REQUEST.get('HTTP_X_REAL_IP')\n real_uri = REQUEST.get('HTTP_X_URI')\n\n # change SERVER_URL to frontend server's address and protocol\n if server and real_host:\n proto = REQUEST.get('HTTP_X_METHOD') or splittype( server )[0]\n host, port = splitport( real_host )\n REQUEST.setServerURL( proto, host, port or default_port.get( proto ) )\n\n # set REMOTE_ADDR to the real client's address\n if real_addr:\n REQUEST.environ['REMOTE_ADDR'] = real_addr\n\n # modify SCRIPT_NAME for proxied requests like\n # http://frontend/prefix/portal -> http://backend/portal\n if real_uri:\n # TODO: handle different portal name on frontend\n pos = real_uri.find( REQUEST['PATH_INFO'] )\n if pos > 0:\n REQUEST._script = real_uri[ 1:pos ].split('/')", "def downgrade_wsgi_ux_to_1x(environ):\n env1x = {}\n\n url_encoding = environ[native_to_unicode('wsgi.url_encoding')]\n for k, v in list(environ.items()):\n if k in [native_to_unicode('PATH_INFO'),\n native_to_unicode('SCRIPT_NAME'),\n native_to_unicode('QUERY_STRING')]:\n v = v.encode(url_encoding)\n elif isinstance(v, text_type):\n v = v.encode('ISO-8859-1')\n env1x[k.encode('ISO-8859-1')] = v\n\n return env1x", "def ignore_local_proxy_environment_variables(self):\n self._ignore_local_proxy = True", "def index():\n if request.environ.get('HTTP_X_FORWARDED_FOR') is None:\n print('here')\n return request.environ['REMOTE_ADDR']\n else:\n print('there')\n return request.environ['HTTP_X_FORWARDED_FOR']", "def _get_host(request):\n return request.headers.get('X-Forwarded-Host', request.headers['Host'])", "def add_app_headers(self, app: Flask) -> Flask:\n app.after_request(self.add_report_to_headers)\n app.after_request(self.add_csp_headers)\n return app", "def app_environment(\n monkeypatch: MonkeyPatch, default_app_env_vars: EnvVarsDict\n) -> EnvVarsDict:\n\n env_vars = setenvs_from_dict(\n monkeypatch,\n {\n **default_app_env_vars,\n \"WEBSERVER_HOST\": \"webserver\",\n \"WEBSERVER_SESSION_SECRET_KEY\": Fernet.generate_key().decode(\"utf-8\"),\n \"API_SERVER_POSTGRES\": \"null\",\n \"LOG_LEVEL\": \"debug\",\n \"SC_BOOT_MODE\": \"production\",\n },\n )\n\n # should be sufficient to create settings\n print(ApplicationSettings.create_from_envs().json(indent=1))\n\n return env_vars", "def _setup_friendly_environ(environ):\n http_host, host_url = determine_host(environ)\n if http_host == host_url:\n space_name = \"frontpage\"\n else:\n space_name = determine_space(environ, http_host)\n\n recipe_name = determine_space_recipe(environ, space_name)\n environ['wsgiorg.routing_args'][1]['recipe_name'] = recipe_name.encode(\n 'UTF-8')", "def _sanitizeEnv(self, environ):\n reqUri = None\n if environ.has_key('REQUEST_URI'):\n reqUri = environ['REQUEST_URI'].split('?', 1)\n\n # Ensure QUERY_STRING exists\n if not environ.has_key('QUERY_STRING') or not environ['QUERY_STRING']:\n if reqUri is not None and len(reqUri) > 1:\n environ['QUERY_STRING'] = reqUri[1]\n else:\n environ['QUERY_STRING'] = ''\n\n # Check WSGI_SCRIPT_NAME\n scriptName = environ.get('WSGI_SCRIPT_NAME')\n if scriptName is None:\n scriptName = self.scriptName\n else:\n warnings.warn('WSGI_SCRIPT_NAME environment variable for scgi '\n 'servers is deprecated',\n DeprecationWarning)\n if scriptName.lower() == 'none':\n scriptName = None\n\n if scriptName is 
None:\n # Do nothing (most likely coming from cgi2scgi)\n return\n\n if scriptName is NoDefault:\n # Pull SCRIPT_NAME/PATH_INFO from environment, with empty defaults\n if not environ.has_key('SCRIPT_NAME'):\n environ['SCRIPT_INFO'] = ''\n if not environ.has_key('PATH_INFO') or not environ['PATH_INFO']:\n if reqUri is not None:\n environ['PATH_INFO'] = reqUri[0]\n else:\n environ['PATH_INFO'] = ''\n else:\n # Configured scriptName\n warnings.warn('Configured SCRIPT_NAME is deprecated\\n'\n 'Do not use WSGI_SCRIPT_NAME or the scriptName\\n'\n 'keyword parameter -- they will be going away',\n DeprecationWarning)\n\n value = environ['SCRIPT_NAME']\n value += environ.get('PATH_INFO', '')\n if not value.startswith(scriptName):\n self.logger.warning('scriptName does not match request URI')\n\n environ['PATH_INFO'] = value[len(scriptName):]\n environ['SCRIPT_NAME'] = scriptName", "def update_env_vars(self, request, pk=None):\n app = self.get_object()\n app.update_environment_variables()\n response = {}\n return Response(response)", "def _get_forwarded_host(self, request: Request) -> Optional[str]:\n forwarded_host = request.headers.getlist(\"X-Forwarded-Host\")\n if not forwarded_host or len(forwarded_host) > 1:\n return None\n return forwarded_host[0].strip()", "def prepare_flask_request_for_saml(request):\n # If server is behind proxys or balancers use the HTTP_X_FORWARDED fields\n url_data = urlparse(request.url)\n # If in local development environment this will redirect the saml login right.\n if request.host == 'localhost':\n request.host = '30.30.30.30'\n return {\n 'https': 'on' if request.scheme == 'https' else 'off',\n 'http_host': request.host,\n 'server_port': url_data.port,\n 'script_name': request.path,\n 'get_data': request.args.copy(),\n 'post_data': request.form.copy()\n # \"lowercase_urlencoding\": \"\",\n # \"request_uri\": \"\",\n # \"query_string\": \"\"\n\n }", "def test_middleware_sets_ip_from_forwarded_for(self):\n request = MockRequest('127.0.0.1', '83.42.13.77')\n RealIPMiddleware().process_request(request)\n\n self.assertEqual(request.user_ip,\n request.META['HTTP_X_FORWARDED_FOR'])", "def set_envvars(self):\n # self.logger.trace(\"update os.environ with %s\", self.environ)\n for key in os.environ:\n current = self.environ.get(key)\n if current is None:\n del os.environ[key]\n for key, value in self.environ.items():\n if value is not None:\n os.environ[key] = str(value)", "def set_flask():\r\n app.run(host='0.0.0.0',\r\n port=5010,\r\n debug=False)", "def _base_environ(self, **request):\n # This is a minimal valid WSGI environ dictionary, plus:\n # - HTTP_COOKIE: for cookie support,\n # - REMOTE_ADDR: often useful, see #8551.\n # See http://www.python.org/dev/peps/pep-3333/#environ-variables\n environ = {\n 'HTTP_COOKIE': self.cookies.output(header='', sep='; '),\n 'USER': None,\n 'PATH_INFO': '/',\n 'REMOTE_ADDR': '127.0.0.1',\n 'REQUEST_METHOD': 'GET',\n 'SCRIPT_NAME': '',\n 'SERVER_NAME': 'testserver',\n 'SERVER_PORT': '80',\n \n }\n environ.update(self.defaults)\n environ.update(request)\n return environ", "def _transform_env(self) -> None:\n self.env = None if self.env == {} else self.env", "def app_env(request):\n env = {\"LOGIN_URL\": django_settings.LOGIN_URL,\n \"REDIRECT_FIELD_NAME\": getattr(django_settings, \"REDIRECT_FIELD_NAME\", \"next\"),\n \"LOGOUT_URL\": django_settings.LOGOUT_URL}\n # if hasattr(settings, \"SERVER_MAINTENANCE_MESSAGE\"):\n # env[\"SERVER_MAINTENANCE_MESSAGE\"] = settings.SERVER_MAINTENANCE_MESSAGE\n\n return env", "def 
put_headers_in_environ(headers, environ):\n for key, value in headers:\n environ['HTTP_%s' % key.upper().replace('-', '_')] = value", "def __before__(self, action, environ):\n host = request.headers.get('Host')\n if not (host and host in app_globals.merchants.domain_map):\n prot, host, path, params, query, fragment = urlparse.urlparse(request.url)\n return redirect(urlparse.urlunparse((prot, app_globals.default_host, path, params, query, fragment)))\n else:\n protocol = request.headers.get('X-Forwarded-Proto', 'http')\n request.merchant = app_globals.merchants.domain_map[host]\n request.qualified_host = '%s://%s'%(protocol, host)\n request.is_secured = protocol == 'https'\n log.info('%s, %s, %s', '-'*80, protocol , protocol == 'https')\n if not websession.get('region'):\n region = request.headers.get(\"X-COUNTRY\", app_globals.country_choices.fallback.code).lower()\n region = app_globals.country_choices.map.get(region, app_globals.country_choices.fallback).code\n websession['region'] = region\n c.messages = websession.get('messages', [])\n c.user = websession.get('user', ANONUSER)\n c.user._statics = app_globals.statics_service\n c.furl = str(request.params.get(\"furl\") or request.url)\n log.info('[%s] [%s] [%s] Incoming Request at %s', c.user.u_id, websession['region'], request.headers.get('Host'), url.current())\n\n if 'lang' not in websession or websession['lang'] not in app_globals.LANGUAGES:\n websession['lang'] = negotiate_locale(request.accept_language, app_globals.LANGUAGES)\n set_lang(websession['lang'])", "def map_to_app_env_vars(self, app):\n app['env_vars'] = []\n for form_envvar in self.env_vars:\n env_var = {}\n if form_envvar.var_key.data:\n env_var['var_key'] = form_envvar.var_key.data\n if form_envvar.var_value.data:\n env_var['var_value'] = form_envvar.var_value.data\n if env_var:\n app['env_vars'].append(env_var)", "def test_strict_https_header(flask_app, app):\n app.config['STRICT_HTTPS'] = True # enable strict https\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert headers.get('Strict-Transport-Security') == 'max-age=31536000; includeSubDomains'\n\n app.config['STRICT_HTTPS'] = False # disable\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert not headers.get('Strict-Transport-Security')", "def insert_xforwarded_for(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"insert_xforwarded_for\")", "def insert_xforwarded_for(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"insert_xforwarded_for\")", "def envvars(envs):\n old_env = {}\n for var, value in envs.items():\n old_env[var] = os.environ.get(var)\n os.environ[var] = value\n\n yield\n\n for var in envs:\n if old_env[var] is None:\n del os.environ[var]\n else:\n os.environ[var] = old_env[var]", "def patch_environment(**kwargs):\n for key, value in kwargs.items():\n os.environ[key.upper()] = str(value)\n\n yield\n\n for key in kwargs:\n if key.upper() in os.environ:\n del os.environ[key.upper()]", "def refresh_wsgi():\n\n require(\"wsgi_path\", \"sudo_user\")\n cmd = \"touch -c %s\" % env.wsgi_path\n sudo(cmd, user=env.sudo_user)", "def override_environ(**overrides):\n with override_dict(os.environ, **overrides):\n yield", "def x_forwarded_for_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"x_forwarded_for_enabled\")", "def remote_addr(env):\r\n # In production the remote address is always the load balancer\r\n # So check X-Forwarded-For first\r\n # E.g. 
HTTP_X_FORWARDED_FOR: '66.249.72.73, 75.101.144.164'\r\n if env.has_key('HTTP_X_FORWARDED_FOR'):\r\n ips = re.split(r'\\s*,\\s*', env['HTTP_X_FORWARDED_FOR'])\r\n if len(ips) > 0:\r\n return ips[0]\r\n\r\n return env['REMOTE_ADDR']", "def process_request_headers(request):\n request.headers.setdefault('User-Agent',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/51.0.2704.103 Safari/537.36')\n if 'redirect_urls' not in request.meta:\n request.headers['Referer'] = None", "def process_client_headers(\n self, *, scope: Scope, headers: Headerlike\n ) -> Headerlike:\n if self.rewrite_host_header:\n headers = headers.mutablecopy() # type: ignore\n headers[\"host\"] = self.rewrite_host_header\n return super().process_client_headers(scope=scope, headers=headers) # type: ignore", "def eff_request_host(request):\n erhn = req_host = request_host(request)\n if req_host.find(\".\") == -1 and not cookiejar.IPV4_RE.search(req_host):\n erhn = req_host + \".local\"\n return req_host, erhn", "def unset_envvars():\n for key in \"http_proxy\", \"https_proxy\", \"HTTP_PROXY\", \"HTTPS_PROXY\":\n if key in os.environ:\n del os.environ[key]", "def staging():\n env.hosts = ['staging.example.com']", "def insert_xforwarded_for(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"insert_xforwarded_for\")", "def sdss_env(request):\n m = request.getfixturevalue(\"monkeypatch\")\n for p in ('PHOTO_CALIB', 'PHOTO_DATA', 'BOSS_PHOTOOBJ', 'PHOTO_REDUX',\n 'PHOTO_RESOLVE', 'PHOTO_SKY', 'PHOTO_SWEEP'):\n m.setenv(p, '/' + p)\n return m", "def restore_env():\n\n def restore(key, value):\n if value is None:\n if key in os.environ:\n del os.environ[key]\n else:\n os.environ[key] = value\n\n restore(\"TF_XLA_FLAGS\", ORIGINAL_TF_XLA_FLAGS)\n restore(\"TF_FORCE_GPU_ALLOW_GROWTH\", ORIGINAL_TF_FORCE_GPU_ALLOW_GROWTH)", "def clean_env():\n for key in ['FOO', 'THOR', 'IRON', 'NAME', 'PERSONAL_DIR']:\n os.environ.pop(key, None)", "def prepare_flask_request(req):\n url_data = urlparse(req.url)\n return {\n \"https\": \"on\" if req.scheme == \"https\" else \"off\",\n \"http_host\": req.host,\n \"server_port\": url_data.port,\n \"script_name\": req.path,\n \"get_data\": req.args.copy(),\n \"post_data\": req.form.copy(),\n }", "def env_wrap(self):\n\n old_env = dict(os.environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_env)", "def push(self):\n\t\tif self.old_vars is not None:\n\t\t\treturn\n\n\t\tself.old_vars = {}\n\t\tfor k, v in self.vars.items():\n\t\t\tself.old_vars[k] = os.environ.get(k)\n\t\t\tif v is None:\n\t\t\t\tif k in os.environ:\n\t\t\t\t\tdel os.environ[k]\n\t\t\telse:\n\t\t\t\tos.environ[k] = v", "def LocalEnv(local_env):\n old_env = os.environ.copy()\n os.environ.update(local_env)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_env)", "def _finalize_env(self, env: Dict[str, str]) -> None:\n\n # add the applicable kernel_id and language to the env dict\n env['KERNEL_ID'] = self.kernel_id\n\n kernel_language = 'unknown-kernel-language'\n if len(self.kernel_spec.language) > 0:\n kernel_language = self.kernel_spec.language.lower()\n # if already set in env: stanza, let that override.\n env['KERNEL_LANGUAGE'] = env.get('KERNEL_LANGUAGE', kernel_language)\n\n # Remove any potential sensitive (e.g., passwords) or annoying values (e.g., LG_COLORS)\n for k in env_pop_list:\n env.pop(k, None)", "def prod():\n # Varnish proxies.\n # env.roledefs['varnish_servers'] = ['varnish1.example.org', 'varnish2.example.org']\n # 
The Django app servers.\n env.roledefs['webapp_servers'] = ['djangopatterns.com']\n # Static media servers\n # env.roledefs['media_servers'] = ['djangopatterns.com']\n # Postgres servers.\n env.roledefs['db_servers'] = ['djangopatterns.com']\n\n # Combine all of the roles into the env.hosts list.\n env.hosts = [host[0] for host in env.roledefs.values()]", "def env_cleanup(self):\n pass", "def map_from_app_env_vars(self, app):\n if 'env_vars' in app and len(app['env_vars']) > 0:\n empty_fieldlist(self.env_vars)\n for envvar in app.get('env_vars', []):\n self.env_vars.append_entry()\n form_envvar = self.env_vars.entries[-1].form\n form_envvar.map_from_app(envvar)", "def setup_environ(self):\n SimpleHandler.setup_environ(self)\n self.environ['ws4py.socket'] = get_connection(self.environ['wsgi.input'])\n self.http_version = self.environ['SERVER_PROTOCOL'].rsplit('/')[-1]", "def get_client_ip(environ):\n # type: (Dict[str, str]) -> Optional[Any]\n try:\n return environ[\"HTTP_X_FORWARDED_FOR\"].split(\",\")[0].strip()\n except (KeyError, IndexError):\n pass\n\n try:\n return environ[\"HTTP_X_REAL_IP\"]\n except KeyError:\n pass\n\n return environ.get(\"REMOTE_ADDR\")", "def pop(self):\n\t\tif self.old_vars is None:\n\t\t\treturn\n\n\t\tfor k, v in self.old_vars.items():\n\t\t\tif v is None:\n\t\t\t\tif k in os.environ:\n\t\t\t\t\tdel os.environ[k]\n\t\t\telse:\n\t\t\t\tos.environ[k] = v\n\n\t\tself.old_vars = None", "def app(request):\n app = flask.Flask(__name__)\n return app", "def prepend_env(self, env_name, pattern):\n if not self.has_pattern(env_name, pattern):\n if env_name not in self.environ.keys():\n self.environ[env_name] = [pattern]\n else:\n self.environ[env_name].insert(0, pattern)\n if env_name not in self.env_name_changed:\n self.env_name_changed.append(env_name)", "def _replaceEnv(self, match):\n try:\n return os.environ[match.group('var')]\n except KeyError:\n raise InvalidRequest('Can not find environment variable: '\n '{0}'.format(match.group('var')))", "def _normalize_headers(self):\n self.ncookies=dict((k.lower(), v) for k, v in self.request.cookies.iteritems())\n self.nheaders=dict((k.lower(), v) for k, v in self.request.headers.iteritems())", "def __call__(self, request):\n for field in self.FORWARDED_FOR_FIELDS:\n if field in request.META:\n if \",\" in request.META[field]:\n parts = request.META[field].split(\",\")\n request.META[field] = parts[-1].strip()\n\n response = self.get_response(request)\n\n return response", "def avoid_pip_isolation(env: Mapping[str, str]) -> dict[str, str]:\n new_env = {k: v for k, v in env.items() if k != \"PYTHONNOUSERSITE\"}\n if \"PYTHONPATH\" not in new_env:\n return new_env\n\n new_env[\"PYTHONPATH\"] = os.pathsep.join(\n [\n path\n for path in new_env[\"PYTHONPATH\"].split(os.pathsep)\n if \"pip-build-env-\" not in path\n ]\n )\n return new_env", "def update(self, env):\n del env\n return", "def get_ip():\n if not request.headers.getlist(\"X-Forwarded-For\"):\n return str(request.remote_addr)\n else:\n return str(request.headers.getlist(\"X-Forwarded-For\")[0])", "def view_origin():\n\n return jsonify(origin=request.headers.get(\"X-Forwarded-For\", request.remote_addr))", "def check_environ():\n global _environ_checked\n if _environ_checked:\n return\n\n if os.name == 'posix' and 'HOME' not in os.environ:\n import pwd\n os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]\n\n if 'PLAT' not in os.environ:\n os.environ['PLAT'] = _sysconfig.get_platform()\n\n _environ_checked = 1", "def get_host(environ, use_x_forwarded_for=False):\n # 
type: (Dict[str, str], bool) -> str\n if use_x_forwarded_for and \"HTTP_X_FORWARDED_HOST\" in environ:\n rv = environ[\"HTTP_X_FORWARDED_HOST\"]\n if environ[\"wsgi.url_scheme\"] == \"http\" and rv.endswith(\":80\"):\n rv = rv[:-3]\n elif environ[\"wsgi.url_scheme\"] == \"https\" and rv.endswith(\":443\"):\n rv = rv[:-4]\n elif environ.get(\"HTTP_HOST\"):\n rv = environ[\"HTTP_HOST\"]\n if environ[\"wsgi.url_scheme\"] == \"http\" and rv.endswith(\":80\"):\n rv = rv[:-3]\n elif environ[\"wsgi.url_scheme\"] == \"https\" and rv.endswith(\":443\"):\n rv = rv[:-4]\n elif environ.get(\"SERVER_NAME\"):\n rv = environ[\"SERVER_NAME\"]\n if (environ[\"wsgi.url_scheme\"], environ[\"SERVER_PORT\"]) not in (\n (\"https\", \"443\"),\n (\"http\", \"80\"),\n ):\n rv += \":\" + environ[\"SERVER_PORT\"]\n else:\n # In spite of the WSGI spec, SERVER_NAME might not be present.\n rv = \"unknown\"\n\n return rv", "def GetEnvironment(self):\n environ = super(ServiceHandlerTest, self).GetEnvironment()\n if self.remote_host:\n environ['REMOTE_HOST'] = self.remote_host\n if self.server_host:\n environ['SERVER_HOST'] = self.server_host\n return environ", "def dev():\n env.hosts = ['']\n env.user = ''\n env.virtualenv_dir = ''\n env.code_dir = ''\n env.var_dir = ''\n env.activate = 'source %s/bin/activate' % env.virtualenv_dir\n env.backup_on_deploy = False", "def test_reverse_proxy_config():\n\n class ReverseProxyConfig(TestingConfig):\n REVERSE_PROXY = \"1,2,3,4\"\n\n app = create_ctfd(config=ReverseProxyConfig)\n with app.app_context():\n assert app.wsgi_app.x_for == 1\n assert app.wsgi_app.x_proto == 2\n assert app.wsgi_app.x_host == 3\n assert app.wsgi_app.x_port == 4\n assert app.wsgi_app.x_prefix == 0\n destroy_ctfd(app)\n\n class ReverseProxyConfig(TestingConfig):\n REVERSE_PROXY = \"true\"\n\n app = create_ctfd(config=ReverseProxyConfig)\n with app.app_context():\n assert app.wsgi_app.x_for == 1\n assert app.wsgi_app.x_proto == 1\n assert app.wsgi_app.x_host == 1\n assert app.wsgi_app.x_port == 1\n assert app.wsgi_app.x_prefix == 1\n destroy_ctfd(app)", "def _get_forwarded_for(self, request: Request) -> List[_BaseAddress]:\n forwarded_for_str = request.headers.getlist(\"X-Forwarded-For\")\n if not forwarded_for_str or len(forwarded_for_str) > 1:\n return []\n return [\n ip_address(addr)\n for addr in (a.strip() for a in forwarded_for_str[0].split(\",\"))\n if addr\n ]", "def prepare_app(self):\n self.app = Flask(self.APP_NAME)\n self.app.config.from_object('mmapi.config.Config')\n CORS(self.app, origins=self.app.config['CORS_ACCEPTED_ORIGINS'])\n\n # Map urls with and without a trailing slash to the same endpoint.\n self.app.url_map.strict_slashes = False", "def ssl_redirect():\n if request.get_header('X-Forwarded-Proto', 'http') != 'https':\n redirect(request.url.replace('http://', 'https://', 1), code=301)", "def app(environ: t.Dict, start_response):\n # Print the request object details in environ.items()\n for k, v in environ.items():\n print(k, v)\n\n # Let's capture the request path\n path = environ.get(\"PATH_INFO\")\n\n # Handle our different routes. Render different templates.\n # Allow user to add \"/\" or not to URL string\n # NOTE: Don't use elif statement! 
It skips 'data' assignment!\n if path.endswith(\"/\"):\n path = path[:-1] # remove the trailing \"/\"\n if path == \"\": # the root / index\n data = home(environ)\n elif path == \"/contact\":\n data = contact_us(environ)\n elif path == \"/box-office\":\n data = read_box_office_data(environ)\n else:\n data = render_template(template_name=\"404.html\", context={\"path\": path})\n\n # Encode data to BYTE string\n data = data.encode(\"utf-8\")\n\n # Gunicorn's start_response to get a response going\n start_response(\n f\"200 OK\",\n [(\"Content-Type\", \"text/html\"), (\"Content-Length\", str(len(data)))],\n # You can remove these headers and the browser will still parse it.\n # Modern browsers are smart enough to infer how to parse the request\n )\n # Where does this print to? Server logs I bet... YES!\n # print(f\"{data=}\\n{iter([data])}\")\n return iter([data]) # <list_iterator object at 0x10f9f1340>", "def _preload_existing_vars(prefix: str) -> Store:\n if not prefix:\n # If prefix is empty just return all the env variables.\n return environ\n\n prefixed = {}\n\n # Prefix is not empty, do the search and replacement:\n for env_name, env_value in environ.items():\n if not env_name.startswith(prefix):\n # Skip vars with no prefix.\n continue\n\n prefixed[env_name.replace(prefix, '', 1)] = env_value\n\n return prefixed", "def update_flask(self, flask_app):\n flask_app.config.update(self.flask_config_dict)", "def test_metadata_cache_uri_set_via_env_vars(monkeypatch, caplog):\n ENV_METADATA_CACHE_URI = environ_names_and_sections[NAME_METADATA_CACHE_URI][0]\n ENV_AQUARIUS_URL = deprecated_environ_names[NAME_AQUARIUS_URL][0]\n\n monkeypatch.delenv(ENV_METADATA_CACHE_URI, raising=False)\n monkeypatch.delenv(ENV_AQUARIUS_URL, raising=False)\n config = Config()\n metadata_cache_uri = config.metadata_cache_uri\n assert metadata_cache_uri == \"https://aquarius.marketplace.oceanprotocol.com\"\n\n monkeypatch.setenv(ENV_METADATA_CACHE_URI, \"https://custom-aqua.uri\")\n config = Config()\n assert config.metadata_cache_uri == \"https://custom-aqua.uri\"\n\n monkeypatch.setenv(ENV_AQUARIUS_URL, \"https://another-aqua.url\")\n with pytest.raises(ValueError):\n Config()\n\n monkeypatch.delenv(ENV_METADATA_CACHE_URI)\n config = Config()\n assert config.metadata_cache_uri == \"https://another-aqua.url\"\n assert (\n \"Config: AQUARIUS_URL envvar is deprecated. Use METADATA_CACHE_URI instead.\"\n in caplog.text\n )", "def augmented_environment(self):\n env = os.environ.copy()\n env.update(self.environ)\n return env", "def allow_unresolved_environment_tokens(self):\n return self._allow_unresolved_environment_tokens", "def test_environment_patchtest(self):\n self.env = patch.dict('os.environ', {'hello': 'world'})\n with self.env:\n self.assertEqual(os.environ['hello'], 'world')", "def get_undercloud_env_vars():\n\n # Handle backward compatibile OSCI enviornment variables\n _vars = {}\n _vars['net_id'] = os.environ.get('NET_ID')\n _vars['external_dns'] = os.environ.get('NAMESERVER')\n _vars['default_gateway'] = os.environ.get('GATEWAY')\n _vars['external_net_cidr'] = os.environ.get('CIDR_EXT')\n\n # Take FIP_RANGE and create start and end floating ips\n _fip_range = os.environ.get('FIP_RANGE')\n if _fip_range and ':' in _fip_range:\n _vars['start_floating_ip'] = os.environ.get('FIP_RANGE').split(':')[0]\n _vars['end_floating_ip'] = os.environ.get('FIP_RANGE').split(':')[1]\n\n # Env var naming consistent with zaza.configure.network functions takes\n # priority. 
Override backward compatible settings.\n _keys = ['default_gateway',\n 'start_floating_ip',\n 'end_floating_ip',\n 'external_dns',\n 'external_net_cidr']\n for _key in _keys:\n _val = os.environ.get(_key)\n if _val:\n _vars[_key] = _val\n\n # Remove keys and items with a None value\n for k, v in list(_vars.items()):\n if not v:\n del _vars[k]\n\n return _vars", "def sanitize_headers(headers):\n auth_header = headers.pop(\"Authorization\", None)\n if auth_header:\n _logger.warning(\n f\"Possible fraud: Authorization header was set to {auth_header}\"\n )\n userinfo_header = headers.pop(\"X-Userinfo\", None)\n if userinfo_header:\n _logger.warning(\n f\"Possible fraud: X-Userinfo header was set to {userinfo_header}\"\n )", "def local_only(fn):\r\n @wraps(fn)\r\n def wrapper(*args, **kwargs):\r\n if request.remote_addr != '127.0.0.1':\r\n return 401\r\n return fn(*args, **kwargs)\r\n return wrapper", "def before():\n app.logger.info(\"Local Timestamp: {}\".format(str(datetime.now())))\n app.logger.info(\"Request Method: {}\".format(request.method))\n app.logger.info(\"Request URL: {}\".format(request.url))\n app.logger.info(\"Request Access Route: {}\".format(request.access_route[0]))\n headers = \"\"\n for (key, value) in request.headers:\n # hide authorization header from logs\n if key == \"Authorization\":\n value = \"[provided]\" \n headers += \"{}: {}\\n\".format(key, value)\n app.logger.info(\"Request Headers:{}\\n{}\\n{}\".format(\"-\"*45,str(headers)[:-1], \"-\"*60))\n body = copy.deepcopy(request.json)\n if type(body) is dict and \"password\" in body:\n body['password'] = \"[provided]\"\n app.logger.info(\"Request Body: {}\".format(body))", "def env_vars(self):\n return _untag_env_vars(self._tagged_env_vars, build=False)", "def check_user_environment(config):\n if not config.has_section('user_env_vars'):\n return\n\n for env_var in config.keys('user_env_vars'):\n if env_var in os.environ:\n msg = '{} is already set in the environment. 
'.format(env_var) +\\\n 'Overwriting from conf file'\n config.logger.warning(msg)", "def unpatch():\n _u(sanic.Sanic, \"handle_request\")\n if not SANIC_PRE_21:\n _u(sanic.Sanic, \"_run_request_middleware\")\n _u(sanic.request.Request, \"respond\")\n if not getattr(sanic, \"__datadog_patch\", False):\n return\n setattr(sanic, \"__datadog_patch\", False)", "def upgrade_environment(self, db):\n\n pass", "def _parse_wsgi_headers(wsgi_environ):\n prefix = 'HTTP_'\n p_len = len(prefix)\n # use .items() despite suspected memory pressure bc GC occasionally\n # collects wsgi_environ.iteritems() during iteration.\n headers = {\n key[p_len:].replace('_', '-').lower():\n val for (key, val) in wsgi_environ.items()\n if key.startswith(prefix)}\n return headers", "def patch_flask_url_for(self):\n original_url_for = flask.url_for\n flask.url_for = functools.lru_cache(maxsize=None)(original_url_for)", "def override_environment(**kwargs):\n overridden = {}\n for key, value in kwargs.items():\n overridden[key] = os.environ.get(key)\n if value is None:\n os.environ.pop(key, None)\n else:\n os.environ[key] = value\n\n yield\n\n for key, value in overridden.items():\n if value is None:\n os.environ.pop(key, None)\n else:\n os.environ[key] = value", "def refresh(self):\n self.__envs = {}\n cmd_result = self.openshift.do_action(\"set\", [\"env\", self.resource_type, self.deployment_name, \"--list\"])\n for line in cmd_result.out().split(\"\\n\"):\n for env_type in self.types:\n match_obj = re.match(env_type.pattern, line)\n if match_obj:\n env = env_type(openshift=self.openshift,\n deployment=self.deployment_name,\n match=match_obj,\n environ=self)\n self.__envs[env.name] = env\n break", "def app(request) -> Flask:\n app = create_app()\n context = app.app_context()\n context.push()\n\n def teardown():\n context.pop()\n\n request.addfinalizer(teardown)\n return app", "def preprocess(self, environ):\n # log a bit about this request.\n self.logger.info(\"REQUEST %(REQUEST_METHOD)s server=%(SERVER_NAME)s:%(SERVER_PORT)s path=%(PATH_INFO)s query=%(QUERY_STRING)s\" % environ)\n\n # is this a POST, and if so, did they send along gzipped data?\n if environ['REQUEST_METHOD'] == 'POST' and environ.get(\"HTTP_CONTENT_ENCODING\") == 'gzip':\n # we need to decompress the gzipped data\n self.logger.debug(\"gzipped data found in POST body\")\n contentLength = int(environ.get('CONTENT_LENGTH', 0))\n compressedData = environ['wsgi.input'].read(contentLength)\n sio = StringIO.StringIO(compressedData)\n gzFile = gzip.GzipFile(fileobj=sio)\n decompressedData = gzFile.read()\n gzFile.close()\n sio.close()\n environ['wsgi.input'] = StringIO.StringIO(decompressedData)\n environ['CONTENT_LENGTH'] = str(len(decompressedData))\n self.logger.debug(\"expanded %d bytes of gzip into %d bytes of uncompressed data\" % (contentLength, len(decompressedData)))", "def get_env(self, loop):\n env = getattr(self.app, 'env', None)\n if not env:\n env = self.environment(self.app, loop, self.host, self.port)\n self.app.env = env\n return env", "def modified_environ(self, *remove, **update):\n env = os.environ\n update = update or {}\n remove = remove or []\n\n # List of environment variables being updated or removed.\n stomped = (set(update.keys()) | set(remove)) & set(env.keys())\n # Environment variables and values to restore on exit.\n update_after = {k: env[k] for k in stomped}\n # Environment variables and values to remove on exit.\n remove_after = frozenset(k for k in update if k not in env)\n\n try:\n env.update(update)\n [env.pop(k, None) for k 
in remove]\n yield\n finally:\n env.update(update_after)\n [env.pop(k) for k in remove_after]", "def modified_environ(*remove, **update):\n env = os.environ\n update = update or {}\n remove = remove or []\n\n # List of environment variables being updated or removed.\n stomped = (set(update.keys()) | set(remove)) & set(env.keys())\n # Environment variables and values to restore on exit.\n update_after = {k: env[k] for k in stomped}\n # Environment variables and values to remove on exit.\n remove_after = frozenset(k for k in update if k not in env)\n\n try:\n env.update(update)\n [env.pop(k, None) for k in remove] # pylint: disable=expression-not-assigned\n yield\n finally:\n env.update(update_after)\n [env.pop(k) for k in remove_after] # pylint: disable=expression-not-assigned", "def modified_environ(*remove, **update):\n env = os.environ\n update = update or {}\n remove = remove or []\n\n # List of environment variables being updated or removed.\n stomped = (set(update.keys()) | set(remove)) & set(env.keys())\n # Environment variables and values to restore on exit.\n update_after = {k: env[k] for k in stomped}\n # Environment variables and values to remove on exit.\n remove_after = frozenset(k for k in update if k not in env)\n\n try:\n env.update(update)\n [env.pop(k, None) for k in remove] # pylint: disable=expression-not-assigned\n yield\n finally:\n env.update(update_after)\n [env.pop(k) for k in remove_after] # pylint: disable=expression-not-assigned", "def bootstrap_wsgi():\n return get_wsgi_application()", "def update_environ():\n\n # Environment variables to set.\n BASE = os.getcwd()\n PLUGINS = os.path.join(BASE, 'lib')\n RESOURCES = os.path.join(BASE, 'res')\n MODELS = os.path.join(RESOURCES, 'models')\n\n # Set the vaue to '' to set the var to ''.\n # Anything else will be added to current var value.\n minimapper_env = {\n 'GAZEBO_RESOURCE_PATH': RESOURCES,\n 'GAZEBO_MODEL_PATH': MODELS,\n 'GAZEBO_PLUGIN_PATH': PLUGINS,\n 'GAZEBO_MODEL_DATABASE_URI': None\n }\n\n # Conditionally set environment variables.\n env = os.environ.copy()\n for key, val in minimapper_env.items():\n if val is None:\n env[key] = ''\n elif key not in env:\n env[key] = val\n elif key in env and val not in env[key]:\n env[key] = val + ':' + env[key]\n\n return env", "def request_add_host(request, address):\n\n request.setdefault('headers', {})\n request['headers'].setdefault('Host', address)\n\n return request", "def env_init(self, environ) -> None:\n environ.update(self._env)", "def instrument_flask():\n oc_trace_config = app.config.get('OPENCENSUS_TRACE', {})\n oc_trace_config.update({\n 'EXPORTER': trace_exporter.TraceExporter,\n 'PROPAGATOR': trace_context_http_header_format.TraceContextPropagator\n })\n app.config.update(OPENCENSUS_TRACE=oc_trace_config)\n return flask_middleware.FlaskMiddleware(app)", "def remote_addr(self):\n fwd = self.environ.get('HTTP_X_FORWARDED_FOR', None)\n if fwd is None:\n return self.environ.get('REMOTE_ADDR')\n # sometimes x-forwarded-for contains multiple addresses,\n # actual client is first, rest are proxy\n fwd = fwd.split(',')[0]\n return fwd", "def first_request():\n heroku_url: str = 'https://justice-ndou.herokuapp.com/'\n registered_domain: str = 'https://justice-ndou.herokuapp.com/'\n\n if request.host_url.lower().startswith(heroku_url):\n return redirect(request.host_url.lower().replace(heroku_url, registered_domain)), 301", "def test_check_env_vars(self, monkeypatch):\n context = Context(SSLv23_METHOD)\n dir_var = \"CUSTOM_DIR_VAR\"\n file_var = 
\"CUSTOM_FILE_VAR\"\n assert context._check_env_vars_set(dir_var, file_var) is False\n monkeypatch.setenv(dir_var, \"value\")\n monkeypatch.setenv(file_var, \"value\")\n assert context._check_env_vars_set(dir_var, file_var) is True\n assert context._check_env_vars_set(dir_var, file_var) is True" ]
[ "0.57845724", "0.5618568", "0.53275186", "0.5303353", "0.529868", "0.5294957", "0.52365404", "0.5180683", "0.5151824", "0.51268667", "0.5117508", "0.5112825", "0.5058677", "0.5011258", "0.5004951", "0.5002801", "0.49961042", "0.49657816", "0.49571234", "0.4935363", "0.49287555", "0.49120715", "0.48910323", "0.48910323", "0.4867856", "0.48652804", "0.4847339", "0.48265094", "0.4804396", "0.48005462", "0.47941378", "0.47645164", "0.47526076", "0.47513387", "0.4740892", "0.47111535", "0.47071132", "0.4692421", "0.46887064", "0.46714228", "0.4668463", "0.46655253", "0.4655609", "0.46484938", "0.46347332", "0.4631352", "0.46217775", "0.46127275", "0.45937687", "0.45788574", "0.45593292", "0.45530808", "0.45402187", "0.4534819", "0.45308688", "0.4530802", "0.45251796", "0.45171386", "0.45126757", "0.45080775", "0.45063215", "0.44936013", "0.4487477", "0.44787097", "0.44684458", "0.4464046", "0.44630876", "0.4459905", "0.44589916", "0.44480395", "0.44404754", "0.4425045", "0.4424999", "0.44211388", "0.44103932", "0.4404851", "0.44008985", "0.44007123", "0.43951303", "0.43851215", "0.4380489", "0.43781698", "0.43776506", "0.4374002", "0.43694833", "0.43671703", "0.43633983", "0.4363377", "0.43617174", "0.43579975", "0.4357172", "0.4357172", "0.43544358", "0.43387333", "0.4336138", "0.43345326", "0.4334445", "0.4331772", "0.43311542", "0.43310517" ]
0.5541965
2
Create project parser method.
def pa_cmd(args, cmd): usage = "%s <options>" % command.USAGE.format(cmd) desc = command.DESCS[cmd] parser = argparse.ArgumentParser(usage=usage, description=desc) required = parser.add_argument_group('required named arguments') required.add_argument('-d', '--desc', help='The project description', required=True, type=str, dest="desc") required.add_argument('-o', '--owner', help='The project owner', required=True, type=str, dest="owner") parser.add_argument("-c", "--plmnid", dest="plmnid", default=None, help="The network PLMNID; default=None", type=str) parser.add_argument("-s", "--ssid", dest="ssid", default=None, help="The network SSID; default=None", type=SSID) parser.add_argument("-t", "--ssid_type", dest="ssid_type", default="unique", choices=["unique", "shared"], help="The network SSID type; default=unique") (args, leftovers) = parser.parse_known_args(args) return args, leftovers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_parser(self):\n\n p = argparse.ArgumentParser(\n self.TITLE,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n p.add_argument(\n \"--name\",\n metavar=\"NAME\",\n help=\"Public name of the project, for docs etc.\",\n default=argparse.SUPPRESS\n )\n p.add_argument(\n '--id',\n metavar='IDENT',\n help=\"Internal name of the project, for directories etc.\",\n default=argparse.SUPPRESS\n )\n p.add_argument(\n \"--title\",\n metavar=\"TEXT\",\n help=\"One-line title for the project\",\n required=True,\n default=argparse.SUPPRESS\n )\n p.add_argument(\n \"--template\",\n metavar=\"PATH\",\n help=\"Template name or path\",\n default=self.DEFAULT_TEMPLATE\n )\n p.add_argument(\n \"--update\",\n action=\"store_true\",\n help=\"Update an existing project\",\n default=False\n )\n p.add_argument(\n \"--config\",\n metavar=\"PATH\",\n help=\"Configuration to use\",\n default=self.DEFAULT_CONFIG\n )\n p.add_argument(\n \"--dry-run\", \"-n\",\n action=\"store_true\",\n help=\"Don't generate anything, just validate\",\n default=False\n )\n\n return p", "def create_parser():\n pass", "def run_project_parser(self):\n\n # get Ansible project structure\n self.__get_ansible_project_content()\n self.__generate_graph('project', self.__project_content)\n\n # get Ansible roles\n self.__get_ansible_roles_content()\n self.__generate_graph('roles', self.__role_content)", "def init_parser():\n description = (\n \"This command initializes a new project for use with Transifex. It \"\n \"is recommended to execute this command in the top level directory \"\n \"of your project so that you can include all files under it in \"\n \"Transifex. If no path is provided, the current working directory\"\n \"will be used.\"\n )\n parser = ArgumentParser(description=description)\n parser.add_argument(\"--host\", action=\"store\", dest=\"host\", default=None,\n help=\"Specify a default Transifex host.\")\n parser.add_argument(\"--user\", action=\"store\", dest=\"user\", default=None,\n help=\"Specify username for Transifex server.\")\n parser.add_argument(\"--pass\", action=\"store\", dest=\"password\",\n default=None,\n help=\"Specify password for Transifex server.\")\n parser.add_argument(\n \"--force-save\",\n action=\"store_true\",\n dest=\"save\",\n default=False,\n help=\"Override .transifexrc file with the given credentials.\"\n )\n parser.add_argument(\n \"--skipsetup\",\n action=\"store_true\",\n dest=\"skipsetup\",\n default=False,\n help=\"Don't start tx config interactive wizard after setting up \"\n \"credentials.\"\n )\n parser.add_argument(\"--token\", action=\"store\", dest=\"token\", default=None,\n help=\"Specify an api token.\\nYou can get one from\"\n \" user's settings\")\n parser.add_argument(\"--no-interactive\", action=\"store_true\",\n dest=\"no_interactive\", default=False,\n help=\"Don't require user input.\")\n parser.add_argument(\"path_to_tx\", action=\"store\", nargs='?', default=None,\n help=\"Path to tx root folder.\")\n return parser", "def parseProject(self):\r\n self.project['name'] = self.root.configuration.name\r\n self.project['chip'] = ''\r\n\r\n #TODO: parse into tree structure\r\n self.project['srcs'] = []\r\n self.searchGroups(self.root, self.project['srcs'])\r\n\r\n self.project['defs'] = []\r\n self.project['incs'] = []\r\n\r\n for element in self.root.configuration.getchildren():\r\n if element.tag == 'settings':\r\n for e in element.data.getchildren():\r\n if e.tag == 'option':\r\n if e.name.text == 'OGChipSelectEditMenu':\r\n self.project['chip'] = 
str(e.state)\r\n elif e.name.text == 'CCDefines':\r\n for d in e.getchildren():\r\n if d.tag == 'state' and d.text != None:\r\n self.project['defs'].append(d.text)\r\n elif e.name.text == 'CCIncludePath2':\r\n for d in e.getchildren():\r\n if d.tag == 'state' and d.text != None:\r\n self.project['incs'].append(d.text)\r\n\r\n for i in range(0, len(self.project['incs'])):\r\n s = str(self.project['incs'][i])\r\n if os.path.sep not in s:\r\n if os.path.sep == '\\\\':\r\n s = s.replace('/', '\\\\')\r\n elif os.path.sep == '/':\r\n s = s.replace('\\\\', '/')\r\n\r\n self.project['incs'][i] = s.replace('$PROJ_DIR$'+os.path.sep+'..', self.path)\r\n\r\n self.project['files'] = []\r\n i = 0\r\n\r\n if os.path.exists(self.path + '/Drivers/CMSIS/Device/ST/STM32F3xx/Source/Templates/gcc'):\r\n for entry in os.listdir(self.path + '/Drivers/CMSIS/Device/ST/STM32F3xx/Source/Templates/gcc'):\r\n if entry.endswith('.S') or entry.endswith('.s'):\r\n self.project['files'].append(self.path + '/Drivers/CMSIS/Device/ST/STM32F3xx/Source/Templates/gcc/'+entry)", "def create_parser():\n now = datetime.datetime.today()\n default_date = \"{}-{}-{}\".format(now.day, now.month, now.year)\n parser = argparse.ArgumentParser(description=\"Git plugin for automatic insertion of @since and @author annotations \"\n \"into *.java source files in a project.\",\n epilog=\"© Avner & Oded\")\n parser.add_argument(\"-v\", \"--version\", help=\"Display the version of this plugin\", action='store_true')\n parser.add_argument(\"--since\", nargs='?', help=\"Add the @since annotations to project\", const=default_date)\n parser.add_argument(\"--author\", nargs='?', help=\"Add the @author annotations to project\", const=getpass.getuser())\n\n return parser", "def _create_parser(self):\n parser = argparse.ArgumentParser(\n description=description,\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n parser.add_argument(\n '-v',\n '--verbose',\n action='store_true',\n default=False,\n help='Verbose mode (turn on logging.info)')\n\n parser.add_argument(\n '-d',\n '--debug',\n action='store_true',\n default=False,\n help='Debug (turn on logging.debug)')\n\n return parser", "def _parse(self, args):\n parser = self._create_parser()\n return parser.parse(args)", "def build_parser(self, parser: ArgumentParser) -> None:", "def create_parser():\n parser = argparse.ArgumentParser()\n\n # required args\n parser.add_argument('--project_id',\n help='Project id for project containing BQ data',\n default=KEY_FILE,\n type=str,\n required=True)\n\n # data and model args\n parser.add_argument('--training_budget',\n help='Training budget in hours',\n default=1,\n type=int)\n parser.add_argument('--key_file',\n help='JSON key file for API access',\n default=KEY_FILE,\n type=str)\n parser.add_argument('--location',\n help='GCP region to run',\n default=LOCATION,\n type=str)\n parser.add_argument('--automl_dataset',\n help='Name of AutoML dataset',\n default=AUTOML_DATASET,\n type=str)\n parser.add_argument('--automl_model',\n help='Name of AutoML model',\n default=AUTOML_MODEL,\n type=str)\n parser.add_argument('--bq_dataset',\n help='BigQuery dataset to import from',\n default=BQ_DATASET,\n type=str)\n parser.add_argument('--bq_table',\n help='BigQuery table to import from',\n default=BQ_TABLE,\n type=str)\n parser.add_argument('--batch_gcs_input',\n help='GCS URI for batch predict CSV',\n default=BATCH_GCS_INPUT,\n type=str)\n parser.add_argument('--batch_gcs_output',\n help='GCS URI for batch predict output',\n default=BATCH_GCS_OUTPUT,\n type=str)\n 
return parser", "def parse(self):\n logger=self.logger\n tokenizer=Tokenizer()\n self.scope=produtil.testing.parsetree.Scope()\n self.override(self.scope)\n self.parser=Parser(self.run_mode,logger,self.verbose)\n self.parser.requested_platform_name=self.platform_name\n morevars=self.make_vars()\n with open(self.inloc,'rt') as fileobj:\n self.parse_result=self.parser.parse(\n TokenizeFile(tokenizer,fileobj,self.inloc,1),self.scope,\n unique_id=self.unique_id,morevars=morevars)", "def parse():\n # Get CLI information\n commands = _extract_commands()\n usage, description = _build_usage_and_desc(commands)\n\n # Build parser\n parser = argparse.ArgumentParser(\n usage=usage,\n description=description,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n parser.add_argument(\n \"-V\",\n \"--version\",\n action=\"version\",\n version=__version__,\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n help=\"set logging level to DEBUG\",\n )\n parser.add_argument(\"command\", help=\"command to run\")\n\n # Parse arguments\n first_not_option_arg_pos = len(sys.argv)\n for i, arg in enumerate(sys.argv[1:]):\n if arg[0] != \"-\":\n first_not_option_arg_pos = i + 2\n break\n\n args = parser.parse_args(sys.argv[1:first_not_option_arg_pos])\n\n # Set logging level\n logger.setup(args.verbose or False)\n\n # Check if command is valid\n if args.command not in commands:\n logger.info(\n f\"project: '{args.command}' is not a project starter command. See `project --help`.\"\n )\n return 1\n\n # Dispatch command call\n cmd_args = commands[args.command][\"func_parse\"](\n f\"project {args.command}\", sys.argv[first_not_option_arg_pos:]\n )\n return commands[args.command][\"func_run\"](cmd_args)", "def build_parser(self, add_help=True):\n self.parser = argparse.ArgumentParser(\n description=self.description, add_help=add_help\n )\n self.parser.prog = f\"python -m {self.package}.{self.module_name}\"\n self.parser.add_argument(\n \"config_file\", help=\"Path/name of YAML configuration file for NEMO nowcast.\"\n )", "def create_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n add_help=False)\n parser.add_argument(\n '--help', '-h',\n action='store_true',\n dest='help',\n help=\"\"\"show this help message and exit\"\"\")\n parser.add_argument(\n '--verbose', '-v',\n action='count',\n default=0,\n help=\"\"\"Enable verbose output from '%(prog)s'. 
A second and third\n '-v' increases verbosity.\"\"\")\n parser.add_argument(\n '--sequential',\n action='store_true',\n help=\"\"\"Execute analyzer sequentialy.\"\"\")\n parser.add_argument(\n '--cdb',\n metavar='<file>',\n default=\"compile_commands.json\",\n help=\"\"\"The JSON compilation database.\"\"\")\n return parser", "def _read_project(self, filename):\n parser = configparser.ConfigParser()\n parser.read(filename, \"utf8\")\n return parser", "def make_parser():\n\n parser = argparse.ArgumentParser(description='Inference engine.')\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n subparsers.required = True\n solver_subparser = subparsers.add_parser('run')\n solver_subparser.add_argument(\n '-v', '--verbose', help='enable verbose mode.', action='store_true'\n )\n solver_subparser.add_argument(\n '-d', '--debug', help='enable debug mode.', action='store_true'\n )\n solver_subparser.add_argument(\n 'filename', type=str,\n help='filename containing the instructions to process.'\n )\n return parser", "def create_parser():\n p = NewParser()\n\n p.add_argument('reference', type=str,\n help = \"Fasta reference file that reads were mapped to.\")\n\n p.add_argument('gff', type=str,\n help = \"GFF file containing reference genome annotations.\")\n\n p.add_argument('vcf', type=str,\n help = \"VCF file to parse.\")\n\n args = p.parse_args(sys.argv[1:])\n return args", "def create_parser(self, prog_name, subcommand):\n parser = CommandParser(\n self, prog=\"%s %s\" % (os.path.basename(prog_name), subcommand),\n description=self.help or None,\n )\n parser.add_argument(\n '--pythonpath',\n help='A directory to add to the Python path',\n )\n self.add_arguments(parser)\n return parser", "def create_parser(self, prog_name):\n return OptionParser(\n\t\t\tprog=prog_name,\n\t\t\t#usage=self.usage(subcommand),\n\t\t\toption_list=self.option_list\n\t\t)", "def create_parser(self, prog_name, subcommand):\n parser = CommandParser(\n self, prog=\"%s %s\" % (os.path.basename(prog_name), subcommand),\n description=self.help or None,\n )\n parser.add_argument(\n '--version', action='version', version=self.get_version())\n\n self.add_arguments(parser)\n return parser", "def setup_parser(self, parser):", "def generate_parser():\n description = \"%(prog)s -- Data handling, normalization, manipulation, and plotting for HiC and 5C experimental data\"\n epilog = \"For command line options of each command, type: %(prog)s <COMMAND> -h\"\n parser = ap.ArgumentParser(description=description, epilog=epilog)\n parser.add_argument(\"--version\", action=\"version\", version=\"%(prog)s %(version_num)s\" % {'prog':parser.prog, 'version_num':VERSION})\n subparsers = parser.add_subparsers(dest='subcommand')\n\n add_connect_subparser(subparsers)\n add_fragments_subparser(subparsers)\n add_fivecdataset_subparser(subparsers)\n add_fivecproject_subparser(subparsers)\n add_fivecnormalize_subparser(subparsers)\n add_complete_fivec_subparser(subparsers)\n add_fivec_heatmap_subparser(subparsers)\n add_fivec_interval_subparser(subparsers)\n add_fivec_combine_replicates_subparser(subparsers)\n add_fends_subparser(subparsers)\n add_hicdataset_subparser(subparsers)\n add_hicproject_subparser(subparsers)\n add_hicnormalize_subparser(subparsers)\n add_complete_hic_subparser(subparsers)\n add_hic_heatmap_subparser(subparsers)\n add_hic_mrheatmap_subparser(subparsers)\n add_hic_interval_subparser(subparsers)\n add_hic_combine_replicates_subparser(subparsers)\n add_quasar_subparser(subparsers)\n return parser", "def make_parser():\n 
parser = argparse.ArgumentParser(description=\"Manage development Footprints in Rackspace Cloud\")\n parser.add_argument(\"--username\", metavar=\"<rackspace_username>\", help=\"Username for the Rackspace API\")\n parser.add_argument(\"--password\", metavar=\"<rackspace_apikey>\", help=\"API Key for the Rackspace API\")\n \n subparsers = parser.add_subparsers(dest=\"subparser_name\")\n \n # create \n parser_create = subparsers.add_parser('create', help=\"Create a footprint\")\n # parser_create.add_argument(\"-n\", \"--name\", metavar=\"<footprint_name>\", help=\"Name of Footprint to create\") \n parser_create.add_argument(\"configfile\", metavar=\"<filename>\", help=\"Name of configuration file\")\n \n # start\n parser_start = subparsers.add_parser('start', help=\"Start a footprint\")\n parser_start.add_argument(\"name\", metavar=\"<footprint_name>\", help=\"Name of Footprint to start\") \n parser_start.add_argument(\"-m\", \"--monitor\", action=\"store_true\", help=\"Monitor Footprint operations\")\n \n # list\n parser_list = subparsers.add_parser('list', help=\"List footprints\")\n # parser_list.add_argument(\"-i\", \"--uuid\", help=\"UUID of footprint\")\n \n # destroy \n parser_destroy = subparsers.add_parser('destroy', help=\"Destroy VMs, networks, and saved images associated with a footprint\")\n # parser_destroy.add_argument(\"--id\", \"-i\", metavar=\"<uuid>\", help=\"UUID of a footprint\")\n parser_destroy.add_argument(\"-m\", \"--monitor\", action=\"store_true\", help=\"Monitor progress of footprint destruction\")\n parser_destroy.add_argument(\"name\", metavar=\"<footprint_name>\", help=\"Name of Footprint to destroy\")\n parser_destroy.add_argument(\"-f\", \"--force\", action=\"store_true\", help=\"Destroy without confirmation\")\n \n # suspend \n parser_suspend = subparsers.add_parser('suspend', help=\"Save VM images of a footprint and shuts down VMs\")\n # parser_suspend.add_argument(\"-i\", '--uuid', metavar=\"<uuid>\", help=\"UUID of a footprint to suspend\")\n parser_suspend.add_argument(\"name\", metavar=\"<footprint_name>\", help=\"Name of footprint to be suspended\")\n # parser_suspend.add_argument('-m', '--monitor', action=\"store_true\", help=\"Monitor suspending of footprint\")\n \n # save \n parser_save = subparsers.add_parser('save', help=\"Saves profile of an existing environment\")\n # parser_save.add_argument(\"-i\", \"--uuid\", metavar=\"<uuid>\", help=\"UUID of a footprint to save\")\n parser_save.add_argument(\"name\", help=\"Name of footprint to save\", metavar=\"<footprint>\")\n \n # monitor \n parser_monitor = subparsers.add_parser('monitor', help=\"Monitor an environment until everything settles\")\n parser_monitor.add_argument(\"name\", help=\"Name of footprint\", metavar=\"<footprint>\")\n \n \n # parser_restore = subparsers.add_parser('restore', help=\"Create a running environment from saved images\")\n # parser_restore = subparsers.add_parser('monitor', help=\"Monitor a running environment's status\")\n # parser_restore.add_argument(\"--uuid\", metavar=\"<uuid-of-environment>\", help=\"ID of environment to restore (see 'list' command)\")\n parser_status = subparsers.add_parser('status', help=\"Show the status of an environment\")\n parser_status.add_argument(\"name\", nargs=\"?\")\n \n # generate-config\n parser_generate_config = subparsers.add_parser('generate-config', help=\"Show the status of an environment\")\n parser_generate_config.add_argument(\"-f\", \"--filename\", help=\"Filename for configuration file\", )\n 
parser_generate_config.add_argument(\"-n\", \"--name\", help=\"Short name for new footprint\", )\n parser_generate_config.add_argument(\"-i\", \"--image\", help=\"Base image UUID for footprint\",)\n \n # show\n parser_show = subparsers.add_parser(\"show\", help=\"Show data about an environment\")\n parser_show.add_argument('name', help=\"Name of footprint\", metavar=\"<footprint>\")\n \n # cleanup\n parser_cleanup = subparsers.add_parser(\"cleanup\", help=\"Remove networks and images that are no longer relevant\")\n parser_cleanup.add_argument('name', help=\"Name of footprint\", metavar=\"<footprint>\")\n \n \n # dump\n parser_dump = subparsers.add_parser(\"dump\", help=\"Dump configuration data for an environment\")\n parser_dump.add_argument('name', help=\"Name of footprint\", metavar=\"<footprint>\")\n \n\n # stop\n parser_stop = subparsers.add_parser(\"stop\", help=\"Stop a running footprint\")\n parser_stop.add_argument(\"name\", help=\"Name of footprint\", metavar=\"<footprint>\")\n\n parser_lock = subparsers.add_parser(\"lock\", help=\"Lock a footprint (prevents shutdown / startup)\")\n parser_lock.add_argument(\"name\", help=\"Name of footprint\", metavar=\"<footprint>\")\n\n parser_unlock = subparsers.add_parser(\"unlock\", help=\"Unlock a footprint (allows shutdown / startup)\")\n parser_unlock.add_argument(\"name\", help=\"Name of footprint\", metavar=\"<footprint>\")\n \n \n return parser", "def init_parser():\n usage = \"usage: %prog [tx_options] init <path>\"\n description = \"This command initializes a new project for use with \"\\\n \"Transifex. It is recommended to execute this command in the \"\\\n \"top level directory of your project so that you can include \"\\\n \"all files under it in transifex. If no path is provided, the \"\\\n \"current working dir will be used.\"\n parser = OptionParser(usage=usage, description=description)\n parser.add_option(\"--host\", action=\"store\", dest=\"host\", default=None,\n help=\"Specify a default Transifex host.\")\n parser.add_option(\"--user\", action=\"store\", dest=\"user\", default=None,\n help=\"Specify username for Transifex server.\")\n parser.add_option(\"--pass\", action=\"store\", dest=\"password\", default=None,\n help=\"Specify password for Transifex server.\")\n parser.add_option(\n \"--force-save\",\n action=\"store_true\",\n dest=\"save\",\n default=False,\n help=\"Override .transifexrc file with the given credentials.\"\n )\n\n parser.add_option(\"--token\", action=\"store\", dest=\"token\", default=None,\n help=\"Specify an api token.\\nYou can get one from\"\n \" user's settings\")\n return parser", "def create_parser():\n parser = argparse.ArgumentParser(\n \"DomainTransfer\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n conflict_handler=\"resolve\",\n )\n args, _ = parser.parse_known_args()\n\n # environment\n parser.add_argument(\"--source_env\", type=str, default=\"SawyerPush-v0\")\n parser.add_argument(\"--source_noise_bias\", type=float, default=0.0)\n parser.add_argument(\"--source_noise_level\", type=float, default=0.0)\n parser.add_argument(\"--source_ob_noise_level\", type=float, default=0.0)\n\n parser.add_argument(\"--target_env\", type=str, default=\"SawyerPush-v0\")\n parser.add_argument(\"--target_noise_bias\", type=float, default=0.0)\n parser.add_argument(\"--target_noise_level\", type=float, default=0.0)\n parser.add_argument(\"--target_ob_noise_level\", type=float, default=0.0)\n\n parser.add_argument(\"--envs\", type=str2list, default=[])\n 
parser.add_argument(\"--eval_ckpt_paths\", type=str2list, default=[])\n parser.add_argument(\"--early_term\", type=str2bool, default=False)\n\n parser.add_argument(\"--seed\", type=int, default=123)\n\n add_env_args(parser)\n\n add_method_arguments(parser)\n\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(title=\"Commands\", dest=\"subparser_name\")\n subparsers.add_parser(\"generate-settings\", help=\"Generate settings.json to install \"\n \"Gluu Cloud Native Edition non-interactively\")\n subparsers.add_parser(\"install\", help=\"Install Gluu Cloud Native Edition using Kustomize. Depreciated > 4.3.\")\n subparsers.add_parser(\"install-no-wait\", help=\"Install Gluu Cloud Native Edition using Kustomize. \"\n \"Depreciated > 4.3. \"\n \"There will be no wait time between installing services. \"\n \"Pods may look like they are restarting but they will \"\n \"be waiting for hierarchy \"\n \"pods to be running\")\n subparsers.add_parser(\"install-ldap-backup\", help=\"Install ldap backup cronjob only.\")\n subparsers.add_parser(\"restore\", help=\"Install Gluu Cloud Native Edition with a \"\n \"running database and previous configuration using Kustomize.\"\n \"Depreciated > 4.3\")\n subparsers.add_parser(\"uninstall\", help=\"Uninstall Gluu that was installed using Kustomize. \"\n \"Depreciated > 4.3\")\n subparsers.add_parser(\"upgrade\", help=\"Upgrade Gluu Cloud Native Edition using Kustomize. Depreciated > 4.3\")\n subparsers.add_parser(\"upgrade-values-yaml\", help=\"Upgrade Gluu Cloud Native Edition\")\n subparsers.add_parser(\"install-couchbase\", help=\"Install Couchbase only. Used with installation of Gluu with Helm\")\n subparsers.add_parser(\"install-couchbase-backup\", help=\"Install Couchbase backup only.\")\n subparsers.add_parser(\"uninstall-couchbase\", help=\"Uninstall Couchbase only.\")\n subparsers.add_parser(\"helm-install\", help=\"Install Gluu Cloud Native Edition using helm. \"\n \"This also installs the nginx-ingress chart\")\n subparsers.add_parser(\"helm-uninstall\", help=\"Uninstall Gluu Cloud Native Edition using helm. \"\n \"This also uninstalls the nginx-ingress chart\")\n\n subparsers.add_parser(\"helm-install-gluu\", help=\"Install Gluu Cloud Native Edition using helm. \"\n \"This assumes nginx-ingress is installed\")\n subparsers.add_parser(\"helm-uninstall-gluu\", help=\"Uninstall Gluu Cloud Native Edition using helm. 
\"\n \"This only uninstalls Gluu\")\n subparsers.add_parser(\"version\", help=\"Outputs version of pygluu installer.\")\n return parser", "def create_parser():\n desc_str = (\"\"\"\\nCreate a datafile for the specified process.\\n\"\"\"\n )\n\n parser = argparse.ArgumentParser(description=desc_str)\n parser.add_argument('-f','--file', \n help = 'output (data) file name', \n type = str,\n required = True)\n parser.add_argument('-l', '--length',\n help = 'length of the time series',\n type = int,\n required = True)\n parser.add_argument('-p', '--process',\n help = (\"process from cmpy.machines, eg Even\"),\n type = str,\n required = True\n )\n parser.add_argument('-d','--directory', \n help = 'output directory name', \n type = str,\n default = '.',\n required = False)\n\n # do the parsing\n args = parser.parse_args()\n\n return args", "def set_parser():\n usage = \"usage: %prog [tx_options] set [options] [args]\"\n description = \"This command can be used to create a mapping between files \"\\\n \"and projects either using local files or using files from a remote \"\\\n \"Transifex server.\"\n epilog = \"\\nExamples:\\n\"\\\n \"To set the source file:\\n $ tx set -r project.resource --source -l en <file>\\n\\n\"\\\n \"To set a single translation file:\\n $ tx set -r project.resource -l de <file>\\n\\n\"\\\n \"To automatically detect and assign the source files and translations:\\n\"\\\n \" $ tx set --auto-local -r project.resource 'expr' --source-lang en\\n\\n\"\\\n \"To set a specific file as a source and auto detect translations:\\n\"\\\n \" $ tx set --auto-local -r project.resource 'expr' --source-lang en \"\\\n \"--source-file <file>\\n\\n\"\\\n \"To set a remote resource/project:\\n\"\\\n \" $ tx set --auto-remote <transifex-url>\\n\"\n parser = EpilogParser(usage=usage, description=description, epilog=epilog)\n parser.add_option(\"--auto-local\", action=\"store_true\",\n dest=\"local\", default=False,\n help=\"Used when auto configuring local project.\")\n parser.add_option(\"--auto-remote\", action=\"store_true\",\n dest=\"remote\", default=False,\n help=\"Used when adding remote files from Transifex \"\n \"server.\")\n parser.add_option(\"-r\", \"--resource\", action=\"store\", dest=\"resource\",\n default=None,\n help=\"Specify the slug of the resource that you're \"\n \"setting up (This must be in the following format: \"\n \"`project_slug.resource_slug`).\")\n parser.add_option(\n \"--source\", action=\"store_true\", dest=\"is_source\", default=False,\n help=(\n \"Specify that the given file is a source file \"\n \"[doesn't work with the --auto-* commands].\"\n )\n )\n parser.add_option(\"-l\", \"--language\", action=\"store\", dest=\"language\",\n default=None,\n help=\"Specify which translations you want to pull \"\n \"[doesn't work with the --auto-* commands].\")\n parser.add_option(\"-t\", \"--type\", action=\"store\", dest=\"i18n_type\",\n help=(\"Specify the i18n type of the resource(s). \"\n \"This is only needed, if the resource(s) does not \"\n \"exist yet in Transifex. For a list of \"\n \"available i18n types, see \"\n \"http://docs.transifex.com/formats/\"\n ))\n parser.add_option(\"--minimum-perc\", action=\"store\", dest=\"minimum_perc\",\n help=(\"Specify the minimum acceptable percentage \"\n \"of a translation in order to download it.\"\n ))\n parser.add_option(\n \"--mode\", action=\"store\", dest=\"mode\", help=(\n \"Specify the mode of the translation file to pull (e.g. \"\n \"'reviewed'). 
See http://bit.ly/pullmode for the \"\n \"available values.\"\n )\n )\n group = OptionGroup(parser, \"Extended options\", \"These options can only \"\n \"be used with the --auto-local command.\")\n group.add_option(\"-s\", \"--source-language\", action=\"store\",\n dest=\"source_language\", default=None,\n help=\"Specify the source language of a resource \"\n \"[requires --auto-local].\")\n group.add_option(\"-f\", \"--source-file\", action=\"store\", dest=\"source_file\",\n default=None, help=\"Specify the source file of a \"\n \"resource [requires --auto-local].\")\n group.add_option(\"--execute\", action=\"store_true\", dest=\"execute\",\n default=False, help=\"Execute commands \"\n \"[requires --auto-local].\")\n parser.add_option_group(group)\n return parser", "def get_parser():\n parser = ArgumentParser(description=__doc__,\n formatter_class=ArgumentDefaultsHelpFormatter,\n prog='pv2')\n subparsers = parser.add_subparsers(dest='cmd')\n # subparsers.add_parser('selfcheck',\n # add_help=False,\n # help=\"Self-check of the sst toolkit.\")\n # parser.add_argument('--version',\n # action='version',\n # version=('sst %s' % str(sst.__version__)))\n subparsers.add_parser('eval',\n add_help=False,\n parents=[evaluate.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Evaluate a single image\"))\n subparsers.add_parser('train',\n add_help=False,\n parents=[train.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Train a new model.\"))\n subparsers.add_parser('plot',\n add_help=False,\n parents=[plot.get_parser()],\n formatter_class=ArgumentDefaultsHelpFormatter,\n help=(\"Plot summary information.\"))\n return parser", "def get_parser(self):\n parser = ArgumentParser()\n parser.add_argument(\n \"-c\", default='', dest='cmd',\n help=(\"just like python -c or sh -c (pass in a command)\"))\n parser.add_argument(\n \"-e\", \"--exec\", default='', dest='execfile',\n help='a filename to execute')\n parser.add_argument(\n \"-v\", '--version', default=False, dest='version',\n action='store_true',\n help=(\"show version information\"))\n parser.add_argument(\"--shell\", dest=\"shell\",\n default=False, help=\"application shell\",\n action='store_true')\n parser.add_argument(\"--config\", dest='config',\n default=\"\",\n help=\"use config file\")\n return parser", "def __parser__(self):\n return self", "def make_parser():\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-p', '--platform', dest='platform', type=str, required=False, default='')\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n # Dataset related flags\n parser.add_argument(\n '--kitti_raw',\n required=True,\n help='directory where raw KITTI dataset is located.'\n )\n parser.add_argument(\n '--test_file_path',\n required=True,\n help='.txt file containing list of kitti eigen test files'\n )\n parser.add_argument(\n '--gt_path',\n required=True,\n help='.npy file containing ground truth depth for kitti eigen test files'\n )\n parser.add_argument('--min_depth', type=float, default=1e-3, help=\"threshold for minimum depth for evaluation\")\n parser.add_argument('--max_depth', type=float, default=80, help=\"threshold for maximum depth for evaluation\")\n\n # Training settings\n parser.add_argument('--ckpt_path', type=str, required=True, help='If specified, tries to restore from given path.')\n parser.add_argument('--test_batch_size', type=int, default=1, help='test batch size')\n parser.add_argument('--input_height', type=int, default=128, 
help='height of input image to model')\n parser.add_argument('--input_width', type=int, default=416, help='width of input image to model')\n parser.add_argument(\n '--dispnet_encoder',\n default='resnet50',\n choices=('resnet50', 'vgg'),\n help='type of encoder for dispnet'\n )\n parser.add_argument('--scale_normalize', action='store_true', help='spatially normalize depth prediction')\n parser.add_argument('--save_pred', action='store_true', help='save predictions on disk')\n parser.add_argument('--num_source', type=int, default=2, help='number of source images')\n parser.add_argument('--name', type=str, default='kitti_evaluation_weather')\n\n args = parser.parse_args()\n return args", "def configure_parser(parser):\n qisys.parsers.worktree_parser(parser)\n parser.add_argument(\"project_name\",\n help=\"The name of the project. \"\n \"The project will be created in QI_WORK_TREE/<name> \")\n parser.add_argument(\"--git\", action=\"store_true\",\n help=\"Create a git repository\")", "def get_parser():\n if sys.version_info[0] < 3:\n # Using a version of Python < 3.\n parser = ArgumentParser(version=VERSION) # pylint: disable=E1123\n else:\n parser = ArgumentParser()\n parser.add_argument('--version', action='version', version=VERSION)\n\n subparsers = parser.add_subparsers(\n title='actions', help='Types of zappa commands',\n dest='command')\n\n parser_update_stack = subparsers.add_parser(\n 'update', help='Update a zappa deploy')\n parser_update_stack.add_argument(\n '--name', required=True,\n help='Name of the deployment (dev, prod, etc.)')\n\n parser_create_stack = subparsers.add_parser(\n 'deploy', help='Create a zappa deploy')\n parser_create_stack.add_argument(\n '--name', required=True,\n help='Name of the deployment (dev, prod, etc.)')\n\n return parser", "def _make_parser(self, **kwargs):\n\n kwargs.setdefault('help', self.help)\n kwargs.setdefault('formatter_class',argparse.RawDescriptionHelpFormatter)\n kwargs.setdefault('description', self.description)\n kwargs.setdefault('name', self.name)\n names = (kwargs.get('name') or self.name).split('.')\n \n def _get_subparser(a):\n if a._subparsers:\n for action in a._subparsers._actions:\n if isinstance(action, argparse._SubParsersAction):\n return action\n raise RuntimeError('could not find adequate subparser')\n return a.add_subparsers(dest='command',\n title='commands',\n metavar='COMMAND')\n def _get_parser(node, idx, names):\n name = names[idx]\n if name in node.choices:\n return node.choices[name]\n args = {\n 'name' : name,\n 'help' : 'a group of sub-commands',\n }\n return node.add_parser(**args)\n \n parser = ACMD_PARSER\n node = _get_subparser(parser)\n\n for i,n in enumerate(names[:-1]):\n node = _get_subparser(parser)\n parser = _get_parser(node, i, names)\n \n node = _get_subparser(parser)\n kwargs['name'] = names[-1]\n parser = node.add_parser(**kwargs)\n return parser", "def parserFactory(intLanguageName, debugMode):\r\n #if text.getDebug() != debugMode:\r\n # text.setDebugRecurs(debugMode)\r\n\r\n return THE_PARSER", "def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')", "def _CreateParser():\n parser = commandline.ArgumentParser(description=__doc__, caching=True)\n\n # TODO(rcui): Have this use the UI-V2 format of having source and target\n # device be 
specified as positional arguments.\n parser.add_argument('--force', action='store_true', default=False,\n help='Skip all prompts (i.e., for disabling of rootfs '\n 'verification). This may result in the target '\n 'machine being rebooted.')\n sdk_board_env = os.environ.get(cros_chrome_sdk.SDKFetcher.SDK_BOARD_ENV)\n parser.add_argument('--board', default=sdk_board_env,\n help=\"The board the Chrome build is targeted for. When \"\n \"in a 'cros chrome-sdk' shell, defaults to the SDK \"\n \"board.\")\n parser.add_argument('--build-dir', type='path',\n help='The directory with Chrome build artifacts to '\n 'deploy from. Typically of format '\n '<chrome_root>/out/Debug. When this option is used, '\n 'the GYP_DEFINES environment variable must be set.')\n parser.add_argument('--target-dir', type='path',\n default=None,\n help='Target directory on device to deploy Chrome into.')\n parser.add_argument('-g', '--gs-path', type='gs_path',\n help='GS path that contains the chrome to deploy.')\n parser.add_argument('--nostartui', action='store_false', dest='startui',\n default=True,\n help=\"Don't restart the ui daemon after deployment.\")\n parser.add_argument('--nostrip', action='store_false', dest='dostrip',\n default=True,\n help=\"Don't strip binaries during deployment. Warning: \"\n 'the resulting binaries will be very large!')\n parser.add_argument('-p', '--port', type=int, default=remote.DEFAULT_SSH_PORT,\n help='Port of the target device to connect to.')\n parser.add_argument('-t', '--to',\n help='The IP address of the CrOS device to deploy to.')\n parser.add_argument('-v', '--verbose', action='store_true', default=False,\n help='Show more debug output.')\n parser.add_argument('--mount-dir', type='path', default=None,\n help='Deploy Chrome in target directory and bind it '\n 'to the directory specified by this flag.'\n 'Any existing mount on this directory will be '\n 'umounted first.')\n parser.add_argument('--mount', action='store_true', default=False,\n help='Deploy Chrome to default target directory and bind '\n 'it to the default mount directory.'\n 'Any existing mount on this directory will be '\n 'umounted first.')\n\n group = parser.add_argument_group('Advanced Options')\n group.add_argument('-l', '--local-pkg-path', type='path',\n help='Path to local chrome prebuilt package to deploy.')\n group.add_argument('--sloppy', action='store_true', default=False,\n help='Ignore when mandatory artifacts are missing.')\n group.add_argument('--staging-flags', default=None, type=ValidateGypDefines,\n help=('Extra flags to control staging. Valid flags are - '\n '%s' % ', '.join(chrome_util.STAGING_FLAGS)))\n # TODO(stevenjb): Remove --strict entirely once removed from the ebuild.\n group.add_argument('--strict', action='store_true', default=False,\n help='Deprecated. Default behavior is \"strict\". Use '\n '--sloppy to omit warnings for missing optional '\n 'files.')\n group.add_argument('--strip-flags', default=None,\n help=\"Flags to call the 'strip' binutil tool with. \"\n \"Overrides the default arguments.\")\n group.add_argument('--ping', action='store_true', default=False,\n help='Ping the device before connection attempt.')\n group.add_argument('--mash', action='store_true', default=False,\n help='Copy additional files for mus+ash. 
Will not fit in '\n 'the default target-dir.')\n\n group = parser.add_argument_group(\n 'Metadata Overrides (Advanced)',\n description='Provide all of these overrides in order to remove '\n 'dependencies on metadata.json existence.')\n group.add_argument('--target-tc', action='store', default=None,\n help='Override target toolchain name, e.g. '\n 'x86_64-cros-linux-gnu')\n group.add_argument('--toolchain-url', action='store', default=None,\n help='Override toolchain url format pattern, e.g. '\n '2014/04/%%(target)s-2014.04.23.220740.tar.xz')\n\n # GYP_DEFINES that Chrome was built with. Influences which files are staged\n # when --build-dir is set. Defaults to reading from the GYP_DEFINES\n # enviroment variable. WILL BE DEPRECATED.\n parser.add_argument('--gyp-defines', default=None, type=ValidateGypDefines,\n help=argparse.SUPPRESS)\n\n # GN_ARGS (args.gn) used to build Chrome. Influences which files are staged\n # when --build-dir is set. Defaults to reading from the GN_ARGS env variable.\n # CURRENLY IGNORED, ADDED FOR FORWARD COMPATABILITY.\n parser.add_argument('--gn-args', default=None, type=ValidateGnArgs,\n help=argparse.SUPPRESS)\n\n # Path of an empty directory to stage chrome artifacts to. Defaults to a\n # temporary directory that is removed when the script finishes. If the path\n # is specified, then it will not be removed.\n parser.add_argument('--staging-dir', type='path', default=None,\n help=argparse.SUPPRESS)\n # Only prepare the staging directory, and skip deploying to the device.\n parser.add_argument('--staging-only', action='store_true', default=False,\n help=argparse.SUPPRESS)\n # Path to a binutil 'strip' tool to strip binaries with. The passed-in path\n # is used as-is, and not normalized. Used by the Chrome ebuild to skip\n # fetching the SDK toolchain.\n parser.add_argument('--strip-bin', default=None, help=argparse.SUPPRESS)\n return parser", "def build_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--reference', required=True, help=\"Reference Genome URL\")\n parser.add_argument('-n', '--normal', required=True, help='Normal BAM URL. Format: UUID.normal.bam')\n parser.add_argument('-t', '--tumor', required=True, help='Tumor BAM URL. Format: UUID.tumor.bam')\n parser.add_argument('-d', '--dbsnp', required=True, help='dbsnp_132_b37.leftAligned.vcf URL')\n parser.add_argument('-c', '--cosmic', required=True, help='b37_cosmic_v54_120711.vcf URL')\n parser.add_argument('-u', '--mutect', required=True, help='Mutect.jar')\n parser.add_argument('-w', '--work_dir', required=True, help='Where you wanna work from? 
(full path please)')\n\n return parser", "def get_parser():\n parser = argparse.ArgumentParser(description='Parser des liens sur les sites Jahia et Wordpress.')\n parser.add_argument('ficher_des_sites', help='le fichier contenant les sites a parser.')\n parser.add_argument('-v', '--version', help='affiche la version du parser',\n action='version', version='%(prog)s ' + __version__)\n return parser", "def get_parser():\n parser = argparse.ArgumentParser(description='Parser des liens sur les sites Jahia et Wordpress.')\n parser.add_argument('ficher_des_sites', help='le fichier contenant les sites a parser.')\n parser.add_argument('-v', '--version', help='affiche la version du parser',\n action='version', version='%(prog)s ' + __version__)\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('webpage', help='webpage to search')\n\n return parser", "def test_gen_parser(self):\n pass", "def get_parser(subparsers):\n parser = subparsers.add_parser(\"compile\", description=\"Compile model using ML on MCU flow.\")\n parser.set_defaults(flow_func=handle)\n add_compile_options(parser)\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('manga_name',\n type = str,\n help = \"Input the name of the manga.\"\n )\n parser.add_argument('-b','--begin',\n type = int,\n help = 'Input the starting chapter.Defaults to first chapter.'\n )\n parser.add_argument('-e','--end',\n type = int,\n help = 'Input the ending chapter.Defaults to the last possible chapter.'\n )\n parser.add_argument('-c','--chapter',\n type = int,\n help = 'Provide if you want to download only one chapter.'\n )\n parser.add_argument('-t','--target',\n type = str,\n help = 'The location where manga has to be downloaded.Defaults to the current directory.',\n default = '.'\n )\n parser.add_argument('-s','--site',\n type = str,\n help = 'The site through which the manga has to be downloaded. Defaults to MangaPanda.',\n default = 'mangapanda'\n )\n\n return parser", "def project():", "def project():", "def project():", "def create_parser():\n helpdict = create_parser.helpdict\n # Customized usage, for more verbosity concerning these subparsers options.\n usage = \"\"\"%(prog)s [-h] [--version] {run,info} ... 
\"\"\"\n usage += tw.dedent(\"\"\"\\n\n From more help on each of the subcommands, type:\n %(prog)s run -h\n %(prog)s info -h\\n\\n\"\"\")\n\n # parser = ap.ArgumentParser(\n #parser = MpArgumentParser(\n #formatter_class=ap.ArgumentDefaultsHelpFormatter,\n #description='Monte Python, a Monte Carlo code in Python',\n #usage=usage)\n parser = initialise_parser(\n description='Monte Python, a Monte Carlo code in Python', usage=usage)\n\n # -- add the subparsers\n subparser = parser.add_subparsers(dest='subparser_name')\n\n ###############\n # run the MCMC\n runparser = add_subparser(subparser, 'run', help=\"run the MCMC chains\")\n\n # -- number of steps (OPTIONAL)\n runparser.add_argument('-N', help=helpdict['N'], type=positive_int,\n dest='N')\n # -- output folder (OBLIGATORY)\n runparser.add_argument('-o', '--output', help=helpdict['o'], type=str,\n dest='folder')\n # -- parameter file (OBLIGATORY)\n runparser.add_argument('-p', '--param', help=helpdict['p'],\n type=existing_file, dest='param')\n # -- covariance matrix (OPTIONAL)\n runparser.add_argument('-c', '--covmat', help=helpdict['c'],\n type=existing_file, dest='cov')\n # -- jumping method (OPTIONAL)\n runparser.add_argument('-j', '--jumping', help=helpdict['j'],\n dest='jumping', default='fast',\n choices=['global', 'sequential', 'fast'])\n # -- sampling method (OPTIONAL)\n runparser.add_argument('-m', '--method', help=helpdict['m'],\n dest='method', default='MH',\n choices=['MH', 'NS', 'PC', 'CH', 'IS', 'Der', 'Fisher'])\n # -- update Metropolis Hastings (OPTIONAL)\n runparser.add_argument('--update', help=helpdict['update'], type=int,\n dest='update', default=50)\n # -- update Metropolis Hastings with an adaptive jumping factor (OPTIONAL)\n runparser.add_argument('--superupdate', help=helpdict['superupdate'], type=int,\n dest='superupdate', default=0)\n # -- superupdate acceptance rate argument (OPTIONAL)\n runparser.add_argument('--superupdate-ar', help=helpdict['superupdate-ar'], type=float,\n dest='superupdate_ar', default=0.26)\n # -- superupdate acceptance rate tolerance argument (OPTIONAL)\n runparser.add_argument('--superupdate-ar-tol', help=helpdict['superupdate-ar-tol'], type=float,\n dest='superupdate_ar_tol', default=0.01)\n # -- adaptive jumping factor Metropolis Hastings (OPTIONAL)\n runparser.add_argument('--adaptive', help=helpdict['adaptive'], type=int,\n dest='adaptive', default=0)\n # -- adaptive ts argument (OPTIONAL)\n runparser.add_argument('--adaptive-ts', help=helpdict['adaptive-ts'], type=int,\n dest='adaptive_ts', default=1000)\n\n # -- jumping factor (OPTIONAL)\n runparser.add_argument('-f', help=helpdict['f'], type=float,\n dest='jumping_factor', default=2.4)\n # -- temperature (OPTIONAL)\n runparser.add_argument('-T', help=helpdict['T'], type=float,\n dest='temperature', default=1.0)\n # -- minimize (OPTIONAL)\n runparser.add_argument('--minimize', help=helpdict['minimize'],\n action='store_true')\n # -- minimize argument, minimization tolerance (OPTIONAL)\n runparser.add_argument('--minimize-tol', help=helpdict['minimize-tol'], type=float,\n dest='minimize_tol', default=0.00001)\n # -- fisher (OPTIONAL)\n runparser.add_argument('--fisher', help=helpdict['fisher'],\n action='store_true')\n # -- fisher argument (OPTIONAL)\n runparser.add_argument('--fisher-asymmetric', help=helpdict['fisher-asymmetric'],\n dest='fisher_asymmetric',action='store_true')\n # -- fisher step iteration (OPTIONAL)\n runparser.add_argument('--fisher-step-it', help=helpdict['fisher-step-it'],\n dest='fisher_step_it', 
default=10)\n # -- fisher step iteration argument, -deltaloglkl target (OPTIONAL)\n runparser.add_argument('--fisher-delta', help=helpdict['fisher-delta'], type=float,\n dest='fisher_delta', default=0.1)\n # -- fisher step iteration argument, -deltaloglkl tolerance (OPTIONAL)\n runparser.add_argument('--fisher-tol', help=helpdict['fisher-tol'], type=float,\n dest='fisher_tol', default=0.05)\n # -- fisher symmetric likelihood assumption threshold (OPTIONAL)\n runparser.add_argument('--fisher-sym-lkl', help=helpdict['fisher-sym-lkl'], type=float,\n dest='fisher_sym_lkl', default=0.1)\n # -- configuration file (OPTIONAL)\n runparser.add_argument('--conf', help=helpdict['conf'],\n type=str, dest='config_file',\n default='default.conf')\n # -- arbitrary numbering of an output chain (OPTIONAL)\n runparser.add_argument('--chain-number', help=helpdict['chain-number'])\n # -- stop run after first successful update using --update (EXPERIMENTAL)\n runparser.add_argument('--stop-after-update', help=helpdict['stop-after-update'],\n dest='stop_after_update', action='store_true')\n # display option\n runparser.add_argument('--display-each-chi2', help=helpdict['display-each-chi2'],\n dest='display_each_chi2', action='store_true')\n # -- parallel chains without MPI (OPTIONAL)\n runparser.add_argument('--parallel-chains', help=helpdict['parallel-chains'],\n action='store_true')\n\n ###############\n # MCMC restart from chain or best fit file\n runparser.add_argument('-r', '--restart', help=helpdict['r'],\n type=existing_file, dest='restart')\n runparser.add_argument('-b', '--bestfit', dest='bf', help=helpdict['b'],\n type=existing_file)\n\n ###############\n # Silence the output (no print on the console)\n runparser.add_argument('--silent', help=helpdict['silent'],\n action='store_true')\n ###############\n # Adding new derived parameters to a run\n runparser.add_argument(\n '--Der-target-folder', dest=\"Der_target_folder\",\n help=helpdict['Der-target-folder'], type=str, default='')\n runparser.add_argument(\n '--Der-param-list', dest='derived_parameters',\n help=helpdict['Der-param-list'], type=str, default='', nargs='+')\n\n ###############\n # Importance Sampling Arguments\n runparser.add_argument(\n '--IS-starting-folder', dest='IS_starting_folder',\n help=helpdict['IS-starting-folder'], type=str, default='', nargs='+')\n\n ###############\n # We need the following so the run does not crash if one of the external\n # samplers is not correctly installed despite not being used\n from contextlib import contextmanager\n import sys, os\n\n @contextmanager\n def suppress_stdout():\n with open(os.devnull, \"w\") as devnull:\n old_stdout = sys.stdout\n sys.stdout = devnull\n try:\n yield\n finally:\n sys.stdout = old_stdout\n\n ###############\n # MultiNest arguments (all OPTIONAL and ignored if not \"-m=NS\")\n # The default values of -1 mean to take the PyMultiNest default values\n try:\n with suppress_stdout():\n from MultiNest import NS_prefix, NS_user_arguments\n NSparser = runparser.add_argument_group(\n title=\"MultiNest\",\n description=\"Run the MCMC chains using MultiNest\"\n )\n for arg in NS_user_arguments:\n NSparser.add_argument('--'+NS_prefix+arg,\n default=-1,\n **NS_user_arguments[arg])\n except ImportError:\n # Not defined if not installed\n pass\n except:\n warnings.warn('PyMultiNest detected but MultiNest likely not installed correctly. 
'\n 'You can safely ignore this if not running with option -m NS')\n\n ###############\n # PolyChord arguments (all OPTIONAL and ignored if not \"-m=PC\")\n # The default values of -1 mean to take the PyPolyChord default values\n try:\n with suppress_stdout():\n from PolyChord import PC_prefix, PC_user_arguments\n PCparser = runparser.add_argument_group(\n title=\"PolyChord\",\n description=\"Run the MCMC chains using PolyChord\"\n )\n for arg in PC_user_arguments:\n PCparser.add_argument('--'+PC_prefix+arg,\n default=-1,\n **PC_user_arguments[arg])\n except ImportError:\n # Not defined if not installed\n pass\n except:\n warnings.warn('PyPolyChord detected but PolyChord likely not installed correctly. '\n 'You can safely ignore this if not running with option -m PC')\n\n ###############\n # CosmoHammer arguments (all OPTIONAL and ignored if not \"-m=CH\")\n # The default values of -1 mean to take the CosmoHammer default values\n try:\n with suppress_stdout():\n from cosmo_hammer import CH_prefix, CH_user_arguments\n CHparser = runparser.add_argument_group(\n title=\"CosmoHammer\",\n description=\"Run the MCMC chains using the CosmoHammer framework\")\n for arg in CH_user_arguments:\n CHparser.add_argument('--'+CH_prefix+arg,\n default=-1,\n **CH_user_arguments[arg])\n except ImportError:\n # Not defined if not installed\n pass\n except:\n warnings.warn('CosmoHammer detected but emcee likely not installed correctly. '\n 'You can safely ignore this if not running with option -m CH')\n\n ###############\n # Information\n infoparser = add_subparser(subparser, 'info',\n help=\"analyze the MCMC chains\")\n\n # -- folder to analyze\n infoparser.add_argument('files', help=helpdict['files'],\n nargs='+')\n # Silence the output (no print on the console)\n infoparser.add_argument('--silent', help=helpdict['silent'],\n action='store_true')\n # -- to only write the covmat and bestfit, without computing the posterior\n infoparser.add_argument('--minimal', help=helpdict['minimal'],\n action='store_true')\n # -- number of bins (defaulting to 20)\n infoparser.add_argument('--bins', help=helpdict['bins'],\n type=int, default=20)\n # -- temperature (OPTIONAL)\n infoparser.add_argument('-T', help=helpdict['T'], type=float,\n dest='temperature', default=1.0)\n # -- deprecated: remove the mean-likelihood line\n infoparser.add_argument('--no-mean', help=helpdict['no-mean'],\n dest='mean_likelihood_old', action='store_false')\n # -- plot the mean-likelihood line\n infoparser.add_argument('--plot-mean', help=helpdict['plot-mean'],\n dest='mean_likelihood', action='store_true')\n # -- to remove the mean and 68% limits on top of each 1D plot\n infoparser.add_argument('--short-title-1d', help=helpdict['short-title-1d'],\n dest='short_title_1d', action='store_true')\n # -- possible plot file describing custom commands\n infoparser.add_argument('--extra', help=helpdict['extra'],\n dest='optional_plot_file', default='')\n # -- if you just want the covariance matrix, use this option\n infoparser.add_argument('--noplot', help=helpdict['noplot'],\n dest='plot', action='store_false')\n # -- if you just want to output 1d posterior distributions (faster)\n infoparser.add_argument('--noplot-2d', help=helpdict['noplot-2d'],\n dest='plot_2d', action='store_false')\n # -- if you just want to output triangle with 2d contours\n infoparser.add_argument('--noplot-2d-diag', help=helpdict['noplot-2d-diag'],\n dest='plot_diag', action='store_false')\n # -- when plotting 2d posterior distribution, use contours and not contours\n # 
filled (might be useful when comparing several folders)\n infoparser.add_argument('--contours-only', help=helpdict['contours-only'],\n dest='contours_only', action='store_true')\n # -- if you want to output every single subplots\n infoparser.add_argument('--all', help=helpdict['all'], dest='subplot',\n action='store_true')\n # -- to change the extension used to output files (pdf is the default one,\n # but takes long, valid options are png and eps)\n infoparser.add_argument('--ext', help=helpdict['ext'],\n type=str, dest='extension', default='pdf')\n # -- to set manually the number of plots per hoorizontal raw in 1d plot\n infoparser.add_argument('--num-columns-1d', help=helpdict['num-columns-1d'],\n type=int, dest='num_columns_1d')\n # -- also analyze the non-markovian part of the chains\n infoparser.add_argument('--keep-non-markovian', help=helpdict['keep-non-markovian'],\n dest='markovian', action='store_false')\n # -- force only analyzing the markovian part of the chains\n infoparser.add_argument('--keep-only-markovian', help=helpdict['keep-only-markovian'],\n dest='only_markovian', action='store_true')\n # -- fraction of chains to be analyzed after burn-in removal (defaulting to 1.0)\n infoparser.add_argument('--keep-fraction', help=helpdict['keep-fraction'],\n type=float, dest='keep_fraction', default=1.0)\n # -- calculate the covariant matrix when analyzing the chains\n infoparser.add_argument('--want-covmat', help=helpdict['want-covmat'],\n dest='want_covmat', action='store_true')\n # -------------------------------------\n # Further customization\n # -- fontsize of plots (defaulting to 16)\n infoparser.add_argument('--fontsize', help=helpdict['fontsize'],\n type=int, default=16)\n # -- ticksize of plots (defaulting to 14)\n infoparser.add_argument('--ticksize', help=helpdict['ticksize'],\n type=int, default=14)\n # -- linewidth of 1d plots (defaulting to 4, 2 being a bare minimum for\n # legible graphs\n infoparser.add_argument('--line-width', help=helpdict['line-width'],\n type=int, default=4)\n # -- number of decimal places that appear on the tick legend. If you want\n # to increase the number of ticks, you should reduce this number\n infoparser.add_argument('--decimal', help=helpdict['decimal'], type=int,\n default=3)\n # -- number of ticks that appear on the graph.\n infoparser.add_argument('--ticknumber', help=helpdict['ticknumber'],\n type=int, default=3)\n # -- legend type, to choose between top (previous style) to sides (new\n # style). 
It modifies the place where the name of the variable appear.\n infoparser.add_argument('--legend-style', help=helpdict['legend-style'],\n type=str, choices=['sides', 'top'],\n default='sides')\n # -- width of gaussian smoothing for plotting posteriors,\n # in units of bin size, increase for smoother data.\n infoparser.add_argument('--gaussian-smoothing', help=helpdict['gaussian-smoothing'],\n type=float, default=0.5)\n # interpolation factor for plotting posteriors, 1 means no interpolation,\n # increase for smoother curves (it means that extra bins are created\n # and interpolated between computed bins)\n infoparser.add_argument('--interpolation-smoothing', help=helpdict['interpolation-smoothing'],\n type=int, default=4)\n # -- plot Fisher ellipses\n infoparser.add_argument('--plot-fisher', help=helpdict['plot-fisher'],\n dest='plot_fisher',action='store_true')\n infoparser.add_argument('--center-fisher', help=helpdict['center-fisher'],\n dest='center_fisher',action='store_true')\n\n infoparser.add_argument('--posterior-smoothing', help=helpdict['posterior-smoothing'],\n type=int, default=5)\n\n return parser", "def create_parser():\n desc_str = (\n \"\"\"Look at the results of inference with cbayes scripts.\"\"\"\n )\n\n parser = argparse.ArgumentParser(description=desc_str)\n \n parser.add_argument('-dir', '--directory',\n help = 'name of the cbayes ouput directory',\n type = str,\n required = True\n )\n \n # do the parsing\n args = parser.parse_args()\n\n return args", "def create_parser():\n parser = argparse.ArgumentParser(\n description=\"First example\",\n epilog=\"Batch 2017\")\n\n # script\n parser.add_argument('--script',\n required=True,\n action='store',\n dest='script',\n help=\"A script to execute\")\n\n parser.add_argument('--dataset',\n required=True,\n action='store',\n dest='dataset',\n help=\"A dataset to use\")\n#\n# parser.add_argument('--features',\n# required=True,\n# action='store',\n# dest='features',\n# help=\"Number of features\")\n return parser", "def _init_parser():\n\t\n\t_parser = argparse.ArgumentParser()\n\t_parser.add_argument(\"--pull\", help=\"pull scripts from UR3\", action=\"store_true\")\n\t_parser.add_argument(\"--create\", help=\"create data base from script files\", action=\"store_true\")\n\t_parser.add_argument(\"--clear\", help=\"clear all data base\", action=\"store_true\")\n\treturn _parser", "def parser_create():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--config-file\", type=str, help=\"yaml configuration file name\")\n return parser.parse_args()", "def get_parser():\n parser = argparse.ArgumentParser()\n # parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('files', nargs='+')\n return parser", "def __init__(self, *args, **kw):\n self.parser = Parser(*args, **kw)", "def _initialize_project_variables(self):\n self.Source = ''\n self.Regional = ''\n self.Vernacular = ''\n self.Fallback = dict()\n self.New_Target = dict()\n self.Biblical_Terms = dict()\n self.Old_Target = dict()\n\n# self.list_projects = []\n# self.project_lines = []\n# self.indent = 0\n# self.Treed = False\n self.root = etree.Element('root')\n# #add child 'settings', all user configurable bits under here\n self.settings = etree.SubElement(self.root, \"settings\")\n# self.old_mode = dict()\n# self.spreferred = etree.SubElement(self.settings, \"preferred\")\n# self.smode = etree.SubElement(self.settings, \"mode\")\n# self.stemp = etree.SubElement(self.settings, 
\"template\")\n self.sf0 = etree.SubElement(self.settings, \"f0\")\n self.sf1 = etree.SubElement(self.settings, \"f1\")\n self.sf2 = etree.SubElement(self.settings, \"f2\")\n self.trout = etree.SubElement(self.root, \"tree\")", "def make_parser():\n parser = argparse.ArgumentParser(description='Parse Wiki Page')\n parser.add_argument('wikipage',\n help='the name of the wiki page to parse')\n parser.add_argument('output_file_name', nargs='?',\n help='the name of the file to upload/write to')\n parser.add_argument('-r', '--redirect', dest='redirect',\n help='the name of the remote page to redirect to')\n parser.add_argument('--s3',action='store_true',\n help='upload file to S3? (Default = False)')\n parser.add_argument('--dryrun',action='store_true')\n #ToDo: add arguments --dryrun and --tofile? --verbose? --s3 --category\n return parser", "def get_parser():\n # Get parsers for various model architectures.\n model_parser = ModelFactory.get_all_parsers()\n # Get parsers for various optimizers.\n optimizer_parser = OptimizerFactory.get_all_parsers()\n # Add parent parsers.\n parent_parsers = model_parser + optimizer_parser\n parser = argparse.ArgumentParser(parents=parent_parsers)\n\n # Generic options\n parser.add_argument('--checkpoint-step', type=int, default=1,\n help='Number of epochs between successive checkpoint creations')\n parser.add_argument('--config-file', type=str, default=[], nargs='*',\n help='File(s) to read the command-line arguments from')\n parser.add_argument('--continue', action='store_true',\n help='Continue the execution of the last experiment saved into the export directory')\n parser.add_argument('--debug', action='store_true', help='Show debug messages')\n parser.add_argument('--export-dir', type=str, required=True, help='Export directory')\n parser.add_argument('--no-gpu', action='store_true', help='Use CPU')\n \n parser.add_argument(\"--wandb-directory\", type=str, default=\"../wandb\")\n parser.add_argument(\"--disable-wandb\", action=\"store_true\", help=\"No Wandb logging\")\n\n # Data options\n parser.add_argument('--batch-size', type=int, default=[16], nargs='*', help='Batch size(s)')\n parser.add_argument('--dataset', type=str, default=[consts.SIGMORPHON2020], nargs='*',\n choices=[consts.SIGMORPHON2020], help='Dataset(s) to train on')\n parser.add_argument('--sigmorphon2020-root', type=str, help='Root directory for the SIGMORPHON 2020 dataset')\n\n # Language options\n parser.add_argument('--language-families', type=str, nargs='*', default=None,\n help='The families of languages to load the data for.'\n ' If not provided, all available families will be used.')\n parser.add_argument('--language-info-file', type=str, default='lang_config.tsv',\n help='The language information file.')\n parser.add_argument('--languages', type=str, nargs='*', default=None,\n help='The languages to load the data for.'\n ' If not provided, all available languages will be used.')\n\n # Optimizer options\n parser.add_argument('--optimizer', type=str, default=[OptimizerFactory.optimizers[0]],\n choices=OptimizerFactory.optimizers, nargs='*', help='Optimizer algorithm(s)')\n parser.add_argument('--num-epochs', type=int, default=30, help='Number(s) of epochs')\n\n # Model options\n parser.add_argument('--model-architecture', type=str, default=[ModelFactory.architectures[0]], nargs='*',\n choices=ModelFactory.architectures, help='Model architecture(s)')\n \n # Parallelism Optoions, affect various\n parser.add_argument('--loader-threads', type=int, default=0, help='Data loading threads. 
Default to 0 (load in main)')\n parser.add_argument('--use-dataparallel', action='store_true', help='Use torch.nn.DataParallel to wrap the model?')\n\n return parser", "def make_parser():\n\n parser = argparse.ArgumentParser(add_help=True)\n\n parser_grp_main = parser.add_argument_group('Arguments')\n\n parser_grp_main.add_argument\n\n parser_grp_main.add_argument(\n \"-i\",\n \"--inp-dir\",\n default = \"out/ln/alias/sst/all_samples\",\n help=\"The folder containing files to tidy.\"\n )\n\n parser_grp_main.add_argument(\n \"-x\",\n \"--xlsx\",\n type=str,\n help=\"The xlsx file containing the metadata to use to find samples and tidy them.\",\n default=\"Sequencing_summary.xlsx\",\n required=False)\n\n parser_grp_main.add_argument(\n \"-b\",\n \"--by-column\",\n nargs='+',\n type=str,\n help=\"The column names from the xlsx file to use to tidy.\",\n default=\"sample_name\",\n required=False)\n \n parser_grp_main.add_argument(\n \"-d\",\n \"--delete\",\n help=\"Delete file only this arg is used. Unsafe. Always run first without this argument and check all files listed to deletion.\",\n default=False,\n type=bool,\n )\n\n return parser", "def create_new_project():\n readline.parse_and_bind('tab: complete')\n\n print \\\n\"\"\"\n xbmcswift2 - A micro-framework for creating XBMC plugins.\n [email protected]\n --\n\"\"\"\n print 'I\\'m going to ask you a few questions to get this project' \\\n ' started.'\n\n # noinspection PyDictCreation\n opts = {}\n\n # Plugin Name\n opts['plugin_name'] = get_valid_value(\n 'What is your plugin name?',\n validate_nonblank\n )\n\n # Plugin ID\n opts['plugin_id'] = get_valid_value(\n 'Enter your plugin id.',\n validate_pluginid,\n 'plugin.video.%s' % (opts['plugin_name'].lower().replace(' ', ''))\n )\n\n # Parent Directory\n opts['parent_dir'] = get_valid_value(\n 'Enter parent folder (where to create project)',\n validate_isfolder,\n getcwd()\n )\n opts['plugin_dir'] = os.path.join(opts['parent_dir'], opts['plugin_id'])\n assert not os.path.isdir(opts['plugin_dir']), \\\n 'A folder named %s already exists in %s.' % (opts['plugin_id'],\n opts['parent_dir'])\n\n # Provider\n opts['provider_name'] = get_valid_value(\n 'Enter provider name',\n validate_nonblank,\n )\n\n # Create the project folder by copying over skel\n copytree(SKEL, opts['plugin_dir'], ignore=ignore_patterns('*.pyc'))\n\n # Walk through all the new files and fill in with out options\n for root, dirs, files in os.walk(opts['plugin_dir']):\n for filename in files:\n update_file(os.path.join(root, filename), opts)\n\n print 'Projects successfully created in %s.' 
% opts['plugin_dir']\n print 'Done.'", "def build_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='CBIS Health check')\n\n parser.add_argument('-uc', '--uc_hostname',\n required=True,\n help='Undercloud hostname (sample rst-exp3-uc)')\n\n parser.add_argument('-o', '--output',\n default='/tmp',\n help='Output folder')\n\n parser.add_argument('-t', '--test', action='store_const', const=True,\n help='Test Flag for dev mode')\n\n parser.add_argument('-tc', '--test_case', choices=[cls.__name__ for cls in BaseCheck.__subclasses__()],\n help=\"Test case to be checked\")\n\n return parser", "def create_parser() -> configargparse.ArgParser:\n parser = configargparse.ArgParser(default_config_files=[\n \"/etc/lookout/analyzer.conf\", \"~/.config/lookout/analyzer.conf\"],\n formatter_class=ArgumentDefaultsHelpFormatterNoNone,\n auto_env_var_prefix=\"lookout_\")\n slogging.add_logging_args(parser)\n subparsers = parser.add_subparsers(help=\"Commands\", dest=\"command\")\n\n def add_parser(name, help):\n return subparsers.add_parser(\n name, help=help, formatter_class=ArgumentDefaultsHelpFormatterNoNone)\n\n list_parser = add_parser(\"list\", \"Print globally available analyzers.\")\n list_parser.set_defaults(handler=list_analyzers)\n\n run_parser = add_parser(\n \"run\", \"Launch a new service with the specified (one or more) analyzers.\")\n run_parser.set_defaults(handler=run_analyzers)\n add_analyzer_arg(run_parser)\n run_parser.add(\"-c\", \"--config\", is_config_file=True,\n help=\"Path to the configuration file with option defaults.\")\n run_parser.add(\"-s\", \"--server\", required=True,\n help=\"Lookout server address, e.g. localhost:1234.\")\n run_parser.add(\"-w\", \"--workers\", type=int, default=1,\n help=\"Number of threads which process Lookout events.\")\n add_model_repository_args(run_parser)\n run_parser.add_argument(\"--request-server\", default=\"auto\",\n help=\"Address of the data retrieval service. \\\"same\\\" means --server.\")\n\n init_parser = add_parser(\"init\", \"Initialize the model repository.\")\n init_parser.set_defaults(handler=init_repo)\n add_model_repository_args(init_parser)\n\n tool_parser = add_parser(\"tool\", \"Invoke the tooling of a given analyzer.\")\n tool_parser.set_defaults(handler=run_analyzer_tool)\n tool_parser.add(\"analyzer\", help=\"Fully qualified package name with an analyzer.\")\n tool_parser.add(\"args\", nargs=argparse.REMAINDER)\n\n package_parser = add_parser(\n \"package\",\n \"Package several analyzers to a Docker container and write a sample Docker Compose config \"\n \"for Lookout.\")\n package_parser.set_defaults(handler=package_cmdline_entry)\n add_analyzer_arg(package_parser)\n package_parser.add(\"-w\", \"--workdir\", help=\"Generate files in this directory.\",\n default=tempfile.mkdtemp(prefix=\"lookout_package_\"))\n package_parser.add(\"--requirements\", help=\"Path to a custom requirements.txt\")\n package_parser.add(\"-r\", \"--repo\", help=\"GitHub repository name to watch. \"\n \"Example: \\\"src-d/lookout\\\".\",\n required=True)\n package_parser.add(\"-u\", \"--user\", help=\"GitHub user name which will send review comments.\",\n required=True)\n paturl = \"https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/\" # noqa\n package_parser.add(\"-t\", \"--token\", help=\"GitHub token for -u/--user. 
See \" + paturl,\n required=True)\n package_parser.add(\"-y\", \"--yes\", help=\"Run the commands in the end.\",\n action=\"store_true\")\n package_parser.add(\"-n\", \"--no\", help=\"Do not run the commands in the end.\",\n action=\"store_true\")\n return parser", "def make_parser(language):\n parser = Parser()\n parser.onto_mode = True\n mappings = {'en': 'ENGLISH', 'de': \"GERMAN\", 'cn': \"CHINESE\" }\n parser.language = mappings[language]\n return parser", "def create_parser(self, prog_name, subcommand):\n return OptionParser(prog=prog_name,\n usage=self.usage(subcommand),\n version=self.get_version(),\n option_list=self.option_list)", "def build_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(help='Blogstrap commands')\n init_parser = subparsers.add_parser(\n 'init',\n help='Initialize the Blogstrap directory')\n init_parser.set_defaults(func=init)\n init_parser.add_argument('-t', '--target',\n dest='target',\n type=str,\n default='.',\n help='Target folder to generate files in')\n init_parser.add_argument('--no-homepage',\n action='store_true',\n default=False,\n help='if specified, no homepage will be created')\n run_parser = subparsers.add_parser(\n 'run', help=\"Run the Flask development server\")\n run_parser.set_defaults(func=run)\n run_parser.add_argument('-c', '--config',\n dest='config',\n type=str,\n default=None,\n help='path to a config file')\n\n return parser", "def create_parser_impl(self, common: Common, handler: base.ParserHandler) -> parser_impl.ParserImpl:\n return _ParserImpl(common, handler)", "def build_parser(self):\n parser = argparse.ArgumentParser(\n description=\"Run Crystal Matching algorithm attempting to translate co-ordinates \"\n \"on an input image to the coordinate-space of an output image while \"\n \"accounting for possible movement of crystals in the sample.\")\n\n if sys.version_info[0] < 3:\n parser.add_argument('Formulatrix_image',\n metavar=\"Formulatrix_image_path\",\n type=file,\n help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on '\n 'this image.')\n else:\n parser.add_argument('Formulatrix_image',\n metavar=\"Formulatrix_image_path\",\n type=argparse.FileType('r'),\n help='Image file from the Formulatrix - selected_point should correspond to co-ordinates on '\n 'this image.')\n parser.add_argument('beamline_stack_path',\n metavar=\"beamline_stack_path\",\n help=\"A path pointing at a directory which stores images to be stacked or a path to a stacked image.\")\n parser.add_argument('selected_points',\n metavar=\"x,y\",\n nargs='*',\n help=\"Comma-separated co-ordinates of selected points to be translated from the marked image \"\n \"to the target image.\")\n parser.add_argument('-o','--output',\n metavar=\"focused_image_path\",\n help=\"Specify directory for the stacked image. \"\n \"A file called 'processed.tif' will be created in the directory.\"\n \"'processed.tif' will be created in log directory if this is not set.\")\n parser.add_argument('--config',\n metavar=\"path\",\n action=ReadableConfigDir,\n default=join(self.get_script_path(), readable_config_dir.CONFIG_DIR_NAME),\n help=\"Sets the configuration directory.\")\n parser.add_argument('--scale',\n metavar=\"scale\",\n help=\"The scale between the Formulatrix and beamline image given as the resolution of each \"\n \"image separated by a colon. 
Note this is relative (1:2 is the same as 2:4) and a value \"\n \"must be specified for each image using the format \"\n \"'[Formulatrix_image_resolution]:[beamline_image_resolution]'.\")\n parser.add_argument('-j', '--job',\n metavar=\"job_id\",\n help=\"Specify a job_id - this will be reported in the output to help identify this run.\")\n parser.add_argument('--to_json',\n action='store_true',\n help=\"Output a JSON object.\")\n parser.add_argument('--version',\n action='version',\n version=VersionHandler.version_string())\n parser.add_argument('--log',\n metavar=\"path\",\n help=\"Write log files to the directory specified by path.\")\n self.parser = parser", "def configure_parser(parser):\n qibuild.parsers.cmake_build_parser(parser)\n qibuild.parsers.project_parser(parser)\n group = parser.add_argument_group(\"make options\")\n group.add_argument(\"--rebuild\", \"-r\", action=\"store_true\", default=False)\n group.add_argument(\"--coverity\", action=\"store_true\", default=False,\n help=\"Build using cov-build. Ensure you have \"\n \"cov-analysis installed on your machine.\")\n group.add_argument(\"--num-workers\", \"-J\", dest=\"num_workers\", type=int,\n help=\"Number of projects to be built in parallel\")", "def Create(self, name):\n self.name = name\n self.guid = MakeGuid(name)\n\n # Create XML doc\n xml_impl = xml.dom.getDOMImplementation()\n self.doc = xml_impl.createDocument(None, 'VisualStudioProject', None)\n\n # Add attributes to root element\n self.n_root = self.doc.documentElement\n self.n_root.setAttribute('ProjectType', 'Visual C++')\n self.n_root.setAttribute('Version', '8.00')\n self.n_root.setAttribute('Name', self.name)\n self.n_root.setAttribute('ProjectGUID', self.guid)\n self.n_root.setAttribute('RootNamespace', self.name)\n self.n_root.setAttribute('Keyword', 'MakeFileProj')\n\n # Add platform list\n n_platform = self.doc.createElement('Platforms')\n self.n_root.appendChild(n_platform)\n n = self.doc.createElement('Platform')\n n.setAttribute('Name', 'Win32')\n n_platform.appendChild(n)\n\n # Add empty ToolFiles section\n self.n_root.appendChild(self.doc.createElement('ToolFiles'))\n\n # Add configurations section\n self.n_configs = self.doc.createElement('Configurations')\n self.n_root.appendChild(self.n_configs)\n\n # Add files section\n self.n_files = self.doc.createElement('Files')\n self.n_root.appendChild(self.n_files)\n\n # Add empty Globals section\n self.n_root.appendChild(self.doc.createElement('Globals'))", "def parser(self):\n\t\tdom = ET.parse(self.input_filename)\n\t\tself.doc = dom.getroot()", "def get_parser():\n # Parent and only parser.\n parser = argparse.ArgumentParser(\n add_help=True,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('mode', action='store',\n choices=range(len(MODES)),\n type=int,\n help='Select mode of file download.\\n'\n ' e.g: 0(rated) or 1(list).')\n parser.add_argument('torr_page', action='store',\n choices=range(len(TORRENTS)),\n type=int,\n help='Select tracking page to download from.\\n'\n ' e.g: 0 to .. 
' + str(len(TORRENTS)-1) + '.')\n parser.add_argument('str_search', action='store',\n type=str,\n help='Input torrent string to search.\\n'\n ' e.g: \"String search\"')\n return(parser)", "def setup_parser(self, parser, args):\r\n\r\n pass", "def create_parser(self, prog_name, subcommand):\r\n return OptionParser(prog=prog_name,\r\n usage=self.usage(subcommand),\r\n version='',\r\n add_help_option = False,\r\n option_list=self.option_list)", "def _setup_parser():\n parser = argparse.ArgumentParser(add_help=True)\n parser.add_argument('--eval_model', type=str, default=None)\n parser.add_argument('--stack', type=int, default=1)\n parser.add_argument('--flare', action='store_true')\n parser.add_argument('--mixreg', action='store_true')\n\n env_group = parser.add_argument_group(\"Env Args\")\n env_group.add_argument('--env_name', type=str, default=ENV_NAME)\n env_group.add_argument('--num_envs', type=int, default=NUM_ENVS)\n env_group.add_argument('--num_levels', type=int, default=NUM_LEVELS)\n env_group.add_argument('--start_level', type=int, default=START_LEVEL)\n\n agent_group = parser.add_argument_group(\"Agent Args\")\n PPOAgent.add_to_argparse(agent_group)\n\n model_group = parser.add_argument_group(\"Model Args\")\n ImpalaPPO.add_to_argparse(model_group)\n\n return parser", "def buildParser():\n\n parser = argparse.ArgumentParser(\n description='Script to parse bagfile to json file')\n parser.add_argument('-b', '--bag', help='Bag file to read',\n required=True, type=str)\n parser.add_argument('-i', '--include',\n help='list or regex for topics to include',\n required=False, nargs='*')\n parser.add_argument('-e', '--exclude',\n help='list or regex for topics to exclude',\n required=False, nargs='*')\n parser.add_argument('-o', '--output',\n help='name of the output file',\n required=True)\n return parser", "def get_parser():\n module_parser = ArgumentParser(\n formatter_class=ArgumentDefaultsHelpFormatter)\n module_parser.add_argument(\"-i\", dest=\"data_path\", type=str,\n help=\"the location dataset\")\n module_parser.add_argument(\"-o\", dest=\"output_path\", type=str,\n help='base dir for outputs')\n module_parser.add_argument(\"-subdir\", dest=\"subdir\", type=str,\n choices=['test', 'train', 'val', 'all'],\n help='subdir: trn, test, val, or all ...')\n module_parser.add_argument(\"-n\", dest=\"n_train\", type=int,\n help='n: number of images for training')\n module_parser.add_argument(\"-Rx\", dest=\"x_res\", type=int,\n help='x resulution for final img')\n module_parser.add_argument(\"-Ry\", dest=\"y_res\", type=int,\n help='y resolution of final image')\n module_parser.add_argument(\"-d\", dest=\"d\",\n type=int,\n default=0,\n help='debug')\n return module_parser", "def make_parser():\n p = argparse.ArgumentParser(\n description=\"Visualize and analyze error from oblique/straight tag observations\"\n )\n\n p.add_argument(\"-n\", help=\"name of the test in the config file\")\n\n p.add_argument(\"-t\", help=\"throw out bad tags\", action=\"store_true\")\n\n p.add_argument(\"-v\", help=\"visualize data\", action=\"store_true\")\n\n p.add_argument(\"-i\", help=\"print result data\", action=\"store_true\")\n\n return p", "def get_parser():\n parser = argparse.ArgumentParser(\n description=\"CLEWsy tools for CLEWs models\")\n parser.add_argument(\n \"--version\",\n action=\"version\",\n version=\"clewsy {ver}\".format(ver=__version__))\n \n parser.add_argument(\n \"-v\",\n \"--verbose\",\n dest=\"loglevel\",\n help=\"set loglevel to INFO\",\n action=\"store_const\",\n 
const=logging.INFO)\n parser.add_argument(\n \"-vv\",\n \"--very-verbose\",\n dest=\"loglevel\",\n help=\"set loglevel to DEBUG\",\n action=\"store_const\",\n const=logging.DEBUG)\n subparsers = parser.add_subparsers()\n \n # Parser for building a clews model\n build_parser = subparsers.add_parser(\"build\", help=\"build a CLEWs model from clustering data and a yaml model description file\")\n build_parser.add_argument(\n \"yamlfile\",\n help=\"Please provide the yaml model description file\",\n )\n build_parser.set_defaults(func=build)\n \n \n return parser", "def get_parser():\n\tparser = argparse.ArgumentParser('preprocessing.py',\n\t\tformatter_class=argparse.RawDescriptionHelpFormatter,\n\t\tdescription=\"\"\"\nRun a piepline for one NICER ObsID data. \n\t\t\"\"\"\n\t\t)\n\tversion = '%(prog)s ' + __version__\n\tparser.add_argument('obsid', type=str, \n\t\thelp='ObsID (e.g., 4012010109)')\t\n\treturn parser", "def setup_parser(self):\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument('words', metavar='W', nargs='+', help=POSITIONAL_HELP)\n parser.add_argument('-a','--any', dest=\"search_funct\", action=\"store_const\", \n const='any', default='all', help=SEARCH_HELP)\n parser.add_argument('-o','--only-id', action='store_true', help=ID_HELP)\n parser.add_argument('-u', '--update', action='store_true', help=UPDATE_HELP)\n return parser", "def build_parser():\n def commaSplitter(str):\n \"\"\"\n Argparse a comm-seperated list\n \"\"\"\n # leave this here as a reminder of what I should do to make the argument parsing more robust\n\n # if sqrt != int(sqrt):\n # msg = \"%r is not a perfect square\" % string\n # raise argparse.ArgumentTypeError(msg)\n # return value\n return str.split(',')\n\n def existing_file(fname):\n \"\"\"\n Argparse type for an existing file\n \"\"\"\n if not os.path.isfile(fname):\n raise ValueError(\"Invalid file: \" + str(fname))\n return fname\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument('-p', '--prefix', help='dont really know what this does...',\n action='store', default='patient', dest='prefix')\n parser.add_argument('-d', '--date', help='dont really know what this does...',\n action='store', default='', dest='sampledate')\n parser.add_argument('template', type=argparse.FileType('r'), help='BEAST config template file')\n parser.add_argument('fasta', type=argparse.FileType('r'), help='file of sequences (in FASTA format)')\n\n return parser", "def make_parser():\n parser = argparse.ArgumentParser(description=config.DESCRIPTION)\n parser.add_argument('url_file', metavar='URL_FILE', type=str,\n help=config.HELP_URL_FILE)\n parser.add_argument('-d', metavar='DEST_DIR', dest='destination_dir', default=config.DEFAULT_DESTINATION_DIR, type=str,\n help=config.HELP_DESTINATION_DIR)\n parser.add_argument('-l', metavar='LOG_FILE', dest='log_file', default=config.DEFAULT_LOG_FILE, type=str,\n help=config.HELP_LOG_FILE % config.DEFAULT_LOG_FILE)\n\n return parser", "def __init__(self, parser=None):", "def get_parser():\n parser = ArgumentParser(\n description='phpMyAdmin work reporting tool\\n\\nGenerates list of commits and issues handled in given period.',\n epilog='Credentials can be also stored in ~/.config/phpmyadmin:\\n\\n[github]\\nuser=USER\\ntoken=TOKEN',\n formatter_class=RawDescriptionHelpFormatter,\n )\n parser.add_argument(\n '-u', '--user',\n help='GitHub username, used for both reporting and authentication'\n )\n parser.add_argument(\n '-t', '--token',\n help='GitHub authentication token'\n )\n 
parser.add_argument(\n '-s', '--start-date',\n type=dateutil.parser.parse,\n default=datetime.now() - timedelta(days=7),\n help='Starting datetime, defaults to 7 days ago'\n )\n parser.add_argument(\n '-e', '--end-date',\n type=dateutil.parser.parse,\n default=datetime.now(),\n help='Ending datetime, defaults to current timestamp'\n )\n parser.add_argument(\n '-f', '--format',\n choices=('markdown', ),\n default='markdown',\n help='Output format',\n )\n parser.add_argument(\n '-w', '--weekly',\n action='store_true',\n help='Weekly report not including private repositories'\n )\n parser.add_argument(\n '-W', '--last-week',\n action='store_true',\n help='Create report for last week'\n )\n parser.add_argument(\n '-M', '--last-month',\n action='store_true',\n help='Create report for last month'\n )\n parser.add_argument(\n '--this-week',\n action='store_true',\n help='Create report for this week'\n )\n return parser", "def get_parser():\r\n parser = argparse.ArgumentParser(description=( # pylint: disable=redefined-outer-name\r\n \"Automatically finds translation errors in all edx-platform *.po files, \"\r\n \"for all languages, unless one or more language(s) is specified to check.\"\r\n ))\r\n\r\n parser.add_argument(\r\n '-l', '--language',\r\n type=str,\r\n nargs='*',\r\n help=\"Specify one or more specific language code(s) to check (eg 'ko_KR').\"\r\n )\r\n\r\n parser.add_argument(\r\n '-e', '--empty',\r\n action='store_true',\r\n help=\"Includes empty translation strings in .prob files.\"\r\n )\r\n\r\n parser.add_argument(\r\n '-v', '--verbose',\r\n action='count', default=0,\r\n help=\"Turns on info-level logging.\"\r\n )\r\n\r\n return parser", "def parse(self, **kwargs):\n\t\treturn self.create(**kwargs)", "def generate_parser():\n description = \"%(prog)s -- Predict RNA expression from cCREs and Ideas states\"\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('-r', '--rna', dest=\"rna\", type=str, action='store', required=True,\n help=\"RNA expression file\")\n parser.add_argument('-s', '--state', dest=\"state\", type=str, action='store', required=True,\n help=\"State file\")\n parser.add_argument('-c', '--cre', dest=\"cre\", type=str, action='store', required=True,\n help=\"CRE file\")\n parser.add_argument('-l', '--lessone', dest=\"lessone\", type=int, action='store', default=0,\n help=\"Cell type to leave out\")\n parser.add_argument('-o', '--output', dest=\"output\", type=str, action='store', default='./out',\n help=\"Output prefix\")\n parser.add_argument('-i', '--iterations', dest=\"iterations\", type=int, action='store', default=100,\n help=\"Refinement iterations\")\n parser.add_argument('-t', '--threads', dest=\"threads\", type=int, action='store', default=1,\n help=\"Number of threads to use\")\n parser.add_argument('--initialization-dist', dest=\"init_dist\", type=int, action='store', default=1000,\n help=\"Beta initialization distance cutoff\")\n parser.add_argument('--promoter-dist', dest=\"promoter_dist\", type=int, action='store',\n help=\"If specified, learn betas for promoters up to promoter distance cutoff\")\n parser.add_argument('--cre-dist', dest=\"cre_dist\", type=int, action='store',\n help=\"CRE distance cutoff\")\n parser.add_argument('--cre-exclude-promoter', dest=\"cre_noprom\", action='store_true',\n help=\"Exclude promoter from CREs\")\n parser.add_argument('--sum-cres', dest=\"sum_cres\", action='store_true',\n help=\"Sum CREs instead of finding overall proportions\")\n parser.add_argument('--correlation', 
dest=\"correlation\", type=float, action='store', default=0.0,\n help=\"Initial correlation cutoff\")\n parser.add_argument('--pca', dest=\"pca\", type=float, action='store',\n help=\"Convert state ratios into PCAs explaining this much variance\")\n parser.add_argument('--trainstats', dest=\"train_stats\", action='store_true',\n help=\"Output training statistics\")\n parser.add_argument('--max-CREs', dest=\"max_cres\", action='store', type=int, default=0,\n help=\"Maximum number of CREs allowed to be selected per TSS at a time (0 is no max)\")\n parser.add_argument('--skip-training', dest=\"skip_training\", action='store_true',\n help=\"Skip CRE-TSS pairining refinement\")\n parser.add_argument('--shuffle-states', dest=\"shuffle_states\", action='store_true',\n help=\"Shuffle the state proportions of each CRE as a negative control\")\n parser.add_argument('-e', '--eRP', dest=\"eRP\", action='store', type=str,\n help=\"A previously generated eRP TSS-cCRE pair file. Passing this will ignore initial TSS-CRE pair selection\")\n parser.add_argument('--seed', dest=\"seed\", action='store', type=int,\n help=\"Random number generator state seed\")\n parser.add_argument('-v', '--verbose', dest=\"verbose\", action='store', type=int, default=2,\n help=\"Verbosity level\")\n return parser", "def get_parser():\n\n parser = ArgumentParser()\n\n req_argument = parser.add_argument_group('required arguments')\n\n parser.add_argument(\"-o\", \"--outdir\", type=str, default='result',\n help=\"Path for results\")\n parser.add_argument(\"-fname\", \"--file_name\", type=str, default=\"try1\",\n help=\"The name the output file should have within the output directory\")\n parser.add_argument(\"-freq\", \"--frequency\", type=str,\n help=\"File to read the haplotype frequencies from\")\n parser.add_argument(\"-over\", \"--overlap\", type=str,\n help=\"File to read the peptide vs alleles or peptide vs haplotype data\")\n parser.add_argument(\"-o_a\", \"--overlap_allele\", type=int, default=0,\n help=\"1 if the --overlap file passed in is peptide vs alleles and 0 if it is peptide vs haplotypes and has already been binarized\")\n # parser.add_argument(\"-n\", \"--ntarget\", type=int, default=5,\n # help=\"The ntarget for max n-times coverage\")\n parser.add_argument(\"-maxpep\", \"--max_number_of_pepts\", type=int, default=30,\n help=\"The maximum number of peptides allowed in a vaccine\")\n parser.add_argument(\"-c\", \"--cut\", type=int, default=3,\n help=\"The cut value for ommitting peptides that are too similar; a value of 0 should be provided if similar peptides are not to be excluded from a vaccine design.\")\n\n\n \n return parser", "def get_parser(subparsers):\n parser = subparsers.add_parser(\"models\", description=\"Manage ML on MCU models.\")\n parser.set_defaults(func=handle)\n add_common_options(parser)\n add_context_options(parser)\n add_models_options(parser)\n return parser", "def _make_parser(self):\n return DefusedExpatParser()", "def get_parser(self):\n parser = argparse.ArgumentParser(description='Short sample app')\n\n parser.add_argument('-a', action=\"store_true\", default=False)\n parser.add_argument('-b', action=\"store\", dest=\"b\")\n parser.add_argument('-c', action=\"store\", dest=\"c\", type=int)\n return parser", "def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='url to scrape')\n return parser", "def parser():\n parser = ArgumentParser()\n parser.add_argument('dir_jsons', help='dir containing json files')\n parser.add_argument('dir_out', 
help='output directory')\n parser.add_argument('file_name', help='name of HTML file')\n return parser", "def parser(*args, **kwargs):\n return NotImplementedError", "def get_parser():\n p = argparse.ArgumentParser(description='such a good program')\n p.add_argument('infile')\n p.add_argument('outfile')\n return p", "def get_instance(cls, project, parameters):\n\n\t\tif False == parameters.has_key(\"name\") or \"\" == parameters[\"name\"]:\n\t\t\traise DepFileParsingError()\n\n\t\tschema_version = Project.LAST_SCHEMA_VERSION\n\t\tif parameters.has_key(\"schema\") and \"\" != parameters[\"schema\"]:\n\t\t\tschema_version = parameters[\"schema\"]\n\n\t\treturn Project(parameters[\"name\"], schema_version)", "def create_parser():\n parser = argparse.ArgumentParser(description='Watching for files containing magictext')\n parser.add_argument('--ext', help='File extensions to filter on, default=.txt', default='.txt')\n parser.add_argument('--poll', help=\"Polling interval in seconds, default=1.0\", type=float, default=1.0)\n parser.add_argument('directory', help='Directory to watch.')\n parser.add_argument('magictext', help='Text to search for within matching files.')\n return parser", "def setup_parser():\r\n parser = argparse.ArgumentParser(description='Freeseer Recording Utility',\r\n formatter_class=argparse.RawTextHelpFormatter)\r\n parser.add_argument(\"-v\", \"--version\", action='version',\r\n version=textwrap.dedent('''\\\r\n Freeseer {version} ({platform})\r\n Python {pymajor}.{pyminor}.{pymicro}\r\n PyGst {pygst_version}\r\n PyQt {pyqt_version}\r\n Qt {qt_version}\r\n Yapsy {yapsy_version}\r\n '''.format(version=__version__,\r\n platform=sys.platform,\r\n pymajor=sys.version_info.major,\r\n pyminor=sys.version_info.minor,\r\n pymicro=sys.version_info.micro,\r\n pygst_version=pygst._pygst_version,\r\n pyqt_version=QtCore.PYQT_VERSION_STR,\r\n qt_version=QtCore.QT_VERSION_STR,\r\n yapsy_version=yapsy.__version__)))\r\n\r\n # Configure Subparsers\r\n subparsers = parser.add_subparsers(dest='app', help='Command List')\r\n setup_parser_record(subparsers)\r\n setup_parser_config(subparsers)\r\n setup_parser_talk(subparsers)\r\n setup_parser_report(subparsers)\r\n setup_parser_upload(subparsers)\r\n return parser", "def build_parser(description):\n parser = argparse.ArgumentParser(description = description)\n #requiredNamed = parser.add_argument_group('required named arguments')\n parser.add_argument('-p', '--profile', default='default',\n choices=get_profile_names(),\n help='botocore profile name for AWS creds and other vars.')\n parser.add_argument('-r', '--region', default=None,\n help='AWS region to use')\n #parser.add_argument('--search-regions', action='store_true', default=False,\n # help='search regions for VPC with given vpc_name')\n #parser.add_argument('--quiet', action='store_true', default=False,\n # help='prevent status messages to STDOUT')\n\n # create a subparser for our plugins to attach to.\n subparser = parser.add_subparsers(\n title = 'subcommands',\n description = 'valid subcommands',\n help = '--help for additional subcommand help'\n )\n\n plugins = load_entry_points('botoform.plugins')\n load_parsers_from_plugins(subparser, plugins)\n\n return parser", "def buildParser( declaration = grammar ):\n return VRMLParser( declaration, \"vrmlFile\" )" ]
[ "0.7340575", "0.7248573", "0.71354264", "0.6643591", "0.6587333", "0.6457381", "0.6324095", "0.6283771", "0.6264125", "0.62213314", "0.62162375", "0.62034565", "0.62025255", "0.61537164", "0.61472064", "0.613323", "0.60847723", "0.6076085", "0.607107", "0.60409564", "0.60259813", "0.60045224", "0.59626454", "0.5941302", "0.5920361", "0.5910445", "0.5901308", "0.58885026", "0.58867687", "0.58867353", "0.58821213", "0.5870974", "0.5869089", "0.5856793", "0.58455664", "0.5835632", "0.5830802", "0.582871", "0.58220947", "0.5814835", "0.58099157", "0.58099157", "0.5795793", "0.578826", "0.57760435", "0.5773582", "0.576962", "0.576962", "0.576962", "0.57607347", "0.5757971", "0.5729829", "0.5717859", "0.5716921", "0.5700241", "0.568946", "0.5682308", "0.5679856", "0.5657902", "0.5657322", "0.5654501", "0.5653326", "0.5648881", "0.5647495", "0.56454015", "0.5644162", "0.5643549", "0.5639736", "0.56346774", "0.5626378", "0.562482", "0.56008583", "0.5597338", "0.5584535", "0.5583383", "0.5577558", "0.5577555", "0.5571843", "0.5570929", "0.5558676", "0.55549306", "0.5544737", "0.5535632", "0.55274427", "0.55247974", "0.552089", "0.5519667", "0.55191565", "0.5506539", "0.54919034", "0.5490098", "0.54895145", "0.54859394", "0.5482792", "0.54824674", "0.5479853", "0.5478936", "0.5473625", "0.54701024", "0.5469246", "0.5463037" ]
0.0
-1
Add a new Project
def do_cmd(gargs, args, _): request = { "desc": args.desc, "owner": args.owner } if args.ssid: request["wifi_props"] = { "bssid_type": args.ssid_type, "ssid": args.ssid } if args.plmnid: plmnid = PLMNID(args.plmnid) request["lte_props"] = { "plmnid": plmnid.to_str() } headers = command.get_headers(gargs) url = '/api/v1/projects' response, _ = command.connect(gargs, ('POST', url), 201, request, headers=headers) location = response.headers['Location'] tokens = location.split("/") project_id = tokens[-1] print(project_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_project(project):\n print('add_project: ' + str(project))\n try_insert_or_update(models.projects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n name=project['name'], path=project['name'], active=True, user_id=current_user.id)])\n return", "def add(self, name, project):\n self.projects[name] = project", "def test_add_project(self):\n pass", "def add_project(project, network, id):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.add_project(project, network, id)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def create_project():\n client = RequestManager()\n project_name = \"\".join(choices(string.ascii_letters + string.digits, k=10))\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects\")\n body = {\"name\": project_name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n STORED_ID['project_id'] = response.json()['id']", "def project_create(project):\n client.project.create(project)", "def add_project(request):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = ProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project = form.save()\n messages.success(request, 'Project added successfully!')\n return redirect(reverse('portfolio'))\n else:\n messages.error(request, 'Failed to add project.\\\n # Please ensure the form is valid')\n else:\n form = ProjectForm()\n\n form = ProjectForm()\n template = 'portfolio/add_project.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def newProject(self):\n dialog = NewProjectDialog()\n if not dialog.name is None and not dialog.path is None:\n self._app.createProject(str(dialog.name), str(dialog.path))", "def addProject(self, project):\n\n result = Project.getProjectDependencies(project, \"external\", self.__updateRepositories)\n for project in result:\n\n Console.info(\"Adding %s...\", Console.colorize(project.getName(), \"bold\"))\n Console.indent()\n\n # Append to session list\n self.__projects.append(project)\n\n # Import library methods\n libraryPath = os.path.join(project.getPath(), \"jasylibrary.py\")\n if os.path.exists(libraryPath):\n self.loadLibrary(project.getName(), libraryPath, doc=\"Library of project %s\" % project.getName())\n\n # Import command methods\n commandPath = os.path.join(project.getPath(), \"jasycommand.py\")\n if os.path.exists(commandPath):\n self.loadCommands(project.getName(), commandPath)\n\n # Import project defined fields which might be configured using \"activateField()\"\n fields = project.getFields()\n for name in fields:\n entry = fields[name]\n\n if name in self.__fields:\n raise UserError(\"Field '%s' was already defined!\" % (name))\n\n if \"check\" in entry:\n check = entry[\"check\"]\n if check in [\"Boolean\", \"String\", \"Number\"] or isinstance(check, list):\n pass\n else:\n raise UserError(\"Unsupported check: '%s' for field '%s'\" % (check, name))\n\n self.__fields[name] = entry\n\n\n Console.outdent()", "def add_project():\n if request.method == \"POST\":\n result = add_project_to_db(\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n return render_template(\"add_project.html\")", "def add_project(self, proj, i):\r\n self.__projects[i] = proj", "def 
add_project(request):\n\n profile = get_object_or_404(Profile, user=request.user)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n project_form = ProjectForm(request.POST, request.FILES)\n if project_form.is_valid():\n project = project_form.save(commit=False)\n project.owner = profile\n project.save()\n messages.success(request, 'Successfully created project!')\n return redirect(reverse('project_detail', args=[project.id]))\n else:\n messages.error(\n request,\n 'Failed to create project. Please ensure the form is valid'\n )\n\n project_form = ProjectForm()\n\n template = 'gameproject/add_project.html'\n context = {\n 'project_form': project_form,\n }\n\n return render(request, template, context)", "def addProject(self, name: str, description: str = \"NULL\"):\n query = f\"\"\"\n INSERT INTO projects (name, description)\n VALUES ('{name}', '{description}');\n \"\"\"\n sql.executeQuery(self.connection, query)", "def add_project(self, project):\n c = self.conn.cursor()\n cursor = c.execute(\"INSERT INTO projects VALUES (null, ?, ?, ?, ?)\", (project['owner'],\n project['title'],\n datetime.now(), datetime.now(),))\n\n self.conn.commit()\n project_id = cursor.lastrowid\n\n self.conn.cursor().execute(\"INSERT INTO users_projects VALUES (?,?)\", (project['owner'], project_id),)\n self.conn.commit()\n return self.get_project(project_id)", "def add_project(project, taglist):\n if anonymize:\n import random\n project['name'] = 'Anonimized Project ' + str(project['id'])[-3:]\n project['client'] = 'Anonimized Client'\n\n wf.add_item(title=project['name'],\n subtitle='Client: ' +\n project['client'] +\n ' Hit ENTER to show menu, press ALT for more info.',\n modifier_subtitles={\n 'alt': 'Tags: ' + ', '.join(taglist),\n },\n arg=str(project['id']),\n valid=True,\n icon='icons/project_{0}.png'.format(\n project['project_state']).lower(),\n copytext=project['name'])", "def new_project(self, rootdir=None):\n if rootdir is None:\n rootdir = Ui.instance().select_directory(user.home)\n if not os.path.exists(rootdir):\n os.makedirs(rootdir)\n\n print 'Weld.new_project in ', rootdir\n project = Project(rootdir)\n\n project.save()\n self.project = project\n self.current_project_path = rootdir\n Ui.instance().set_resources_draggable(True)\n Ui.instance().show_status('new project created')", "def create_project(self, **kwargs):\n _url = f\"{self.base_url}/projects\"\n if \"name\" not in kwargs:\n raise ValueError(\"Parameter 'name' is mandatory\")\n return self.http_call(\"post\", _url, json_data=kwargs).json()", "def add_project(self, project=None):\n is_project = type(project) is Project\n id_exists = project.client_id in [c.client_id for c in self.client_list]\n pid_exists = project.project_id() in [p.project_id() for p in self.project_list]\n\n # cancel if it's no project or the client_id does not exist\n # or the project_id already exists\n if not is_project or not id_exists or pid_exists:\n return False\n\n # add the project\n self.project_list.append(project)\n self.save_project_to_file(project=project)\n return True", "def newproject():\n log('Criando novo projeto', yellow)\n log('Cria a conta no bitbucket com o nome do projeto vázio que o script se encarregará do resto', red)\n\n conta = raw_input('Digite o nome do projeto: ')\n\n local('echo \"clonando projeto %s\"' % bitbucket_repository)\n local('git clone {0} {1}{2}'.format(bitbucket_repository, folder_project_local, conta))\n 
local('cd {0}{1}'.format(folder_project_local, conta))\n local('mkvirtualenv {0}'.format(conta))\n local('setvirtualenvproject')\n local('pip install -r requirements.txt')\n local('rm -rf {0}{1}/.git'.format(folder_project_local, conta))\n local('rm -rf README.md')\n local('git init')\n local('git remote add origin [email protected]:{0}/{1}.git'.format(bitbucket_user, conta))", "def add_project():\n \n if 'username' in session: \n form=ProjectForm()\n \n if request.method == 'POST':\n if form.validate_on_submit():\n user = mongo.db.user.find_one({'username': session['username']})\n mongo.db.projects.insert_one({'username': user['username'],\n 'date': datetime.utcnow(),\n 'title': form.title.data,\n 'deadline': datetime.strptime(form.deadline.data, \"%d/%m/%Y\"),\n 'brief': form.brief.data,\n 'status': \"open\",\n 'note': form.note.data,\n 'user_id': user['_id']\n })\n \n flash('Your project has been created.', 'success')\n return redirect(url_for('projects'))\n \n return render_template('pages/addproject.html', title='New Project', form=form, legend=\"Add a project\")\n \n flash('You need to be logged in to post any content.', 'info')\n return redirect(url_for('login'))", "def post_project():\n\n title = request.form.get('title')\n description = request.form.get('description')\n max_grade = request.form.get('max_grade')\n\n hackbright.make_new_project(title, description, max_grade)\n\n flash(\"Successfully added new project.\")\n\n return redirect(\"/project?title={}\".format(title))", "def add_new_project(title, description, max_grade):\n QUERY = \"\"\"INSERT into Projects (title, description, max_grade) VALUES(?,?,?)\"\"\"\n db_cursor.execute(QUERY, (title, description, max_grade))\n db_connection.commit()\n print \"Success! Add %s project, and here is the description: %s, and max grade: %s\"\\\n %(title, description, max_grade)", "def _create_project(self):\n request = {\n \"project\": {\n \"description\": \"description\",\n \"enabled\": True,\n \"name\": uuid.uuid4().hex,\n \"domain_id\": \"default\",\n }\n }\n response = self.client.post(PROJECT_PATH, data=json.dumps(request),\n headers=HEADERS)\n\n if response.status_code == 201:\n return response.json()\n else:\n raise SystemExit(\"Failed to create project.\")", "def create_project(self, name, description=None):\n description = description or ''\n data = self._run(\n url_path=\"projects/add\",\n name=name,\n description=description\n )\n return data['result']['project']['id']", "def create_project(self, **kwargs):\n save = kwargs.get('save', True) \n if kwargs.has_key('save'):\n del(kwargs['save'])\n\n index = self.object_index()\n defaults = dict(slug = \"test-project-%s\" % index,\n basecamp_url = \"https://foo.basecamphq.com/projects/%s/log\" % index)\n defaults.update(kwargs)\n p = Project(**defaults)\n\n if save:\n p.save()\n self.assert_(p.id)\n return p", "def create_new_project(project_name, token=None):\n session = konfuzio_session(token)\n url = create_new_project_url()\n new_project_data = {\"name\": project_name}\n r = session.post(url=url, json=new_project_data)\n return r", "def add_project(self, project):\n project_id = storage_utils.get_next_id(self._storage_location)\n self.save_project(project_id, project)\n return project_id", "def NewProject (projectname):\n\tif projectname == \"\" or projectname == None:\n\t\tnewprojcode(projectname)\n\telse:\n\t\tnewprojCode_withNamed()", "def createProject(self, payLoad):\n\n uri = \"/v1/projects/\" \n response = self.client.post(uri, payLoad)\n return response", "def 
add_project(name, sample_mask):\n try:\n _ = Project.query.first()\n except OperationalError:\n db.create_all()\n p = Project(name=name, sample_mask=sample_mask)\n with_transaction(db.session, lambda session: session.add(p))\n return Project.query.filter_by(id=p.id).one()", "def test_new_project_existing(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})", "def _create_project(org, project_name):\n project = Project(\n org=org,\n name=project_name\n )\n project.save()\n return project", "def add_task():\n found = False\n project_id = None\n task = request.form['task']\n project = request.form['project']\n \n if not task:\n return redirect('/')\n\n if not project:\n project = 'Tasks'\n\n projects = Projects.query.all()\n\n for proj in projects:\n if proj.project_name == project:\n found = True\n\n # add the project if not in database already\n if not found:\n add_project = Projects(project, True)\n db.session.add(add_project)\n db.session.commit()\n projects = Projects.query.all()\n\n # set the active tab\n for proj in projects:\n if proj.project_name == project:\n project_id = proj.project_id\n proj.active = True\n else:\n proj.active = False\n\n status = bool(int(request.form['status']))\n\n # add the new task\n new_task = Tasks(project_id, task, status)\n db.session.add(new_task)\n db.session.commit()\n return redirect('/')", "def on_add(self, project, name, **kwargs):\n pass", "def create_project(projectname):\n auth_id = request.get_json().get(\"auth_id\")\n storage_accesses = request.get_json().get(\"storage_accesses\", [])\n response = jsonify(\n admin.create_project(\n current_app.scoped_session(), projectname, auth_id, storage_accesses\n )\n )\n return response", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def post(self, data):\n conn = pecan.request.db_conn\n try:\n project = db_models.Project(**data.as_dict())\n return conn.create_project(request.context, project)\n except Exception:\n LOG.exception('Fail to create project: %s' % data.as_dict())\n raise exception.ProjectCreateFailed(project_id=data.project_id,\n user_id=data.user_id)", "def create_project(self, pool, project, arg):\n self.verify_pool(pool)\n svc = self.project_path % (pool, project)\n ret = self.rclient.get(svc)\n if ret.status != restclient.Status.OK:\n svc = self.projects_path % pool\n ret = self.rclient.post(svc, arg)\n if ret.status != restclient.Status.CREATED:\n exception_msg = (_('Error creating project: '\n '%(project)s on '\n 'pool: %(pool)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'project': project,\n 'pool': pool,\n 'ret.status': ret.status,\n 'ret.data': 
ret.data})\n raise exception.ShareBackendException(msg=exception_msg)", "def new_project(self, project_name: str) -> str:\n if project_name in [NO_PROJECT_NAME, \"\"]:\n raise MephistoDBException(f'Invalid project name \"{project_name}')\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n try:\n c.execute(\n \"INSERT INTO projects(project_name) VALUES (?);\", (project_name,)\n )\n project_id = str(c.lastrowid)\n return project_id\n except sqlite3.IntegrityError as e:\n if is_key_failure(e):\n raise EntryDoesNotExistException()\n elif is_unique_failure(e):\n raise EntryAlreadyExistsException(\n f\"Project {project_name} already exists\"\n )\n raise MephistoDBException(e)", "def create_project(self, name):\n project = self._post('/projects', data={'name': name})\n self.create_project_hook(project['id'], self.webhook_url + name)\n return project", "def create_project(self, project_name=None, check=True):\n project_name = project_name or next(utils.generate_ids('project'))\n page_projects = self._page_projects()\n page_projects.button_create_project.click()\n\n with page_projects.form_create_project as form:\n form.field_name.value = project_name\n form.submit()\n\n if check:\n self.close_notification('success')\n page_projects.table_projects.row(\n name=project_name).wait_for_presence()\n\n return project_name", "def newproject_view(request):\n\n # Use to tell to the template that the user want to creat a new project\n is_new = True\n\n # Get all the user. Everyone may be member of the project\n users = User.objects.all()\n\n # If the view received data, try to creat a project\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Save the new project in the database\n form.save(commit=True)\n\n # redirect to the project list display page\n return redirect(\"projects\")\n else:\n # creat an empty form for the template\n form = ProjectForm(request.user)\n\n return render(request, 'newProject.html', locals())", "def add_project(self, name, branches):\n prj_e = self._doc.createElement('project')\n prj_e.setAttribute('name', name)\n for branch in branches:\n br_e = self._doc.createElement('branch')\n for key, val in branch.iteritems():\n br_e.setAttribute(key, val)\n prj_e.appendChild(br_e)\n self._doc.firstChild.appendChild(prj_e)", "def add_project_to_groups(projectname):\n groups = request.get_json().get(\"groups\", [])\n return jsonify(\n admin.add_project_to_groups(\n current_app.scoped_session(), username, groups=groups\n )\n )", "def add_project(title, description, url='http://test.com', tags=None, pinned=False):\n # Create project.\n project = Project.objects.get_or_create(\n title=title, description=description, url=url, pinned=pinned)[0]\n\n # Add tags to project.\n tags = tags if tags else []\n project_tags = [add_project_tag(tag) for tag in tags]\n project.tags.add(*project_tags)\n return project", "def add_project(title, description, max_grade):\n\n QUERY = \"\"\"\n INSERT INTO Projects (title, description, max_grade) VALUES (?, ?, ?)\n \"\"\"\n\n db_cursor.execute(QUERY, (title, description, max_grade))\n db_connection.commit()\n\n print \"Successfully added %s: %s with a max grade of %s\" % (title, description, max_grade)", "def test_create_project(client, session, tokens):\n response = client.post(\n \"/projects\",\n json={\n \"name\": \"New Project\",\n \"organizations\": [],\n \"teams\": [],\n \"users\": [],\n },\n headers={\"Authorization\": f\"Bearer {tokens['write']}\"},\n )\n assert 
response.status_code == 201\n project_id = response.json[\"id\"]\n assert Project.query.filter(Project.id == project_id).count() == 1", "def project_created_handler(event):\n obj = event.obj\n # submit Project after creation\n obj.workflow.start()", "def post_project_create(self, resource_dict):\n pass", "def test_projects_post(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='POST',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_create_project_request(self):\n pass", "def add_to_project(resulthash,project):\n t = models.HBTask.objects.get(resulthash=resulthash)\n p = models.Project.objects.get(name=projectname)\n p.tasks.add(t)", "def project(projectname,targetamount):\n if (validatename(projectname) and validatenum(targetamount)):\n targetamount=float(targetamount)\n con = lite.connect(databasefile)\n with con:\n cur = con.cursor() \n cur.execute(\"SELECT Id FROM projects where name=?\", (projectname,))\n exists = cur.fetchone()\n if exists:\n click.echo(\"Project name already exists!\")\n sys.exit()\n cur.execute(\"INSERT INTO projects (Name, Tamount) VALUES (?, ?)\", (projectname, targetamount))\n click.echo(\"Added %s project with target of $%-.2f\" % (projectname, targetamount))", "def get_add_project_form():\n\n return render_template(\"project_add.html\")", "def _on_new_project(self):\n lang = self.ddnGuiLanguage.get()\n projectfile = filedialog.asksaveasfilename(\\\n filetypes=[('Paratext Biblical Terms', '.htm'), ], \\\n initialdir=self.BibTerm, \\\n initialfile='', \\\n title=LOCALIZED_TEXT[lang]['BibTerms2Dict project'], \\\n defaultextension='.prj')\n if os.path.exists(projectfile):\n messagebox.showwarning(LOCALIZED_TEXT[lang]['New Project'], \\\n LOCALIZED_TEXT[lang]['{} already exist choose another name.'].\\\n format(os.path.basename(projectfile)))\n return\n else:\n newfile = codecs.open(fileout, mode='w', encoding='utf-8')\n newfile.close()\n self.list_projects = [f.rstrip('.prj') \\\n for f in os.listdir(self.BibTerm) \\\n if f.endswith('.prj')]\n self.ddnCurProject['values'] = self.list_projects\n self.ddnCurProject.set(os.path.basename(projectfile)[:-4])\n self.update\n\n pass", "def create_new_project():\n readline.parse_and_bind('tab: complete')\n\n print \\\n\"\"\"\n xbmcswift2 - A micro-framework for creating XBMC plugins.\n [email protected]\n --\n\"\"\"\n print 'I\\'m going to ask you a few questions to get this project' \\\n ' started.'\n\n # noinspection PyDictCreation\n opts = {}\n\n # Plugin Name\n opts['plugin_name'] = get_valid_value(\n 'What is your plugin name?',\n validate_nonblank\n )\n\n # Plugin ID\n opts['plugin_id'] = get_valid_value(\n 'Enter your plugin id.',\n validate_pluginid,\n 'plugin.video.%s' % (opts['plugin_name'].lower().replace(' ', ''))\n )\n\n # Parent Directory\n opts['parent_dir'] = get_valid_value(\n 'Enter parent folder (where to create project)',\n validate_isfolder,\n getcwd()\n )\n opts['plugin_dir'] = os.path.join(opts['parent_dir'], opts['plugin_id'])\n assert not os.path.isdir(opts['plugin_dir']), \\\n 'A folder named %s already exists in %s.' 
% (opts['plugin_id'],\n opts['parent_dir'])\n\n # Provider\n opts['provider_name'] = get_valid_value(\n 'Enter provider name',\n validate_nonblank,\n )\n\n # Create the project folder by copying over skel\n copytree(SKEL, opts['plugin_dir'], ignore=ignore_patterns('*.pyc'))\n\n # Walk through all the new files and fill in with out options\n for root, dirs, files in os.walk(opts['plugin_dir']):\n for filename in files:\n update_file(os.path.join(root, filename), opts)\n\n print 'Projects successfully created in %s.' % opts['plugin_dir']\n print 'Done.'", "def create_project(self, project):\n\n with self._transaction.cursor() as cur:\n if project.project_id is not None:\n id_ = project.project_id\n else:\n cur.execute(\"SELECT MAX(project_id) + 1 \"\n \"FROM barcodes.project\")\n id_ = cur.fetchone()[0]\n\n query = f\"\"\"\n INSERT INTO barcodes.project\n ({PROJECT_FIELDS})\n VALUES (\n %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s, %s);\"\"\"\n\n cur.execute(query,\n [id_, project.project_name, project.is_microsetta,\n project.bank_samples, project.plating_start_date,\n project.contact_name, project.additional_contact_name,\n project.contact_email, project.deadlines,\n project.num_subjects, project.num_timepoints,\n project.start_date, project.disposition_comments,\n project.collection, project.is_fecal,\n project.is_saliva, project.is_skin, project.is_blood,\n project.is_other, project.do_16s,\n project.do_shallow_shotgun, project.do_shotgun,\n project.do_rt_qpcr, project.do_serology,\n project.do_metatranscriptomics,\n project.do_mass_spec, project.mass_spec_comments,\n project.mass_spec_contact_name,\n project.mass_spec_contact_email,\n project.do_other,\n project.branding_associated_instructions,\n project.branding_status, project.subproject_name,\n project.alias, project.sponsor, project.coordination,\n project.is_active])\n\n # if we made it this far, all is well\n return id_", "def get_project_add_form():\n\n return render_template(\"project_add.html\")", "def test_new_project(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"created token\"]})\n\n new_project = fake_clients.identity_cache[\"new_projects\"][0]\n self.assertEqual(new_project.name, \"test_project\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def CreateProject(projectName='project'):\r\n projectName = input('''The project's name: ''')\r\n if not os.path.exists(projectName):\r\n os.mkdir(projectName)\r\n else:\r\n print('There is a file with the same name.')\r\n\r\n for dir in ['OPT', 
'SCF', 'PHO']:\r\n if not os.path.exists(projectName + os.sep + dir):\r\n os.mkdir(projectName + os.sep + dir)", "def add_user_project(self, user_id, project_id):\n self.conn.cursor().execute(\"INSERT INTO users_projects VALUES (?, ?)\", (user_id, project_id,))\n self.conn.commit()", "def do_project(self, arg):\n def _usage():\n self.do_help('project')\n args = shlex.split(arg)\n if not args:\n _usage()\n return\n commands = ['create', 'delete', 'update']\n first_arg = args[0].lower()\n is_project_info = first_arg not in commands\n if is_project_info:\n # Get the project info\n project_name = args[0].decode('utf8')\n self.display_project_info(project_name)\n return\n if first_arg == 'create':\n # Create a new project\n self.create_project()\n return\n if len(args) == 1:\n print(self.error_wrong_parameters)\n _usage()\n return\n project_name = args[1].decode('utf8')\n if first_arg == 'update':\n # Update a project\n self.update_project(project_name)\n elif first_arg == 'delete':\n # Delete a project\n self.delete_project(project_name)\n return", "def create_project(request):\n if request.method == \"POST\":\n temp = json.loads(request.body)\n form = ProjectForm(temp)\n\n # check whether it's valid:\n if form.is_valid():\n prj_obj = form.save(commit=False)\n # prj_obj.description = bleach.clean(prj_obj.description, strip=True)\n # fint the user profile object based on the email in session\n user_profile = UserProfile.objects.get(email=request.session['email'])\n prj_obj.user = user_profile\n # Save the project object - project needs to exist before\n # manytomany field is accessed.\n prj_obj.save()\n # get the list of tag objects to add to project\n tag_objects_list = _get_tags(form.cleaned_data['tags_list'])\n article_object_list = _get_articles(form.cleaned_data['articles'])\n for tag_object in tag_objects_list:\n prj_obj.tags.add(tag_object)\n for article_object in article_object_list:\n prj_obj.articles.add(article_object)\n prj_obj.save()\n return HttpResponse(str(prj_obj.id))\n # return HttpResponseRedirect('/projects/' + str(prj_obj.id))\n else:\n print form.errors.as_data()\n else:\n # Remove when front end updated.\n form = ProjectForm()\n return render(request, 'projects/create_project.html', {'form': form})", "def save(self, project_id=None):\r\n if project_id is not None:\r\n project = Project.objects.get(pk=int(project_id))\r\n else:\r\n project = Project()\r\n # Fill out the data of the given project and prepare it\r\n # for saving into database.\r\n project.Name = self.cleaned_data['name']\r\n project.ProjectClient = self.cleaned_data['project_client']\r\n project.Start = self.cleaned_data['start']\r\n project.End = self.cleaned_data['end']\r\n project.ProjectManager = self.cleaned_data['project_manager']\r\n project.QualityAssurance = self.cleaned_data['quality_assurance']\r\n project.Price = self.cleaned_data['price']\r\n project.Segment = self.cleaned_data['segment']\r\n project.Type = self.cleaned_data['type']\r\n project.save()\r\n # If the item was just created, set up workflow for it\r\n if project_id is None:\r\n workflow = Workflow.objects.get(name='Project')\r\n utils.set_workflow(project, workflow)\r\n state = utils.get_state(project)\r\n project.Status = state\r\n project.save()\r\n return project", "def create_project_info(data):\n\t\n\tproject = ProjectInfo()\n\tproject.name = data['name']\n\tproject.description = data['description']\n\tproject.start_date = data['start_date']\n\tproject.end_date = data['end_date']\n\tproject.save()\n\tprint ('Inserted')\n\treturn 
True", "def test_duplicate_tasks_new_project(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)\n\n data = {\"project_name\": \"test_project_2\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)", "def project():\n\n ADMIN = current.session.s3.system_roles.ADMIN\n\n menu = M(c=\"project\")(\n M(\"Projects\", f=\"project\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Locations\", f=\"location\")(\n M(\"Map\", m=\"map\"),\n M(\"Contacts\", f=\"location_contact\"),\n ),\n M(\"Reports\", f=\"location\", m=\"report\")(\n M(\"3W\", f=\"location\", m=\"report\"),\n M(\"Beneficiaries\", f=\"beneficiary\", m=\"report\"),\n #M(\"Indicators\", f=\"indicator\", m=\"report\",\n # check=indicators,\n # ),\n #M(\"Indicators over Time\", f=\"indicator\", m=\"timeplot\",\n # check=indicators,\n # ),\n M(\"Funding\", f=\"organisation\", m=\"report\"),\n ),\n M(\"Import\", f=\"project\", m=\"import\", p=\"create\", restrict=[ADMIN])(\n M(\"Import Projects\", m=\"import\", p=\"create\"),\n M(\"Import Project Organizations\", f=\"organisation\",\n m=\"import\", p=\"create\"),\n M(\"Import Project Communities\", f=\"location\",\n m=\"import\", p=\"create\"),\n ),\n M(\"Activity Types\", f=\"activity_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Beneficiary Types\", f=\"beneficiary_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Sectors\", f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Themes\", f=\"theme\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )\n\n return menu", "def add_project_tag(title):\n return ProjectTag.objects.get_or_create(title=title)[0]", "def test_project_creation(self):\n title = 'Project title'\n code = 'SCW-12345'\n project = self.create_project(\n title=title,\n code=code,\n institution=self.institution,\n tech_lead=self.project_owner,\n category=self.category,\n funding_source=self.funding_source,\n )\n self.assertTrue(isinstance(project, Project))\n self.assertEqual(project.__str__(), code + ' - ' + title)\n self.assertEqual(project.status, Project.AWAITING_APPROVAL)\n self.assertEqual(project.title, title)\n self.assertEqual(project.code, code)\n self.assertTrue(project.awaiting_approval())", "def project():", "def project():", "def project():", "def create_project(conn, project):\n sql = ''' INSERT INTO projects(name,begin_date,end_date)\n VALUES(?,?,?) 
'''\n cur = conn.cursor()\n cur.execute(sql, project)\n return cur.lastrowid", "def test_projects_id_post(self):\n project = Project()\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='POST',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def create(self, token: Any):\n params = [token, ]\n method = \"ProjectAPI.Create\"\n self.__add_request(method, params, lambda payload: Definition.from_json(payload))", "def OnNew(self, e):\n self.mainparent.statusbar.SetStatusText(\"New Project\", 0)\n\n filename = \"__new_project__\"\n self.mainparent.input_file = InputFile(filename, read=False)\n self.mainparent.statusbar.SetStatusText(\"---New Project---\", 2)\n self.mainparent.file_loaded = True\n\n # reset menus and such\n self.mainparent.reset_namelist_menu()\n self.mainparent.nmlpanel.reset(unset_namelist=True)", "def add_projects_to_cmpd(new_comp, projects):\n [new_comp.project_id.add(x) for x in projects]\n new_comp.save()\n return new_comp", "def _post_project(prj=None):\n template_path = (os.path.join(\n os.path.split(__file__)[0], \"post_project_template.xml\"))\n with open(template_path, 'r') as file:\n template = Template(file.read())\n response_xml = template.render(\n name=f\"Project_TEST_{datetime.now()}\",\n open_date=str(datetime.today().date()),\n res_uri=f\"{LIMS_API.tools.api.host}researchers/1\")\n\n prj_response = LIMS_API.tools.api.post(\n f\"{LIMS_API.tools.api.host}projects\", response_xml)\n\n prj_response_soup = BeautifulSoup(\n prj_response, \"xml\").find(\"prj:project\")\n prj = api_types.Project(\n prj_response_soup.find(\"name\"),\n DEFAULT_RES,\n datetime.today().date(),\n [],\n prj_response_soup[\"uri\"])\n\n return prj", "def create_project(self, conn, name, description=\"\"):\n group = conn.group.allocate(name, description)\n # returns Project object\n return group", "def behaviors_coding_map_creator_signal_addtoproject(self, behav_coding_map):\n\n if not self.project:\n QMessageBox.warning(self, programName, \"No project found\",\n QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n return\n\n self.projectChanged = True", "def create_project(conn, project):\n sql = ''' INSERT INTO projects(name,score)\n VALUES(?,?) 
'''\n cur = conn.cursor()\n cur.execute(sql, project)\n return cur.lastrowid", "def add_project(project_info):\n project = project_collection.insert_one(project_info)\n user = user_collection.find_one({\"_id\": project_info[\"owner\"]})\n list1 = user[\"owner\"]\n list1.append(project.inserted_id)\n user_collection.find_one_and_update(\n {\"_id\": project_info[\"owner\"]},\n {\n \"$set\": {\n \"owner\": list1,\n }\n },\n upsert=False,\n )\n\n key = search_collection.find_one({\"_id\": SEARCH_ID})\n for skill in project_info[\"projectSkills\"]:\n try:\n value_list = key[skill]\n value_list.append(project.inserted_id)\n search_collection.find_one_and_update(\n {\"_id\": SEARCH_ID}, {\"$set\": {skill: value_list}}, upsert=False\n )\n except AttributeError:\n value_list = list()\n value_list.append(project.inserted_id)\n search_collection.find_one_and_update(\n {\"_id\": SEARCH_ID},\n {\n \"$set\": {\n skill: value_list,\n }\n },\n upsert=False,\n )\n except KeyError:\n value_list = list()\n value_list.append(project.inserted_id)\n search_collection.find_one_and_update(\n {\"_id\": SEARCH_ID},\n {\n \"$set\": {\n skill: value_list,\n }\n },\n upsert=False,\n )", "def run(opts, args):\n create_new_project()", "def add_projects_to_group(groupname):\n projects = request.get_json().get(\"projects\", [])\n response = jsonify(\n admin.add_projects_to_group(current_app.scoped_session(), groupname, projects)\n )\n return response", "def register(self, options):\n request = RegisterProjectRequest(\n project=options.project,\n )\n\n response = self.execute('RegisterProject', request, options)\n log.info('Response: status=%(status)s request_id=%(request_id)s', dict(\n status=response.status,\n request_id=response.request_id,\n ))", "def project():\n\n return M(c=\"project\", f=\"task\")(\n M(\"Tasks\", f=\"task\")(\n M(\"Create\", m=\"create\"),\n M(\"My Open Tasks\", vars={\"mine\":1}),\n ),\n )", "def ktrack_project(ktrack_instance):\n project = ktrack_instance.create(\"project\", {\"name\": \"My_Test_Project\"})\n return project", "def test_create_project_root(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': None,\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def insert_project(name, loc, url, collection):\n entry = {\n \"name\": name,\n \"location\": loc,\n \"url\": url,\n \"date\":datetime.utcnow()\n }\n\n collection.insert(entry)", "def make_project(id):\n return {\n \"type\": \"Project\",\n \"metrics\": [],\n \"tags\": [],\n \"id\": id,\n \"description\": \"\",\n \"applicant\": \"\",\n }", "def register(self, **form_data):\n g.security.require_access(self.neighborhood, 'register')\n shortname, reg_kwargs = self._parse_add_project_data(form_data)\n\n # install the project\n try:\n c.project = self.neighborhood.register_project(\n shortname, **reg_kwargs)\n except RegistrationError:\n redirect_to = self.neighborhood.url()\n ming.odm.odmsession.ThreadLocalODMSession.close_all()\n flash(\"You do not have permission to register\", \"error\")\n else:\n redirect_to = c.project.script_name + 'home/'\n ming.odm.odmsession.ThreadLocalODMSession.flush_all()\n flash('Welcome to your new project!')\n\n redirect(redirect_to)", 
"def get_projects_route():\n response_object = {'status': 'success'}\n if request.method == 'POST':\n post_data = request.get_json()\n if post_data is not None:\n add_project(post_data)\n response_object['message'] = 'Project added!'\n else:\n response_object['projects'] = get_projects()\n return jsonify(response_object)", "def test_projects_put(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='PUT',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def create_projects(self):\n if self.gl is None or self.config is None:\n print(\"No config/Gitlab found, please run connect first.\")\n exit(1)\n else:\n print(\"Starting Project creation.\")\n gl = self.gl\n config = self.config\n for project in config[\"projects\"]:\n # get the import url\n imp_url = config[\"projects\"][project][\"import_url\"]\n\n # Set rights/members/protected master\n if config[\"projects\"][project][\"owner_conf\"][\"owner\"] == \"all_users\":\n for user in self.users:\n print(\"Importing \\'\" + imp_url + \"\\' for user \\'\" + user.username + \"\\'\")\n pj = user.projects.create({'name': project,\n 'user_id': user.id,\n 'access_level': gitlab.OWNER_ACCESS,\n 'import_url': imp_url})\n elif config[\"projects\"][project][\"owner_conf\"][\"owner\"] == \"user\":\n for user in self.users:\n if user.username == config[\"projects\"][project][\"owner_conf\"][\"name\"]:\n print(\"Importing \\'\" + imp_url + \"\\' for user \\'\" + user.username + \"\\'\")\n pj = user.projects.create({'name': project,\n 'user_id': user.id,\n 'Access_level': gitlab.OWNER_ACCESS,\n 'import_url': imp_url})\n elif config[\"projects\"][project][\"owner_conf\"][\"owner\"] == \"group\":\n for group in self.groups:\n if group.name == config[\"projects\"][project][\"owner_conf\"][\"name\"]:\n print(\"Importing \\'\" + imp_url + \"\\' for group \\'\" + group.name + \"\\'\")\n pj = group.projects.create({'name': project,\n 'namespace_id': group.id,\n 'import_url': imp_url})\n else:\n print(\"Project owner Config is wrong, aborting\")\n exit(1)\n # Delete protected Master Branch\n if config[\"projects\"][project][\"protect_master_branch\"] == \"False\":\n print(\"Removing Project master Branch protection\")\n pj.protectedbranches.delete('master')", "def create_keystone_v3_project(self, **kwargs):\n LOG_OBJ.debug(\"Creating the project.\")\n print self.project_info\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _project_info = {\"project\": {}}\n for argument in [\"name\", \"description\", \"domain_id\",\n \"enabled\", \"disabled\"]:\n try:\n _project_info['project'].update(\n {argument: kwargs[argument]})\n except KeyError:\n pass\n _body = json.dumps(_project_info)\n response = self.request(\"POST\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating project\")\n print (\"No response from Server while creating project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Creating project Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Creating project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Project details : %s \" % output)\n 
print (\"Project details : %s \" % output)\n return output['project']['id']", "def post(self, request):\n body = request.body.decode(\"utf-8\")\n print(body)\n print(request.META)\n if not body:\n return HttpResponse(status=HTTPStatus.BAD_REQUEST)\n\n data = json.loads(body)\n project_name = data['name']\n projects = Project.objects.all()\n serializer = ProjectSerializer(projects, many=True)\n existing_projects = [project['name'] for project in serializer.data]\n if project_name in existing_projects:\n return Response(status=HTTPStatus.CONFLICT)\n\n project_location = os.path.join(PROJECTS_FOLDER, project_name+'.aedt')\n project = Project.objects.create(name=project_name, project_location=project_location)\n project.save()\n return HttpResponse(HTTPStatus.OK)", "def create(self):\n \n # create the sequence structure by calling the self.project.create\n self.project.create()", "def test_add_projects_to_group(client):\n group = client.add_projects_to_group(TEAM_ID, GROUP_ID,\n [\"638597985c913f818559f3.17106287\",\n \"404021655ce68d0f36ad23.02802891\"])\n assert group.team_id == TEAM_ID\n assert group.group_id == GROUP_ID\n assert \"638597985c913f818559f3.17106287\" in group.projects\n assert \"404021655ce68d0f36ad23.02802891\" in group.projects", "def newprojcode(name):\n\tprint \"\\n======Creando Nuevo Proyecto======\\n\"\n\tproject_name = name\n\n\tif project_name == \"\" or project_name == None:\n\t\tcancel()\n\n\tprint \"*Nombre del Proyecto: \", project_name\n\n\tproject_languges = raw_input(\"*Lenguaje: \")\n\tpname = project_name\n\n\tprint \"\\n==================================\\n\"\n\n\tdirectory = str(\"Project_\" + pname + \"/\")\n\n\tif os.path.exists(\"Project\"):\n\t\t#Nos ubicamos en el directorio raiz del Proyecto\n\t\tsubprocess.call([\"mkdir\", directory], shell=True)\n\t\tprint \"Creando el Directorio Raiz...\"\n\telse:\n\t\tos.mkdir(\"Project\")\n\t\tos.chdir(\"Project/\")\n\t\tsubprocess.call([\"mkdir\", directory])\n\t\tif not os.path.exists(directory):\n\t\t\tprint \"LA CARPETA {} NO EXISTE!\".format(directory)\n\t\t\tcancel()\n\t\telse:\n\t\t\tos.chdir(directory)\n\n\tdirs = \"Project\" + pname + \"/\"\n\t#Nos ubicamos en el directorio raiz del Proyecto\n\tos.chdir(dirs)\n\tprint \"Accediendo al Directorio\", dirs + \"...\"\n\tprint \"Creando el Directorio de Iconos...\"\n\tsubprocess.call(\"mkdir Iconos\", shell=True)\t\t#directorio iconos *\n\tprint \"Creando el Directorio de Debug...\"\n\tsubprocess.call(\"mkdir Debug\", shell=True)\t\t#directorio debug *\n\tprint \"Crenado el Directoiro de Scripts...\"\n\tsubprocess.call(\"mkdir Scripts\", shell=True)\t#directorio scripts *\n\tprint \"Creando los Archivos XML del Proyecto...\\n\"\n\tsubprocess.call(\"source XMLProjectFiles.sh\", shell=True)\n\tprint \"Se ha Creado el Proyecto\", pname, \" con Exito!!\"\n\n\t#Se crea el codigo de verificacion del proyecto\n\tfor i in range(0, 15):\n\t\tx = random.randint(1, 1000000)\t#Calcula numeros aleatorios de 1 a 1,000,000(1 millon)\n\t\tVerifiCode = x\t\t\t\t\t#VerifiCode deja el valor de 0 y toma el valor de x\n\t\tCodeValue = bin(VerifiCode)\t\t#Encripta el codigo a binario\n\n\tprint \"Su codigo de proyecto es:\", CodeValue + \"\\n\"\n\tSaveKey(CodeValue)\n\tprint \"Realizando copias de archivos prioritarios a los servidores...\"\n\tpcommands.ServerCopy()\n\tprint \"Copias realizadas con exito!!\"" ]
[ "0.8426382", "0.772563", "0.75920844", "0.7583703", "0.7538995", "0.7501233", "0.7473201", "0.7460157", "0.74459225", "0.7423872", "0.73809856", "0.7358321", "0.73564327", "0.7351144", "0.7339243", "0.7321466", "0.7311935", "0.72474617", "0.7245785", "0.72370243", "0.72247237", "0.72229296", "0.7217502", "0.7215283", "0.71328056", "0.71318513", "0.7096915", "0.7058818", "0.7034111", "0.6987222", "0.69145024", "0.68953866", "0.68858725", "0.6885155", "0.6864527", "0.6860948", "0.6860948", "0.6860948", "0.6853365", "0.6845538", "0.6838484", "0.6837285", "0.6804359", "0.67946213", "0.67752236", "0.67558044", "0.6748415", "0.6735529", "0.6729383", "0.6685444", "0.6642759", "0.66075504", "0.6581872", "0.6570899", "0.65473324", "0.6547314", "0.6541213", "0.65265125", "0.65240294", "0.65216714", "0.6460538", "0.64577895", "0.64567035", "0.6451188", "0.6442429", "0.6440693", "0.64364296", "0.6435651", "0.6420465", "0.64184463", "0.6417508", "0.6417165", "0.6417165", "0.6417165", "0.64156395", "0.64094496", "0.6398277", "0.6382775", "0.6367895", "0.63541853", "0.6344396", "0.6335568", "0.63225496", "0.6320254", "0.62701195", "0.6259594", "0.6241376", "0.62400097", "0.6226468", "0.621255", "0.62092674", "0.62074494", "0.62072957", "0.6200345", "0.6198384", "0.6193246", "0.61847556", "0.61841303", "0.6174095", "0.6171013", "0.61688596" ]
0.0
-1
Renders the contact page.
def data(): return render_template( 'data.html', title='data', year=datetime.now().year, message='my data page.' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contact():\n return render_template('contact.html')", "def contact():\n\n\treturn render_template('contact.html', title='Contact',\n\t\t\t\t\t\t year=datetime.now().year,\n\t\t\t\t\t\t message='Your contact page.')", "def contact():\n return render_template(\n 'contact.html',\n nav=nav,\n title='Contact me',\n year=datetime.now().year,\n message='The following are ways to contact me'\n )", "def contact():\r\n return render_template(\r\n 'contact.html',\r\n title='Contact',\r\n year=datetime.now().year,\r\n message='Your contact page.'\r\n )", "def contact():\r\n return render_template(\r\n 'contact.html',\r\n title='Contact',\r\n year=datetime.now().year,\r\n message='Your contact page.'\r\n )", "def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=\"2020\",\n message='Your contact page.'\n )", "def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='Your contact page.'\n )", "def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='Your contact page.'\n )", "def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='Your contact page.'\n )", "def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='Your contact page.'\n )", "def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='Your contact page.'\n )", "def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='Your contact page.'\n )", "def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='Your contact page.'\n )", "def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='Your contact page.'\n )", "def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n message='Contact me if you have any questions.'\n )", "def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n message='My contact page.'\n )", "def contact():\n return render_template(\n 'contact.html',\n title='Contact',\n year=datetime.now().year,\n img_tichonet = '/static/pics/tichonet.png',\n message='Your contact page.'\n )", "def contact():\n return render_template(\n 'contact.jade',\n title='Contact',\n year=datetime.now().year,\n )", "def contact(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'AscensionESports_Baseline/contact.html',\n {\n 'background': getSiteBackground(),\n 'color': getSiteColor(),\n 'title':'Contact Us',\n 'message':'Feel free to contact us via any of the following platforms!',\n 'year':datetime.now().year,\n }\n )", "def contact(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/contact.html',\n context_instance = RequestContext(request,\n {\n 'title':'Contact',\n 'message':'Your contact page.',\n 'year':datetime.now().year,\n })\n )", "def contact(request):\n return render(request, \"contact.html\")", "def contact(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/contact.html',\r\n context_instance=RequestContext(request,\r\n {\r\n 'title': 'Contact',\r\n 'message': 'Your contact page.',\r\n 'year': datetime.now().year,\r\n })\r\n )", "def 
processContactRequest(self):\n\n\t\t# Use the simple page renderer to create the body content\n\t\treturn self.render_simple_page('Contact')", "def contact():\n if request.method == \"POST\":\n flash(\"{}, Thank you for getting in touch! We appreciate you contacting us, one of our colleagues will get back in touch with you soon! Have a great day!\".format(\n request.form[\"name\"]))\n\n return render_template('pages/contact.html', isFooter=True)", "def contact(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/contact.html',\n {\n 'title':'Contact',\n 'message':'Contact page.',\n 'librarian':get_librarians(),\n 'year':datetime.now().year,\n }\n )", "def contact(request):\n\n return render(request, 'home/contact.html')", "def contact_us():\n return render_template('home/contact-us.html')", "def contact():\n return render_template('contact.html',\n title='联系',\n year=datetime.now().year,\n message='如果需要联系我')", "def contact(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'csa/contact.html',\n {\n 'title':'Contact',\n 'message':'Your contact page.',\n 'year':datetime.now().year,\n }\n )", "def contact(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/contact.html',\r\n {\r\n 'title':'Contact',\r\n 'message':'Your contact page.',\r\n 'year':datetime.now().year,\r\n }\r\n )", "def contact(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/contact.html',\n {\n 'title':'Contact',\n 'message':'Your contact page.',\n 'year':datetime.now().year,\n }\n )", "def contact(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/contact.html',\n {\n 'title':'Contact',\n 'message':'Your contact page.',\n 'year':datetime.now().year,\n }\n )", "def contact(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/contact.html',\n {\n 'title':'Contact',\n 'message':'Your contact page.',\n 'year':datetime.now().year,\n }\n )", "def contact(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/contact.html',\n {\n 'title':'Contact',\n 'message':'Your contact page.',\n 'year':datetime.now().year,\n }\n )", "def contact(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/contact.html',\r\n context = \r\n {\r\n 'title':_('Contact'),\r\n 'message':'Pour nous contacter.',\r\n 'year':datetime.now().year,\r\n }\r\n )", "def contacts():\n return render_template(\n \"contacts.html\",\n title = \"Contacts\")", "def contact():\n if request.method == \"POST\":\n flash(\"Thanks {}, we have recived your message!\".format(\n request.form.get(\"name\")))\n return render_template(\"contact.html\", page_title=\"Contact Us\")", "def contact(request):\n assert isinstance(request, HttpRequest)\n contact = models.ContactUs.objects.all()\n return render(\n request,\n 'app/contact.html',\n {\n 'title':'Contact Us',\n 'message':'Our contact information:',\n 'year':datetime.now().year,\n 'contact': contact\n }\n )", "def contact(request):\n\n return HttpResponse(render(request,'noxusProject/contact.html'))", "def contact(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'contact.html',\n { 'is_longdale_user': user_is_ingroup(request, 'longdale_user'),\n 'title':'Contact',\n 'message':'Henk van den Heuvel',\n 'year':datetime.now().year,\n }\n )", "def contact_us(environ):\n return render_template(\n 
template_name=\"contact.html\", context={\"path\": environ.get(\"PATH_INFO\")}\n )", "def contact(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/contact.html',\r\n {\r\n 'title':'联系我们',\r\n 'message':'在使用过程中如果遇到一些问题请及时反馈给我们,也欢迎提出你的意见和建议',\r\n 'year':datetime.now().year,\r\n }\r\n )", "def contact(request):\n\n contacts = ContactDetails.objects\n return render(request, 'contact_app/contact.html', {\"contacts\":contacts})", "def contact(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/contact.html',\r\n {\r\n 'title':'Iletisim',\r\n\r\n 'year':datetime.now().year,\r\n }\r\n )", "def contact():\n img.compare_images()\n return render_template(\n 'contact.html',\n title='Contact',\n #year=datetime.now().year,\n year=datetime.now().year,\n message='Your contact page.'\n )", "def show_contacts():\n data_list = queries2.contacts()[0]\n table_titles = queries2.contacts()[1]\n title = \"Contacts\"\n return render_template('pages.html', data_list=data_list, title=title, table_titles=table_titles)", "def contact(request):\n cart = Cart(request)\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/contact.html',\n {\n 'title':'Контакты',\n 'cart': cart,\n 'message':'Your contact page.',\n 'year':datetime.now().year,\n }\n )", "def contact(request):\n if request.method == 'POST':\n form = ContactForm(data=request.POST)\n\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('contact_template.txt')\n\n context = Context({\n 'contact_name': contact_name,\n 'contact_email': contact_email,\n 'content': form_content,\n })\n content = template.render(context)\n\n email = EmailMessage(\n \"New contact form submission for romanstapleton.com\",\n body=content,\n from_email=contact_email,\n to=['[email protected]', '[email protected]'],\n headers={'Reply-To': contact_email}\n )\n email.send()\n\n messages.add_message(request, messages.SUCCESS, 'Your message has been successfully sent')\n return redirect('contact')\n\n else:\n form = ContactForm()\n\n return render(request, 'contact.html', {'form': form})", "def contact(request):\n\n cust_email = request.POST.get('contact_email')\n\n if request.method == 'POST':\n contact_form = {\n 'name': request.POST.get('name'),\n 'email': request.POST.get('contact_email'),\n 'contact_as': request.POST.get('contact_as'),\n 'message': request.POST.get('message')\n }\n subject = render_to_string(\n 'contact/contact_emails/contact_email_subject.txt',\n {'contact': contact_form}\n )\n body = render_to_string(\n 'contact/contact_emails/contact_email_body.txt',\n {'contact': contact_form}\n )\n send_mail(\n subject,\n body,\n cust_email,\n [settings.DEFAULT_FROM_EMAIL]\n )\n messages.success(request, 'Mail Sent!')\n\n form = ContactForm\n\n context = {\n 'form': form,\n 'on_contact_page': True\n }\n\n return render(request, 'contact/contact.html', context)", "def contact(request):\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n subject = form.cleaned_data['subject']\n message = form.cleaned_data['message']\n from_email = form.cleaned_data['email']\n name = form.cleaned_data['name']\n send_mail(\n subject,\n ' '.join(['From:', name, 'Message:', message]),\n from_email,\n ADMIN_EMAILS\n )\n return HttpResponseRedirect('/contact/thank_you')\n else:\n form = ContactForm()\n\n 
return render(request, 'core/contact.html', {\n 'form': form,\n 'current_view': 'contact'\n })", "def contact(request):\n if not request.method == 'POST':\n return redirect('/')\n\n data = request.POST\n name = data.get('name')\n\n return render(request, 'contact_sent.html', {'name': name})", "def contact():\n if request.method == \"POST\":\n mongo.db.contact.insert_one(request.form.to_dict())\n\n return jsonify(success=True)\n\n return render_template(\"contact.html\", page_title=\"Contact Us\")", "def contact_us(request):\n form = ContactForm(request.POST, None)\n if request.method == 'POST':\n if form.is_valid():\n form.save()\n template = loader.get_template('contact.html')\n context = {\n 'form': form\n }\n return HttpResponse(template.render(context, request))", "def contact(request):\n if request.method == 'POST':\n contact_form = forms.Contact(request.POST)\n if contact_form.is_valid():\n cleaned = contact_form.cleaned_data\n\n message = f\"\"\"\n Name: {cleaned['first_name']} {cleaned['last_name']}\n\n Message:\n {cleaned['message']}\n\n (Press to reply to automatically reply to sender)\n \"\"\"\n\n email = EmailMessage(\n subject='[Site Visitor Message]',\n body=message,\n to=['[email protected]'],\n reply_to=[cleaned['email']]\n )\n email.send(fail_silently=True)\n return redirect(reverse('contact'))\n else:\n contact_form = forms.Contact()\n\n context = {\n 'form': contact_form\n }\n return render(request, 'contact.html', context)", "def view_contacts(self):\n with open(self.filename, \"r\") as contactsFile:\n contacts = self.display_contact(contactsFile.readlines())\n\n if not contacts:\n return self.msgbox(\"No contacts found.\")\n\n self.msgbox(msg=\"\\n\".join(contacts), title=\"Showing All Contacts\")", "def hello_hbnb():\n return render_template('contact_us.html')", "def success_view(request):\n return render(request, 'contact/contact_success.html')", "def referee_help_contacts(request):\n\n if not validate_request(request): return redirect(reverse(URL_FORBIDDEN))\n\n if request.method == \"GET\":\n return render(\n request,\n 'app/referee/help_contacts.html',\n {\n 'title':'Help Contacts',\n 'layout_data' : get_layout_data(request),\n }\n )\n else:\n return redirect(reverse(URL_BAD_REQUEST))", "def contact():\n return dict(\n title='Contact',\n message='Your contact page.',\n year=datetime.now().year\n )", "def email_page(data):\n subject = f\"Inkbusters form contact: {data['title']}\"\n sender = current_app.config[\"MAIL_USERNAME\"]\n recipients= ['[email protected]']\n text_body=render_template('email/email_contact.txt', data=data)\n html_body=render_template('email/email_contact.html', data=data)\n\n send_email(\n subject=subject,\n sender=sender,\n recipients=recipients,\n text_body=text_body,\n html_body=html_body\n )", "def showEditContact(self):", "def contact(request):\n\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n contact_message = {\n 'full_name': request.POST['full_name'],\n 'email': request.POST['email'],\n 'message': request.POST['message'],\n }\n form.save()\n\n # Send the user an email acknowledging their message.\n cust_email = request.POST['email'],\n subject = render_to_string(\n 'contact/confirmation_emails/confirmation_email_subject.txt')\n body = render_to_string(\n 'contact/confirmation_emails/confirmation_email_body.txt',\n {'contact_message': contact_message})\n email_from = settings.EMAIL_HOST_USER\n recipient_list = cust_email\n send_mail(\n subject,\n body,\n email_from,\n recipient_list,\n )\n\n 
messages.success(request, 'Thank you for your message. \\\n Someone will respond via email soon.')\n else:\n messages.error(request, 'Message failed. Please ensure the \\\n form is valid.')\n\n form = ContactForm()\n template = 'contact/contact.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def contact(request):\n title = 'Contact Us'\n title_align_center = True\n form = ContactForm(request.POST or None)\n if form.is_valid():\n form_email = form.cleaned_data.get(\"email\")\n form_message = form.cleaned_data.get(\"message\")\n form_full_name = form.cleaned_data.get(\"full_name\")\n # print email, message, full_name\n subject = 'Site contact form'\n from_email = settings.EMAIL_HOST_USER\n to_email = [from_email, '[email protected]']\n contact_message = \"%s: %s via %s\"%( \n form_full_name, \n form_message, \n form_email)\n some_html_message = \"\"\"\n <h1>hello</h1>\n \"\"\"\n send_mail(subject, \n contact_message, \n from_email, \n to_email, \n html_message=some_html_message,\n fail_silently=True)\n\n context = {\n \"form\": form,\n \"title\": title,\n \"title_align_center\": title_align_center,\n }\n return render(request, \"forms.html\", context)", "def about(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/about.html',\r\n context_instance = RequestContext(request,\r\n {\r\n 'title': 'About',\r\n 'message': 'Your application description page.',\r\n 'year': datetime.now().year,\r\n 'contact': Contact.objects.get(pk=1),\r\n\r\n })\r\n )", "def contact_view(request):\n if request.method == \"POST\":\n contact_form = ContactForm(request.POST)\n if contact_form.is_valid():\n contact_form.save()\n return redirect(reverse('accounts:profile'))\n else:\n if request.user.is_authenticated:\n initial_data = {\n \"user\": request.user,\n \"name\": request.user.first_name + \" \" + request.user.last_name,\n \"email\": request.user.email\n }\n form = ContactForm(\n request.POST or None, initial=initial_data)\n else:\n form = ContactForm()\n\n context = {\n \"form\": form,\n \"locations\": Location.objects.all()\n }\n return render(request, \"contact.html\", context=context)", "def contact(request):\n\n form = ContactForm(request.POST or None)\n if form.is_valid():\n name = form.cleaned_data.get(\"name\")\n email = form.cleaned_data.get(\"email\")\n comment = form.cleaned_data.get(\"comment\")\n\n if request.user.is_authenticated:\n subject = str(request.user) + \"'s Comment\"\n else:\n subject = \"A Visitor's Comment\"\n\n # Sends a the users message to the site owner\n comment = name + \" with the email, \" + email + \\\n \", sent the following message:\\n\\n\" + comment\n send_mail(subject, comment, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_FROM_EMAIL])\n\n message = 'Thank you! 
I will get back to you as soon as I can!'\n form = ContactForm()\n context = {'form': form, 'message': message}\n else:\n context = {'form': form}\n\n return render(request, 'contact/contact.html', context)", "def test_contact_page(self, client):\n response = client.get(url_for('contact.index'))\n assert response.status_code == 200", "def display_contact(self):\n contacts = \"\".join(str(contact) for contact in self.contact_list)\n print(contacts)", "def contactView(request):\n submitSuccess = False\n if request.method == 'GET':\n form = ContactForm()\n else:\n form = ContactForm(request.POST)\n if form.is_valid():\n fromEmail = form.cleaned_data['fromEmail']\n subject = form.cleaned_data['subject']\n message = form.cleaned_data['message']\n message = \"From user : \" + str(request.user.username) + \"\\n\\n\" + (\"-\" * 40) + \"\\n\\n\" + message\n\n # Send the mail\n try:\n send_mail(subject, message, fromEmail, ['[email protected]'])\n except BadHeaderError:\n return HttpResponse('Invalid e-mail header found.')\n submitSuccess = True\n\n context = {\n 'form' : form,\n 'submitSuccess' : submitSuccess\n }\n return render(request, 'contact.html', context)", "def contact(request):\n print(request.POST)\n form = ContactForm(request.POST or None)\n if form.is_valid():\n form = ContactForm() # cleaning form\n return render(request, 'new_app/form.html', {'title': 'Contact us',\n 'form': form,\n })", "def contact():\n # request form data\n form = ContactForm(request.form)\n # Get user name if logged in\n user_name = None\n if session:\n user_name = session[\"user\"]\n # validate form\n if request.method == \"POST\" and form.validate():\n contact_name = form.name.data\n contact_email = form.email.data\n contact_message = form.message.data\n message = contact_get_message_string(\n user_name, contact_name, contact_email, contact_message)\n # Set server variable\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n # Put the SMTP connection in TLS (Transport Layer Security) mode\n server.starttls()\n # Attempt to log in to email server\n try:\n server.login(mail_username, mail_password)\n # Display flash message if there is is an exception\n except smtplib.SMTPException:\n flash(\n \"Could not log into email server, \" +\n \"please check configuration variables\",\n \"danger\")\n return render_template(\"contact.html\", form=form)\n else:\n # Set message variable\n msg = EmailMessage()\n msg[\"Subject\"] = \"New contact message from FreeFrom\"\n msg[\"From\"] = mail_username\n msg[\"To\"] = mail_username\n msg.set_content(message)\n # Attempt to send message\n try:\n server.send_message(msg)\n # Display flash message if there is is an exception\n except smtplib.SMTPException:\n flash(\n \"Contact email has not been succesfully sent, \" +\n \"please try again\",\n \"warning\")\n return render_template(\"contact.html\", form=form)\n # Display flash message if email is succesfully sent\n else:\n flash(\n \"Contact email has been succesfully sent\",\n \"success\")\n return render_template(\"home.html\")\n\n # If user is logged in, set email address field automatically\n if user_name:\n form.email.data = user_get(user_name)[\"email\"]\n return render_template(\"contact.html\", form=form)", "def delivery(request):\n\n return render(request, 'contact/delivery.html')", "def contact(request):\n email = request.GET.get(\"email\")\n version = get_version_or_leave(request, \"contact\", email)\n\n if version == '1':\n\n return contact_v1(request)\n\n else:\n\n api_access_logging(\n request,\n \"contact\",\n email,\n 
\"400\",\n \"4\",\n None\n )\n return Response(\n {\n \"error_code\": \"4\",\n \"detail\": errors_for_customers[\"4\"]\n },\n status=status.HTTP_400_BAD_REQUEST\n )", "def view_contact_chat(self):\n if self._user.chats == {}:\n print(\"No chats to be viewed yet\")\n self.homepage()\n \n print('-=' * 30)\n chats = self._user.list_chats()\n user_choice = self._int_input_in_range(\"Pick whose contact chat to be viewed: \"\n ,range_ = (1, len(chats)))\n if not user_choice:\n return self.homepage()\n \n chat, contact = chats[user_choice - 1]\n chat_content = chat.get_content(self._user)\n print('-=' * 12 + \" Chat Window \" + '-=' * 12)\n if chat_content != []:\n for line in chat_content:\n print(line.rstrip()) \n else:\n print('This chat is empty, send your first msg now')\n \n user_choice = self._int_input_in_range(' (1) Send new msg \\n (2) Back to homepage \\n Your choice: '\n , range_ = (1,2))\n if user_choice == 1:\n print('HINT: send (0) to exist the chat window')\n return self._send_msg(contact)\n else:\n return self.homepage()", "def contact_view(request):\n # If GET request, load empty contact form\n if request.method == 'GET':\n form = ContactForm()\n # If POST request...\n else:\n form = ContactForm(request.POST)\n # Check that required fields are filled\n if form.is_valid():\n subject = form.cleaned_data['subject']\n email = form.cleaned_data['email']\n message = form.cleaned_data['message']\n name = form.cleaned_data['name']\n if name:\n subject = f\"{name}: {subject}\"\n # Attempt to send an email\n try:\n send_mail(subject=subject,\n message=message,\n from_email=email,\n recipient_list=['[email protected]'])\n # Handle any error here\n except BadHeaderError:\n return HttpResponse('Invalid header found.')\n return redirect('success')\n return render(request, 'contact/contact.html', {'form': form})", "def contact(self, request, **kwargs):\n group_obj = self.get_object()\n contact_data = group_obj.contacts.all()\n if contact_data is not None:\n serializer_data = ContactSerializer(contact_data, many=True)\n return Response(serializer_data.data)\n else:\n return Response({'message': 'No details found for contact of this group'}, status=status.HTTP_404_NOT_FOUND)", "def openContactUrl(self):\r\n url = QUrl(\"http://freeseer.readthedocs.org/en/latest/contact.html\")\r\n QDesktopServices.openUrl(url)", "def contact_us(request):\n\n if request.method == \"POST\":\n if request.POST.get(\"fname\") and request.POST.get(\"emailadd\"):\n post = Contact()\n post.full_name = request.POST.get(\"fname\")\n post.email = request.POST.get(\"emailadd\")\n post.phone_number = request.POST.get(\"pnumber\")\n post.message = request.POST.get(\"cmessage\")\n post.save()\n\n subject = \"Althea Store Inquiry\"\n message = post.message = request.POST.get(\n \"cmessage\") + \" From: \" + post.full_name + \" Sender's Email Address \" + post.email + post.phone_number\n from_email = \"[email protected]\"\n if subject and message and from_email:\n try:\n send_mail(\n subject, message,\n from_email, ['[email protected]'])\n except BadHeaderError:\n return HttpResponse(\"Invalid Header Found\")\n return render(request, \"contact/contact_success.html\")\n return HttpResponse(\"Make sure all fields are entered and valid.\")\n return render(request, \"contact/contact_success.html\")\n return render(request, \"contact/contact_us.html\")", "def contact(request):\n\n if request.method == \"POST\":\n form = ContactForm(request.POST)\n if form.is_valid():\n form.save()\n messages.info(\n request, 'Your message is send. 
'\n + 'We will reply within 48 hours!')\n else:\n messages.error(request, 'Please try again.')\n return redirect(reverse('contact'))\n else:\n form = ContactForm()\n\n template = 'contact/contact.html'\n context = {\n 'form': form,\n }\n return render(request, template, context)", "def contact(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\ttitle=\"For any inquiry, please contact us using following contact form or email us at [email protected]!\"\n\tform = ContactForm(request.POST or None)\n\tcontextcontact={\"title\":title, \"form\":form}\n\tif form.is_valid():\n\t\tform_email = form.cleaned_data.get(\"email\") # get email address\n\t\tform_email=form_email.strip()\n\t\tform_full_name = form.cleaned_data.get(\"full_name\") # get name\n\t\tform_full_name=form_full_name.strip()\n\t\tform_message = form.cleaned_data.get(\"message\") # get message\n\t\tform_message=form_message.strip()\n\t\tsubject='Site contact form for MRMAssayDB'\n\t\tfrom_email=settings.EMAIL_HOST_USER\n\t\tto_email=[from_email, '[email protected]','[email protected]','[email protected]']\n\t\tcontact_message=\"%s: %s via %s\"%(\n\t\t\tform_full_name,\n\t\t\tform_message,\n\t\t\tform_email)\n\t\tsend_mail(subject,contact_message,from_email,to_email,fail_silently=True) # sent email\n\n\t\tcontextcontact={\"title\":\"Thank you for your enquiry\",\"hide\":True}\n\treturn render(request, 'contact.html', contextcontact)", "def test_contact(self):\n response = self.client.get('/contact/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'home/contact.html')", "def contact(request):\n if request.method == 'POST': # If the form has been submitted...\n form = ContactForm(request.POST) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n fullname = form.cleaned_data['fullname']\n email = form.cleaned_data['email']\n topic = form.cleaned_data['topic']\n message = form.cleaned_data['message']\n reply_needed = form.cleaned_data['reply']\n tmp_message = \"Message From: \" + fullname + \"\\n\\n\"\n tmp_message += \"Email Address: \" + email + \"\\n\\n\"\n if reply_needed:\n tmp_message += \"Reply Needed: Yes\\n\\n--------------------\\n\\n\"\n else:\n tmp_message += \"Reply Needed: No\\n\\n--------------------\\n\\n\"\n\n subject = 'Wainz Webform: '+topic\n message = tmp_message + message\n from_email = '[email protected]'\n recipient_list = ['[email protected]']\n\n send_mail(subject, message, from_email, recipient_list, fail_silently=False)\n\n return HttpResponseRedirect('thanks/') # Redirect after POST\n else:\n form = ContactForm() # An unbound form\n\n return render(request, 'wainz/contact.html', { 'form': form, })", "def contact(request):\n if request.method == 'POST':\n contact_form = ContactForm(request.POST)\n if contact_form.is_valid():\n message = request.POST['message']\n subject = request.POST['subject']\n send_mail(\n subject,\n \"Message from: \" +\n request.POST['email'] +\n \"Message: \" +\n message,\n '[email protected]',\n ['[email protected]'],\n fail_silently=False,\n )\n messages.success(request, \"Your message has been sent!\",\n extra_tags=\"alert-success\")\n return redirect(reverse('index'))\n else:\n messages.error(request, \"Unable to send message at this time\",\n extra_tags=\"alert-danger\")\n else:\n contact_form = ContactForm()\n return render(request, 'contact.html', {'contact_form': contact_form})", "def contact(request):\n\n form = ContactForm(request.POST or None)\n if 
request.method == \"POST\":\n if form.is_valid():\n form.save()\n message = request.POST['message']\n subject = request.POST['subject']\n email = request.POST['email']\n\n send_mail(\n subject,\n message,\n email,\n ['[email protected]'],\n fail_silently=False)\n\n messages.success(request, \"Your message has sent, \\\n we'll get back to you shortly.\")\n else:\n messages.error(request, \"Sorry, your message did not send this time, \\\n please try again.\")\n\n return redirect(reverse('contact'))\n else:\n form = ContactForm()\n\n template = 'contact/contact.html'\n context = {\n 'form': form,\n }\n return render(request, template, context)", "def about():\n return render_template(\"pages/about.html\")", "def contact_linkup(self, request, pk):\n obj_api = api()\n title_contact = \"Tu contacto Linkup\"\n token = request.session['token']\n resp = obj_api.get(slug='sellers/' + pk + \"/\", token=token)\n return render(request, 'frontend/actors/client/my_account.html', {'data_user': resp, \n 'title_contact': title_contact})", "def about():\n\n\treturn render_template(\"about.html\")", "def contact(request):\n\n if request.method == \"POST\":\n contact_form = ContactForm(request.POST)\n\n if contact_form.is_valid():\n contact = contact_form.save(commit=False)\n contact.client = request.user\n contact.save()\n\n messages.success(\n request, \"Thank you, I will assess your order and be in touch via email as soon as possible. If you have any questions please contact me through the Contact Me page.\")\n\n return redirect('profile')\n\n else:\n messages.error(\n request, \"Sorry, your request could not be submitted, please try again\")\n\n else:\n contact_form = ContactForm()\n\n return render(request, 'contact.html', {'contact_form': contact_form})", "def about():\r\n return render_template(\"/home/about.html\")", "def test_contact(self):\n\t\tresponse = self.client.get('/contact')\n\t\tself.assertContains(response, 'Contact', 4, 200)", "def contact(request):\n\n if request.method == 'GET':\n form = ContactForm()\n else:\n form = ContactForm(request.POST)\n if form.is_valid():\n messageData = {\n 'first_name': form.cleaned_data['first_name'].title,\n 'last_name': form.cleaned_data['last_name'].title,\n 'email': form.cleaned_data['email'],\n 'phone': form.cleaned_data['phone'],\n 'subject': form.cleaned_data['subject'],\n 'message': form.cleaned_data['message'],\n 'companyEmail': settings.DEFAULT_FROM_EMAIL,\n }\n\n subject = render_to_string(\n 'home/emails/subject.txt',\n {'messageData': messageData})\n body = render_to_string(\n 'home/emails/body.txt',\n {'messageData': messageData})\n\n send_mail(\n subject,\n body,\n messageData['email'],\n [messageData['companyEmail']]\n )\n\n if form.cleaned_data['send_copy']:\n subject = render_to_string(\n 'home/emails/subject-copy.txt',\n {'messageData': messageData})\n body = render_to_string(\n 'home/emails/body-copy.txt',\n {'messageData': messageData})\n\n send_mail(\n subject,\n body,\n messageData['companyEmail'],\n [messageData['email']]\n )\n\n context = {\n 'api_key': settings.GOOGLE_MAPS_API_KEY,\n 'form': form,\n }\n messages.success(request, 'Your message has been sent. 
\\\n We will contact you soon.')\n return redirect('contact')\n\n context = {\n 'api_key': settings.GOOGLE_MAPS_API_KEY,\n 'form': form,\n }\n return render(request, 'home/contact.html', context)", "def menu_contact_author(self, event=None):\n self.parentPanel.contact_author()", "def aboutus():\n return render_template(\"aboutUs.html\")", "def about_page(request):\r\n return render(request, 'ez_main/about_page.html')", "def contacts(request):\n User = get_user_model()\n ids = set(request.user.chatmessage_set.all().values_list(\"recipients\", flat=True))\n context = {\n 'contacts': User.objects.filter(pk__in=ids)\n }\n return render(request, \"chat/contacts.html\", context)", "def about():\n\n return render_template('about_page.html', title='About')", "def aboutus(request):\r\n\treturn render(request, 'aboutus.html')", "def contact():\n if request.method == 'POST':\n send_email()\n return \"\"", "def homepage(self):\n print('-=' * 12 + \" Home Page \" + '-=' * 12)\n self._user.list_contacts()\n options = {1: self.add_contact, 2:self.remove_contact ,3: self.view_contact_chat, 4: self.sign_out, 5: self.exit}\n print_out = \"(1) Add new contact \\n (2) Remove Contact \\n (3) View my chats \\n (4) Sign out \\n (5) Exit\"\n return self._take_option(options, print_out)", "def callview(request):\n return render(request, \"calls/dial_screen.html\", {})", "def about_us(request):\n\treturn render(request, 'about_us.html')" ]
[ "0.8343477", "0.83071643", "0.8217564", "0.8212342", "0.8212342", "0.8208706", "0.8185291", "0.8185291", "0.8185291", "0.8185291", "0.8185291", "0.8185291", "0.8185291", "0.8185291", "0.81841487", "0.8170303", "0.806876", "0.7832755", "0.7826334", "0.78097415", "0.779785", "0.77798384", "0.7741527", "0.7725631", "0.7716775", "0.76903915", "0.7687783", "0.7653086", "0.7627353", "0.755838", "0.7544549", "0.7544549", "0.7544549", "0.7544549", "0.7474663", "0.7467872", "0.74139005", "0.74138606", "0.73774153", "0.7268939", "0.7252146", "0.72296184", "0.71329397", "0.7132637", "0.712325", "0.69038403", "0.67919743", "0.6702882", "0.6694067", "0.6588862", "0.65069604", "0.6439454", "0.6388359", "0.63786966", "0.63593686", "0.6336598", "0.63228536", "0.62715006", "0.62605333", "0.6230714", "0.6173808", "0.61420345", "0.61383533", "0.6133115", "0.6131662", "0.6121316", "0.61110103", "0.6083097", "0.6070797", "0.6069548", "0.6063139", "0.6049904", "0.6040035", "0.60050243", "0.59814554", "0.5977957", "0.59778374", "0.5975703", "0.59755594", "0.59427613", "0.5940001", "0.5899223", "0.5893989", "0.5889439", "0.588292", "0.5873649", "0.5853969", "0.5853008", "0.58428377", "0.5841549", "0.58349127", "0.5821419", "0.58086056", "0.5796618", "0.5795808", "0.57952833", "0.57795966", "0.5769805", "0.57616144", "0.5761171", "0.5746867" ]
0.0
-1
Renders the about page.
def about(): return render_template( 'about.html', title='About', year=datetime.now().year, message='about page.' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def about():\n return render_template('about.html', title='About')", "def about():\n\n return render_template('about_page.html', title='About')", "def about():\n\n\treturn render_template(\"about.html\")", "def on_about(self):\n render_about_window()", "def about():\r\n return render_template(\r\n 'about.html',\r\n title='About',\r\n year=datetime.now().year,\r\n message='Your application description page.'\r\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n\n\treturn render_template('about.html', title='About',\n\t\t\t\t\t\t year=datetime.now().year,\n\t\t\t\t\t\t message='Your application description page.')", "def about():\n\n return render_template('about.html', title=\"About\")", "def about():\n return render_template(\n 'about.html',\n title='About',\n time=datetime.now(),\n message='about me'\n )", "def about():\n return render_template('about.html', title='About')", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='My application description page.'\n )", "def about():\n return render_template(\n 'about.html',\n title='About',\n message='Project Brief.'\n )", "def about():\n\n return render_template('about.html')", "def about():\r\n return render_template(\"/home/about.html\")", "def about():\n return render_template(\n 'about.html',\n title='Tools',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def about():\n return render_template('about.html')", "def about():\n return render_template('about.html')", "def about():\n return 
render_template('about.html')", "def about():\n return render_template('about.html')", "def about():\n return render_template('about.html')", "def about():\n return render_template('about.html')", "def about():\n return render_template('about.html')", "def about():\n return render_template('about.html')", "def about():\n return render_template('about.html')", "def about():\n return render_template(\"pages/about.html\")", "def about(request):\n context = {'client_id': settings.OPENHUMANS_CLIENT_ID,\n 'oh_proj_page': settings.OH_ACTIVITY_PAGE}\n return render(request, 'main/about.html', context=context)", "def about(request):\n assert isinstance(request, HttpRequest)\n\n return render(\n request,\n 'AscensionESports_Baseline/about.html',\n {\n 'background': getSiteBackground(),\n 'color': getSiteColor(),\n 'title':'About Us',\n 'year': datetime.now().year,\n }\n )", "def about():\n return render_template('about.html', name=\"COMP3161\")", "def about():\n return render_template(\"about.html\")", "def about():\n return render_template(\"about.html\")", "def about_page(request):\r\n return render(request, 'ez_main/about_page.html')", "def about():\n return render_template(\n 'about.html',\n nav=nav,\n title='About this site',\n year=datetime.now().year,\n message='This site shows different applications of Flask'\n )", "def about(self) -> None:\n self._impl.show_about_dialog()", "def about():\n return render_template('about.html', name=\"Nadrine Simms\")", "def aboutAction(self):\n about = UI_about.UI_about()\n about.exec_()\n\n return", "def about_us():\n\n return render_template('about_us.html')", "def about_page(request):\n\n return render(request, 'about_page/about_page.html')", "def about(request):\n return render_template('core/about.html')", "def about_us():\r\n return render_template(\"about_us.html\")", "def about_us():\n return render_template('home/about-us.html')", "def about(request):\n # Test Comment\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'about.html',\n context_instance=RequestContext(request, {})\n )", "def about(request):\n # Test Comment\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'about.html',\n context_instance=RequestContext(request, {})\n )", "def about(self, widget):\n self.about_dialog.show()", "def about(request):\n context = {}\n return render(request, 'store/about.html', context)", "def about(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/about.html',\r\n context = \r\n {\r\n 'title':_('About'),\r\n 'year':datetime.now().year,\r\n }\r\n )", "def about(request):\n context = get_context()\n return render(request, 'about.html', context)", "def about(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/about.html',\n context_instance = RequestContext(request,\n {\n 'title':'About',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n })\n )", "def aboutus(request):\r\n\treturn render(request, 'aboutus.html')", "def about():\n return render_template(\n 'about.jade',\n title='About',\n year=datetime.now().year,\n repository_name=repository.name,\n )", "def about():\n\n return render_template('about.html',\n title='关于',\n year=datetime.now().year,\n message='我想说的一些话')", "def about(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'csa/about.html',\n {\n 'title':'About',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n }\n )", "def 
on_about(self, event):\n pass", "def about(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/about.html',\n {\n 'title': 'About',\n 'message': 'Your application description page.',\n 'year': datetime.now().year,\n 'logged_user': request.session.get('user')\n }\n )", "def show_about(self):\n\n msg = f\"Program: {__program__}\\nVersion: {__version__}\\nDate: {__date__}\"\n self._message_information(\"About\", msg)", "def about_us(request):\n\treturn render(request, 'about_us.html')", "def aboutme():\n return render_template('about.html')", "def about():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n if check_authentication(session_id, user_id):\n return render_template('about.html', user=user_id, session_id=session_id)\n else:\n return render_template('about.html')", "def aboutus():\n return render_template(\"aboutUs.html\")", "def about(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/about.html',\n {\n 'title':'About',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n }\n )", "def about(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/about.html',\n {\n 'title':'About',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n }\n )", "def about(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/about.html',\n {\n 'title':'About',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n }\n )", "def about(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/about.html',\n {\n 'title':'About',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n }\n )", "def about(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/about.html',\r\n {\r\n 'title':'About',\r\n 'message':'Your application description page.',\r\n 'year':datetime.now().year,\r\n }\r\n )", "def about(request):\n context = {\n\n }\n template = loader.get_template('about.html')\n return HttpResponse(template.render(context, request))", "def about(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/about.html',\r\n context_instance = RequestContext(request,\r\n {\r\n 'title': 'About',\r\n 'message': 'Your application description page.',\r\n 'year': datetime.now().year,\r\n 'contact': Contact.objects.get(pk=1),\r\n\r\n })\r\n )", "def about(request):\n return render(request, \"about.html\", {})", "def about():\n global version\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Loughton Languages suite',\n version=version\n )", "def about(request):\n return render(request, 'wantedly_app/about.html')", "def about(request):\n return render(request, 'code_challenge/about.html', {})", "def about(request):\n\n return render(request, 'home/about.html')", "def about() -> Any:\n return render_template(\"about.html\")", "def about(request):\n return render(request, \"about/about.html\")", "def about(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'about.html',\n { 'is_longdale_user': user_is_ingroup(request, 'longdale_user'),\n 'title':'About',\n 'message':'Radboud University CESAR utility.',\n 'year':datetime.now().year,\n }\n )", "def about(request):\n return render(request, 'about.html')", "def about(request):\n return render(request, 
\"about.html\")", "def get(self):\n WriteTemplate(self.response, 'about.html', {})", "def about(request):\n\n context = {\n\n }\n\n return render(request, 'about.html', context=context)", "def about(request):\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/about.html',\n context_instance=RequestContext(request,\n {\n 'title': 'See how you have voted and how the envoys did.',\n 'message': 'Votes.',\n 'year': datetime.now().year,\n })\n )", "def about():\n return render_template('about.html', name=\"Mary Jane\")", "def about():\n return render_template('about.html', name=\"Mary Jane\")", "def about():\n return render_template('about.html', name=\"Mary Jane\")", "def onAbout(self, event):\n sc.showAboutWindow(self)", "async def about(self, ctx):\n self.log_command_call(\"about\", ctx.message)\n embed = create_embed(description=ABOUT_TEXT)\n await ctx.send(embed=embed)", "def about(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/about.html',\r\n {\r\n 'title':'Hakkimizda',\r\n 'year':datetime.now().year,\r\n }\r\n )", "def about(request):\r\n assert isinstance(request, HttpRequest)\r\n return render(\r\n request,\r\n 'app/about.html',\r\n {\r\n 'title':'关于',\r\n 'message':'易书网使用说明',\r\n 'year':datetime.now().year,\r\n }\r\n )", "def onShowAbout(self, event):\r\n\t\tself.AboutDialog.show()", "def about(request):\n assert isinstance(request, HttpRequest)\n Description = models.AboutUs.objects.all()\n return render(\n request,\n 'app/about.html',\n {\n 'title':'About',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n 'Description': Description\n \n }\n )", "async def about(ctx):\n await ctx.send(get_about())" ]
[ "0.85650355", "0.85034525", "0.84489655", "0.84044397", "0.82935613", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82709616", "0.82680845", "0.8243358", "0.8239194", "0.8224137", "0.82079816", "0.8204868", "0.8173699", "0.8172736", "0.80967313", "0.80501616", "0.80501616", "0.80501616", "0.80501616", "0.80501616", "0.80501616", "0.80501616", "0.80501616", "0.80501616", "0.8038504", "0.803304", "0.8021883", "0.8000992", "0.7984036", "0.7984036", "0.7973993", "0.79610586", "0.7939532", "0.7935028", "0.79308105", "0.7845368", "0.78225416", "0.7812546", "0.7800322", "0.7799685", "0.7796368", "0.7796368", "0.7792524", "0.7790704", "0.77900374", "0.77592456", "0.7738024", "0.7720711", "0.7702098", "0.7699647", "0.7697684", "0.7696568", "0.76937646", "0.76879525", "0.76801294", "0.7662798", "0.7636956", "0.7614703", "0.7612688", "0.7612688", "0.7612688", "0.7612688", "0.7608596", "0.76054096", "0.7597973", "0.7593886", "0.7592873", "0.7577847", "0.757313", "0.7548597", "0.7534229", "0.7484951", "0.7464537", "0.7456734", "0.74555165", "0.74472183", "0.74320763", "0.74320537", "0.741428", "0.741428", "0.741428", "0.74014294", "0.734481", "0.7298118", "0.729211", "0.7213844", "0.71754843", "0.7126521" ]
0.8394454
4
The name to use for logging
def _log_name(): return os.path.splitext(os.path.basename(__file__))[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logger_name(self):\n return self.__class__.__name__", "def log_stream_name(self) -> str:\n ...", "def log_name(self) -> Optional[str]:\n return self._log_name", "def logPrefix(self):\n return self.__class__.__name__", "def log_group_name(self) -> str:\n ...", "def logTestName(self):\n logging.info('%s', self.id())", "def logger_name( self ):\n return Constants.LogKeys.steps", "def get_name(self):\n return \"{0}: \".format(self.__class__.__name__)", "def set_file_name(self):\n name = 'LogImage'\n name_log_date = time.strftime(\"%Y%m%d\")\n self.name_log = name + name_log_date + '.log'", "def rename(self, name):\n self._name = name\n self._logger = logging.getLogger(name)\n self._logger.setLevel(self._level)", "def log_group_name(self) -> str:\n return jsii.get(self, \"logGroupName\")", "def log_group_name(self) -> str:\n return jsii.get(self, \"logGroupName\")", "def name(self):\n pass", "def name(self):\r\n pass", "def logging_id(self) -> str:\n return getattr(self, '_logging_id_', self.__class__.__qualname__)", "def log_stream_name(self) -> str:\n return jsii.get(self, \"logStreamName\")", "def log_stream_name(self) -> str:\n return jsii.get(self, \"logStreamName\")", "def name(self) -> str: # pragma: no cover", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def name(self) -> str:\n pass", "def get_name():\n return __name__", "def name(self) -> str:\n ...", "def name(self) -> str:\n ...", "def name():\n pass", "def name():\n pass", "def function_name(func):\n return log(level=\"info\", message=_function_name(func))", "def _get_logname(self,seq):\n return self.home,\"..xaction.%s.log\"%seq", "def name(self):\n ...", "def getName(self):\r\n return self.__name__", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self):\n return f\"{self._name}_{self._sensor}\"", "def get_log_tag(self):\n return \"{}::{}- \".format(self.__class__.__name__,\n str(inspect.stack()[1][3]))", "def logname():\n global _basename\n \n parent = os.path.splitext(os.path.basename(wheresdaddy()))[0]\n return '.'.join([_basename, os.path.splitext(os.path.basename(sys.argv[0]))[0], parent])", "def name(self):\n return self.__name__", "def name() -> str:\n pass", "def get(name):\r\n log = logging.getLogger(\"%s.%s\" % (ROOT_NAME, name))\r\n return log", "def name(self):\n return f\"{self._name} {self._sensor_name}\"", "def __str__(self) -> str:\n\t\treturn self.name if self != logging.CRITICAL else \"FATAL\"", "def name ( self ) :\n return self.__name if self.__name else ''", "def log_group_name(self):\n return self._get_param(CW_LOGS_CFN_PARAM_NAME)", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def get_name(self):\n pass", "def get_name(self):\n pass", "def log():\n return logging.getLogger(__name__)", "def __init__(self, name=None):\r\n self.log = {}\r\n self.name = name", "def logger(self):\n pass", "def get_name(self):\n return self.__name", "def label(self) -> str:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"label\"))\r\n return self._name", "def name(cls) -> str:\n return super().name", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def logger_format(self) -> str:\n\t\treturn ('%(asctime) -19s | %(levelname) -8s | %(threadName) -10s | '\n\t\t\t\t'%(funcName) -16s | %(message)s')", "def name(self):\n raise NotImplementedError # pragma: no cover", 
"def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name" ]
[ "0.82156974", "0.7782969", "0.77613235", "0.7558219", "0.7540767", "0.74446285", "0.73443824", "0.7230502", "0.7157241", "0.7145923", "0.7061045", "0.7061045", "0.6993713", "0.6953699", "0.6931624", "0.6909159", "0.6909159", "0.68735456", "0.6843457", "0.6843457", "0.6843457", "0.6843457", "0.68325305", "0.68240017", "0.68240017", "0.68110853", "0.68110853", "0.6807072", "0.6791704", "0.6780531", "0.67604506", "0.67601496", "0.67601496", "0.67601496", "0.67601496", "0.67601496", "0.6755488", "0.6748481", "0.6742853", "0.6741444", "0.6732399", "0.67210746", "0.6716376", "0.66970384", "0.6693455", "0.6693448", "0.66838324", "0.66838324", "0.66838324", "0.66838324", "0.66838324", "0.66838324", "0.66714334", "0.66714334", "0.6668574", "0.6661524", "0.6647493", "0.66363615", "0.66263306", "0.6624509", "0.661768", "0.661768", "0.661768", "0.661768", "0.6611592", "0.66069734", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946", "0.65944946" ]
0.76744807
3
Compute path of file relative to this module.
def _sibling_path(name): here = os.path.dirname(os.path.join(os.getcwd(), __file__)) return os.path.normpath(os.path.join(here, name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_relative_pathname(self):\n return os.path.join(Exam.EXAM_FILES_LOCATION,\n str(self.unique_id)[0:2],\n str(self.unique_id) + self.file_ext)", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def file_path(self):\n return posixpath.dirname(self.file_name)", "def get_path():\n return path.abspath(path.dirname(path.dirname(__file__)))", "def relative_path(__file__, path):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), path))", "def compute_path(file: mesonlib.FileOrString) -> str:\n if isinstance(file, File):\n return file.absolute_path(self.source_dir, self.build_dir)\n return os.path.normpath(os.path.join(self.build_dir, file))", "def rel_path(self) -> str:\n return os.path.dirname(self._filepath_oracle.filepath())", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def get_path(self):\n try:\n return self._file.path\n except AttributeError:\n return os.path.abspath(self._file.name)", "def get_relative_path(self, file_path):\n file_path = os.path.abspath(file_path)\n if self.base_dir is not None:\n file_path = file_path.replace(os.path.abspath(self.base_dir), \"\")\n assert file_path[0] == \"/\"\n file_path = file_path[1:]\n return file_path", "def path(self):\n return os.path.dirname(os.path.abspath(self._filename))", "def file_path(self):\n return self.lib.file_path", "def get_path_to(self, *args):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))", "def get_file_path(self):\n if self.path[0] in self._simulation_data.mfpath.model_relative_path:\n return os.path.join(\n self._simulation_data.mfpath.get_model_path(self.path[0]),\n self._filename,\n )\n else:\n return os.path.join(\n self._simulation_data.mfpath.get_sim_path(), self._filename\n )", "def get_path(self, project_file=None):\n root = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', '..')\n )\n if project_file:\n return os.path.join(root, project_file)\n else:\n return root", "def path(cls, relpath=None):\r\n base = os.getcwd() if not ParseContext._active else cls.locate().current_buildfile.parent_path\r\n return os.path.abspath(os.path.join(base, relpath) if relpath else base)", "def _abs_path(fn):\n return os.path.join(os.path.dirname(__file__), fn)", "def get_current_module_path(module_path, relative_path=\"\"):\n base_path = os.path.dirname(module_path)\n file_path = os.path.join(base_path, relative_path)\n file_path = os.path.normpath(file_path)\n\n return file_path", "def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path", "def _GetRelPath(self, filename):\r\n absname = os.path.join(self.repo_dir, filename)\r\n return os.path.relpath(absname)", "def path(self) -> str:\n return self.src + \"/\"", "def __relative_path(self, p4file):\n return self.ctx.depot_path(p4file.depot_path).to_gwt()", "def get_file_path(self):\n if self.file_path is None:\n return None\n if self.file_path.endswith('.pyc'):\n return self.file_path[:-1]\n return self.file_path", "def absolute_physical_path(self) -> str:\n return self._path", "def get_relative_path(self):\n if self.dip or self.sip or self.replica:\n raise PackageError(\n \"Get relative path for sip or replica packages not yet implemented\"\n )\n if self.deleted:\n raise PackageError(\"There are no relative paths for deleted packages\")\n if self.uuid is None:\n raise PackageError(\"Cannot generate a relative path without a package UUID\")\n rel = \"\"\n left_offset 
= len(self.default_pair_tree)\n right_offset = -len(self.compressed_ext)\n try:\n if self.current_path.endswith(self.compressed_ext):\n rel = self.current_path[left_offset:right_offset]\n else:\n rel = self.current_path[left_offset:]\n except AttributeError:\n raise PackageError(\"Current path doesn't exist for the package\")\n return \"{}/data/METS.{}.xml\".format(rel, self.uuid)", "def module_path():\n return os.path.dirname(unicode(__file__, sys.getfilesystemencoding( )))", "def _rel_path(fn):\n return os.path.join('./eng-edu/ml/cc/src', fn)", "def rel_path(self):\n return \"{}/{}\".format(Path(self.dir_path).basename, self.index_file)", "def fpath(self):\n return os.path.join(self.path, self.name)", "def _file_path(self, file: str) -> str:\n return os.path.abspath(f\"tests/resources/{file}\")", "def build_relpath(self):\n return join_path(\"..\", self.build_dirname)", "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "def file_path() -> str:\n stack_t = inspect.stack()\n ins = inspect.getframeinfo(stack_t[1][0])\n return os.path.abspath(ins.filename)", "def relative_path(filename):\n length = len(os.path.abspath(DOC_BUILD_DIR)) + 1\n return os.path.abspath(filename)[length:]", "def relpath(filename):\n return os.path.join(os.path.dirname(__file__), filename)", "def get_path(relative_path=None):\n\n root_path = os.path.dirname(os.path.dirname(__file__))\n\n if relative_path is None:\n return root_path\n else:\n return os.path.abspath(os.path.join(root_path, relative_path))", "def full_path(self, config_path=CONFIG_PATH):\n return os.path.join(config_path, self.filename)", "def relative_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relative_path\")", "def relative_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relative_path\")", "def relative_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relative_path\")", "def relative_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relative_path\")", "def relative_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"relative_path\")", "def module_path():\n try:\n this_file_path = __file__\n except NameError:\n # inside an interpreter, we can use the stack to find the file\n # path.\n tbs = traceback.extract_stack()\n this_file_path = tbs[0][0]\n # move back up to rfm directory\n dev_root = os.path.dirname(this_file_path)\n\n return dev_root", "def get_path(self):\n raise NotImplementedError(\"This asset does not support absolute paths\")", "def path(x):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), x))", "def file_path(self) -> global___Expression:", "def inputpathrel(self):\n if self.config:\n relpath = os.path.relpath(self.inputpath, self.config.workingdir)\n\n if relpath.startswith(\"../\"):\n return self.inputpath\n\n else:\n return relpath\n\n return self.inputpath", "def path(self, f):\n\t\treturn os.path.join(self.directory, f)", "def path(self):\n return self.file_path()", "def module_path(self):\n return self.config['cwd']", "def get_relative_path(self):\n return urlparse(self.browser.current_url).path", "def relpath(filename):\n\n return os.path.join(os.path.dirname(__file__), filename)", "def get_relative_path(path: str):\n return os.path.relpath(path, get_project_root())", "def get_relative_regression_path(cls) -> str:\n # Get the fully-qualified name of the subject (in dotted form)\n fully_qualified_name: str = 
cls.subject_type().__module__ + '.' + cls.subject_type().__qualname__\n\n # Replace the dots with platform-dependent slashes\n return fully_qualified_name.replace(\".\", os.sep)", "def get_file_path(path_from_module, file_name):\n if not path.exists(path_from_module):\n makedirs(path_from_module)\n\n fn = path.realpath(path.join(getcwd(), path.dirname(__file__))).split(\"/src/\")[0]\n return \"{0}/{1}/{2}\".format(fn, path_from_module, file_name)", "def script_path(filename):\n import os\n\n filepath = os.path.join(os.path.dirname(__file__))\n return os.path.join(filepath, filename)", "def path_for_import(name):\n return os.path.dirname(os.path.abspath(import_module(name).__file__))", "def _get_module_path():\n\n return os.path.dirname(os.path.realpath(__file__))", "def get_file_path(self):\n return self._file_path", "def get_path(data_path):\n\treturn os.path.dirname(os.path.realpath(__file__)) + os.sep + data_path", "def full_path(self):\n return os.path.abspath(self.path)", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def source_file_path(self) -> str:\n return self._source_file_path", "def get_abs_path(filename):\n dirname = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(dirname, filename)", "def module_directory(file_path):\n return os.path.dirname(os.path.realpath(file_path))", "def getAbsolutePath(relPath):\n currDir = os.path.dirname(__file__)\n return os.path.join(currDir, relPath)", "def get_absolute_pathname(self):\n return os.path.join(settings.PRIVATE_STORAGE_ROOT, self.get_relative_pathname())", "def _get_relative_path(self, abs_path):\r\n relative_path = os.path.relpath(abs_path, settings.PROJECT_ROOT)\r\n return relative_path", "def execution_path(self, filename):\n return os.path.join(os.path.dirname(inspect.getfile(sys._getframe(0))), filename)", "def get_module_path(module):\n return pathlib.Path(os.path.dirname(os.path.abspath(inspect.getfile(module))))", "def full_path(filename):\n\timport os.path\n\tfolder = os.path.dirname(os.path.realpath(__file__))\n\treturn os.path.join(folder, filename)", "def _getAbsolutePath(self, filename):\n\n # find the correct path, in the experiment file they are either\n # relative to the experiment file, or an absolute path\n if filename != os.path.abspath(filename):\n return os.path.join(self._path, filename)\n else:\n return filename", "def get_relative_source_path(self, source_path=None):\r\n if not source_path:\r\n source_path = self.source_path\r\n if source_path is None:\r\n return None\r\n\r\n return os.path.relpath(\r\n os.path.abspath(os.path.join(self.settings['PATH'], source_path)),\r\n os.path.abspath(self.settings['PATH'])\r\n )", "def absPath(path):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), path)", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._r_script)", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._r_script)", "def get_file_path(filename, path='Data/'):\n path= os.path.abspath(os.path.dirname(path))\n return os.path.join(path, filename)", "def relative(self, path):\n return re.sub(self.path_regex, '', path).lstrip(os.sep)", "def path(filename: str) -> str:\n path = os.path.dirname(sys.argv[0])\n if not path:\n path = '.'\n return path + '/' + filename", "def get_abs_path(file_path, relative_path):\n import os\n dir_path = os.path.dirname(file_path)\n abs_path = os.path.join(dir_path, relative_path)\n return abs_path", "def _get_R_script_path(self):\r\n return 
join(self._get_R_script_dir(), self._R_script)", "def path():\n # Exclude path to this script from path.\n this_file = os.path.realpath(__file__)\n this_path = os.path.dirname(this_file)\n return os.pathsep.join(p for p in sys.path if p != this_path)", "def get_relative_source_path(self, source_path=None):\n if not source_path:\n source_path = self.source_path\n if source_path is None:\n return None\n\n return posixize_path(\n os.path.relpath(\n os.path.abspath(os.path.join(\n self.settings['PATH'],\n source_path)),\n os.path.abspath(self.settings['PATH'])\n ))", "def getPath(self):\n path = '/'.join(self.getPhysicalPath())\n return path", "def get_base_dir(self):\n dir_of_this_file = os.path.dirname(os.path.abspath(__file__))\n return os.path.dirname(dir_of_this_file)", "def get_location(self):\n return os.path.dirname(self.filename)", "def get_relative_path(self, source: str) -> str:\n abs_top_level_dir = os.path.normcase(\n os.path.normpath(self.get_top_level_directory()))\n abs_working_dir = os.path.normcase(\n os.path.normpath(os.path.join(os.getcwd(), source)))\n\n if not abs_working_dir.startswith(abs_top_level_dir):\n logger.debug(\n \"Repository top level directory is '{}'. Specified working directory is '{}'\".format(\n abs_top_level_dir, {abs_working_dir}))\n raise Exception(\n \"Experiment file is not inside current \"\n + self.get_type() + \" directory.\")\n\n result = abs_working_dir.replace(abs_top_level_dir, \"\")\n return self.norm_to_posix_path(result)", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def _abs_path(rel_path):\n return os.path.join(BASE_DIR, rel_path)", "def _get_resource_path(filename, path=Path.TEST):\n return os.path.normpath(os.path.join(path.value, filename))", "def get_filename(cls):\n return osp.join(cls.dir_location, *cls.file_path)", "def _GetRelPath(self, filename):\n assert filename.startswith(self.subdir), (filename, self.subdir)\n return filename[len(self.subdir):].lstrip(r\"\\/\")", "def getCurrentFilePath(self):\n return os.path.abspath(self.filePath)", "def _get_rel_path(self, file_path: Union[str, os.PathLike]) -> Optional[str]:\n file_path = Path(file_path).absolute()\n try:\n # use os.path.relpath instead of Path.relative_to in case file_path is not a child of self.base_path\n return os.path.relpath(file_path, self.base_path)\n except ValueError:\n # 2 paths are on different drives\n return None", "def file_path_short(self):\r\n if not hasattr(self, '_file_path_short'):\r\n if self.file_path:\r\n result = None\r\n\r\n for path in sys.path:\r\n candidate = os.path.relpath(self.file_path, path)\r\n if not result or (len(candidate.split('/')) < len(result.split('/'))):\r\n result = candidate\r\n\r\n self._file_path_short = result\r\n else: \r\n self._file_path_short = None\r\n\r\n return self._file_path_short", "def root_rel_path(self):\n return os.path.dirname(self.image.name)", "def get_file_path(filename):\n here_dir = os.path.dirname(os.path.abspath(__file__))\n file_dir = os.path.join(here_dir, \"../data/\", filename)\n\n return file_dir", "def get_full_path(self):\n try:\n full_path = os.path.abspath(self.FILENAME)\n return full_path\n except Exception as e:\n raise SystemExit(f\"Could not complete operation: {e}\")", "def get_file_path_in_project_directory(filename): \n DIR = os.path.dirname(os.path.abspath(\"__file__\")) \n path = os.path.join(DIR, filename)\n return path", "def get_path_relative_to_http_root(file_path):\n return os.path.relpath(file_path, get_http_path_prefix())", "def 
_filepath(self, filename):\n return os.path.join(self.root, self.version, filename)" ]
[ "0.7948632", "0.7901577", "0.78639334", "0.77487487", "0.77206314", "0.769371", "0.76692975", "0.7637351", "0.76340103", "0.75847465", "0.7584325", "0.75806475", "0.756804", "0.7539383", "0.74923253", "0.7433047", "0.742732", "0.7422048", "0.7402005", "0.73825824", "0.73681325", "0.7363147", "0.735968", "0.7355458", "0.73527104", "0.73331225", "0.731029", "0.730109", "0.7294129", "0.7291503", "0.7289132", "0.728504", "0.72757185", "0.72618365", "0.7259643", "0.7259095", "0.7228336", "0.7224706", "0.7224706", "0.7224706", "0.7224706", "0.7224706", "0.72187954", "0.7210427", "0.7209531", "0.72091645", "0.71984875", "0.7192329", "0.71842736", "0.71827155", "0.7172395", "0.7162321", "0.7151912", "0.71476936", "0.71429354", "0.7142235", "0.71252203", "0.7125171", "0.71249306", "0.7122606", "0.711771", "0.71058506", "0.7102951", "0.70819026", "0.7070204", "0.7062232", "0.7045992", "0.70450133", "0.7044805", "0.7034705", "0.70317256", "0.70233023", "0.70201886", "0.70199764", "0.7015574", "0.7015574", "0.70030487", "0.69987726", "0.69853055", "0.6983837", "0.6980616", "0.69733137", "0.6969328", "0.6965213", "0.6961649", "0.6958901", "0.6945022", "0.6944018", "0.69382817", "0.69240373", "0.69178593", "0.6907144", "0.6888326", "0.68778783", "0.68754727", "0.68720907", "0.687134", "0.68662816", "0.6866251", "0.686287", "0.6859303" ]
0.0
-1
Determine if a sysfs_gpu_name file indicates an AMD device
def _is_amd(sysfs_gpu_name): with open(sysfs_gpu_name) as src: return src.read().strip() == 'amdgpu'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")", "def is_system_usable_block_device(pydev_device):\n if pydev_device.get(\"ID_BUS\") == \"usb\":\n # Skip USB devices\n return False\n if pydev_device.get(\"DM_VG_NAME\") or pydev_device.get(\"DM_LV_NAME\"):\n # Skip LVM devices\n return False\n if constants.DEVICE_NAME_MPATH in pydev_device.get(\"DM_NAME\", \"\") and pydev_device.get(\"DM_PART\", \"\"):\n # Skip mpath partition devices\n return False\n if pydev_device.get(\"ID_FS_TYPE\") == constants.DEVICE_FS_TYPE_MPATH:\n # Skip mpath member devices\n return False\n id_path = pydev_device.get(\"ID_PATH\", \"\")\n if \"iqn.\" in id_path or \"eui.\" in id_path:\n # Skip all iSCSI devices, they are links for volume storage.\n # As per https://www.ietf.org/rfc/rfc3721.txt, \"iqn.\" or \"edu.\"\n # have to be present when constructing iSCSI names.\n return False\n if ((\"-fc-\" in id_path or \"-lun-\" in id_path) and\n is_valid_multipath(pydev_device.get('DEVNAME'))):\n return False\n if pydev_device.get(\"ID_VENDOR\") == constants.VENDOR_ID_LIO:\n # LIO devices are iSCSI, should be skipped above!\n LOG.error(\"Invalid id_path. Device %s (%s) is iSCSI!\" %\n (id_path, pydev_device.get('DEVNAME')))\n return False\n return True", "def ConvertGpuToVendorName(gpu):\n if not gpu:\n return 'No GPU'\n elif '8086' in gpu:\n return 'Intel'\n elif '10de' in gpu:\n return 'NVIDIA'\n elif '1002' in gpu:\n return 'AMD'\n return gpu", "def is_gpu_device(self, device):\n return device in self._gpu_devices", "def is_cuda_device(device):\n\treturn 'cuda' in str(device)", "def testCheckDeviceName(self):\n device = config.devices[self.driver.desired_capabilities.get(\"deviceName\")][\"name\"]\n print(\"Device : \", device)", "def _amd_index(sysfs_gpu_name):\n drop_prefix = sysfs_gpu_name.strip()[len(_SYSFS_PREFIX):]\n return drop_prefix.split('/')[0]", "def is_cambrionix(device_dict):\n return device_dict.get('_name') in usb_config.CAMBRIONIX_NAMES", "def gpu_availability():\n # assume if using tensorflow-gpu, then Nvidia GPU is available\n if is_built_with_cuda():\n return len(tf.config.list_physical_devices(\"GPU\")) > 0\n else:\n return False", "def isa(device_name):\n\n if not device_name:\n raise DmDeviceError(_(\"No device name given.\"))\n if device_name != os.path.basename(device_name):\n msg = _(\"Invalid device name %r given.\") % (device_name)\n raise DmDeviceError(msg)\n\n bd_dir = os.sep + os.path.join('sys', 'block', device_name)\n if not os.path.exists(bd_dir):\n return False\n\n dm_dir = os.path.join(bd_dir, 'dm')\n if not os.path.exists(dm_dir):\n return False\n\n return True", "def test_change_name_of_the_devicefalse():", "def _IsDevice(self, file_attribute_flags):\n if file_attribute_flags is None:\n return False\n return bool(file_attribute_flags & pyfsntfs.file_attribute_flags.DEVICE)", "def is_booted_storage_device(disk):\n cmdline = (\"grep -w /ahcexport /proc/mounts | cut -d ' ' -f 1 | \"\n \"sed -e 's/[0-9]*//g'\")\n if '/dev/' not in disk:\n disk = '/dev/%s' % disk\n grep_cmd = subprocess.Popen(cmdline,\n shell=True, stdout=subprocess.PIPE)\n for booted_disk in grep_cmd.stdout:\n booted_disk = booted_disk.decode(errors='ignore')\n booted_disk = booted_disk.rstrip('\\n').strip()\n if booted_disk == disk:\n return True\n return False", "def find_iio_device_name(self):\n self.iio_device_dir()\n self.console.runcmd(f\"cat name\", expected=\"\\r\\n\")\n iio_device_name = 
self.console.output()\n return iio_device_name", "def test_MCE_sysfs_initialized(self):\n num_of_mc_folders = self.get_num_of_mc_folders()\n code, num_cpus, err = systeminfo.Run([\"nproc\"])\n if int(num_of_mc_folders) == int(num_cpus):\n self.log.info(\"MCE sysfs device initialization successful\")\n else:\n self.fail(\"MCE sysfs device initialization failed\")", "def _on_gpu(self) -> bool:\n return self._current_device_index != CPU_INDEX", "def additional_capability_gpu_drivers_installed(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"additional_capability_gpu_drivers_installed\")", "def _get_available_gpus():\r\n #global _LOCAL_DEVICES\r\n if tfback._LOCAL_DEVICES is None:\r\n devices = tf.config.list_logical_devices()\r\n tfback._LOCAL_DEVICES = [x.name for x in devices]\r\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def is_filesystem_enabled(dbapi, host_id_or_uuid, fs_name):\n filesystems = dbapi.host_fs_get_by_ihost(host_id_or_uuid)\n for fs in filesystems:\n if fs.name == fs_name:\n return True\n return False", "def _get_available_gpus():\n # global _LOCAL_DEVICES\n if tf_back._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tf_back._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tf_back._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_gpu_names() -> Sequence[str]:\n result = []\n for device in device_lib.list_local_devices():\n if device.device_type != \"GPU\":\n continue\n desc = device.physical_device_desc\n\n fields = desc.split(\",\")\n for field in fields:\n name, value = field.split(\":\", maxsplit=1)\n name = name.strip()\n value = value.strip()\n if name == \"name\":\n result.append(value)\n return result", "def _get_available_gpus():\n # global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def GetGPU():\n return option['device_id']", "def _get_device_id() -> str:\n with open(\"/proc/cpuinfo\", \"r\") as f:\n for line in f.readlines():\n if line.startswith('Serial'):\n return line.split(':')[1].strip()\n return 'N/A'", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def export_gpu(entity=None): \n\tstatus = False \n\texportGrp = config.geoGrp\n\tres = entity.task_res()\n\tlibPath = entity.libPath()\n\n\tif res: \n\t\tabcName = entity.libName(config.libName.get('gpu'), res, ext='abc')\n\n\t\t# name without ext \n\t\tbasename = os.path.splitext(abcName)[0]\n\t\t\n\t\tgpuName = '{0}/{1}'.format(libPath, abcName)\n\n\t\tstart = pub_utils.file_time(gpuName)\n\n\t\t# export GPU command \n\t\tresult = maya_utils.exportGPUCacheGrp(exportGrp, libPath, basename, time='still')\n\t\t\n\t\tend = 
pub_utils.file_time(gpuName)\n\t\tsuccess = pub_utils.is_file_new(start, end)\n\n\t\tif success: \n\t\t\treturn True, 'Success %s' % gpuName\n\n\t\telse: \n\t\t\treturn False, 'Failed to export Gpu %s' % gpuName\n\n\telse: \n\t\treturn False, 'No res found'", "def test_change_name_of_the_devicetrue():", "def isOnNao():\n szCpuInfo = \"/proc/cpuinfo\";\n if not os.path.exists( szCpuInfo ): # already done by the getFileContents\n return False;\n szAllFile = getFileContents( szCpuInfo, bQuiet = True );\n if( szAllFile.find( \"Geode\" ) == -1 and szAllFile.find( \"Intel(R) Atom(TM)\" ) == -1 ):\n return False;\n return True;", "def check_gpu(self, values):\n try:\n process = subprocess.Popen(['nvidia-smi', '--query-gpu=name,pci.bus_id,driver_version,pstate,pcie.link.gen.max,pcie.link.gen.current,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used', '--format=csv'], stdout=subprocess.PIPE)\n out_str, _ = process.communicate()\n gpu_strs = out_str.split('\\n')\n\n # Get rid of the column headers.\n if len(gpu_strs) > 0:\n gpu_strs = gpu_strs[1:]\n\n # Process each GPU string.\n multi_gpu = len(gpu_strs) > 1\n gpu_index = 1\n for gpu_str in gpu_strs:\n out = gpu_str.split(',')\n if len(out) > 1:\n if multi_gpu:\n values[keys.KEY_GPUX_NAME.replace('X', str(gpu_index))] = out[0].strip(' \\t\\n\\r')\n values[keys.KEY_GPUX_TEMPERATURE.replace('X', str(gpu_index))] = int(out[6].strip(' \\t\\n\\r'))\n values[keys.KEY_GPUX_PERCENT.replace('X', str(gpu_index))] = int(out[7].strip(' \\t\\n\\r%%s'))\n gpu_index = gpu_index + 1\n else:\n values[keys.KEY_GPU_NAME] = out[0].strip(' \\t\\n\\r')\n values[keys.KEY_GPU_TEMPERATURE] = int(out[6].strip(' \\t\\n\\r'))\n values[keys.KEY_GPU_PERCENT] = int(out[7].strip(' \\t\\n\\r%%s'))\n except:\n logging.error(\"Error collecting GPU stats.\")", "def check_fw_mode(self, cat_cpuinfo_out):\n for line in cat_cpuinfo_out.splitlines():\n if \"firmware\" in line:\n if \"OPAL\" in line:\n return True\n else:\n return False\n return False", "def check_kernel_module(params) -> None:\n if os.system(\"lsmod | grep v4l2loopback >/dev/null 2>&1\") == 0:\n print(\"Kernel module is loaded\")\n else:\n print(\"Kernel module is NOT loaded\")", "def is_dev_name_valid(self):\n return self._name_re.match(self.dev_name) is not None", "def is_gpu_available():\n ret = get_gpu_count() > 0\n if _HAS_PADDLE:\n import paddle\n if ret is True and not paddle.is_compiled_with_cuda():\n logger.warning(\"Found non-empty CUDA_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with CUDA, which may cause issues. \\\n Thus PARL will not use GPU.\")\n return False\n if _HAS_FLUID:\n from paddle import fluid\n if ret is True and not fluid.is_compiled_with_cuda():\n logger.warning(\"Found non-empty CUDA_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with CUDA, which may cause issues. 
\\\n Thus PARL will not use GPU.\")\n return False\n return ret", "def has_disk_dev(mapping, disk_dev):\n\n for disk in mapping:\n info = mapping[disk]\n if info['dev'] == disk_dev:\n return True\n return False", "def is_gpu_available() -> bool:\n return torch.cuda.is_available()", "def _get_available_gpus():\n global _LOCAL_DEVICES\n if _LOCAL_DEVICES is None:\n if _is_tf_1():\n devices = get_session().list_devices()\n _LOCAL_DEVICES = [x.name for x in devices]\n else:\n _LOCAL_DEVICES = tf.config.experimental_list_devices()\n return [x for x in _LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def dev_is_ssd(dev):\n\n dev = proc_dev_to_sysfs_dev(dev)\n try:\n with open('/sys/block/{}/queue/rotational'.format(dev)) as typefd:\n return int(typefd.read()) == 0\n except IOError:\n print 'UNKNOWN: unable to read device type'\n sys.exit(NAGIOS_UNKNOWN)", "def is_sys(self):\n if self.mountpoint is not None and self.mountpoint in ['/', '/boot']:\n return True\n return False", "def is_sys(self):\n if self.mountpoint is not None and self.mountpoint in ['/', '/boot']:\n return True\n return False", "def try_gpu(i=0): #@save\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')", "def hwinfo(device):\n base = os.path.basename(device)\n if os.path.exists('/sys/class/tty/%s/device' % (base,)):\n # PCI based devices\n sys_id_path = '/sys/class/tty/%s/device/id' % (base,)\n if os.path.exists(sys_id_path):\n return read_line(sys_id_path)\n # USB-Serial devices\n sys_dev_path = '/sys/class/tty/%s/device/driver/%s' % (base, base)\n if os.path.exists(sys_dev_path):\n sys_usb = os.path.dirname(os.path.dirname(os.path.realpath(sys_dev_path)))\n return usb_sysfs_hw_string(sys_usb)\n # USB-CDC devices\n if base.startswith('ttyACM'):\n sys_dev_path = '/sys/class/tty/%s/device' % (base,)\n if os.path.exists(sys_dev_path):\n return usb_sysfs_hw_string(sys_dev_path + '/..')\n return 'n/a' # XXX directly remove these from the list?", "def get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == \"GPU\"]", "def non_root_available(self):\n return self._adb_available and self._dev_emu", "def get_free_gpu():\n\tos.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n\tif os.path.exists('tmp'):\n\t\tmemory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n\t\tos.remove('tmp')\n\t\treturn np.argmax(memory_available)\n\treturn 0", "def check_fs(uuid):\n out, err = run_cmd(['lsblk', '-o', 'UUID,FSTYPE', '--json'])\n\n blockdevices = json.loads(out)['blockdevices']\n\n for blkdevice in blockdevices:\n if key_exists('uuid', blkdevice) and blkdevice['uuid'] == uuid:\n return blkdevice['fstype']", "def detect_sap_hana():\n if os.path.exists(HANA_BASE_PATH):\n for entry in os.listdir(HANA_BASE_PATH):\n # Does /hana/shared/{entry}/exe/linuxx86_64/hdb/sapcontrol exist?\n sap_on_intel = os.path.exists(os.path.join(HANA_BASE_PATH, entry, HANA_SAPCONTROL_PATH_X86_64))\n sap_on_power = os.path.exists(os.path.join(HANA_BASE_PATH, entry, HANA_SAPCONTROL_PATH_PPC64LE))\n if sap_on_intel or sap_on_power:\n return True\n return False", "def get_device_str(device_id, num_gpus):\n if num_gpus == 0:\n return \"/cpu:0\"\n device_str_output = \"/gpu:%d\" % (device_id % num_gpus)\n return device_str_output", "def create_gpu_device():\n d1 = dpctl.SyclDevice(\"gpu\")\n d2 = dpctl.select_gpu_device()\n assert d1 == d2\n print_device(d1)\n return d1", "def 
get_available_devices():\n executable_path = os.path.join(os.path.dirname(__file__), 'build')\n try:\n num_devices = int(subprocess.check_output(\n [\"{}/query_devices\".format(executable_path)]))\n except subprocess.CalledProcessError as e:\n return [0]\n\n FNULL = open(os.devnull, 'w')\n\n available_devices = []\n for i in range(num_devices):\n try:\n if b\"NVIDIA\" in subprocess.check_output(\n [\"{}/test_device\".format(executable_path),\n str(i)], stderr=FNULL):\n available_devices.append(i)\n logging.info('Device {} is available for rendering'.format(i))\n except subprocess.CalledProcessError as e:\n logging.info(e)\n logging.info('Device {} is not available for rendering'.format(i))\n FNULL.close()\n\n return available_devices", "def device_info(devid: int = 0) -> str: # pragma: no cover\n numdev = jax.device_count()\n if devid >= numdev:\n raise RuntimeError(f\"Requested information for device {devid} but only {numdev} present.\")\n dev = jax.devices()[devid]\n if dev.platform == \"cpu\":\n info = \"CPU\"\n else:\n info = f\"{dev.platform.upper()} ({dev.device_kind})\"\n return info", "def system_valid(self):\n return self.udev.devices_exist", "def finddevice():\n\n return next((device for device in [\"xpu\"] if hasattr(torch, device) and getattr(torch, device).is_available()), None)", "def is_system(self) -> bool:", "def usb_mode() -> str:", "def checkargs(argv):\n\n if len(argv) < 2:\n usage()\n return 2\n\n for name in argv[1:]:\n if not os.path.exists(IIO_DEVICES_SYSPATH + \"/\" + name):\n print(name, \": IIO device (or accelerometer) not found\")\n return 1\n return 0", "def get_gpu_stats(self):\n\t\tif which('nvidia-smi') is None:\n\t\t\treturn {'available': False}\n\t\t\n\t\ttry:\n\t\t\tcommand = ['nvidia-smi', '-q', '-x']\n\t\t\tresponse = subprocess.check_output(command)\n\t\texcept subprocess.CalledProcessError:\n\t\t\treturn {'available': False}\n\t\t\n\t\tgpu = xml.etree.ElementTree.fromstring(response).find(\"gpu\")\n\n\t\tgpu_name = gpu.find(\"product_name\").text\n\t\t\n\t\tgpu_temp = gpu.find(\"temperature\")\n\t\tgpu_temp_now = int(gpu_temp.find(\"gpu_temp\").text.rpartition('C')[0])\n\t\tgpu_temp_max = int(gpu_temp.find(\"gpu_temp_max_threshold\").text.rpartition('C')[0])\n\n\t\tgpu_util = gpu.find(\"utilization\")\n\t\tgpu_usage = int(gpu_util.find(\"gpu_util\").text.rpartition('%')[0])\n\t\tgpu_m_usage = int(gpu_util.find(\"memory_util\").text.rpartition('%')[0])\n\n\t\treturn {\n\t\t\t'available': True,\n\t\t\t'gpu_name': gpu_name,\n\t\t\t'gpu_usage': gpu_usage,\n\t\t\t'gpu_memory_usage': gpu_m_usage,\n\t\t\t'gpu_temp_now': gpu_temp_now, \n\t\t\t'gpu_temp_max': gpu_temp_max\n\t\t}", "def device_type(info):\n options = {(1 << 0): 'CL_DEVICE_TYPE_DEFAULT',\n (1 << 1): 'CL_DEVICE_TYPE_CPU',\n (1 << 2): 'CL_DEVICE_TYPE_GPU',\n (1 << 3): 'CL_DEVICE_TYPE_ACCELERATOR',\n (1 << 4): 'CL_DEVICE_TYPE_CUSTOM'}\n return options.get(info, 'Undefined Device Type')", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode 
(or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def is_vserver_kernel():\n\n kinfo = commands.getoutput('/bin/uname -a').split()[2]\n return '-vs' in kinfo", "def kdev_name(self):\n return self._sysfs", "def fs_ok(fs_info):\n if fs_info.mountpoint == '/':\n return True\n\n if (fs_info.device == fs_info.fstype or fs_info.fstype == 'nullfs' or\n '/docker' in fs_info.mountpoint or\n fs_info.mountpoint.startswith('/etc') or\n fs_info.mountpoint.startswith('/lib/modules')):\n return False\n\n if fs_info.device.startswith('/dev/'):\n return True\n\n return False", "def device_exists(device):\n return os.path.exists('/sys/class/net/%s' % device)", "def find(ctx, name):\n conf = settings.devices.get(name, dict())\n if conf.get('type') == 'command':\n return conf, name, name\n\n uuids = ctx.obj['uuids']\n context = Context()\n for dev in iter(context.list_devices()):\n if 'ID_FS_TYPE' in dev:\n if name == uuids.get(dev.get('ID_FS_UUID')):\n return (settings.devices[name], dev['DEVNAME'],\n settings.devices[name].get('label',\n dev.get('ID_FS_LABEL')))\n\n print('Device \"%s\" not found.' % name)\n sys.exit(1)", "def is_system_ready_for_benchmarking():\n\n # check if scaling_governor is set to 'performance' for all cpu cores\n cpu_governors = glob.glob('/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor')\n if not cpu_governors:\n logger.error('no scaling_governor found. Do you run on a Linux System?')\n return False\n for governor in sorted(cpu_governors):\n with open(governor, 'r') as f:\n line = f.read().splitlines()[0]\n logger.debug('%s is set to \\\"%s\\\"', governor, line)\n if line != 'performance':\n logger.warning('please set all scaling_governor to \\\"performance\\\" (using \"sudo ./ondemand.sh start\")')\n return False\n\n return True", "def detect_gpus():\n def worker(q):\n # `device_lib` will not release the memory it took,\n # so we run it in a sub-process.\n try:\n from tensorflow.python.client import device_lib\n\n if is_tensorflow_version_higher_or_equal('1.8.0'):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n devices = list(device_lib.list_local_devices(config))\n else:\n devices = list(device_lib.list_local_devices())\n gpus = [\n (device.name, device)\n for device in devices\n if device.device_type == 'GPU'\n ]\n union_set = {i: i for i in range(len(gpus))}\n\n for i, (name, device) in enumerate(gpus):\n assert (device.name == '/device:GPU:{}'.format(i))\n for link in device.locality.links.link:\n if link.device_id != i:\n union_set[i] = union_set[link.device_id]\n\n for i in six.iterkeys(union_set):\n while union_set[i] != union_set[union_set[i]]:\n union_set[i] = union_set[union_set[i]]\n\n root_devices = sorted(set(union_set.values()))\n gpu_groups = [[] for _ in range(len(root_devices))]\n dev_to_group = {j: i for i, j in enumerate(root_devices)}\n for i, (name, device) in enumerate(gpus):\n gpu_groups[dev_to_group[union_set[i]]].append(name)\n\n q.put((1, gpu_groups))\n except Exception:\n q.put((0, traceback.format_exc()))\n\n q = mp.Queue()\n p = mp.Process(target=worker, args=(q,))\n\n try:\n p.start()\n result = q.get()\n if result[0] == 1:\n return result[1]\n else:\n raise RuntimeError(\n 'Failed to retrieve GPU information, the traceback of '\n 'sub-process 
is:\\n {}'.\n format('\\n '.join(result[1].split('\\n')))\n )\n finally:\n p.terminate()\n p.join()", "def get_gpu_memory_available(gpu_id):\n #1MiB = 1048576 bytes\n MiB = 1048576\n \n result = subprocess.check_output(\n [\n 'nvidia-smi' , '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [x for x in result.strip().split('\\n')]\n vram_used = float(gpu_memory[gpu_id])\n #print(\"GPU id:\", str(gpu_id), \"GPU RAM used, including extra driver buffer from nvidia-smi:\", str(vram_used))\n total_mem = torch.cuda.get_device_properties(gpu_id).total_memory / MiB\n vram_available = total_mem-vram_used\n return vram_available", "def kernel_match(kernel, kernel_spec):\n return kernel.startswith(kernel_spec)", "def get_device(arn=None):\n pass", "def get_free_gpu(self):\r\n output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE,\r\n shell=True).communicate()[0]\r\n output = output.decode(\"ascii\")\r\n\r\n # assumes that it is on the popiah server and the last gpu is not used\r\n memory_available = [int(x.split()[2]) for x in output.split(\"\\n\")[:-2]]\r\n\r\n if memory_available:\r\n print(\"Setting GPU to use to PID {}\".format(np.argmax(memory_available)))\r\n return np.argmax(memory_available)\r\n\r\n if not memory_available:\r\n print('No GPU memory available')", "def is_io_uring_supported():\n return compare_versions(get_kernel_version(), MIN_KERNEL_VERSION_FOR_IO_URING) >= 0", "def info_hardware():\n\n print(\"\\nHARDWARE:\")\n\n # CPU INFO\n try:\n import cpuinfo # pip py-cpuinfo\n\n cpu = cpuinfo.get_cpu_info().get(\"brand_raw\")\n print(f\"CPU:\\t{cpu}\")\n except ImportError:\n print(\"cpuinfo not found. (pip/conda: py-cpuinfo)\")\n\n # RAM INFO\n try:\n import psutil # pip py-cpuinfo\n\n ram = round(psutil.virtual_memory().total / (1024.0**3))\n print(f\"RAM:\\t{ram} GB\")\n except ImportError:\n print(\"psutil not found. 
(pip/conda psutil)\")\n\n # GPU INFO\n if not tf.test.gpu_device_name():\n print(\"-- No GPU --\")\n else:\n gpu_devices = tf.config.list_physical_devices(\"GPU\")\n details = tf.config.experimental.get_device_details(gpu_devices[0])\n gpu_name = details.get(\"device_name\", \"CUDA-GPU found\")\n print(f\"GPU:\\t{gpu_name}\")\n # print(f\"{tf.test.gpu_device_name()[1:]}\")", "def only_gpu(request):\n if request.node.get_closest_marker('gpu'):\n if 'device' in request.fixturenames:\n if not isinstance(request.getfixturevalue('device'),\n hoomd.device.GPU):\n pytest.skip('Test is run only on GPU(s).')\n else:\n raise ValueError('only_gpu requires the *device* fixture')", "def device():\n return G.DEVICE", "def osname_is_linux():\n return (\"Linux\" == g_osname)", "def detect(self):\n # Get PCI devices\n lines = subprocess.check_output([\"lspci\", \"-n\"]).decode().split(\"\\n\")\n for line in lines:\n if len(line) > 0:\n class_id = \"0x{0}\".format(line.split()[1].rstrip(\":\")[0:2])\n if class_id == self.class_id:\n dev = line.split()[2].split(\":\")\n vendor_id = \"0x{0}\".format(dev[0])\n product_id = \"0x{0}\".format(dev[1])\n if vendor_id == self.vendor_id and product_id in self.devices:\n return True\n return False", "def is_available(self) -> bool:\n return (\n len(self._gpu_ids) > 1\n and \"TORCHELASTIC_RUN_ID\"\n not in os.environ # If otx is executed by torchrun, then otx multi gpu interface is disabled.\n )", "def _get_device_list(self):\n if self.app.config.cloud_type == 'ec2':\n # c5/m5 on AWS mounts EBS volumes as NVMe:\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html\n for itype in ['c5', 'm5']:\n if itype in self.app.cloud_interface.get_type():\n return frozenset(glob('/dev/nvme[0-26]n1'))\n return frozenset(glob('/dev/*d[a-z]'))", "def is_ida_module(module_name):\n return (\n module_name.startswith((\"_ida_\", \"ida\", \"idc\"))\n or module_name == \"sark\"\n or module_name == \"__main__\"\n )", "def has_efi():\n return os.path.exists(\"/sys/firmware/efi\")", "def assert_has_feature(self, feature_name):\n if not self.features.get(\"has_{}\".format(feature_name), False):\n self.raise_config_error(\"Platform {} does not support to configure {feature_name}. 
\"\n \"Please make sure the platform \"\n \"you configured for {feature_name} actually supports that type \"\n \"of devices.\".format(self.__class__, feature_name=feature_name), 99)", "def usefulFunction():\n print(platform.uname()) #displayed this computer's specifications", "def test_device_states_device_name_get(self):\n pass", "def getFsLabel(partitionDevice):\n if os.path.exists('/dev/{0}'.format(partitionDevice)) and S_ISBLK(os.stat('/dev/{0}'.format(partitionDevice)).st_mode):\n path = '/dev/{0}'.format(partitionDevice)\n elif os.path.isfile(partitionDevice):\n path = partitionDevice\n else:\n label = False\n path = False\n if path:\n try:\n label = execGetOutput(['/sbin/blkid', '-s', 'LABEL', '-o', 'value', path], shell = False)\n if label:\n label = label[0]\n else:\n label = ''\n except subprocess.CalledProcessError as e:\n label = False\n return label", "def _next_device(self):\n if self._num_gpus == 0:\n return ''\n dev = '/gpu:%d' % self._cur_gpu\n if self._num_gpus > 1:\n self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)\n return dev", "def _valid_device(device):\n required_fields = ('name', 'type', 'group', 'canonical_name')\n if all(field in device for field in required_fields):\n return True\n return False", "def check_cuda():\n if OS_VERSION[0] == \"Linux\":\n check_cuda_linux()\n elif OS_VERSION[0] == \"Windows\":\n check_cuda_windows()", "def print_device_info(nodemap):\r\n\r\n #print('*** DEVICE INFORMATION ***\\n')\r\n\r\n try:\r\n result = True\r\n node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))\r\n\r\n if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):\r\n features = node_device_information.GetFeatures()\r\n #for feature in features:\r\n #node_feature = PySpin.CValuePtr(feature)\r\n #print('%s: %s' % (node_feature.GetName(),\r\n #node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable'))\r\n\r\n else:\r\n print('Device control information not available.')\r\n\r\n except PySpin.SpinnakerException as ex:\r\n print('Error: %s' % ex)\r\n return False\r\n\r\n return result", "def findDeviceDescriptor(self, string: str) -> cern.japc.core.DeviceDescriptor:\n ...", "def dump_sysfs():\n for fan in range(_wrapper_get_num_fans()):\n status = _wrapper_dump_sysfs(fan)\n\n if status:\n for i in status:\n click.echo(i)", "def model_device(model):\n # Source: https://discuss.pytorch.org/t/how-to-check-if-model-is-on-cuda/180\n try:\n return str(next(model.parameters()).device)\n except StopIteration:\n # Model has no parameters\n pass\n return 'cpu'", "def test_gpu_cuda_code() -> None:\n if get_from_environ(\"DISABLE_GPU_FOR_TESTING\") is not None:\n print(\"GPU payload disabled for testing\")\n return\n\n # if the command exists it can run on the hardware below\n proc = subprocess.Popen([\"nvidia-smi\"], stdout=subprocess.PIPE)\n stdout, _ = proc.communicate()\n str_stdout = stdout.decode()\n assert \"NVIDIA-SMI\" in str_stdout, str_stdout\n assert proc.returncode == 0\n # search the history for the CUDA implementation", "def _find_device(self):\n for bus in usb.busses():\n for dev in bus.devices:\n if dev.idVendor == self.vendor_id and dev.idProduct == self.product_id:\n if self.device_id is None or dev.filename == self.device_id:\n log.info('found station on USB bus=%s device=%s' % (bus.dirname, dev.filename))\n return dev\n return None", "def load_device():", "def is_available() -> bool:\n # This function never throws and returns 0 if driver is missing or can't\n # 
be initialized\n return device_count() > 0", "def get_root_device():\r\n return utils.system_output('rootdev -s -d')", "def module_present(module, load=True):\n with open('/proc/modules', 'r') as modules_file:\n if module.replace('-','_') in modules_file.read():\n return True\n cmd = '/sbin/modprobe {}{}'.format('' if load else '-n ',\n module)\n if os.system(cmd) != 0:\n return False\n else:\n return True", "def device(request):\n d = request.param()\n\n # enable GPU error checking\n if isinstance(d, hoomd.device.GPU):\n d.gpu_error_checking = True\n\n return d", "def _is_dunia2(self):\n\n dir_list = os.listdir(os.environ['PWD'])\n data_list = list(filter(lambda item: 'data_win' in item, dir_list))\n\n # Check .../data_win*/worlds/multicommon dir\n for data_dir in data_list:\n if os.path.exists(os.path.join(os.environ['PWD'], data_dir, 'worlds/multicommon')):\n return True\n\n return False" ]
[ "0.6442702", "0.6077788", "0.60640377", "0.60526884", "0.6037231", "0.6019427", "0.601595", "0.599092", "0.5918574", "0.5823835", "0.5786603", "0.5764074", "0.5732489", "0.5730832", "0.5723959", "0.5701175", "0.56283104", "0.5625314", "0.56229156", "0.56229156", "0.5601314", "0.5579826", "0.55786043", "0.5567019", "0.55604476", "0.5535356", "0.55134195", "0.54809695", "0.54776216", "0.5472382", "0.5469653", "0.546493", "0.5458886", "0.5456706", "0.5449675", "0.5442424", "0.5441685", "0.5441337", "0.54403794", "0.5423414", "0.5423414", "0.54159397", "0.5391822", "0.5386367", "0.5382044", "0.53696644", "0.5358753", "0.5354079", "0.5323647", "0.53228074", "0.53182244", "0.53064305", "0.5303842", "0.5302762", "0.52984333", "0.5297617", "0.52922404", "0.5286369", "0.5285857", "0.52824783", "0.5281841", "0.52676433", "0.5263277", "0.52624065", "0.5234418", "0.521864", "0.52148247", "0.51915324", "0.5189581", "0.51888865", "0.5178143", "0.51727307", "0.51721627", "0.51556075", "0.5146775", "0.51463866", "0.51447684", "0.5132893", "0.51212907", "0.51202613", "0.5119506", "0.51178885", "0.51167846", "0.5109598", "0.51001054", "0.5094112", "0.5087377", "0.50798196", "0.50697184", "0.5067146", "0.50625306", "0.5055927", "0.50482607", "0.5047831", "0.5040025", "0.50388086", "0.5038618", "0.5034058", "0.50300694", "0.50234246" ]
0.83339846
0
Determine the gpu index given a sysfs_gpu_name
def _amd_index(sysfs_gpu_name): drop_prefix = sysfs_gpu_name.strip()[len(_SYSFS_PREFIX):] return drop_prefix.split('/')[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _next_device(self):\n if self._num_gpus == 0:\n return ''\n dev = '/gpu:%d' % self._cur_gpu\n if self._num_gpus > 1:\n self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)\n return dev", "def deviceid(gpu):\n\n # Return if this is already a torch device\n # pylint: disable=E1101\n if isinstance(gpu, torch.device):\n return gpu\n\n # Always return -1 if gpu is None or an accelerator device is unavailable\n if gpu is None or not Models.hasaccelerator():\n return -1\n\n # Default to device 0 if gpu is True and not otherwise specified\n if isinstance(gpu, bool):\n return 0 if gpu else -1\n\n # Return gpu as device id if gpu flag is an int\n return int(gpu)", "def try_gpu(i=0): #@save\n if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:\n return tf.device(f'/GPU:{i}')\n return tf.device('/CPU:0')", "def get_cuda_device(minor_idx):\n\n executable_path = os.path.join(os.path.dirname(__file__), 'build')\n try:\n num_devices = int(subprocess.check_output(\n [\"{}/query_devices\".format(executable_path)]))\n except subprocess.CalledProcessError as e:\n return 0\n\n for i in range(num_devices):\n output = subprocess.check_output([\"nvidia-smi\", '-q', '-i', str(i)])\n output_list = output.decode(\"utf-8\").split('\\n')\n output_list = [item for item in output_list if 'Minor' in item]\n num = int(output_list[0].split(':')[-1])\n if num == minor_idx:\n return i\n return 0", "def GetGPU():\n return option['device_id']", "def ConvertGpuToVendorName(gpu):\n if not gpu:\n return 'No GPU'\n elif '8086' in gpu:\n return 'Intel'\n elif '10de' in gpu:\n return 'NVIDIA'\n elif '1002' in gpu:\n return 'AMD'\n return gpu", "def try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')", "def try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')", "def try_gpu(i=0):\n if torch.cuda.device_count() >= i + 1:\n return torch.device(f'cuda:{i}')\n return torch.device('cpu')", "def get_gpus():\n try:\n re = subprocess.check_output([\"nvidia-smi\", \"-L\"], universal_newlines=True)\n except OSError:\n return []\n return range(len([i for i in re.split('\\n') if 'GPU' in i]))", "def _current_device_index(self) -> int:\n device = PArray._get_current_device()\n if device is None: # not called inside current task\n return self._coherence.owner\n elif device.architecture == cpu:\n return CPU_INDEX\n else:\n # assume GPU here, won't check device.architecture == gpu\n # to avoid import `gpu`, which is slow to setup.\n return device.index", "def _get_device(self, n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger.warning(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n self.logger.warning(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, \"\n f\"but only {n_gpu} are available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n self.logger.info(f'Using device: {device}, {list_ids}')\n return device, list_ids", "def get_gpu_utilization(gpu_num = None, verbose=False):\n if gpu_num != None:\n check_num(gpu_num)\n if verbose:\n cmd = \"nvidia-smi --query-gpu=index,gpu_name,gpu_bus_id,utilization.gpu,memory.used --format=csv\"\n res = str(subprocess.check_output(cmd, shell=True))\n [print(a) for a in res.split('\\\\n')[:-1]]\n cmd = 
\"nvidia-smi --query-gpu=utilization.gpu,memory.used --format=csv,nounits\"\n res = str(subprocess.check_output(cmd, shell=True))\n res= res.split('\\\\n')\n if gpu_num== None:\n return array([list(map(int,a.split(','))) for a in res[1:-1]])\n else:\n return array(list(map(int,res[gpu_num+1].split(','))))", "def get_free_gpu():\n\tos.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n\tif os.path.exists('tmp'):\n\t\tmemory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n\t\tos.remove('tmp')\n\t\treturn np.argmax(memory_available)\n\treturn 0", "def device_index(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"device_index\")", "def get_gpu_count():\n\n gpu_count = 0\n\n env_cuda_devices = os.environ.get('CUDA_VISIBLE_DEVICES', None)\n if env_cuda_devices is not None:\n assert isinstance(env_cuda_devices, str)\n try:\n if not env_cuda_devices:\n return 0\n gpu_count = len(\n [x for x in env_cuda_devices.split(',') if int(x) >= 0])\n logger.info(\n 'CUDA_VISIBLE_DEVICES found gpu count: {}'.format(gpu_count))\n except:\n logger.info('Cannot find available GPU devices, using CPU now.')\n gpu_count = 0\n else:\n try:\n gpu_count = str(subprocess.check_output([\"nvidia-smi\",\n \"-L\"])).count('UUID')\n logger.info('nvidia-smi -L found gpu count: {}'.format(gpu_count))\n except:\n logger.info('Cannot find available GPU devices, using CPU now.')\n gpu_count = 0\n return gpu_count", "def get_gpu_memory_map():\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n print(\"Current usage: %i of 11178\" % gpu_memory_map[1])", "def get_gpu_count():\n\n gpu_count = 0\n\n env_cuda_devices = os.environ.get('CUDA_VISIBLE_DEVICES', None)\n if env_cuda_devices is not None:\n assert isinstance(env_cuda_devices, str)\n try:\n if not env_cuda_devices:\n return 0\n gpu_count = len(\n [x for x in env_cuda_devices.split(',') if int(x) >= 0])\n logger.info(\n 'CUDA_VISIBLE_DEVICES found gpu count: {}'.format(gpu_count))\n except:\n logger.info(\n 'Cannot find available GPU devices, using CPU or other devices now.'\n )\n gpu_count = 0\n else:\n try:\n gpu_count = str(subprocess.check_output([\"nvidia-smi\",\n \"-L\"])).count('UUID')\n logger.info('nvidia-smi -L found gpu count: {}'.format(gpu_count))\n except:\n logger.info(\n 'Cannot find available GPU devices, using CPU or other devices now. 
(Please check whether you can execute `nvidia-smi` command.)'\n )\n gpu_count = 0\n return gpu_count", "def get_free_gpu(self):\r\n output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE,\r\n shell=True).communicate()[0]\r\n output = output.decode(\"ascii\")\r\n\r\n # assumes that it is on the popiah server and the last gpu is not used\r\n memory_available = [int(x.split()[2]) for x in output.split(\"\\n\")[:-2]]\r\n\r\n if memory_available:\r\n print(\"Setting GPU to use to PID {}\".format(np.argmax(memory_available)))\r\n return np.argmax(memory_available)\r\n\r\n if not memory_available:\r\n print('No GPU memory available')", "def get_device_str(device_id, num_gpus):\n if num_gpus == 0:\n return \"/cpu:0\"\n device_str_output = \"/gpu:%d\" % (device_id % num_gpus)\n return device_str_output", "def get_free_gpu_memory(cuda_device_index):\n if sys.platform == \"darwin\":\n # No GPUs on darwin...\n return 0\n result = sp.check_output('nvidia-smi --query-gpu=memory.free '\n '--format=csv,nounits,noheader',\n shell=True)\n result = result.decode('utf-8').split('\\n')[:-1]\n log.verbose(f'The system has {len(result)} gpu(s).')\n free_mem = int(result[cuda_device_index])\n log.info(f'The {cuda_device_index}-th GPU has {free_mem} MB free.')\n if cuda_device_index >= len(result):\n raise ValueError(f\"Couldn't parse result for GPU #{cuda_device_index}\")\n return int(result[cuda_device_index])", "def gpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"gpu_per_unit\")", "def gpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"gpu_per_unit\")", "def gpu_per_unit(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"gpu_per_unit\")", "def get_device(i=0):\n if torch.cuda.is_available():\n return torch.device(\"cuda:%d\" % i)\n else:\n return torch.device(\"cpu\")", "def get_list_comp_ind(gpu):\n if gpu not in [0, 1, 2, 3, -1]:\n print('Your gpu index is not correct, check again')\n quit()\n data_dir = '/home/sr365/Bruce/cvdata'\n ind_list = []\n for file in os.listdir(data_dir):\n #print(file)\n # Check if this is a comp file\n if not file.endswith('.npy') or (not file[:-4].isdigit()):\n print('This file is {}, does not satisfy requirement, continue'.format(file))\n continue\n ind = int(file[:-4])\n #print('current comp ind is {}'.format(ind))\n ind_list.append(ind)\n #print(ind_list)\n length = len(ind_list)\n print(length)\n # If GPU == -1, return all list values\n if gpu == -1:\n return ind_list\n gpu_specific_list = ind_list[gpu*int(length / 4):(gpu+1)*int(length / 4)]\n print(len(gpu_specific_list))\n return gpu_specific_list", "def get_device_index(self, chip_name):\n index = self._dll.JLINKARM_DEVICE_GetIndex(chip_name.encode('ascii'))\n\n if index <= 0:\n raise errors.JLinkException('Unsupported device selected.')\n\n return index", "def check_gpu(self, values):\n try:\n process = subprocess.Popen(['nvidia-smi', '--query-gpu=name,pci.bus_id,driver_version,pstate,pcie.link.gen.max,pcie.link.gen.current,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used', '--format=csv'], stdout=subprocess.PIPE)\n out_str, _ = process.communicate()\n gpu_strs = out_str.split('\\n')\n\n # Get rid of the column headers.\n if len(gpu_strs) > 0:\n gpu_strs = gpu_strs[1:]\n\n # Process each GPU string.\n multi_gpu = len(gpu_strs) > 1\n gpu_index = 1\n for gpu_str in gpu_strs:\n out = gpu_str.split(',')\n if len(out) > 1:\n if multi_gpu:\n 
values[keys.KEY_GPUX_NAME.replace('X', str(gpu_index))] = out[0].strip(' \\t\\n\\r')\n values[keys.KEY_GPUX_TEMPERATURE.replace('X', str(gpu_index))] = int(out[6].strip(' \\t\\n\\r'))\n values[keys.KEY_GPUX_PERCENT.replace('X', str(gpu_index))] = int(out[7].strip(' \\t\\n\\r%%s'))\n gpu_index = gpu_index + 1\n else:\n values[keys.KEY_GPU_NAME] = out[0].strip(' \\t\\n\\r')\n values[keys.KEY_GPU_TEMPERATURE] = int(out[6].strip(' \\t\\n\\r'))\n values[keys.KEY_GPU_PERCENT] = int(out[7].strip(' \\t\\n\\r%%s'))\n except:\n logging.error(\"Error collecting GPU stats.\")", "def _get_gpu_names() -> Sequence[str]:\n result = []\n for device in device_lib.list_local_devices():\n if device.device_type != \"GPU\":\n continue\n desc = device.physical_device_desc\n\n fields = desc.split(\",\")\n for field in fields:\n name, value = field.split(\":\", maxsplit=1)\n name = name.strip()\n value = value.strip()\n if name == \"name\":\n result.append(value)\n return result", "def find_iio_device_name(self):\n self.iio_device_dir()\n self.console.runcmd(f\"cat name\", expected=\"\\r\\n\")\n iio_device_name = self.console.output()\n return iio_device_name", "def next_joystick_device():\n for i in range(100):\n dev = \"/dev/input/js{0}\".format(i)\n if not os.path.exists(dev):\n return dev", "def get_gpu_memory_map():\n # https://stackoverflow.com/questions/49595663/find-a-gpu-with-enough-memory\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ])\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n return gpu_memory_map", "def magma_getdevice():\n\n dev = c_int_type()\n _libmagma.magma_getdevice(ctypes.byref(dev))\n return dev.value", "def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")", "def get_device_of(self, tensor):\n if not tensor.is_cuda:\n return -1\n else:\n return tensor.get_device()", "def return_free_GPU():\r\n if torch.cuda.is_available():\r\n gpu_num = torch.cuda.device_count()\r\n device = torch.device('cuda:{}'.format(gpu_num-1))\r\n print('Using GPU:[{}]/[{}] for training...'.format(gpu_num-1,gpu_num-1))\r\n return device\r\n \r\n raise ValueError('GPU not available for training. 
Check CUDA env with function \"check_cuda_env\"')", "def get_gpu_memory_map():\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ])#, encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n return gpu_memory_map", "def get_gpu_memory_map():\r\n result = subprocess.check_output(\r\n [\r\n 'nvidia-smi', '--query-gpu=memory.free',\r\n '--format=csv,nounits,noheader'\r\n ], encoding='utf-8')\r\n # Convert lines into a dictionary\r\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\r\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\r\n return gpu_memory_map", "def get_gpu_memory_map():\n\tresult = subprocess.check_output(\n\t\t[\n\t\t\t'nvidia-smi', '--query-gpu=memory.free',\n\t\t\t'--format=csv,nounits,noheader'\n\t\t])\n\t# Convert lines into a dictionary\n\tresult=result.decode('utf-8')\n\tprint(result)\n\tgpu_memory = [int(x) for x in result.strip().split('\\n')]\n\tgpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n\treturn gpu_memory_map", "def get_device_of(tensor: torch.Tensor) -> int:\n if not tensor.is_cuda:\n return -1\n else:\n return tensor.get_device()", "def dev_node(self, dev, fake=False):\n if fake:\n return self.__FAKE_DEV.get(dev, -1)\n filename = '/sys/class/net/{0}/device/numa_node'.format(dev)\n if not os.path.isfile(filename):\n return -1\n with open(filename) as fd:\n return int(fd.read().strip())", "def detect_gpus():\n def worker(q):\n # `device_lib` will not release the memory it took,\n # so we run it in a sub-process.\n try:\n from tensorflow.python.client import device_lib\n\n if is_tensorflow_version_higher_or_equal('1.8.0'):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n devices = list(device_lib.list_local_devices(config))\n else:\n devices = list(device_lib.list_local_devices())\n gpus = [\n (device.name, device)\n for device in devices\n if device.device_type == 'GPU'\n ]\n union_set = {i: i for i in range(len(gpus))}\n\n for i, (name, device) in enumerate(gpus):\n assert (device.name == '/device:GPU:{}'.format(i))\n for link in device.locality.links.link:\n if link.device_id != i:\n union_set[i] = union_set[link.device_id]\n\n for i in six.iterkeys(union_set):\n while union_set[i] != union_set[union_set[i]]:\n union_set[i] = union_set[union_set[i]]\n\n root_devices = sorted(set(union_set.values()))\n gpu_groups = [[] for _ in range(len(root_devices))]\n dev_to_group = {j: i for i, j in enumerate(root_devices)}\n for i, (name, device) in enumerate(gpus):\n gpu_groups[dev_to_group[union_set[i]]].append(name)\n\n q.put((1, gpu_groups))\n except Exception:\n q.put((0, traceback.format_exc()))\n\n q = mp.Queue()\n p = mp.Process(target=worker, args=(q,))\n\n try:\n p.start()\n result = q.get()\n if result[0] == 1:\n return result[1]\n else:\n raise RuntimeError(\n 'Failed to retrieve GPU information, the traceback of '\n 'sub-process is:\\n {}'.\n format('\\n '.join(result[1].split('\\n')))\n )\n finally:\n p.terminate()\n p.join()", "def get_gpu_tempfunc():\n res = os.popen('/opt/vc/bin/vcgencmd measure_temp').readline()\n return res.replace(\"temp=\", \"\")", "def get_gpu_memory_map():\n\tresult = subprocess.check_output(\n\t\t[\n\t\t\t'nvidia-smi', '--query-gpu=memory.used',\n\t\t\t'--format=csv,nounits,noheader'\n\t\t])\n\tresult = result.decode('utf-8')\n\t# Convert lines into a dictionary\n\tgpu_memory = 
[int(x) for x in result.strip().split('\\n')]\n\tgpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n\treturn gpu_memory_map", "def _set_gpu_num(self, gpu=''):\n self.gpu_num = 0\n if gpu:\n self.gpu_num = len(gpu.split(','))\n pynvml.nvmlInit()\n device_count = pynvml.nvmlDeviceGetCount()\n pynvml.nvmlShutdown()\n if self.gpu_num > device_count:\n error_message = \"gpu:{} exceeds device_count:{}\".format(gpu, device_count)\n logger.error(error_message)\n assert self.gpu_num <= device_count, error_message", "def get_free_gpus(gpu_num = None, bool=False, verbose=False):\n if gpu_num != None:\n check_num(gpu_num)\n return (get_gpu_utilization(gpu_num, verbose)<(1,1)).min()\n res = (get_gpu_utilization(gpu_num, verbose)<(1,1)).min(axis=1)\n if bool:\n return res\n return res.nonzero()[-1]", "def create_gpu_device():\n d1 = dpctl.SyclDevice(\"gpu\")\n d2 = dpctl.select_gpu_device()\n assert d1 == d2\n print_device(d1)\n return d1", "def _get_device_id() -> str:\n with open(\"/proc/cpuinfo\", \"r\") as f:\n for line in f.readlines():\n if line.startswith('Serial'):\n return line.split(':')[1].strip()\n return 'N/A'", "def cudaMemGetInfo(mb=False):\n print 'gpu: '\n free = ctypes.c_size_t()\n total = ctypes.c_size_t()\n ret = cuda.cudaMemGetInfo(ctypes.byref(free), ctypes.byref(total))\n\n if ret != 0:\n err = cuda.cudaGetErrorString(status)\n raise RuntimeError(\"CUDA Error (%d): %s\" % (status, err))\n\n if mb:\n scale = 1024.0**2\n return free.value / scale, total.value / scale\n else:\n return free.value, total.value", "def _prepare_device(self, n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger.warning(\n \"Warning: There\\'s no GPU available on this machine, training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n self.logger.warning(\n \"Warning: The number of GPU\\'s configured to use is {}, but only {} are available on this machine.\".format(\n n_gpu_use, n_gpu))\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids", "def unique_id(self) -> str:\n return get_frigate_entity_unique_id(\n self._config_entry.entry_id, \"gpu_load\", self._gpu_name\n )", "def _is_amd(sysfs_gpu_name):\n with open(sysfs_gpu_name) as src:\n return src.read().strip() == 'amdgpu'", "def finddevice():\n\n return next((device for device in [\"xpu\"] if hasattr(torch, device) and getattr(torch, device).is_available()), None)", "def _prepare_device(self, n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n self.logger.warning(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n self.logger.warning(\"Warning: The number of GPU\\'s configured to use is {}, but only {} are available \"\n \"on this machine.\".format(n_gpu_use, n_gpu))\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids", "def _name2idx(name):\n match = re.search(r\"eth(\\d+)\", name, re.I)\n if not match:\n raise exception.CloudbaseInitException(\n \"invalid NetworkDetails name {!r}\"\n .format(name)\n )\n return int(match.group(1))", "def variable_on_gpu(name, shape, initializer):\n # Use the /cpu:0 device for scoped operations\n with tf.device('/device:GPU:0'):\n # Create or get apropos variable\n var = tf.get_variable(name=name, shape=shape, initializer=initializer)\n return 
var", "def min_gpu(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min_gpu\")", "def get_cpu_number():\n try:\n output = subprocess.check_output('lscpu').decode(\"utf-8\")\n for line in output.splitlines():\n m = re.match(r'NUMA node0.*:\\s*\\d+-(\\d+)', line)\n if m:\n return m.group(1)\n except OSError:\n pass\n sys.stderr.write(\"Warning: Unable to select CPU ID, using 0\\n\")\n return 0", "def max_gpu(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_gpu\")", "def get_busy_gpus(gpu_num = None, bool=False, verbose=False):\n if gpu_num != None:\n check_num(gpu_num)\n return (get_gpu_utilization(gpu_num, verbose)>(1,1)).min()\n res = (get_gpu_utilization(gpu_num, verbose)>(1,1)).min(axis=1)\n if bool:\n return res\n return res.nonzero()[-1]", "def _get_input():\n value = 0\n df_output = subprocess.run(['df'], stdout=subprocess.PIPE)\n lst = df_output.stdout.decode().split('\\n')\n for row in lst:\n try:\n if '/dev' in row.split()[-1]:\n value = row.split()[-2].strip('%')\n except IndexError:\n continue\n return value", "def get_gpu_ids(gpus: str) -> List[int]:\n num_available_gpu = torch.cuda.device_count()\n gpu_ids = []\n for gpu_id in gpus.split(\",\"):\n if not gpu_id.isnumeric():\n raise ValueError(\"--gpus argument should be numbers separated by ','.\")\n gpu_ids.append(int(gpu_id))\n\n wrong_gpus = []\n for gpu_idx in gpu_ids:\n if gpu_idx >= num_available_gpu:\n wrong_gpus.append(gpu_idx)\n\n for wrong_gpu in wrong_gpus:\n gpu_ids.remove(wrong_gpu)\n\n if wrong_gpus:\n logger.warning(f\"Wrong gpu indices are excluded. {','.join([str(val) for val in gpu_ids])} GPU will be used.\")\n\n return gpu_ids", "def prepare_device(n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n print(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n print(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, but only {n_gpu} are \"\n \"available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids", "def prepare_device(n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n print(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n print(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, but only {n_gpu} are \"\n \"available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids", "def prepare_device(n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n print(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n print(\"Warning: The number of GPU\\'s configured to use is {}, but only {} are available \"\n \"on this machine.\".format(n_gpu_use, n_gpu))\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids", "def _getFIdx(self, featureName):\n return np.where(self.featureNames == featureName)[0][0]", "def get_device(model):\n\tif next(model.parameters()).is_cuda:\n\t\treturn 'cuda:{}'.format(torch.cuda.current_device())\n\telse:\n\t\treturn 'cpu'", "def _get_available_gpus():\n 
#global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def device() -> str:\n import torch\n\n if torch.cuda.is_available() and torch.cuda.device_count() > 0:\n if hasattr(Config().trainer,\n 'parallelized') and Config().trainer.parallelized:\n device = 'cuda'\n else:\n device = 'cuda:' + str(\n random.randint(0,\n torch.cuda.device_count() - 1))\n else:\n device = 'cpu'\n\n return device", "def _get_available_gpus():\n # global _LOCAL_DEVICES\n if tf_back._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tf_back._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tf_back._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_register_index_from_name(self, register):\n regs = list(self.register_name(idx) for idx in self.register_list())\n if isinstance(register, six.string_types):\n try:\n result = regs.index(register)\n except ValueError:\n error_message = \"No register found matching name: {}. (available registers: {})\"\n raise errors.JLinkException(error_message.format(register, ', '.join(regs)))\n return result", "def _get_available_gpus():\r\n #global _LOCAL_DEVICES\r\n if tfback._LOCAL_DEVICES is None:\r\n devices = tf.config.list_logical_devices()\r\n tfback._LOCAL_DEVICES = [x.name for x in devices]\r\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def get_gpu_stats(self):\n\t\tif which('nvidia-smi') is None:\n\t\t\treturn {'available': False}\n\t\t\n\t\ttry:\n\t\t\tcommand = ['nvidia-smi', '-q', '-x']\n\t\t\tresponse = subprocess.check_output(command)\n\t\texcept subprocess.CalledProcessError:\n\t\t\treturn {'available': False}\n\t\t\n\t\tgpu = xml.etree.ElementTree.fromstring(response).find(\"gpu\")\n\n\t\tgpu_name = gpu.find(\"product_name\").text\n\t\t\n\t\tgpu_temp = gpu.find(\"temperature\")\n\t\tgpu_temp_now = int(gpu_temp.find(\"gpu_temp\").text.rpartition('C')[0])\n\t\tgpu_temp_max = int(gpu_temp.find(\"gpu_temp_max_threshold\").text.rpartition('C')[0])\n\n\t\tgpu_util = gpu.find(\"utilization\")\n\t\tgpu_usage = int(gpu_util.find(\"gpu_util\").text.rpartition('%')[0])\n\t\tgpu_m_usage = int(gpu_util.find(\"memory_util\").text.rpartition('%')[0])\n\n\t\treturn {\n\t\t\t'available': True,\n\t\t\t'gpu_name': gpu_name,\n\t\t\t'gpu_usage': gpu_usage,\n\t\t\t'gpu_memory_usage': gpu_m_usage,\n\t\t\t'gpu_temp_now': gpu_temp_now, \n\t\t\t'gpu_temp_max': gpu_temp_max\n\t\t}", "def mat_name_to_index(val):\n return bpy.data.materials.find(val)", "def get_dev_count_for_disk_bus(disk_bus):\n\n if disk_bus == \"ide\":\n return 4\n else:\n return 26", "def setup_device(gpuid=None):\n\n if gpuid is not None and not isinstance(gpuid, str):\n gpuid = str(gpuid)\n\n if gpuid is not None:\n nb_devices = len(gpuid.split(','))\n else:\n nb_devices = 1\n\n if gpuid is not None and (gpuid != '-1'):\n device = '/gpu:' + gpuid\n os.environ['CUDA_VISIBLE_DEVICES'] = gpuid\n\n # GPU memory configuration differs between TF 1 and 2\n if hasattr(tf, 'ConfigProto'):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n 
tf.keras.backend.set_session(tf.Session(config=config))\n else:\n tf.config.set_soft_device_placement(True)\n for pd in tf.config.list_physical_devices('GPU'):\n tf.config.experimental.set_memory_growth(pd, True)\n else:\n device = '/cpu:0'\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n return device, nb_devices", "def get_available_device():\n if torch.cuda.is_available():\n free_mem, device_idx = 0.0, 0\n for d in range(torch.cuda.device_count()):\n mem = torch.cuda.get_device_properties(d).total_memory - torch.cuda.memory_allocated(d)\n if mem > free_mem:\n device_idx = d\n free_mem = mem\n return torch.device(f'cuda:{device_idx}')\n else:\n return torch.device('cpu')", "def _get_available_gpus():\n # global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_max_gpu_processes(self):\n mem_usage = self._get_gpu_mem_usage()\n print('Mem Usage:', mem_usage)\n\n num_processes = int(1 / mem_usage)\n return num_processes", "def device_num(self) -> str:\n return pulumi.get(self, \"device_num\")", "def gpu(device_id=0):\n return Context('gpu', device_id)", "def gpu(device_id=0):\n return Context('gpu', device_id)", "def _get_cindex(circ, name, index):\n ret = 0\n for reg in circ.cregs:\n if name != reg.name:\n ret += reg.size\n else:\n return ret + index\n return ret + index", "def guess_vserver_device():\n\n s = commands.getoutput('/bin/mount | /bin/grep tagxid | /usr/bin/head -n 1')\n device = s.split()[0]\n\n return device", "def kdev_name(self):\n return self._sysfs", "def get_phylogenetic_metric(name):\r\n # looks for name, inserting possible dist_ to find functions\r\n # in qiime.beta_metrics\r\n try:\r\n return getattr(qiime.beta_metrics, 'dist_' + name.lower())\r\n except AttributeError:\r\n try:\r\n return getattr(qiime.beta_metrics,\r\n name.replace('binary', 'binary_dist').lower())\r\n except AttributeError:\r\n return getattr(qiime.beta_metrics,\r\n name.lower())", "def get_hardware_id():\r\n try:\r\n return utils.run('crossystem hwid').stdout.strip()\r\n except:\r\n logging.info(\"Not Found\")\r\n return -1", "def try_gpu(x):\n global _GPUS_EXIST\n\n if _GPUS_EXIST:\n try:\n return x.cuda()\n except (AssertionError, RuntimeError):\n # actually, GPUs don't exist\n print 'No GPUs detected. Sticking with CPUs.'\n _GPUS_EXIST = False\n return x\n else:\n return x", "def get_device_number_if_usb_soundcard(index_info):\n\n index, info = index_info\n\n if \"USB Audio Device\" in info[\"name\"]:\n return index\n return False", "def get_device():\n c_dev = ct.c_int(0)\n safe_call(backend.get().af_get_device(ct.pointer(c_dev)))\n return c_dev.value", "def device(self):\n return self._vars[0].device", "def get_gpu_info(**kwargs):\n # Set GPU info fields\n conn_gpu_count = None\n source_db_gpu_count = None\n source_db_gpu_mem = None\n source_db_gpu_driver_ver = \"\"\n source_db_gpu_name = \"\"\n if kwargs[\"no_gather_conn_gpu_info\"]:\n logging.debug(\n \"--no-gather-conn-gpu-info passed, \"\n + \"using blank values for source database GPU info fields \"\n + \"[run_gpu_count, run_gpu_mem_mb] \"\n )\n else:\n logging.debug(\n \"Gathering source database GPU info fields \"\n + \"[run_gpu_count, run_gpu_mem_mb] \"\n + \"using pymapd connection info. 
\"\n )\n conn_hardware_info = kwargs[\"con\"]._client.get_hardware_info(\n kwargs[\"con\"]._session\n )\n conn_gpu_count = conn_hardware_info.hardware_info[0].num_gpu_allocated\n if conn_gpu_count == 0 or conn_gpu_count is None:\n no_gather_nvml_gpu_info = True\n if conn_gpu_count == 0:\n logging.warning(\n \"0 GPUs detected from connection info, \"\n + \"using blank values for source database GPU info fields \"\n + \"If running against cpu-only server, make sure to set \"\n + \"--no-gather-nvml-gpu-info and --no-gather-conn-gpu-info.\"\n )\n else:\n no_gather_nvml_gpu_info = kwargs[\"no_gather_nvml_gpu_info\"]\n source_db_gpu_count = conn_gpu_count\n try:\n source_db_gpu_mem = int(\n conn_hardware_info.hardware_info[0].gpu_info[0].memory\n / 1000000\n )\n except IndexError:\n logging.error(\"GPU memory info not available from connection.\")\n if no_gather_nvml_gpu_info:\n logging.debug(\n \"--no-gather-nvml-gpu-info passed, \"\n + \"using blank values for source database GPU info fields \"\n + \"[gpu_driver_ver, run_gpu_name] \"\n )\n elif (\n kwargs[\"conn_machine_name\"] == \"localhost\"\n or kwargs[\"gather_nvml_gpu_info\"]\n ):\n logging.debug(\n \"Gathering source database GPU info fields \"\n + \"[gpu_driver_ver, run_gpu_name] \"\n + \"from local GPU using pynvml. \"\n )\n import pynvml\n\n pynvml.nvmlInit()\n source_db_gpu_driver_ver = pynvml.nvmlSystemGetDriverVersion().decode()\n for i in range(source_db_gpu_count):\n handle = pynvml.nvmlDeviceGetHandleByIndex(i)\n # Assume all cards are the same, overwrite name value\n source_db_gpu_name = pynvml.nvmlDeviceGetName(handle).decode()\n pynvml.nvmlShutdown()\n # If gpu_count argument passed in, override gathered value\n if kwargs[\"gpu_count\"]:\n source_db_gpu_count = kwargs[\"gpu_count\"]\n if kwargs[\"gpu_name\"]:\n source_db_gpu_name = kwargs[\"gpu_name\"]\n gpu_info = {\n \"conn_gpu_count\": conn_gpu_count,\n \"source_db_gpu_count\": source_db_gpu_count,\n \"source_db_gpu_mem\": source_db_gpu_mem,\n \"source_db_gpu_driver_ver\": source_db_gpu_driver_ver,\n \"source_db_gpu_name\": source_db_gpu_name,\n }\n return gpu_info", "def get_gpu_memory_available(gpu_id):\n #1MiB = 1048576 bytes\n MiB = 1048576\n \n result = subprocess.check_output(\n [\n 'nvidia-smi' , '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [x for x in result.strip().split('\\n')]\n vram_used = float(gpu_memory[gpu_id])\n #print(\"GPU id:\", str(gpu_id), \"GPU RAM used, including extra driver buffer from nvidia-smi:\", str(vram_used))\n total_mem = torch.cuda.get_device_properties(gpu_id).total_memory / MiB\n vram_available = total_mem-vram_used\n return vram_available", "def getGpus():\n nvmlInit()\n gpu_list = []\n for i in range(0, nvmlDeviceGetCount()):\n handle = nvmlDeviceGetHandleByIndex(i)\n gpu_list.append(NvidiaGPU(handle))\n return gpu_list", "def gpu_instance_profile(self) -> Optional[pulumi.Input[Union[str, 'GPUInstanceProfile']]]:\n return pulumi.get(self, \"gpu_instance_profile\")", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def set_max_gpu(self):\r\n gpu_getter = GPUGetter()\r\n gpu = str(gpu_getter.get_free_gpu())\r\n\r\n if gpu:\r\n print(\"Using GPU: %s\" % gpu)\r\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\r\n 
os.environ['CUDA_VISIBLE_DEVICES'] = gpu\r\n\r\n if not gpu:\r\n print('No GPU detected')", "def get_index(band_nums,chan_num):\n ch_index=np.searchsorted(band_nums,chan_num)\n return int(ch_index)", "def try_gpu(x):\n global _GPUS_EXIST\n if _GPUS_EXIST:\n try:\n return x.cuda()\n except (AssertionError, RuntimeError):\n print('No GPUs detected. Sticking with CPUs.')\n _GPUS_EXIST = False\n return x" ]
[ "0.68445116", "0.6772102", "0.6700925", "0.6621827", "0.65894985", "0.6331592", "0.62258136", "0.62258136", "0.62258136", "0.61923295", "0.61809945", "0.6132849", "0.6131118", "0.61252695", "0.6123489", "0.6101626", "0.6023584", "0.6001534", "0.5931374", "0.59023625", "0.5886564", "0.5853852", "0.5853852", "0.5853852", "0.5814684", "0.5797789", "0.5765374", "0.57587636", "0.57504016", "0.5730266", "0.5664175", "0.5659895", "0.5656986", "0.5649641", "0.56384486", "0.5587484", "0.55836654", "0.5574918", "0.55595565", "0.5549091", "0.55478716", "0.5524664", "0.55142766", "0.5502352", "0.5491034", "0.54893345", "0.54495794", "0.5439993", "0.5434279", "0.5418378", "0.540845", "0.5398236", "0.5391844", "0.5375933", "0.5371516", "0.53629744", "0.5361426", "0.5357946", "0.5347763", "0.5347318", "0.5327362", "0.5318231", "0.53116995", "0.53116995", "0.529845", "0.52916974", "0.52798784", "0.5274217", "0.5274217", "0.5272154", "0.5259422", "0.5251435", "0.5243033", "0.52419496", "0.5232531", "0.523179", "0.5229741", "0.5226332", "0.52254575", "0.52146876", "0.5209781", "0.5198893", "0.5198893", "0.51852447", "0.5176474", "0.5174271", "0.5174236", "0.5173597", "0.51716083", "0.51651376", "0.5162177", "0.5151168", "0.51428825", "0.51373124", "0.51338047", "0.5133541", "0.513238", "0.5128925", "0.5126651", "0.5119558" ]
0.786728
0
Determines the path of the configuration file
def _cfg_path(argv):
    cfg_path = argv[1] if len(argv) > 1 else None
    _is_file = os.path.isfile
    if not cfg_path or not _is_file(cfg_path):
        if cfg_path:
            _info("no config at {}, trying the default location".format(
                cfg_path))
        cfg_path = _DEFAULT_PATH
        if not _is_file(cfg_path):
            _info("no config at {}, exiting".format(cfg_path))
            return None
    return cfg_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME", "def _get_config_filepath(self):\n\t\tif self.configfilepath is None:\n\t\t\treturn os.path.join(self.workdir, \"config.txt\")\n\t\telse:\n\t\t\treturn self.configfilepath", "def configPath(self):\n return os.path.dirname(__file__)", "def config_path(self):\n if os.path.exists(self._config_path):\n if pyhocon.ConfigFactory.parse_file(self._config_path):\n return os.path.realpath(self._config_path)\n # TODO if string is url/git repo, download file locally first\n return None", "def get_config_path(config):\n section = config.sections()[0]\n return Path(config.get(section, \"path\")).expanduser().absolute()", "def get_config_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-5]\n config_path = os.path.join(root, 'config.ini')\n\n return config_path", "def get_cfg_path(filename):\n return os.path.join(get_cfg_dir(), filename)", "def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))", "def cfg_path(self):\n return self._cfg_path", "def config_file(self):\n return join_path(self.prefix.etc.bohrium, \"config.ini\")", "def get_configuration_file():\n path = os.path.abspath(os.curdir)\n while path != os.sep:\n config_path = os.path.join(path, CONFIG_FILE_NAME)\n if os.path.exists(config_path):\n return config_path\n path = os.path.dirname(path)\n return None", "def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')", "def full_path(self, config_path=CONFIG_PATH):\n return os.path.join(config_path, self.filename)", "def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;", "def config_file_and_path():\n return str(rmfriend_dir() / 'config.cfg')", "def _build_config_file_path(cls, filename):\n if os.path.exists(filename):\n return filename\n res = os.path.join(os.path.dirname(__file__), '..', 'config', filename)\n if not os.path.exists(res):\n raise ValueError(\"requested config file %s does not exist!\" % filename)\n return res", "def determine_config() -> str:\n if os.environ.get(PortholeConfig.CONFIG_ENV_NAME) is not None:\n return os.environ.get(PortholeConfig.CONFIG_ENV_NAME)\n if os.path.isfile(PortholeConfig.DEFAULT_CONFIG_FILE):\n return PortholeConfig.DEFAULT_CONFIG_FILE\n for file_path in PortholeConfig.OTHER_ALLOWED_CONFIG_PATHS:\n if os.path.isfile(file_path):\n return file_path\n raise FileNotFoundError(\n \"Porthole is unable to locate a useable config file. 
\"\n \"Try setting the PORTHOLE_CONFIG environment variable, \"\n \"or creating a porthole.ini file in your main project directory.\"\n )", "def config_file(self):\n return self[CONFIG_FILE_KEY]", "def get_config_filepath():\n scs_installation_dirs = _path_utils.get_addon_installation_paths()\n\n # SEARCH FOR CONFIG...\n scs_config_file = ''\n for i, location in enumerate(scs_installation_dirs):\n test_path = os.path.join(location, 'config.txt')\n if os.path.isfile(test_path):\n scs_config_file = test_path\n break\n\n # IF NO CONFIG FILE, CREATE ONE...\n if scs_config_file == '':\n lprint(\"S Creating new 'config.txt' file:\\n\\t %r\", (os.path.join(scs_installation_dirs[0], 'config.txt'),))\n scs_config_file = new_config_file(os.path.join(scs_installation_dirs[0], 'config.txt'))\n\n # print('SCS Blender Tools Config File:\\n \"%s\"\\n' % os.path.join(scs_installation_dirs[0], 'config.txt'))\n return scs_config_file", "def get_config_file_path(config_file: str) -> str:\n\n if not isinstance(config_file, str):\n raise ValueError(\"value for 'config_file' of 'parse_ini' must be of type str\")\n\n if len(config_file) == 0:\n raise ValueError(f\"value for 'config_file' can't be empty\")\n\n base_dir = os.sep.join(__file__.split(os.sep)[0:-3])\n if config_file[0] != os.sep:\n config_file = f\"{base_dir}{os.sep}{config_file}\"\n\n return os.path.realpath(config_file)", "def get_config_file(self):\r\n return os.path.join(self.cloudletdir, \"applied_config\")", "def get_config_file_path(filename):\n # Use __file__ to derive a path relative to this module's location which points to the tests data directory.\n relative_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"..\", \"config_files\"\n )\n return os.path.join(os.path.abspath(relative_path), filename)", "def default_config_file(self):\n return DEFAULT_CONFIG_FILEPATH", "def find_conf():\n path = os.path.abspath(os.path.expanduser(os.getcwd()))\n while path not in ('', '/'):\n conf_path = os.path.join(path, 'dataplicity.conf')\n if os.path.exists(conf_path):\n return conf_path\n path = os.path.dirname(path)\n return None", "def get_instance_config_path():\n return join(settings.PROJECT_DIR, \"conf\", \"eoxserver.conf\")", "def get_production_config_file_path(path: pathlib.Path) -> pathlib.Path:\n return get_production_config_dir_path(path) / \"config.py\"", "def config_dir(self) -> Path:\n return self._config_dir", "def config_file_address() -> str:\n\n config_files = json_files_from_folder(\"config\")\n config_file = choose_config(config_files) # Choice a config file if there is more then 1 in config folder\n return config_file", "def _config_path(res, ctx):\n\n if _has_error_code(res):\n return print_errors(res, ctx)\n\n return res['path']", "def get_config_path() -> Path:\n config = os.getenv('TOM_CONFIG', '')\n return Path(config)", "def _find_config_file(self) -> str or None:\n import os\n\n for path in self.paths:\n path = os.path.expanduser(path)\n for extension in self.file_extensions:\n for file_name in self.file_names:\n file_path = os.path.join(path, \"{}.{}\".format(file_name, extension))\n if os.path.isfile(file_path):\n return file_path\n\n return None", "def getConfigPath():\n if sys.platform == 'linux':\n configpath = os.path.normpath(os.path.expanduser('~/.config/phobos'))\n elif sys.platform == 'darwin':\n configpath = os.path.normpath(os.path.expanduser('~/Library/Application Support/phobos'))\n elif sys.platform == 'win32':\n configpath = os.path.normpath(os.path.expanduser('~/AppData/Roaming/phobos'))\n 
else:\n configpath = 'ERROR: {0} not supported,'.format(sys.platform)\n return configpath", "def cfgpath(p):\n p = Path(p)\n if p.is_absolute():\n return p\n else:\n for d in reversed(cfgdirs):\n try:\n fp = (d / p).resolve()\n except FileNotFoundError:\n continue\n if fp.is_file():\n return fp\n else:\n return p", "def conf_dir(self):\r\n return self._conf_dir", "def _app_config_file() -> str:\n if 'AISCALATOR_HOME' in os.environ:\n home = os.environ['AISCALATOR_HOME']\n file = os.path.join(home, \"config\", \"aiscalator.conf\")\n if os.path.exists(file):\n return file\n return os.path.join(os.path.expanduser(\"~\"), '.aiscalator',\n 'config', 'aiscalator.conf')", "def config_directory(self):\n\n return self.get_raw(\"config_directory\")", "def config_dir(template_file_path=None):\n if template_file_path:\n return os.path.dirname(template_file_path)\n\n return os.getcwd()", "def _get_config_path():\n return os.path.join(os.path.expanduser('~'))", "def get_path(self, key):\n value = self.getn(key)\n if value is None:\n logger.warning(\"Specified config '%s' is None or not exist\" % key)\n return None\n if not isinstance(value, str):\n msg = \"Specified config '%s' is non-string: %s\" % (key, value)\n logger.error(msg)\n raise ValueError(msg)\n #\n path = os.path.expanduser(value)\n if not os.path.isabs(path):\n # Got relative path, try to convert to the absolute path\n if hasattr(self, \"userconfig\"):\n # User configuration loaded\n path = os.path.join(os.path.dirname(self.userconfig), path)\n else:\n logger.warning(\"Cannot convert to absolute path: %s\" % path)\n return os.path.normpath(path)", "def getConfigFile(self):\n if not self.__args.configfile:\n msg = \"not set configfile\"\n self.__logger.error(msg)\n return \"\"\n cf = os.getcwd() + os.sep + self.__args.configfile\n if not os.path.exists(self.__args.configfile):\n msg = \"file \" + cf + \" not exist!\"\n self.__logger.error(msg)\n return \"\"\n return cf", "def find_config_file(self):\n filename = self.values.get('config_file', Default('noy.json'))\n\n ignore_missing = False\n if isinstance(filename, Default):\n filename = filename.val\n ignore_missing = True\n\n filename = os.path.abspath(filename)\n if os.path.exists(filename):\n return filename\n elif not ignore_missing:\n raise MissingConfigFile(\"Config file doesn't exist at {}\".format(filename))", "def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)", "def get_github_config_path(self, config_file_name):\n home = os.path.abspath(os.environ.get('HOME', ''))\n config_file_path = os.path.join(home, config_file_name)\n return config_file_path", "def get_kube_config_file_path(self):\n return self._kube_config", "def _get_config_path(config_arg: Optional[str]) -> Path:\n if config_arg:\n config_file = Path(config_arg)\n elif os.environ.get(ENV_VAR_FOR_CONFIG_FILE_PATH):\n config_file = Path(os.environ[ENV_VAR_FOR_CONFIG_FILE_PATH])\n else:\n config_file = None\n\n if not config_file or not config_file.is_file():\n logging.fatal(f\"Config file not found: {config_file}\")\n sys.exit(1)\n return config_file", "def _find_config_root(self) -> str:\n location = [\"apache2.conf\", \"httpd.conf\", \"conf/httpd.conf\"]\n for name in location:\n if os.path.isfile(os.path.join(self.root, name)):\n return os.path.join(self.root, name)\n raise errors.NoInstallationError(\"Could not find configuration root\")", "def _findConfigPath(self, name):\n for path in reversed(self._makeAllConfigPaths(name)):\n 
if os.path.exists(path):\n return path", "def get_config(self):\n root_folder = os.path.dirname(os.path.dirname(__file__)).replace('\\\\', '/')\n root_folder = root_folder.replace('/core', '/config')\n # print root_folder, '<----------------------------------------'\n proj_config = os.path.join(root_folder, self.project.lower()).replace('\\\\', '/')\n # print proj_config, '============================================='\n if not os.path.isfile(proj_config):\n proj_config = os.path.join(root_folder, 'default').replace('\\\\', '/')\n # print proj_config, '<========================================'\n return proj_config", "def system_conf_dir(self):\n return buildconfig.SPD_CONF_PATH", "def get_default_config_path():\n if os.name == 'posix':\n config_path = os.path.join(os.path.expanduser(\"~\"), '.fpdb')\n elif os.name == 'nt':\n config_path = os.path.join(os.environ[\"APPDATA\"], 'fpdb')\n else: config_path = False\n return config_path", "def get_config_file_for_auto_config(self) -> Optional[Text]:\n return self.config_file", "def confDir(self):\r\n return self._confDir", "def default_configfile(self):\r\n config = None\r\n for path in self.searchpaths:\r\n if os.path.exists(path):\r\n config = path\r\n break\r\n if config is None and self.require_configfile:\r\n self.usage('No config file found at default paths (%s); '\r\n 'use the -c option to specify a config file '\r\n 'at a different path' % ', '.join(self.searchpaths))\r\n return config", "def current_config_file(self):\n return self.mp_conf_file.current_file", "def _get_config_fname():\n directory = _get_vispy_app_dir()\n if directory is None:\n return None\n fname = op.join(directory, 'vispy.json')\n if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:\n fname = op.join(_TempDir(), 'vispy.json')\n return fname", "def _github_config(self, config_file_name):\n home = os.path.abspath(os.environ.get('HOME', ''))\n config_file_path = os.path.join(home, config_file_name)\n return config_file_path", "def get_config_file_name(self):\n argv = sys.argv\n config_type = \"dev\" # default configuration type\n if None != argv and len(argv) > 1 :\n config_type = argv[1]\n config_file = config_type + \".cfg\"\n logger.info(\"get_config_file_name() return : \" + config_file)\n return config_file", "def config_dir(self) -> str:\n if not self._config_dir:\n self._config_dir = self._detect_config_dir()\n return self._config_dir", "def get_default_config_file() -> Path:\n return get_path_to_pyflow() / \"pyflow\" / \"conf\" / CONFIG_FILE", "def get_dataset_config_path(dataset_dir: str) -> str:\n return os.path.join(dataset_dir, DATASET_CONFIG_NAME)", "def cfgPath( *args ):\n return '/'.join( [str( k ) for k in args] )", "def path_config(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_INT)", "def configDir():\n return os.path.join(os.environ['HARNESSEDJOBSDIR'], 'config', getSiteName())", "def getConfigFileName(self):\n return self._configFileName", "def _get_deployment_config_file():\n config_path = cfg.CONF.find_file(\n cfg.CONF.paste_deploy['api_paste_config'])\n if config_path is None:\n return None\n\n return os.path.abspath(config_path)", "def config_abex_path(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / ABEX_CONFIG", "def config_file_name(self):\n return self._config_file_name", "def find_config():\n print(\"in find_config()\")\n print(os.getcwd())\n print(os.listdir(os.getcwd()))\n print(os.path.expanduser(\"~/.pylcmodel\"))\n if os.path.isfile(os.path.join(os.getcwd(), 
\".pylcmodel\")):\n return os.path.join(os.getcwd(), \".pylcmodel\")\n elif os.path.isfile(os.path.expanduser(\"~/.pylcmodel\")):\n return os.path.expanduser(\"~/.pylcmodel\")\n else:\n raise FileNotFoundError(\"No .pylcmodel config file found.\")", "def get_config_dir():\n return Path(environ.get(CONFIG_DIR_ENV_VAR, _default_dir))", "def _findconfigfile():\n\n # A ordered list of possible config files\n configfiles = [\"~/.githubhooksrc\",\n \"/etc/githubhooks\"]\n\n for configfile in configfiles:\n if os.path.isfile(os.path.expanduser(configfile)):\n return os.path.expanduser(configfile)\n\n # No valid config file found\n print \"ERROR: No valid config file found in any of the following locations:\"\n for configfile in configfiles:\n print \" - %s\" % configfile\n sys.exit(1)", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def JconfPath(self):\n return self.GetSurface(\"JconfDict\").GetPath(self.GetSurface(\"JconfSelection\"))", "def get_workbook_path():\n working_dir = get_working_dir()\n if 'config.txt' not in os.listdir(working_dir):\n create_config_file()\n create_progress_workbook()\n\n with open(working_dir + os.sep + 'config.txt') as config_file:\n workbook_path = config_file.read().strip()\n return workbook_path", "def get_conf_filename (self, directory):\n return os.path.join(directory, \"_%s_configdata.py\" % self.get_name())", "def path_config_docker(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_EXT)", "def get_config_path(self):\n\t\treturn call_sdk_function('PrlFoundVmInfo_GetConfigPath', self.handle)", "def _get_dicom_file_path_from_config_file():\n conf_file = _get_config_file()\n\n parser = configparser.SafeConfigParser()\n\n if os.path.exists(conf_file):\n parser.read(conf_file)\n \n try:\n return parser.get(section='dicom', option='path')\n except (configparser.NoSectionError,\n configparser.NoOptionError):\n msg = (\"Could not find `dicom` configuration section or \"\n \" `path` configuration option under that section.\"\n \"A template config file will be written to {}.\")\n warnings.warn(msg.format(conf_file))\n\n parser.add_section('dicom')\n parser.set('dicom', 'path', '')\n\n with open(conf_file, 'w') as f:\n parser.write(f)\n\n return parser.get(section='dicom', option='path')", "def find_config_file(filename):\n if os.path.exists( filename) :\n return filename\n if os.path.exists( filename + \".cfg\") :\n return filename + \".cfg\"\n\n # Search in script folder\n progname = sys.argv[0]\n basedir = os.path.dirname( progname)\n filename2 = os.path.join( basedir, filename)\n\n if os.path.exists( filename2) :\n return filename2\n if os.path.exists( filename2 + \".cfg\") :\n return filename2 + \".cfg\"\n\n # Otherwise, we are screwed\n raise IOError(\"cannot find configuration file\")", "def __setup_config_file_abspath():\n if \"APPDATA\" in os.environ:\n basedir = os.environ[\"APPDATA\"]\n elif \"HOME\" in os.environ:\n basedir = os.environ[\"HOME\"]\n else:\n raise AssertionError(\"APPDATA or HOME env vars must be defined \"\n \"to store config file\")\n abs_dir_path = os.path.join(\n basedir, TestManager.APPDATA_SUBDIRECTORY_NAME)\n os.makedirs(abs_dir_path, exist_ok=True, mode=0o660)\n return os.path.join(abs_dir_path, ConfigManager.CONFIG_FILE_NAME)", "def _get_config_file(filepath=None):\n if filepath:\n if os.path.isfile(filepath):\n return filepath\n raise ConfigFileNotFound(filepath)\n\n _config_files = (_path / _config_filename for _path in _config_basepaths)\n for _filepath in 
_config_files:\n if os.path.isfile(_filepath):\n return _filepath\n raise ConfigFileNotFound(_config_files)", "def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path", "def configFilename(self):\n return self.name()+'.py'", "def _get_dev_conf_dir(self):\r\n is_ok, file_dir = (\r\n GlobalModule.EM_CONFIG.read_sys_common_conf(\r\n \"Cgwsh_device_dir_path\"))\r\n if not is_ok:\r\n raise IOError(\"Failed to get Config : Cgwsh_device_dir_path\")\r\n return file_dir", "def module_path(self):\n return self.config['cwd']", "def get_http_config_file_path(node_uuid):\n return os.path.join(get_http_boot_dir(), node_uuid, 'config')", "def _get_config_filename():\n return 'pylidc.conf' if sys.platform.startswith('win') else '.pylidcrc'", "def filename(self):\n return f'{self._peer.interface}.conf'", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def get_ansible_config(self): # type: () -> str\n ansible_config_relative_path = os.path.join(data_context().content.integration_path, '%s.cfg' % self.command)\n ansible_config_path = os.path.join(data_context().content.root, ansible_config_relative_path)\n\n if not os.path.exists(ansible_config_path):\n # use the default empty configuration unless one has been provided\n ansible_config_path = super().get_ansible_config()\n\n return ansible_config_path", "def get_base_config(eva_installation_dir: Path) -> Path:\n # if eva package is installed into environment\n if importlib_resources.is_resource(\"eva\", EVA_CONFIG_FILE):\n with importlib_resources.path(\"eva\", EVA_CONFIG_FILE) as yml_path:\n return yml_path\n else:\n # For local dev environments without package installed\n return eva_installation_dir / EVA_CONFIG_FILE", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def config_path_fixture(fixtures_dir: Path) -> Path:\n _file_path = fixtures_dir / \"config.json\"\n return _file_path", "def get_global_config_path():\n\n return \"/etc/dapsenv/dapsenv.conf\"", "def get_config(_config_file):\n ''' script absolute location '''\n abs_path = os.path.dirname(inspect.getfile(inspect.currentframe()))\n\n if _config_file[0] not in ('/', '~'):\n if os.path.isfile(os.path.join(abs_path, _config_file)):\n config_path = os.path.join(abs_path, _config_file)\n else:\n raise IOError('Failed to find config file')\n else:\n if os.path.isfile(_config_file):\n config_path = _config_file\n else:\n raise IOError('Failed to find config file')\n\n with open(config_path) as cjson:\n config_data = json.load(cjson)\n # config must not be empty:\n if len(config_data) > 0:\n return config_data\n else:\n raise Exception('Failed to load config file')", "def _resolve_conf_name(conf_type):\n fnm = config_names[conf_type]\n if os.path.isfile(fnm):\n return fnm\n elif os.path.isfile(config_path + fnm):\n return config_path + fnm\n else:\n raise RuntimeError(\"Missing configuration file for %s\" % conf_type)", "def idk_config_dir(self):\n return self.cm.config_dir", "def _get_egg_path(self):\n try:\n _dist = get_distribution('janitoo_nut')\n return _dist.__file__\n except AttributeError:\n return 'src-nut/config'", "def get_path():\n return path.abspath(path.dirname(path.dirname(__file__)))", "def get_production_config_dir_path(path: pathlib.Path) -> pathlib.Path:\n return path / \"shot-builder\"", "def config_data_path(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / DATA_CONFIG" ]
[ "0.84382236", "0.83876586", "0.83710814", "0.8215269", "0.8205837", "0.8173699", "0.8055139", "0.7999764", "0.79920894", "0.792743", "0.78715116", "0.78524226", "0.7781477", "0.7710906", "0.76966274", "0.76633596", "0.7605815", "0.76037824", "0.75543994", "0.75123537", "0.7464987", "0.74349874", "0.7414439", "0.7381314", "0.73575115", "0.735409", "0.7353932", "0.73494905", "0.73117155", "0.7300581", "0.7290725", "0.7239561", "0.7233185", "0.7214484", "0.7205068", "0.7187725", "0.71733767", "0.71677816", "0.7145223", "0.7139865", "0.7080458", "0.7062772", "0.70517", "0.7042371", "0.70366216", "0.70250696", "0.70072037", "0.697195", "0.69615585", "0.693377", "0.69280374", "0.69267416", "0.6923341", "0.6912865", "0.6867793", "0.68625355", "0.68487674", "0.68388104", "0.6834452", "0.6828855", "0.6825334", "0.679788", "0.6796681", "0.6786315", "0.6780309", "0.67609173", "0.6760332", "0.6756187", "0.6754131", "0.6719338", "0.6719122", "0.6710602", "0.6709966", "0.67039376", "0.6690319", "0.6689214", "0.66858464", "0.6681639", "0.6666894", "0.66665417", "0.66627306", "0.6657581", "0.66567886", "0.66520864", "0.6642915", "0.66316324", "0.66286844", "0.66202587", "0.6591957", "0.6590685", "0.65891963", "0.6587233", "0.6585216", "0.65738606", "0.6573638", "0.65511346", "0.6547635", "0.65453184", "0.6533357", "0.6527665" ]
0.7623583
16
Configures logging. logging_config.json should have been placed in the directory AUTOMINE_LOG_DIR, to which this process must have read and write access
def _configure_logger():
    try:
        log_dir = os.environ['AUTOMINE_LOG_DIR']
        log_name = _log_name()
        cfg_path = os.path.join(log_dir, 'logging_config.json')
        with open(cfg_path) as src:
            cfg = json.load(src)
        handlers = cfg.get('handlers')
        for handler in iter(handlers.values()):
            filename = handler.get('filename')
            if filename:
                filename = filename.replace('{{AUTOMINE_LOG_DIR}}', log_dir)
                filename = filename.replace('{{__name__}}', log_name)
                handler['filename'] = filename
        loggers = cfg.get('loggers')
        if '__name__' in loggers:
            loggers[log_name] = loggers.pop('__name__')

        # add logging to the console if env var is set
        log_to_console = 'AUTOMINE_LOG_TO_CONSOLE' in os.environ
        if log_to_console and 'console' in handlers:
            logger_handlers = loggers[log_name].get('handlers')
            if logger_handlers:
                logger_handlers.append('console')

        dictConfig(cfg)
    except Exception as err:  # pylint: disable=broad-except
        logging.basicConfig()
        raise err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_logging():\n name_json = 'logging_config.json'\n path_json = os.path.join(os.path.dirname(__file__), name_json)\n with open(path_json, 'r') as f_json:\n dict_config = json.load(f_json)\n logging.config.dictConfig(dict_config)", "def setup_logging(save_dir, log_config='logger/logger_config.json', default_level=logging.INFO):\n log_config = Path(log_config)\n if log_config.is_file():\n config = read_json(log_config)\n # modify logging paths based on run config\n for _, handler in config['handlers'].items():\n if 'filename' in handler:\n handler['filename'] = str(save_dir / handler['filename'])\n\n logging.config.dictConfig(config)\n else:\n print(\"Warning: logging configuration file is not found in {}.\".format(log_config), file=sys.stderr)\n logging.basicConfig(level=default_level)", "def initialize_logging(self):\n logging_config_path = self.pyleus_config.get('logging_config_path')\n if logging_config_path:\n logging.config.fileConfig(logging_config_path)\n elif os.path.isfile(DEFAULT_LOGGING_CONFIG_PATH):\n logging.config.fileConfig(DEFAULT_LOGGING_CONFIG_PATH)", "def _configure_logging(self):\n pass", "def _setup_logging(self):\n if self.app_config_has(\"logging\"):\n log_config = self.app_config()[\"logging\"]\n filename_list = [\n v['filename'] for k, v in\n _find_config_tree(log_config, \"filename\")\n ]\n # pre-create directory in advance for all loggers\n for file in filename_list:\n file_dir = os.path.dirname(file)\n if file_dir and not os.path.isdir(file_dir):\n os.makedirs(file_dir, exist_ok=True)\n dictConfig(log_config)\n else:\n log = getLogger()\n handler = StreamHandler()\n formatter = Formatter(\n \"%(asctime)s-%(threadName)s-%(name)s-%(levelname)s-%(message)s\"\n )\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(DEBUG)\n msg = (\"Starting \" + os.path.basename(__name__) +\n \" version \" + __version__ + \" on \" +\n \"_\".join(uname()).replace(\" \", \"_\"))\n logger = getLogger(__name__)\n logger.debug(msg)", "def start_logging(self):\n text = _DEFAULT_LOG_CONFIG\n path = self.bindings.get('LOG_CONFIG', None)\n if path:\n try:\n with open(path, 'r') as f:\n text = f.read()\n except Exception as ex:\n print 'ERROR reading LOGGING_CONFIG from {0}: {1}'.format(path, ex)\n raise\n config = ast.literal_eval(args_util.replace(text, self.bindings))\n logging.config.dictConfig(config)\n log_path = os.path.join(\n self.bindings['LOG_DIR'], self.bindings['LOG_FILEBASE'] + '.log')\n os.chmod(log_path, 0600)\n\n self.__journal = global_journal.get_global_journal()\n if self.__journal is None:\n # force start\n journal_path = os.path.join(\n self.bindings['LOG_DIR'],\n self.bindings['LOG_FILEBASE'] + '.journal')\n self.__journal = global_journal.new_global_journal_with_path(journal_path)", "def setup_logging(log_dir: Optional[str] = None) -> None:\n config: Dict[str, Any] = {\n \"version\": 1,\n \"disable_existing_loggers\": True,\n \"formatters\": {\"console\": {\"format\": \"%(asctime)s:\\t%(message)s\"}},\n \"handlers\": {\n \"console\": {\n \"level\": \"WARNING\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"console\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"loggers\": {\n LOG_NAME: {\"handlers\": [\"console\"], \"level\": \"DEBUG\", \"propagate\": False}\n },\n }\n if log_dir is not None:\n config[\"loggers\"][LOG_NAME][\"handlers\"].append(\"file\")\n config[\"formatters\"][\"file\"] = {\n \"format\": \"%(asctime)s - %(levelname)s - %(name)s - %(message)s\"\n }\n config[\"handlers\"][\"file\"] = {\n \"level\": 
\"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"formatter\": \"file\",\n \"filename\": os.path.join(log_dir, LOG_NAME + \".log\"),\n \"maxBytes\": 1000000,\n \"backupCount\": 3,\n }\n logging.config.dictConfig(config)", "def setup_root_logger(loglevel=logging.DEBUG, logdir=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Logs'),\n log_config_file=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Utils', 'cent_logger.json')):\n try:\n\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n\n if log_config_file is not None and os.path.exists(log_config_file):\n with open(log_config_file, 'rt') as logconf:\n config = json.load(logconf)\n # create absolute path for logfile\n config['handlers']['file_handler']['filename'] = logdir + '/' + config['handlers']['file_handler']['filename']\n config['handlers']['longterm']['filename'] = logdir + '/' + config['handlers']['longterm']['filename']\n config['handlers']['single_run']['filename'] = logdir + '/' + config['handlers']['single_run']['filename']\n root_logger = logging.getLogger(\"framework\")\n logging.config.dictConfig(config)\n logger.info(\"I initialized the framework logger\")\n root_logger.info(\"Configured basic root logger from: {}\".format(log_config_file))\n test_logger = logging.getLogger(\"tests\")\n logging.config.dictConfig(config)\n logger.info(\"I initialized the tests logger\")\n test_logger.info(\"Configured basic tests logger from: {}\".format(log_config_file))\n\n # disable logs from below external modules\n for disabled_module in config['disable_module_logs']:\n root_logger.debug('Disabled logging for module: {}'.format(disabled_module))\n logging.getLogger(disabled_module).disabled = True\n\n except Exception as e:\n print(\"Error configuring logger: {}\".format(e), file=sys.stderr)\n raise e#", "def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)", "def configLogging():\n # define a basic logger to write to file\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='/tmp/execute_pomset.log',\n filemode='w')\n\n # end def configureLogging\n pass", "def init():\n global logger\n\n with open(\"/app/log.json\", \"r\") as fd:\n logging.config.dictConfig(json.load(fd))\n\n logger = logging.getLogger()", "def setup_logging(\n default_conf_path='logging.json', \n default_level=logging.INFO,\n env_key='LOG_CFG',\n logging_path=None\n):\n path_found = False\n path = default_conf_path\n value = os.getenv(env_key, None)\n if value:\n path = value\n if os.path.exists(path):\n print('Found logging configuration file at ' + default_conf_path + '\\n')\n with open(path, 'rt') as f:\n config = json.load(f)\n\n if logging_path and 'handlers' in config:\n logging_path = os.path.abspath(logging_path)\n print('Writing log at ' + logging_path + '\\n')\n mkdir_p(os.path.abspath(os.path.dirname(logging_path)))\n for key, value in config['handlers'].iteritems():\n if 'filename' in value:\n value['filename'] = logging_path\n path_found = True\n\n logging.config.dictConfig(config)\n else:\n print('Could not find logging configuration at '+ default_conf_path + '\\n')\n print('Using default logging option on console' + 
'\\n')\n logging.basicConfig(level=default_level)\n\n logging.captureWarnings(capture=True)\n return path_found", "def pytest_logger_logsdir(self, config):", "def init_config() -> None:\n config_file = importlib.resources.files(\"houdini_toolbox.logging\").joinpath(\n \"config.json\"\n )\n\n with config_file.open(encoding=\"UTF-8\") as handle:\n config = json.load(handle)\n logging.config.dictConfig(config)", "def logger_settings(self):\n LOG_CONFIG['root']['handlers'].append(self.logmode)\n flask_log = logging.getLogger(DEFAULT_NAME_FLASK_LOGGER)\n flask_log.setLevel(logging.ERROR)\n dictConfig(LOG_CONFIG)\n self.logger = logging.getLogger()", "def configure_logging():\n dictConfig(DEFAULT_LOGGING)\n\n default_formatter = logging.Formatter(\n \"%(asctime)s [%(levelname)s] [PID:%(process)d TID:%(thread)d] [%(filename)s:%(lineno)s in `%(funcName)s`] %(message)s\",\n \"%Y-%m-%d %H:%M:%S\")\n\n # file_handler = logging.handlers.RotatingFileHandler(logfile_path, maxBytes=10485760,backupCount=300, encoding='utf-8')\n # file_handler.setLevel(logging.INFO)\n\n if len(logging.getLogger().handlers) > 0:\n for h in logging.getLogger().handlers:\n if isinstance(h, logging.StreamHandler):\n # Then we found a logger to the terminal\n h.setLevel(logging.DEBUG)\n h.setFormatter(default_formatter)\n\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(default_formatter)\n logging.root.addHandler(console_handler)\n\n\n logging.root.setLevel(logging.WARNING)", "def test_logging_config(self):\n topdir = os.path.dirname(os.path.dirname(__file__))\n # logging config from default\n os.system('rm %s/logging.conf' % topdir)\n cmd, output = runCmdOutput(['-p', '7788'])\n self.assertEqual(cmd.returncode, os.EX_OK)\n # logging config from file\n os.system('cp %s/logging.conf.sample %s/logging.conf' %\n (topdir, topdir))\n cmd, output = runCmdOutput(['-p', '7788'])\n self.assertEqual(cmd.returncode, os.EX_OK)", "def _initialize_log_file(config):\n for settings in config[\"handlers\"].values():\n if _is_file_handler(settings):\n log_path = Path(settings[\"filename\"])\n log_path.parent.mkdir(parents=True, exist_ok=True)\n log_path.touch(exist_ok=True)", "def _setup_logging(log_config: Path = LOG_CONFIG_FILE, silent: bool = False) -> None:\n\n if not log_config.is_file():\n raise RuntimeError(\n \"Logging file {log_file} not found\".format(log_file=log_config)\n )\n\n with log_config.open() as log_file:\n config_orig = yaml.safe_load(log_file.read()) # type: Any\n\n def prepare_filenames(config: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Prepend `LOGS_DIR` to all 'filename' attributes listed for handlers in logging.yaml\n :param config: Configuration dictionary\n :return: Configuration with 'filename's prepended with LOGS_DIR\n \"\"\"\n for handler_name in config[\"handlers\"].keys():\n handler_config = config[\"handlers\"][handler_name]\n if \"filename\" in handler_config:\n filename = Path(handler_config[\"filename\"]).name\n handler_config[\"filename\"] = str(LOGS_DIR.joinpath(filename))\n return config\n\n config = prepare_filenames(config_orig)\n # for some reason, pyright fails with \"'config' is not a known member of module\"\n # even though this is an officially documented member of logging\n # for now we ignore the type\n logging.config.dictConfig(config) # type: ignore\n if silent:\n _remove_non_file_handlers()", "def setup_logging():\n if not app.debug:\n if app.config.get('LOG_CFG'):\n # initialize the Flask logger (removes all 
handlers)\n _ = app.logger\n dictConfig(app.config.get('LOG_CFG'))\n else:\n # capability with previous config settings\n # Should have LOG_FILE and LOG_LEVEL set\n if app.config.get('LOG_FILE') is not None:\n handler = RotatingFileHandler(app.config.get('LOG_FILE'), maxBytes=10000000, backupCount=100)\n else:\n handler = StreamHandler(stream=sys.stderr)\n\n handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(app.config.get('LOG_LEVEL', DEBUG))\n app.logger.addHandler(handler)", "def setup_logging(\n module,\n default_level=logging.INFO,\n env_key='LOG_CFG',\n logpath=os.getcwd(),\n config_path=None\n):\n\n if not os.path.exists(os.path.dirname(logpath)):\n os.makedirs(os.path.dirname(logpath))\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M\")\n fpath = os.path.join(logpath, module, timestamp)\n\n path = config_path if config_path is not None else os.getenv(env_key, None)\n if path is not None and os.path.exists(path):\n with open(path, 'rt') as f:\n config = yaml.safe_load(f.read())\n for h in config['handlers'].values():\n if h['class'] == 'logging.FileHandler':\n h['filename'] = os.path.join(logpath, module, timestamp, h['filename'])\n touch(h['filename'])\n for f in config['filters'].values():\n if '()' in f:\n f['()'] = globals()[f['()']]\n logging.config.dictConfig(config)\n else:\n lpath=os.path.join(logpath, timestamp)\n if not os.path.exists(lpath):\n os.makedirs(lpath)\n logging.basicConfig(level=default_level, filename=os.path.join(lpath,\"base.log\"))", "def configure_logging(logdir=None):\n logconfig = LOGCONFIG_DICT.copy()\n if logdir:\n debugfile = os.path.join(logdir, DEBUGFILE)\n logconfig['handlers']['debugfile']['filename'] = debugfile\n errorfile = os.path.join(logdir, ERRORFILE)\n logconfig['handlers']['errorfile']['filename'] = errorfile\n\n logging.config.dictConfig(logconfig)", "def test_logging_config_file(self, monkeypatch):\n # We still want the Formatter to be configured.\n assert logging.Formatter.converter == time.gmtime\n assert logging.Formatter.default_time_format == '%Y-%m-%dT%H:%M:%S'\n assert logging.Formatter.default_msec_format == '%s.%03d'\n\n # Set NETDUMPLINGS_LOGGING_CONFIG to point to a test logging config.\n logging_config_file = 'tests/data/logging.json'\n monkeypatch.setenv('NETDUMPLINGS_LOGGING_CONFIG', logging_config_file)\n\n configure_logging()\n\n # The test config file sets all the loggers to ERROR.\n assert logging.getLogger('netdumplings').level == logging.ERROR\n assert logging.getLogger(\n 'netdumplings.dumplinghub').level == logging.ERROR\n assert logging.getLogger(\n 'netdumplings.dumplingkitchen').level == logging.ERROR\n assert logging.getLogger(\n 'netdumplings.dumplingeater').level == logging.ERROR", "def _configure_logging(config):\n # Initialize exception logging to Sentry with client DSN URL from SENTRY_DSN envvar;\n # does nothing if SENTRY_DSN does not exist, is empty, or is not recognized by Sentry\n sentry_sdk.init()\n if \"publisher\" in config[\"logging\"]:\n # Publish log messages to distributed logging aggregator\n logging_config = config[\"logging\"][\"publisher\"]\n logging_config[\"handlers\"][\"zmq_pub\"][\"context\"] = context\n host = config[\"zmq\"][\"host\"]\n port = config[\"zmq\"][\"ports\"][\"logging\"][NAME]\n addr = f\"tcp://*:{port}\"\n logging_config[\"handlers\"][\"zmq_pub\"][\"interface_or_socket\"] = addr\n logging.config.dictConfig(logging_config)\n for handler in logger.root.handlers:\n if 
isinstance(handler, zmq.log.handlers.PUBHandler):\n handler.root_topic = NAME\n handler.formatters = {\n logging.DEBUG: logging.Formatter(\"%(message)s\\n\"),\n logging.INFO: logging.Formatter(\"%(message)s\\n\"),\n logging.WARNING: logging.Formatter(\"%(message)s\\n\"),\n logging.ERROR: logging.Formatter(\"%(message)s\\n\"),\n logging.CRITICAL: logging.Formatter(\"%(message)s\\n\"),\n }\n # Not sure why, but we need a brief pause before we start logging\n # messages\n time.sleep(0.25)\n msg = f\"publishing logging messages to {addr}\"\n else:\n # Write log messages to local file system\n #\n # Replace logging RotatingFileHandlers with WatchedFileHandlers so\n # that we notice when log files are rotated and switch to writing to\n # the new ones\n logging_config = config[\"logging\"]\n logging_handlers = logging_config[\"handlers\"]\n rotating_handler = \"logging.handlers.RotatingFileHandler\"\n watched_handler = \"logging.handlers.WatchedFileHandler\"\n for handler in logging_handlers:\n if logging_handlers[handler][\"class\"] == rotating_handler:\n logging_handlers[handler][\"class\"] = watched_handler\n del logging_handlers[handler][\"backupCount\"]\n logging.config.dictConfig(logging_config)\n msg = \"writing logging messages to local file system\"\n return msg", "def setup_logging(log_basedir=\"logs\"):\n BASEDIR = os.path.abspath(os.path.dirname(__file__))\n LOGDIR = os.path.join(BASEDIR,log_basedir)\n \n # Check if the logs directory exists and is writable\n if not os.path.isdir(LOGDIR):\n print('ERROR: Log directory {} does not exist.'.format(LOGDIR))\n sys.exit(1)\n if not os.access(LOGDIR, os.W_OK):\n print('ERROR: No permissions to write to log directory {}.'.format(LOGDIR))\n sys.exit(1)\n\n # Set the log message format\n fmt = '%(levelname)s - %(asctime)s.%(msecs).03d %(process)d [%(filename)s:%(lineno)d] %(message)s'\n datefmt = '%m%d %H:%M:%S'\n formatter = logging.Formatter(fmt, datefmt)\n\n # Log to console\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n root.addHandler(console_handler)\n\n # Log to file, use a rotating file\n file_name = os.path.join(LOGDIR, '{}.log'.format(\"flask_api_otrs\") )\n\n file_handler = logging.handlers.RotatingFileHandler(file_name, backupCount=7)\n file_handler.setFormatter(formatter)\n root.addHandler(file_handler)", "def configure_logging():\n configuration = get_configuration()\n logging.basicConfig(**configuration.get('logging', {}))\n\n logging.debug('Logging configured.')", "def setup_logging():\r\n import ConfigParser # change this to configparser for Python 3\r\n # import logging\r\n import logging.config\r\n global logger\r\n\r\n try:\r\n \tlogging.config.fileConfig(\"celog.conf\")\r\n except ConfigParser.NoSectionError: \r\n\t# if there is no configuration file setup a default configuration\r\n logging.basicConfig(filename='code_extract.log',level= _logging_level,\r\n\t\t\tformat='%(asctime)s %(levelname)s - %(message)s',\r\n\t\t\tdatefmt='%Y %b %d, %a %H:%M:%S'\r\n\t\t\t)\r\n \r\n logger = logging.getLogger('%s' % __name__)\r\n\r\n logger.debug('logger ready')", "def initialize_logger():\n if not os.path.exists(LOGGING_DIRECTORY):\n os.makedirs(LOGGING_DIRECTORY)\n os.chmod(LOGGING_DIRECTORY, 0o777)", "def setup_logger(config):\n filename = config[\"LOGGER_FILE\"]\n log_dir = '/'.join(filename.split('/')[0:-1]) + \"/\"\n\n check_and_create_directory(log_dir)\n\n level = 
config[\"LOGGER_LOGLEVEL\"].upper()\n filemode = 'a'\n _format = '%(asctime)s %(name)8s %(module)15s %(funcName)12s %(' \\\n 'levelname)7s: %(message)s'\n _dateformat = '(%d.%m.%Y, %H:%M:%S)'\n\n logging.basicConfig(filename=filename, filemode=filemode, level=level,\n format=_format, datefmt=_dateformat)\n\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"werkzeug\").setLevel(logging.WARNING)\n\n # Display log simultaneously on console\n if config[\"CONSOLE_LOGGING\"]:\n add_terminal_logging(_format, level)", "def setup_logging_with_config(config: DynaBox):\n global logger\n logger = setup_logging_threatbus(config, logger_name)", "def setupLogging(self):\n\t\ttry:\n\t\t\tself.logger = logging.getLogger(__name__)\n\t\t\thandler = RotatingFileHandler(self.logFile, maxBytes=500000, backupCount=5)\n\t\t\tformat = \"%(asctime)s %(levelname)-8s %(message)s\"\n\t\t\thandler.setFormatter(logging.Formatter(format))\n\t\t\thandler.setLevel(logging.INFO)\n\t\t\tself.logger.addHandler(handler)\n\t\t\tself.logger.setLevel(logging.INFO)\n\t\texcept Exception as err:\n\t\t\terrorStr = 'Error initializing log file, ',err\n\t\t\tprint(errorStr)\n\t\t\texit(1)", "def configure():\n # TODO: Simple configuration of what to log and where to log it to\n level_name = getenv(\"LOGLEVEL\", \"INFO\")\n level = getattr(logging, level_name)\n logging.basicConfig(stream=sys.stdout, filemode=\"w\", level=level)\n\n for handler in logging.root.handlers:\n handler.addFilter(Whitelist(\"mara\", \"tests\"))", "def _initialize_logging(self):\n LOG_CFG = os.environ.get('LOG_CFG', 'LOCAL')\n configure_logging(LOG_CFG)\n self.logger = logging.getLogger(self.__class__.__name__)", "def get_log_config(conf_file: str):\n with open(conf_file, 'r') as c:\n config = json.load(c)\n if not os.path.exists('log'):\n os.mkdir('log')\n logging.config.dictConfig(config)\n # disable urllib3 DEBUG messages\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)", "def config():\n global base_dir, log_path\n\n # Set paths\n base_dir = os.path.dirname(os.path.realpath(__file__))\n cfg.path = base_dir + '/config.json'\n log_path = base_dir + '/log.log'\n\n # Start logging\n logging.basicConfig(filename=log_path, format='%(asctime)-16s | %(levelname)-5s | %(message)s', level=logging.DEBUG)\n sys.excepthook = _excepthook\n\n # Load configuration\n cfg.load()\n logging.info('Loaded configuration')\n\n # Print configuration and check if is complete\n cfg.print()\n if not cfg.check:\n logging.info('Exiting...')\n sys.exit(1)", "def _setup_logging(self):\n global log\n\n # Parse the ini file to validate it\n parser = ConfigParser.ConfigParser()\n parser.read(self.ini_file)\n\n # Check for the presence of [loggers] in self.ini_file\n if not parser.has_section('loggers'):\n self._fail('Config file does not have [loggers] section', use_log=False)\n\n logging.config.fileConfig(self.ini_file)\n\n # Use \"name.pid\" to avoid importer confusions in the logs\n logger_name = 'debexpo.importer.%s' % os.getpid()\n log = logging.getLogger(logger_name)", "def configure_logging(config, disable_existing=False):\n logging_config = config.getpath('app.logging', None)\n if logging_config is not None:\n logging.config.fileConfig(\n logging_config, disable_existing_loggers=disable_existing)", "def _config_log(self):\n config_worker = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'handlers': {\n 'queue': {\n 'class': 'hqc_meas.utils.log.tools.QueueHandler',\n 'queue': self.log_queue,\n },\n },\n 'root': {\n 'level': 'DEBUG',\n 
'handlers': ['queue']\n },\n }\n logging.config.dictConfig(config_worker)", "def config():\n\n # Remove all log files from the assets folder.\n for log_file in get_log_files(TESTS_ASSETS_VISION_DIR):\n os.remove(log_file)\n\n # Reconfigure the logger to use a separate folder (instead of the real logs)\n Log.reconfigure(log_directory=TESTS_ASSETS_VISION_DIR)", "def setup_logging():\n lvl = os.getenv(\"LOG_LEVEL\")\n path = os.getenv(\"LOG_PATH\")\n\n logger = get_logger()\n logger.setLevel(lvl)\n\n filehandler = logging.FileHandler(path)\n filehandler.setLevel(lvl)\n filehandler.setFormatter(logging.Formatter(\n \"[%(asctime)s] %(levelname)s: %(message)s\",\n datefmt=\"%Y-%d-%m %H:%M:%S\"\n ))\n\n streamhandler = logging.StreamHandler()\n streamhandler.setLevel(lvl)\n streamhandler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n logger.addHandler(filehandler)\n logger.addHandler(streamhandler)", "def set_config(self, file_path_name):\n level = logging.DEBUG\n format = '%(asctime)s %(levelname)-8s %(message)s' \n datefmt = '%a, %d %b %Y %H:%M:%S'\n filemode = 'a'\n \n\n logging.basicConfig(level = level,\n format = format,\n datefmt = datefmt,\n filename = file_path_name,\n filemode = filemode)", "def setup_logger(app_name, log_directory, log_level):\n # Setting up logger\n # log_levels: NOTSET=0, DEBUG=10, INFO=20, WARN=30, ERROR=40, and CRITICAL=50\n # TODO - on linux we want /var/log ... error on MacOs ... protected directory\n # log_file_name = Path('/var/log/{}.log'.format(app_name))\n log_file_name = Path('{}/{}.log'.format(log_directory, app_name))\n\n short_file_format = \"%(asctime)s:%(levelname)s:%(message)s\"\n long_file_format = \"%(asctime)s %(HOST)s %(AppId)d %(AppVersion)s %(levelname)s %(name)s %(message)s %(filename)s %(funcName)s %(levelname)s %(lineno)d %(message)s %(module)s %(msecs)d %(name)s %(pathname)s %(process)d %(processName)s %(relativeCreated)d %(thread)d %(threadName)s %(uid)\"\n long_file_format = \"%(asctime)s %(levelname)s %(name)s %(message)s %(filename)s %(funcName)s %(levelname)s %(lineno)d %(message)s %(module)s %(msecs)d %(name)s %(pathname)s %(process)d %(processName)s %(relativeCreated)d %(thread)d %(threadName)s\"\n # long_file_format = \"%(asctime)s:%(levelname)s%(name)s %(message)s %(filename)s %(funcName)s %(levelname)s %(lineno)d %(message)s %(module)s %(msecs)d %(name)s %(pathname)s %(process)d %(processName)s %(relativeCreated)d %(thread)d %(threadName)s\"\n log_file_format = short_file_format\n\n # make sure valid log level is passed in, default to DEBUG ...\n valid_log_levels = [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR, logging.CRITICAL]\n if log_level not in valid_log_levels:\n log_level = logging.DEBUG\n\n extra_attributes = {'Host': '10.0.0.1',\n 'AppId': 1024,\n 'AppVersion': '1.0.0',\n 'uid': 12345}\n logger = logging.getLogger()\n logging.LoggerAdapter(logger, extra_attributes)\n\n # add in our custom UTC timezone converter\n logging.Formatter.converter = time_tz\n logging.basicConfig(level=log_level, filename=log_file_name, filemode=\"a\",\n format=log_file_format)\n\n # configure stdout same as file\n sh = logging.StreamHandler(sys.stdout)\n sh.setFormatter(logging.Formatter(log_file_format))\n logging.getLogger().addHandler(sh)\n\n logging.info('App:{} startup'.format(app_name))\n return", "def init_logging(input_dir, file_name):\n create_dir(input_dir)\n config(file_name, log_level=logging.DEBUG)", "def setup_logging( cfg ):\n global _LOGGING_FORMAT_, _DATE_FORMAT_\n format,date = _LOGGING_FORMAT_,_DATE_FORMAT_\n 
\n if not cfg.get('logging', True):\n logging.basicConfig(handler=logging.NullHandler)\n return\n \n #check passed in cfgs if formats changed\n if cfg.get('log_format', False):\n format = cfg.get('log_format')\n if cfg.get('log_date_format',False):\n date = cfg.get('log_date_format')\n \n if cfg.get('log_debug', False):\n logging.basicConfig(level=logging.DEBUG,\n format=format,\n datefmt=date,\n filename=cfg.get('log_path', 'errors.log'))\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n logging.getLogger().addHandler(console)\n \n elif cfg.get('log_warnings', False):\n logging.basicConfig(level=logging.WARNING,\n format=format,\n datefmt=date,\n filename=cfg.get('log_path','errors.log'))\n \n else:# Errors are always logged. deal.\n logging.basicConfig(level=logging.ERROR,\n format=format,\n datefmt=date,\n filename=cfg.get('log_path','errors.log'))", "def configure(config_file: str):\n logging.config.fileConfig(fname=config_file)", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def init_logs(self):\n\n handler = logging.FileHandler(self.app.config['LOG'])\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))\n self.app.logger.addHandler(handler)\n if self.app.config.get(\"LOG_LEVEL\") == \"DEBUG\":\n self.app.logger.setLevel(logging.DEBUG)\n elif self.app.config.get(\"LOG_LEVEL\") == \"WARN\":\n self.app.logger.setLevel(logging.WARN)\n else:\n self.app.logger.setLevel(logging.INFO)\n self.app.logger.info('Startup with log: %s' % self.app.config['LOG'])", "def setup_logging(config: Any) -> Logger:\n green = \"\\033[32m\"\n reset = \"\\033[0m\"\n logger = setup_logger(\n name=f\"{green}[ignite]{reset}\",\n level=logging.DEBUG if config.debug else logging.INFO,\n format=\"%(name)s: %(message)s\",\n filepath=config.output_dir / \"training-info.log\",\n )\n return logger", "def configure_logging():\n class TimeFormatter(logging.Formatter):\n def formatTime(self, record, datefmt=None):\n datefmt = datefmt or '%Y-%m-%d %H:%M:%S'\n return time.strftime(datefmt, time.localtime(record.created))\n\n class SeverityFilter(logging.Filter):\n def filter(self, record):\n record.severity = record.levelname[0]\n return True\n\n if not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n log_file = logging.handlers.RotatingFileHandler(LOG_FILE, backupCount=100)\n log_file.addFilter(SeverityFilter())\n log_file.setFormatter(TimeFormatter('%(asctime)s %(severity)s: %(message)s'))\n logger.addHandler(log_file)\n\n # Log all uncaught exceptions.\n def log_exception(exception_type, value, stack_trace):\n logging.error(\n ''.join(traceback.format_exception(exception_type, value, stack_trace)),\n )\n sys.excepthook = log_exception\n\n # Rotate log files once on startup to get per-execution log files.\n if os.path.exists(LOG_FILE):\n log_file.doRollover()", "def _configure_logging(self):\n self.log_level = Scaffold.LOG_LEVEL_MAP.get(self.log_level, ERROR)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # assign the windmill instance logger\n #logging.basicConfig()\n self.log = logging.getLogger(self.name)\n self.log.setLevel(self.log_level)\n\n if self.log_path:\n file_path = None\n if self.log_path.endswith('.log'):\n file_path = self.log_path\n else:\n file_path = os.path.join(self.log_path, self.name + '.log')\n assert file_path\n file_handler = logging.FileHandler(file_path)\n 
file_handler.setLevel(self.log_level)\n file_handler.setFormatter(formatter)\n self.log.addHandler(file_handler)\n\n # if we are in verbose mode, then we send log output to console\n if self.verbose:\n # add the console logger for verbose mode\n console_handler = logging.StreamHandler()\n console_handler.setLevel(self.log_level)\n console_handler.setFormatter(formatter)\n self.log.addHandler(console_handler)\n\n self.log.info('Logging configured for: %s', self.name)", "def configure_logging(config):\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"path\"]:\n logfile_path = os.path.expanduser(config[\"path\"])\n else:\n logfile_path = config[\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n\n log_level = get_logging_level(config.get(\"level\", \"info\"))\n rootlogger.setLevel(log_level)\n formatter_str = set_formatter_string(config)\n formatter = logging.Formatter(formatter_str)\n handler = None\n\n if config.get(\"rich\") is not False:\n handler = RichHandler(\n rich_tracebacks=True,\n show_time=config.get(\"timestamp\", True),\n show_path=config.get(\"extended\", True),\n )\n\n if logfile_path:\n file_handler = RotatingFileHandler(\n logfile_path, maxBytes=config.get(\"file-size\", 50e6)\n )\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n\n # If we are running in a non-interactive shell (without a tty)\n # then use simple logging instead of rich logging\n # Config value always overrides\n running_in_non_interactive_shell = False\n console = config.get(\"test_logging_console\", sys.stderr)\n if config.get(\"console\") is True:\n handler = logging.StreamHandler(stream=console)\n handler.setFormatter(formatter)\n else:\n if config.get(\"console\") is None and not console.isatty():\n running_in_non_interactive_shell = True\n handler = logging.StreamHandler(stream=console)\n handler.setFormatter(formatter)\n\n # If we still don't have the handler, we are assuming that\n # the user wants to switch off logging, let's log only\n # Critical errors\n if not handler:\n handler = logging.StreamHandler(stream=console)\n handler.setFormatter(formatter)\n log_level = get_logging_level(\"critical\")\n\n if config.get(\"filter\") and handler:\n handler.addFilter(ParsingFilter(config, config[\"filter\"]))\n if handler:\n handler.setLevel(log_level)\n rootlogger.addHandler(handler)\n\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(_(\"Started opsdroid %s.\"), __version__)\n if running_in_non_interactive_shell:\n _LOGGER.warning(\n \"Running in non-interactive shell - falling back to simple logging. 
You can override this using 'logging.config: false'\"\n )", "def setup_logging(log_file):\n\tglobal logger\n\tif log_file:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',filename=log_file,filemode='w',level=logging.INFO)\n\telse:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=logging.INFO)\n\tlogger = logging.getLogger('default')", "def setup_logging(loglevel: str):\n dictConfig(\n {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"standard\": {\n \"format\": \"%(asctime)s [%(levelname)s] %(name)s: %(message)s\"\n }\n },\n \"handlers\": {\n \"default\": {\n \"level\": loglevel,\n \"formatter\": \"standard\",\n \"class\": \"logging.StreamHandler\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"loggers\": {\n \"\": {\"handlers\": [\"default\"], \"level\": loglevel, \"propagate\": True}\n },\n }\n )", "def setup():\n config['global']['log.access_file'] = ''\n config['global']['log.error_file'] = ''\n config['global']['log.screen'] = False\n log_level = getattr(logging, config.log_level)\n logging.root.setLevel(logging.NOTSET)\n file_log.setLevel(log_level)\n logging.root.addHandler(file_log)\n if config.log_screen:\n console_log.setLevel(log_level)\n logging.root.addHandler(console_log)", "def configure_logging():\n\n level = logging.INFO\n logging.getLogger().setLevel(level)\n logging.basicConfig(\n level=level,\n format=(\n \"[%(asctime)s][%(levelname)s][%(filename)s:%(lineno)d]\"\n + \"[%(processName)s] %(message)s\"\n ),\n )", "def __init__(\n self, path=\"logger.yml\", default_level=logging.INFO, env_key=\"LOG_CFG\"\n ):\n\n value = os.getenv(env_key, None)\n if value:\n path = value\n if os.path.exists(os.path.normpath(path)):\n with open(path, \"rt\") as f:\n config = yaml.safe_load(f.read())\n to_log = \"\"\n # If directory is non existent create it\n # Todo: Here a dir will be made after installation, so if this prohibited go to the other dir\n if \"file\" in config[\"handlers\"]:\n pathtologfile = os.path.normpath(config[\"handlers\"][\"file\"][\"filename\"]).split(os.sep)\n if not os.path.isdir(\n os.path.join(os.getcwd(), *pathtologfile[:-1])\n ):\n os.mkdir(os.path.join(os.getcwd(), *pathtologfile[:-1]))\n else:\n to_log = (\n \"Logging to file failed, since no file handler was defined!\"\n )\n logging.config.dictConfig(config)\n else:\n logging.basicConfig(level=default_level)\n\n self.log_LEVELS = {\n \"NOTSET\": 0,\n \"DEBUG\": 10,\n \"INFO\": 20,\n \"WARNING\": 30,\n \"ERROR\": 40,\n \"CRITICAL\": 50,\n }\n\n self.welcome_string = (\n \"\\n\"\n \" __ ______ ______ ______ __ __ ______ \\n\" \n \" /\\ \\ /\\ __ \\ /\\ ___\\ /\\ ___\\ /\\ \\ /\\ \\ /\\ ___\\ \\n\" \n \" \\ \\ \\____ \\ \\ \\/\\ \\ \\ \\ \\__ \\ \\ \\ __\\ \\ \\ \\ \\ \\ \\____ \\ \\ __\\ \\n\" \n \" \\ \\_____\\ \\ \\_____\\ \\ \\_____\\ \\ \\_\\ \\ \\_\\ \\ \\_____\\ \\ \\_____\\ \\n\" \n \" \\/_____/ \\/_____/ \\/_____/ \\/_/ \\/_/ \\/_____/ \\/_____/\\n\\n\\n\"\n )\n\n snoopy = (\"\\n\\n\\n XXXX\\n\"\n \" X XX\\n\"\n \" X *** X XXXXX\\n\"\n \" X ***** X XXX XX\\n\"\n \" XXXX ******* XXX XXXX XX\\n\"\n \" XX X ****** XXXXXXXXX XX XXX\\n\"\n \" XX X **** X X** X\\n\"\n\" X XX XX X X***X\\n\"\n\" X //XXXX X XXXX\\n\"\n\" X // X XX\\n\"\n\"X // X XXXXXXXXXXXXXXXXXX/ \\n\"\n\"X XXX// X X\\n\"\n\"X X X X X\\n\"\n\"X X X X X\\n\"\n\" X X X X X XX\\n\"\n\" X X X X X XXX XX\\n\"\n\" X XXX X X X X X X\\n\"\n\" X X X XX X XXXX\\n\"\n\" X X XXXXXXXX/ XX XX X\\n\"\n\" XX XX X X X XX\\n\"\n\" XX XXXX XXXXXX/ X XXXX\\n\"\n\" XXX XX*** 
X X\\n\"\n\" XXXXXXXXXXXXX * * X X\\n\"\n\" *---* X X X\\n\"\n\" *-* * XXX X X\\n\"\n\" *- * XXX X\\n\"\n\" *- *X XXX\\n\"\n\" *- *X X XXX\\n\"\n\" *- *X X XX\\n\"\n\" *- *XX X X\\n\"\n\" * *X* X X X\\n\"\n\" * *X * X X X\\n\"\n\" * * X** X XXXX X\\n\"\n\" * * X** XX X X\\n\"\n\" * ** X** X XX X\\n\"\n\" * ** X* XXX X X\\n\"\n\" * ** XX XXXX XXX\\n\"\n\" * * * XXXX X X\\n\"\n\" * * * X X X\\n\"\n\" >>>>>>>******* * * X X XXXXXXXX/ \\n\"\n\" * * * /XXXXX XXXXXXXX/ <\\n\"\n\" >>>>>********** * X < / <\\n\"\n\" >>>>* * X / / <XXXXX\\n\"\n\">>>>>>>>>********** XXXXXXXXXXXXXXXXXXXXXX\\n\")\n\n # Create a logger Object\n self.LOG = logging.getLogger(\"Logfile\")\n # Print welcome message\n self.LOG.info(self.welcome_string)\n self.LOG.debug(snoopy)\n if to_log:\n self.LOG.info(to_log)", "def _setup_logging(config):\n if config.debug:\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(message)s\", level=logging.DEBUG\n )\n else:\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(message)s\", level=logging.INFO\n )", "def setup_logging():\n log.setup('keystone')", "def setup_logging(filename):\n try:\n LOG_PATH.mkdir(parents=True, exist_ok=True)\n LOG_CONFIG['handlers']['file_handler']['filename'] = LOG_PATH / filename\n logging.config.dictConfig(LOG_CONFIG)\n except OSError:\n logging.basicConfig(level=logging.ERROR)\n logging.exception('Could not initialize logging to file')\n\n sys.excepthook = log_uncaught_exceptions", "def setup_logs(arg_log_dir, log_level='debug'):\n assert log_level.lower() in ('debug', 'info', 'warning', 'error', 'critical')\n global logger\n cl_logger = log.LogManager(app_name=APP_NAME,\n log_name=__name__,\n log_dir=arg_log_dir)\n logger = cl_logger.logger\n logger.setLevel(log_level.upper())", "def set_config(config):\n global _config\n logging.config.dictConfig(config)\n _configure_ulog_bridge()\n _config = config", "def _configure_logging(self):\n logger = logging.getLogger('BatchAppsBlender')\n\n console_format = logging.Formatter(\n \"BatchApps: [%(levelname)s] %(message)s\")\n\n file_format = logging.Formatter(\n \"%(asctime)-15s [%(levelname)s] %(module)s: %(message)s\")\n\n console_logging = logging.StreamHandler()\n console_logging.setFormatter(console_format)\n logger.addHandler(console_logging)\n\n logfile = os.path.join(self.props.data_dir, \"batch_apps.log\")\n\n file_logging = logging.FileHandler(logfile)\n file_logging.setFormatter(file_format)\n logger.addHandler(file_logging)\n\n logger.setLevel(int(self.props.log_level))\n return logger", "def test_root_logger_config(self):\n with debug_env:\n logging.config.dictConfig(django12factor.factorise()[\"LOGGING\"])\n self.assertTrue(has_handler(logging.root, \"stdout\"))", "def _set_output_file(self):\n dictConfig(self.DEFAULT_LOGGING)", "def _initialize_logging(self):\n if self._custom_logger:\n self._logger.debug(\"Skipping logging init: custom logger detected\")\n return\n\n try:\n log_config = self._ez_client.get_logging_config(\n local=bool(self._config.runner_id)\n )\n except Exception as ex:\n self._logger.warning(\n \"Unable to retrieve logging configuration from Beergarden, the default \"\n \"configuration will be used instead. 
Caused by: {0}\".format(ex)\n )\n return\n\n try:\n configure_logging(\n log_config,\n namespace=self._system.namespace,\n system_name=self._system.name,\n system_version=self._system.version,\n instance_name=self._config.instance_name,\n )\n except Exception as ex:\n # Reset to default config as logging can be seriously wrong now\n logging.config.dictConfig(default_config(level=self._config.log_level))\n\n self._logger.exception(\n \"Error encountered during logging configuration. This most likely \"\n \"indicates an issue with the Beergarden server plugin logging \"\n \"configuration. The default configuration will be used instead. Caused \"\n \"by: {0}\".format(ex)\n )\n return\n\n # Finally, log uncaught exceptions using the configuration instead of stderr\n self._set_exception_hook(self._logger)", "def __init_logging(self):\n\n logger = logging.getLogger('__name__')\n if os.path.exists(constants.LOG_FILE):\n logger.setLevel(logging.DEBUG)\n logger_file_handler = logging.FileHandler(constants.LOG_FILE)\n logger_formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')\n logger_file_handler.setFormatter(logger_formatter)\n logger.addHandler(logger_file_handler)\n else:\n logger.disabled = True", "def log_settings(config):\n LOGGER.propagate = False\n formatter = ViseronLogFormat(config.logging)\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n handler.addFilter(DuplicateFilter())\n LOGGER.addHandler(handler)\n\n LOGGER.setLevel(LOG_LEVELS[config.logging.level])\n logging.getLogger(\"apscheduler.scheduler\").setLevel(logging.ERROR)\n logging.getLogger(\"apscheduler.executors\").setLevel(logging.ERROR)", "def setup_class(cls):\n if os.path.exists(logfilename):\n os.remove(logfilename)\n log = logutils.get_logger(__name__)\n log.root.handlers = []\n logutils.config(mode='standard', console_lvl='stdinfo',\n file_name=logfilename)", "def _begin_logging(self):\n logconf.set_up_root_logger(self.opts.logfile)", "def _setup_file_logger(self):\n if self._file_log_handler is not None:\n raise RuntimeError(\"{}: File logger already exists\".format(self))\n\n # Note that in unit test driver's runpath might not be set\n if self.cfg.file_logger and self.runpath is not None:\n formatter = logging.Formatter(\n \"%(asctime)s %(levelname)s %(message)s\"\n )\n self._file_log_handler = logging.FileHandler(\n os.path.join(self.runpath, self.cfg.file_logger)\n )\n self._file_log_handler.setFormatter(formatter)\n self.logger.addHandler(self._file_log_handler)\n self.logger.propagate = False # No console logs", "def configure_logging(app):\n\n #if app.debug or app.testing:\n # Skip debug and test mode. 
Just check standard output.\n #return\n\n import logging.handlers\n import logging\n\n app.logger.setLevel(app.config['LOG_LEVEL'])\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s [in view %(pathname)s:%(lineno)d]:\\n%(message)s',\n '%d/%m/%Y %H:%M:%S')\n\n info_log = os.path.join(app.config['LOG_FOLDER'], 'info.log')\n info_file_handler = logging.handlers.RotatingFileHandler(info_log, maxBytes=100000, backupCount=10)\n info_file_handler.setLevel(logging.INFO)\n info_file_handler.setFormatter(formatter)\n\n error_log = os.path.join(app.config['LOG_FOLDER'], 'error.log')\n error_log = logging.handlers.RotatingFileHandler(error_log, maxBytes=100000, backupCount=10)\n error_log.setLevel(logging.ERROR)\n error_log.setFormatter(formatter)\n\n app.logger.addHandler(info_file_handler)\n app.logger.addHandler(error_log)\n\n # USAGE\n # from flask import current_app as ca\n # ca.logger.debug(pformat({'key': 'val'}))\n # ca.logger.info(pformat({'key': 'val'}))\n # ca.logger.warn('logger warn')\n # ca.logger.error('logger error')\n # ca.logger.fatal('logger fatal')", "def write_configs(logconf_dir):\n for name in list_logging_conf():\n conf = load_logging_conf(name)\n with io.open(os.path.join(logconf_dir, name), 'w') as f:\n f.write(json.dumps(conf))", "def configure(base_path):\n\n log_path = os.path.join(\n base_path,\n 'logs',\n )\n current_time = datetime.datetime.now().strftime(\"%d.%m.%Y %H:%M:%S\")\n\n log_fmt = '%(asctime)s [%(threadName)-12.12s] [%(levelname)-3.4s] %(message)s'\n\n logging.basicConfig(\n level=logging.INFO,\n format=log_fmt,\n handlers=[\n TimedRotatingFileHandler(\n filename=f\"{log_path}/analysis-service.({current_time}).log\",\n encoding='utf-8',\n when=\"d\"\n ),\n logging.StreamHandler()\n ]\n )", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def configure_logger():\n logger = logging.getLogger()\n handler = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)", "def setup_logging():\n formatter = logging.Formatter(LOG_FORMAT)\n level = logging.INFO\n\n file_handler = logging.FileHandler('db.log')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(level)\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n console_handler.setLevel(level)\n\n logger = logging.getLogger()\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n logger.setLevel(level)", "def dallinger_housekeeper():\n from logging.config import fileConfig\n\n fileConfig(\n os.path.join(os.path.dirname(__file__), \"..\", \"logging.ini\"),\n disable_existing_loggers=False,\n )", "def config_logger( self, ):\r\n logger = logging.getLogger( self.logger_id )\r\n\r\n logger.handlers = []\r\n logger.setLevel( self.parameters.logging_level ) # DEBUG , INFO WARNING ERROR CRITICAL\r\n\r\n # create the logging file handler.....\r\n fh = logging.FileHandler( self.parameters.pylogging_fn )\r\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n fh.setFormatter( formatter )\r\n logger.addHandler( fh )\r\n\r\n msg = \"Done config_logger\"\r\n print( msg )\r\n logger.info( msg ) # .debug .info .warn .error\r\n AppGlobal.set_logger( logger )\r\n\r\n return logger", "def setup_logging():\n logging.basicConfig(\n 
filename=os.getenv(\"SERVICE_LOG\", \"server.log\"),\n level=logging.DEBUG,\n format=\"%(levelname)s: %(asctime)s pid:%(process)s module:%(module)s %(message)s\",\n datefmt=\"%d/%m/%y %H:%M:%S\",\n )", "def setupLogging(loglevel=logging.INFO):\n\n # The following configures two loggers, the root logger and a logger named \"phone_ctlr_log\". Messages sent to the\n # root logger will be sent to the system log using the syslog protocol, and messages to the \"phone_ctlr_log\" logger will\n # be written to the Phone_Agent.log file which will be rotated once the log reaches 1Mb.\n\n configdict = {\n 'version': 1, # Configuration schema in use; must be 1 for now\n #'disable_existing_loggers': True, # Disables all existing logging configurations\n\n 'formatters': {\n 'brief': {\n 'format' : '%(levelname)-8s %(asctime)s (%(created)s) %(message)s',\n 'datefmt': '%Y%m%dT%H%M%S.%Z' },\n 'standard': {\n 'format' : '%(levelname)-8s %(asctime)s %(name)-15s %(message)s',\n 'datefmt': '%Y%m%dT%H%M%S.%Z' },\n 'console': {\n 'format' : '%(levelname)-8s %(asctime)s -- %(message)s',\n 'datefmt': '%Y%m%dT%H%M%S.%Z' },\n 'custom': {\n 'format' : '%(asctime)s - %(message)s',\n 'datefmt': '%Y-%m-%dT%H:%M:%S.%Z' } ### Ex,: 2038-01-01T05:05:02\n },\n\n 'handlers': {'applog': {'class': 'logging.handlers.RotatingFileHandler',\n 'filename': '/opt/tools/phone_agent/Phone_Agent.log',\n #'filename': 'Phone_Agent.log',\n 'backupCount': 3,\n 'formatter': 'custom',\n 'level': 'INFO',\n 'maxBytes': 1024*1024},\n 'conlog': {'class': 'logging.StreamHandler',\n 'formatter': 'console',\n #'stream': 'console',\n 'level': 'DEBUG'},\n 'syslog': {'class': 'logging.handlers.SysLogHandler',\n 'formatter': 'standard',\n 'level': 'ERROR'}},\n\n # Specify all the subordinate loggers\n 'loggers': {\n 'phone_ctlr_log': {\n 'handlers': ['applog']\n },\n 'console_log': {\n 'handlers': ['conlog']\n }\n },\n # Specify properties of the root logger\n 'root': {\n 'handlers': ['syslog']\n },\n }\n\n # Set up configuration\n logging.config.dictConfig(configdict)", "def _setup_logging(self, config, channel):\r\n\r\n logfile = getattr(config, '%s_logfile' % channel)\r\n if not logfile:\r\n return\r\n\r\n maxbytes = getattr(config, '%s_logfile_maxbytes' % channel)\r\n backups = getattr(config, '%s_logfile_backups' % channel)\r\n fmt = '%(message)s'\r\n if logfile == 'syslog':\r\n warnings.warn(\"Specifying 'syslog' for filename is deprecated. \"\r\n \"Use %s_syslog instead.\" % channel, DeprecationWarning)\r\n fmt = ' '.join((config.name, fmt))\r\n self.mainlog = loggers.handle_file(\r\n config.options.getLogger(),\r\n filename=logfile,\r\n fmt=fmt,\r\n rotating=not not maxbytes, # optimization\r\n maxbytes=maxbytes,\r\n backups=backups)\r\n\r\n if getattr(config, '%s_syslog' % channel, False):\r\n fmt = config.name + ' %(message)s'\r\n loggers.handle_syslog(self.mainlog, fmt)", "def config_logger(log_level):\n try:\n logfile = os.path.expanduser(os.path.join(\"~\", \".parallelcluster\", \"awsbatch-cli.log\"))\n logdir = os.path.dirname(logfile)\n os.makedirs(logdir)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(logdir):\n pass\n else:\n fail(\"Cannot create log file (%s). 
Failed with exception: %s\" % (logfile, e))\n\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s [%(module)s:%(funcName)s] %(message)s\")\n\n logfile_handler = RotatingFileHandler(logfile, maxBytes=5 * 1024 * 1024, backupCount=1)\n logfile_handler.setFormatter(formatter)\n\n logger = logging.getLogger(\"awsbatch-cli\")\n logger.addHandler(logfile_handler)\n try:\n logger.setLevel(log_level.upper())\n except (TypeError, ValueError) as e:\n fail(\"Error setting log level. Failed with exception: %s\" % e)\n\n return logger", "def _setup_logging(self, logger=None, **kwargs):\n if logger or len(logging.root.handlers) != 0:\n self._custom_logger = True\n else:\n # log_level is the only bootstrap config item\n boot_config = load_config(bootstrap=True, **kwargs)\n logging.config.dictConfig(default_config(level=boot_config.log_level))\n\n self._custom_logger = False\n\n return logger or logging.getLogger(__name__)", "def setup_log(self, log_file):\n directory = os.path.dirname(log_file)\n if directory:\n os.makedirs(directory, exist_ok=True)\n\n logger = logging.getLogger(log_file)\n formatter = logging.Formatter(config.LOG_FORMAT)\n\n file_handler = logging.FileHandler(log_file, mode='a')\n file_handler.setFormatter(formatter)\n\n logger.setLevel(logging.INFO)\n logger.addHandler(file_handler)\n\n return logger", "def configure_logger(self, detached):\n\n log_level = self.log_conf['level'].upper()\n if not hasattr(logging, log_level):\n raise mcadminpanel.agent.errors.ConfigurationError(\n 'Improperly configured log level: {}'.format(log_level),\n )\n log_level = getattr(logging, log_level)\n\n handlers = []\n\n file_handler = logging.handlers.TimedRotatingFileHandler(\n self.log_conf['file'],\n when='midnight',\n )\n file_handler.setLevel(log_level)\n handlers.append(file_handler)\n\n if not detached:\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(log_level)\n handlers.append(stream_handler)\n\n logging.basicConfig(\n level=log_level,\n datefmt=self.log_conf['date_format'],\n format=self.log_conf['format'],\n handlers=handlers,\n )", "def init_logging():\n global logger\n logging.basicConfig(\n format='%(levelname)s - %(message)s',\n )\n logger = logging.getLogger('runner')\n logger.setLevel(os.environ.get('LOGGING_LEVEL', 'INFO'))", "def _setup_logger_from_json_file(self, config_file):\n try:\n with open(config_file, 'rt') as file:\n config = json.load(file)\n dictConfig(config)\n except Exception as ex:\n raise Exception(f'Can not open the log config file because of {ex}')", "def logging_setup(args, log_dir):\n timestamp_file = datetime.now().strftime(\"%Y%m%d-%H.%M_rcf_abb.log\")\n log_file = Path(log_dir) / timestamp_file\n\n handlers = []\n\n if not args.skip_logfile:\n handlers.append(log.FileHandler(log_file, mode=\"a\"))\n if not args.quiet:\n handlers.append(log.StreamHandler(sys.stdout))\n\n log.basicConfig(\n level=log.DEBUG if args.debug else log.INFO,\n format=\"%(asctime)s:%(levelname)s:%(funcName)s:%(message)s\",\n handlers=handlers,\n )", "def pytest_logger_config(self, logger_config):", "def setup_logger() -> None:\n LOGGER.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s \\t|%(asctime)s \\t| %(name)s \\t| %(message)s')\n\n if not check_if_dir_exists(FILENAMES.LOG_DIR):\n os.mkdir(to_abs_file_path(FILENAMES.LOG_DIR))\n\n file_handler: logging.FileHandler = logging.FileHandler(to_abs_file_path(FILENAMES.LOG), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n console_handler: 
logging.StreamHandler = logging.StreamHandler()\n console_handler.setLevel(logging.WARNING)\n\n LOGGER.addHandler(file_handler)\n LOGGER.addHandler(console_handler)\n LOGGER.info('Filehandler and Console_Handler were born, let\\'s start logging')", "def logging_config(logfile_path, logfile_level='debug', console_level='debug'):\n cfg = dict(LOGGING_CONFIG)\n cfg['handlers']['logfile']['filename'] = logfile_path\n cfg['handlers']['logfile']['level'] = logfile_level.upper()\n cfg['handlers']['console']['level'] = console_level.upper()\n return cfg", "def _configure_logging(self):\n workflowProperty = '-Dapp.workflow={}'.format(self.appName)\n\n # self.sparkProperties[SparkProperties.SPARK_DRIVER_EXTRAJAVAOPTIONS] = workflowProperty\n # self.sparkProperties[SparkProperties.SPARK_EXECUTOR_EXTRAJAVAOPTIONS] = workflowProperty\n\n self._append_or_create_property(SparkProperties.SPARK_DRIVER_EXTRAJAVAOPTIONS, workflowProperty, ' ')\n self._append_or_create_property(SparkProperties.SPARK_EXECUTOR_EXTRAJAVAOPTIONS, workflowProperty, ' ')\n # Add environment variables to the executors\n self.extra_property('spark.executorEnv.APP_WORKFLOW', self.appName)\n # Add environment variables to the driver process. TODO: with executor_env ?\n self.executor_env('APP_WORKFLOW', self.appName)", "def initialize_logging(config_path, log_dirname=None):\n try:\n config = load_yaml(config_path)\n except Exception as e:\n # if fail\n logging.basicConfig(level=logging.INFO)\n logging.info(f\"{e}. Falling back to default logger.\")\n else:\n # if successful\n if log_dirname is not None:\n for handler_name in config[\"handlers\"]:\n handler = config[\"handlers\"][handler_name]\n if \"filename\" in handler:\n # must be a file handler\n filename = Path(handler[\"filename\"]).name\n handler[\"filename\"] = log_dirname / filename\n\n # install coloredlogs for console handler only\n console_format = config[\"formatters\"][\n config[\"handlers\"][\"console\"][\"formatter\"]\n ][\"format\"]\n console_level = config[\"handlers\"][\"console\"][\"level\"]\n console_stream = config[\"handlers\"][\"console\"][\"stream\"]\n coloredlogs.install(fmt=console_format, level=console_level, sys=console_stream)\n\n logging.config.dictConfig(config)\n finally:\n logging.info(f\"Logging initialized.\")", "def logger_setup(app_config):\n # set up logger\n app_log_file = get_config_item(app_config, 'app_log_file.file')\n\n app_logger = logging.getLogger('AppLogger')\n app_logger.setLevel(logging.DEBUG)\n\n try:\n # Add the log message handler to the logger\n handler = logging.handlers.RotatingFileHandler(\n app_log_file, maxBytes=get_config_item(app_config, 'app_log_file.rotate_at_in_bytes'),\n backupCount=4)\n formatter = logging.Formatter(get_config_item(app_config, 'app_log_file.log_format'))\n handler.setFormatter(formatter)\n\n app_logger.addHandler(handler)\n except IOError:\n print(\"Can not open the log file: {}... 
exiting...\".format(app_log_file))\n return False\n # end try\n\n return app_logger", "def _set_logging():\n log_level = os.getenv(\"TRIKI_LOG_LEVEL\", \"INFO\")\n quiet = os.getenv(\"TRIKI_NO_LOG_FILE\")\n handlers = [logging.StreamHandler()]\n if not quiet:\n handlers.append(logging.FileHandler(\"triki_click_analysis.log\"))\n logging.basicConfig(\n level=log_level,\n format=\"%(asctime)-15s %(levelname)s: %(message)s\",\n handlers=handlers,\n )", "def init_log_file(self):\r\n try:\r\n os.makedirs(config[\"server_log_path\"])\r\n except OSError:\r\n if not os.path.isdir(config[\"server_log_path\"]):\r\n raise\r\n server_log_file = logging.FileHandler(\r\n config[\"server_log_path\"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')\r\n server_log_file.setLevel(logging.DEBUG)\r\n server_log_file.setFormatter(file_formatter)\r\n server_log.addHandler(server_log_file)", "def config_logger(log_cfg_file, experiment_name=None, output_dir='logs'):\n timestr = time.strftime(\"%Y.%m.%d-%H%M%S\")\n exp_full_name = timestr if experiment_name is None else experiment_name + '___' + timestr\n logdir = os.path.join(output_dir, exp_full_name)\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n log_filename = os.path.join(logdir, exp_full_name + '.log')\n if os.path.isfile(log_cfg_file):\n logging.config.fileConfig(log_cfg_file, defaults={'logfilename': log_filename})\n msglogger = logging.getLogger()\n msglogger.logdir = logdir\n msglogger.log_filename = log_filename\n msglogger.info('Log file for this run: ' + os.path.realpath(log_filename))\n\n # Create a symbollic link to the last log file created (for easier access)\n try:\n os.unlink(\"latest_log_file\")\n except FileNotFoundError:\n pass\n try:\n os.unlink(\"latest_log_dir\")\n except FileNotFoundError:\n pass\n try:\n os.symlink(logdir, \"latest_log_dir\")\n os.symlink(log_filename, \"latest_log_file\")\n except OSError:\n msglogger.debug(\"Failed to create symlinks to latest logs\")\n return msglogger", "def config_logging():\n logging.basicConfig(level=logging.INFO)\n logging.getLogger('deepcomp').setLevel(logging.WARNING)\n logging.getLogger('deepcomp.main').setLevel(logging.INFO)\n logging.getLogger('deepcomp.util.simulation').setLevel(logging.INFO)\n # logging.getLogger('deepcomp.env.entities.user').setLevel(logging.DEBUG)\n # logging.getLogger('deepcomp.env.multi_ue.multi_agent').setLevel(logging.DEBUG)\n logging.getLogger('matplotlib').setLevel(logging.WARNING)\n logging.getLogger('tensorflow').setLevel(logging.ERROR)\n gym.logger.set_level(gym.logger.ERROR)\n # structlog.configure(logger_factory=LoggerFactory())\n structlog.configure(logger_factory=LoggerFactory(),\n processors=[\n structlog.stdlib.filter_by_level,\n FloatRounder(digits=LOG_ROUND_DIGITS, not_fields=['sinr', 'signal', 'interference']),\n structlog.dev.ConsoleRenderer()\n ])", "def _configure_logging() -> logging.Logger:\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler()\n handler.setFormatter(\n logging.Formatter(fmt=\"%(asctime)s %(process)d %(levelname)s %(funcName)s %(message)s\")\n )\n logger.addHandler(handler)\n\n # Check the environment variables for log levels:\n if \"SPARK_CONNECT_LOG_LEVEL\" in os.environ:\n logger.setLevel(os.environ[\"SPARK_CONNECT_LOG_LEVEL\"].upper())\n else:\n logger.disabled = True\n return logger", "def setup_logging():\n product_name = \"plasma\"\n logging.setup(cfg.CONF, product_name)\n LOG.info(\"Logging enabled!\")\n LOG.debug(\"command line: %s\", \" \".join(sys.argv))" ]
[ "0.82155377", "0.75840616", "0.73912275", "0.7389693", "0.72844446", "0.7030196", "0.70190215", "0.7017492", "0.7016571", "0.6946439", "0.69230664", "0.68668836", "0.6850633", "0.6848561", "0.6812513", "0.6809024", "0.6786257", "0.6777338", "0.6761215", "0.6757546", "0.67443126", "0.6727407", "0.67229307", "0.671431", "0.6709877", "0.670922", "0.6704961", "0.6687837", "0.667207", "0.66378266", "0.66279596", "0.6621621", "0.6607204", "0.66015196", "0.65993035", "0.65788245", "0.65749574", "0.657352", "0.65498936", "0.6543253", "0.65367776", "0.6529649", "0.6520213", "0.6518209", "0.6511994", "0.6500178", "0.6498341", "0.6478949", "0.64777637", "0.64625883", "0.6456341", "0.64557886", "0.6454999", "0.6454364", "0.64457387", "0.6426016", "0.6423724", "0.64226556", "0.6406769", "0.6403098", "0.6397779", "0.63797575", "0.6379107", "0.6371238", "0.6362189", "0.6362168", "0.6348998", "0.63430935", "0.6339065", "0.6338467", "0.6328093", "0.63226515", "0.632156", "0.6316693", "0.6315697", "0.6294127", "0.62934333", "0.62892723", "0.62609565", "0.62567645", "0.6254543", "0.62518007", "0.6244983", "0.624222", "0.6236944", "0.6235911", "0.6226405", "0.6226195", "0.6222155", "0.62173", "0.6205063", "0.6198791", "0.61965954", "0.61952466", "0.6189217", "0.6186719", "0.618553", "0.61794746", "0.6177902", "0.61751693" ]
0.81726515
1
The command line entry point
def main(argv=None):
    if argv is None:
        argv = sys.argv
    try:
        _configure_logger()
        cfg_path = _cfg_path(argv)
        if not cfg_path:
            return 1
        the_cfg = json.load(open(cfg_path)).get('amdgpu')
        if not isinstance(the_cfg, dict):
            raise ValueError("missing config in {}".format(cfg_path))
        _info("loaded config from {0}".format(cfg_path))
        perform_overclock(the_cfg)
        return 0
    except ValueError:
        _LOG.error(
            "error using the config: %s, exiting", cfg_path, exc_info=True)
        return 1
    except Exception:  # pylint: disable=broad-except
        _LOG.error('could not perform overclock', exc_info=True)
        return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(args=None):", "def main(args=None):", "def main(args):", "def main(args):", "def main(args=None):\n pass", "def main():\n return", "def main():\n pass", "def main(self) -> None:\n pass", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main():\n\tcli = Cli()\n\tcli.run()", "def entry_point():", "def entry_point():", "def entry_point():", "def main() -> None:\n return", "def main(ctx, verbose):\n return", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main(args=None):\n app()\n return 0", "def main():\n CLI_APP.run()", "def main(args):\n cli = CLI()\n # Check arguments\n cli.parse_arguments(args)", "def cli():\n config, auth, execute_now = read_command_line_arguments()\n main(config, auth, execute_now)", "def main():\n args = parse_args()\n process_args(args)", "def cli():\n pass", "def main(self):\r\n pass", "def cli(args): # noqa; pylint: disable=unused-argument", "def main():\n\tpass", "def main(args):\n # Results: print to console and also write to output file\n pass", "def main(self, **kwargs) -> None:\n ...", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run_main():\n main(sys.argv)", "def main(self):", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def cli():\r\n pass", "def main():\n Main()", "def main_cli():\n pass", "def entry_point():\n\n\n plac.call(main)", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def entrypoint(cls):\n try:\n cls().run(sys.argv[1:])\n except KeyboardInterrupt:\n pass", "def entry_point():\n pass", "def entry_point() -> int:\n return run(argv=sys.argv[1:], stdout=sys.stdout, stderr=sys.stderr)", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)" ]
[ "0.82658285", "0.82658285", "0.8260238", "0.8260238", "0.8001488", "0.78304154", "0.77982444", "0.7783042", "0.7692148", "0.7692148", "0.7692148", "0.7692148", "0.76892644", "0.7674975", "0.7674975", "0.7674975", "0.76543075", "0.76094264", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.7606937", "0.760293", "0.7601792", "0.75876385", "0.75678605", "0.7487532", "0.7448024", "0.7436476", "0.7400882", "0.7369244", "0.7352942", "0.7300636", "0.7286985", "0.7286985", "0.7286985", "0.7286985", "0.7286985", "0.7286985", "0.7286985", "0.7286985", "0.72759384", "0.7263538", "0.72455865", "0.72455865", "0.72455865", "0.72393733", "0.72370994", "0.72286963", "0.71911174", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71864307", "0.71848464", "0.7182042", "0.7177592", "0.71727216", "0.71727216" ]
0.0
-1
Endpoint to display individual item.
def read_item(item_name, catagory_name):
    item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name)
    return render_template('item.html', item=item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self, item_id):\n pass", "def showItem(category_item_id):\n return render_template('item.html', item=db.findItem(id=category_item_id))", "def get(self, item_id: int):\n\n try:\n\n controller = self.controller()\n schema = self.schema()\n raw_data = controller.read(id=item_id)\n data = {'item': schema.dump(raw_data)}\n\n return ResponseHandler.render_response(data=data)\n\n except Exception as ex:\n\n return ResponseHandler.render_response(status=ERR, message=traceback.format_exc())", "def read_item(id):\n\n username = login_session.get('username', None)\n item = session.query(Item).filter_by(id=id).one()\n item_display = {'id': item.id, 'title': item.title, 'desc': item.desc}\n return render_template(\n 'read_item.html',\n item_display=item_display,\n username=username)", "def item_detail(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n current_user.id == Item.user_id\n ).first()\n if not item:\n flash(\"Couldn't find this item\", category='warning')\n return redirect(url_for('url.index'))\n return render_template('detail.html', item=item)", "def view_item(item_id):\n session['target'] = url_for('view_item', item_id=item_id)\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item, Category).join(Category)\\\n .filter(Item.id == item_id).first()\n return render_template(\"view_item.html\", item=item)", "def item_detail(request, item_id):\n # Select product based on URL param\n item = SELECT('item', where=f'id = {item_id}', _print=False)\n\n context = {\n 'item': item,\n 'photos': [item['photo_primary']] + item['photos']\n }\n return render(request, 'item_detail.html', context)", "def item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to view the item because\n # the item is private and was created by a different user, send a\n # 403\n elif item.private and current_user != item.user:\n abort(403)\n\n return render_template('item.html', item=item)", "def show_item_details(item_id):\n item = session.query(Item, User).join(User).filter(Item.id == item_id).first()\n return render_template('item_details.html', item=item, login_session=login_session)", "def item_detail(request, slug):\n\n item = get_object_or_404(Item, slug=slug)\n\n context = {\n 'item': item,\n }\n\n return render(request, 'items/item_detail.html', context)", "def get(self, request, *args, **kwargs):\n items = self.get_items()\n return self.print(request, items)", "def api_item_details(item_id):\n if request.method == 'GET':\n item = session.query(Item, User).join(User).filter(Item.id == item_id).first()\n return jsonify(item.Item.to_json())\n # TODO - Add a POST method + HTTP Auth to allow a RESTful item modification", "def viewItem(sport_id, item_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n item = session.query(Item).filter_by(id=item_id).one()\n return render_template('viewitem.html', sport_id=sport_id, item_id=item_id,\n item=item, sport=sport)", "def view_item(request, item_pk):\n return HttpResponse('This is where we view item ' + item_pk)", "def get_item_detail(item_id):\n pass", "def _show(self, **kwargs):\n\n resource_name = self._get_resource_name(**kwargs)\n\n return self._make_request(\n uri='%s/%s' % (self._metadata['uri'], resource_name)\n )", "def showItem(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n items = session.query(Item).filter_by(category_id=category_id).all()\n\n return 
render_template('items.html', items=items, category=category)", "def show(self, *args, **kwargs) -> None:\n pass", "def show(self, *args, **kwargs) -> None:\n pass", "def show(self, *args, **kwargs) -> None:\n pass", "async def get(self):\n identifier = self.data[\"id\"]\n item = self.core.item_manager.items.get(identifier)\n if not item:\n return self.error(\n ERROR_ITEM_NOT_FOUND,\n f\"No item found with identifier {identifier}\", status_code=404)\n\n return self.json(data=list(item.actions.keys()))", "def item_detail(request, pk):\n\n data = request.data\n try:\n item = validations_utils.item_validation(pk) # Validates if user exists or not.\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n if request.method == 'GET':\n item_serializer = ItemSerializer(item)\n return Response(item_serializer.data, status=status.HTTP_200_OK)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"show\"), kwargs)", "def show_item(category, item):\n # Detect login status\n login_status = None\n if 'email' in login_session:\n login_status = True\n # Provide state token to enable Google Sign-In\n state = login_session['state']\n # Query database with SQLAlchemy to show selected category and item\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n item = (session.query(Items)\n .filter_by(name=item.replace('-', ' '), category_id=category.id)\n .one())\n # Render webpage\n return render_template('show_item.html',\n item=item,\n category=category,\n login_status=login_status,\n CLIENT_ID=CLIENT_ID,\n STATE=state)", "def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)", "def GET(self, item_id):\n\n asset = sandbox.access(sandbox_name, asset_ids=[item_id])[0]\n return render.info(asset=asset)", "def render_item_page(self, client_id, state, user_id, user_name, item_id):\r\n item = self._db_manager.get_item(item_id)\r\n if item is None:\r\n flash(\"Invalid item.\")\r\n return\r\n category = self._db_manager.get_category(item[\"category_id\"])\r\n if category is None:\r\n # this should not happen unless there is a concurrent delete\r\n flash(\"Sorry, something went wrong.\")\r\n return\r\n return render_template(\r\n \"item_view.html\",\r\n client_id=client_id,\r\n state=state,\r\n is_logged_in=user_id is not None,\r\n is_creator=item[\"user_id\"] == user_id,\r\n user_name=user_name,\r\n category=category,\r\n item=item\r\n )", "def item_details(request, product_id):\n\n item = get_object_or_404(Product, pk=product_id)\n\n context = {\n 'product': item,\n }\n\n return render(request, 'products/item_details.html', context)", "def index(self, page=1):\n users_obj = User.query.paginate(page)\n return render_template('item/list.html', items=users_obj)", "def 
get(self, request, *args, **kwargs):\n instance = get_object_or_404(self.queryset, **kwargs)\n\n return render(\n request,\n self.get_template_name(),\n {\n \"object\": instance,\n },\n )", "def get(self, request, *args, **kwargs):\n instance = get_object_or_404(self.queryset, **kwargs)\n\n return render(\n request,\n self.get_template_name(),\n {\n \"object\": instance,\n },\n )", "def read_item(id: str, request: Request):\n obj = db.get(id, kind=endpoint_model)\n return obj", "def get_list_fs_item(self):\n if self.request.path[1:].rfind('/') > 1:\n list_id = self.request.path[1:self.request.path.rfind('/')]\n item_id = self.request.path[self.request.path.rfind('/')+1:]\n #self.response.out.write(list_id + \";\" +item_id)\n listspec = self.get_fs_list_spec(list_id)\n if listspec != None:\n lst = List(**listspec)\n for entry in listspec['entries']:\n if '_headline' in entry and self.get_slug(entry['_headline']) == item_id:\n page = self.get_page_from_entry(entry)\n return ModelAndView(view='list-item.html',\n model={\n 'list': lst,\n 'page': page,\n 'syntax_list': get_syntax_list([page])})\n raise NotFoundException", "def detail(self, req):\n return self.index(req)", "def display(self, *args, **kwargs):\n return self.show(*args, **kwargs)", "def show(self, req, id):\n context = req.environ['meteos.context']\n\n try:\n model = self.engine_api.get_model(context, id)\n except exception.NotFound:\n raise exc.HTTPNotFound()\n\n return self._view_builder.detail(req, model)", "def food_item(request, food_id):\n\n food = get_object_or_404(Nutrition, pk=food_id)\n\n context = {\n 'food': food,\n }\n\n return render(request, 'nutrition/food.html', context)", "def item_detail_view_service(self, item_info, connection):\n item_dao = ItemDao()\n item_detail_view = item_dao.item_detail_view_dao(item_info, connection)\n item_detail_view['images'] = item_dao.item_detail_image_dao(item_info, connection)\n item_detail_view['amenities'] = item_dao.item_detail_view_amenitie_dao(item_info,connection)\n item_detail_view['review'] = item_dao.item_detail_view_review_dao(item_info, connection)\n item_detail_view['review_avg'] = item_dao.detail_count_avg_review_dao(item_info, connection)\n\n return {'data' : item_detail_view}", "def item(self, item_id):\n response = self._request(V2_ENDPOINTS['ITEMS'] + item_id)\n return response", "def details(self, identifier):\n return self.client.request_with_method(Methods.GET % (self.name, identifier,))['item']", "def show_item_by_id(plugin, item_id):\n import alltheitems.item_page\n return alltheitems.item_page.item_page(plugin + ':' + item_id)", "def get_item_by_id(request, pk):\n item = get_object_or_404(StockItem, pk=pk)\n res_dict = {\n 'id': item.id,\n 'name': item.name,\n 'count': item.count,\n 'date_added': item.date_added,\n 'exp': item.date_of_expiration,\n 'added_by': item.added_by,\n 'cat': str(item.fk_category),\n 'subcat': str(item.fk_subcategory),\n 'notes': item.notes\n }\n return JsonResponse(res_dict)", "def show(*args):\n I = Items()\n for arg in args:\n I.add_item(arg)\n I.write()", "def get_item(self):\n return self.item", "def get_item(self):\n return self.item", "def listing_view(self, request):\n self._object = self.get_page_for_url(request)\n if self._object is not None:\n self.kwargs.update({'pk': self._object.pk})\n # pylint: disable=attribute-defined-outside-init\n self.action = 'detail_view'\n return self.detail_view(request, pk=self._object.pk)\n return super().listing_view(request)", "def show():\n session = current_app.config['db']\n items = 
session\\\n .query(WineABV)\\\n .order_by(asc(func.lower(WineABV.name)))\n if is_json_request(request):\n return jsonify(items=[x.serialize for x in items])\n else:\n return render_template(template_prefix+'view.html', items=items)", "def item_retrieve(id):\n item = getItem(id)\n if item is None:\n return jsonify({}), 204\n else:\n return jsonify(item=item.serialize)", "def get(self):\n items = self.request.get_all(\"food\")\n # pases the value in item variable into the jinja2 template\n self.render(\"shopping_list.html\", items=items)", "def __repr__(self):\n return f\"Item=(id={self.id},item_name={self.item_name},item_slug={self.item_slug})\"", "def resource_show(resource_id, extra_args=None, cibfile=None):\n return item_show(\n item=\"resource\", item_id=resource_id, extra_args=extra_args, cibfile=cibfile\n )", "def showItemJSON(category_id, item_id):\r\n session = DBSession()\r\n item = session.query(Item).filter_by(id=item_id).one()\r\n return jsonify(Item=item.serialize)", "def get_object(self):\n if not self._item:\n self._item = get_object_or_404(Item, pk=self.kwargs['item_id'])\n return self._item", "def index():\n db = get_db()\n items = db.execute(\"select item_id, location from items where location != ''\").fetchall()\n return render_template(\"tracker/index.html\", items=items)", "def retrieve(self, request, pk=None):\n return Response({\"retrieve_get\": 'GET'})", "async def iteminfo(self, ctx, *, item: str):\n items = await self.bot.di.get_guild_items(ctx.guild)\n item = items.get(item)\n if not item:\n await ctx.send(await _(ctx, \"Item doesnt exist!\"))\n return\n if hasattr(item, \"description\"):\n embed = discord.Embed(title=item.name, description=item.description, color=randint(0, 0xFFFFFF),)\n else:\n embed = discord.Embed(title=item.name, color=randint(0, 0xFFFFFF),)\n\n embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)\n embed.add_field(name=await _(ctx, \"Name\"), value=item.name)\n img = item.meta.get(\"image\")\n embed.set_thumbnail(url=str(img)) if img else None\n for key, value in item.meta.items():\n if key == \"image\":\n continue\n embed.add_field(name=key, value=value)\n\n await ctx.send(embed=embed)", "def display_todo_list_view(request: HttpRequest, pk: int) -> HttpResponse:\n todo_list = TodoListModel.objects.get(id=pk)\n\n return render(request, 'todo/display_todo_list.html', {'todo_list': todo_list})", "def list_items(self):\n click.echo(\"ID --|-- Item Title\")\n for index, item in enumerate(self.items):\n click.echo(\" {} --|-- {}\".format(index, item.title))", "def __str__(self):\n return \"Item('\"+ self.get_id() + \"')\"", "def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError", "def show(self, req, id):\n context = req.environ['manila.context']\n\n try:\n message = self.message_api.get(context, id)\n except exception.MessageNotFound as error:\n raise exc.HTTPNotFound(explanation=error.msg)\n\n return self._view_builder.detail(req, message)", "def _get(self, table, _id):\n data = {\"Key\": _id}\n return self._response_handler(table, \"get_item\", data)", "def item_from_feed(request):\n\n result = item( request.user, request.POST['sku'] )\n\n return JSONHttpResponse(result)", "def render(self, item):\r\n item = self.model._default_manager.get(pk=item)\r\n t = select_template(self.item_render_template)\r\n return t.render({\r\n \"MEDIA_URL\": settings.MEDIA_URL,\r\n \"STATIC_URL\": settings.STATIC_URL,\r\n \"item\": item,\r\n })", "def retrieve(self, request, pk=None):\n try:\n category = 
ItemCategory.objects.get(pk=pk)\n serializer = ItemCategorySerializer(category, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def __repr__(self):\n return \"Item('\"+ self.get_id() + \"')\"", "def get_item(self):\n raise NotImplementedError", "def show_item(request, itemID):\n\ttry:\n\t\titem = get_object_or_404(Item, itemID = itemID)\n\n\t# Handle when the given itemID is not UUID\n\texcept ValidationError:\n\t\traise Http404\n\n\tcontext_dict = {}\n\tsearch_form = Search_bar()\n\tcontext_dict['search_bar'] = search_form\n\tcontext_dict['item'] = item\n\tcontext_dict['seller_rating'] = range(int(round(item.seller.rating, 1)))\n\n\trelated = Item.objects.filter(category = item.category).exclude(itemID = item.itemID)\n\t\n\tif len(related) > 3:\n\t\tcontext_dict['trendingItems'] = related[0:3]\n\telse:\n\t\tcontext_dict['trendingItems'] = related\n\n\tresponse = render(request, 'tailored/product.html', context_dict)\n\t\n\tif first_visit(request, response, str(item.itemID)):\n\t\titem.dailyVisits += 1\n\t\titem.save()\n\t\t\n\tcontext_dict['itemID'] = item.itemID\n\n\tif item.seller.user != request.user:\n\t\treturn response\n\n\tsold_form = SoldItemForm()\n\n\tif request.method == 'POST':\n\t\tsold_form = SoldItemForm(request.POST, request.FILES)\n\n\t\tif sold_form.is_valid():\n\t\t\tuser_query = User.objects.filter(username = sold_form.cleaned_data['sold_to'])\n\t\t\tif not user_query:\n\t\t\t\tsold_form.add_error('sold_to', forms.ValidationError('The given user does not exist.'))\n\t\t\t\tcontext_dict['form'] = sold_form\n\t\t\t\treturn render(request, 'tailored/product.html', context_dict)\n\n\t\t\telif user_query[0] != request.user:\n\t\t\t\ttry:\n\t\t\t\t\titem.sold_to = UserProfile.objects.get(user = user_query[0])\n\t\t\t\t\titem.save()\n\t\t\t\texcept UserProfile.DoesNotExist:\n\t\t\t\t\tsold_form.add_error('sold_to', forms.ValidationError('The given user does not exist.'))\n\t\t\t\t\tcontext_dict['form'] = sold_form\n\t\t\t\t\treturn render(request, 'tailored/product.html', context_dict)\n\t\t\telse:\n\t\t\t\tsold_form.add_error('sold_to', forms.ValidationError(\"You can't sell an item to yourself.\"))\n\t\t\t\tcontext_dict['form'] = sold_form\n\t\t\t\treturn render(request, 'tailored/product.html', context_dict)\n\t\t\titem.save()\n\t\t\treturn HttpResponseRedirect(reverse('tailored:index'))\n\n\tcontext_dict['form'] = sold_form\n\treturn render(request, 'tailored/product.html', context_dict)", "def show_item(self, show_item):\n\n self._show_item = show_item", "def get(self, request ):\n return render(request, \"main_display_cards.html\")", "def retrieve(self, request, pk=None):\n return Response({'http_method': 'GET'})", "def item_from_browse(request):\n\n result = item( request.user, request.POST['sku'] )\n\n return JSONHttpResponse(result)", "def display(self):\r\n\t\tfor each_item in self.items:\r\n\t\t\teach_item.display()", "def create_item_page():\n catagories = [c.name for c in Catagory.fetch_all()]\n return render_template('add_item.html', catagories=catagories, values={})", "def detail(request, pk):\n mineral = get_object_or_404(Mineral, pk=pk)\n return render(request, 'detail.html', {'mineral': mineral})", "def detail(): \n\n # get contentid\n content_id = request.args.get('contentid')\n\n # get shortest places\n title, places = get_shortest(content_id)\n print(content_id)\n\n return render_template('detail.html', \n title=title,\n content_id=content_id,\n places=places, \n 
count=len(places))", "def items(request, pk):\n\n if Item.objects.filter(category_id=pk).exists(): # Checks if product_category exists with given id.\n all_items = Item.objects.filter(category_id=pk)\n else:\n return Response(messages.ITEMS_DOES_NOT_EXIST, status=status.HTTP_404_NOT_FOUND)\n if request.method == 'GET':\n item_serializer = ItemSerializer(all_items, many=True)\n return Response(item_serializer.data[::-1], status=status.HTTP_200_OK)", "def item_handler(id):\n if request.method == 'PUT':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to update a item\n rq = request.get_json()\n name = rq['name']\n picture = rq['picture']\n description = rq['description']\n item = updateItem(id, name, picture, description)\n return jsonify(item=item.serialize)\n elif request.method == 'DELETE':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to remove a item\n item = deleteItem(id)\n return jsonify(item=item.serialize)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "def purchase_item(item_id):\n\n item = get_item(item_id)\n\n return render_template('item_page.html', item=item)", "def retrieve(self, request, pk=None):\n\n return Response({'http_method':'GET'})", "def display(auth_context):\n\n cart = carts.get_cart(auth_context.get('uid'))\n for item in cart:\n product = product_catalog.get_product(item.item_id)\n item.info = product\n\n return render_template('cart.html',\n cart=cart,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)", "def news_detail(request, pk, slug=None):\n item = get_object_or_404(models.NewsItem.objects, pk=pk)\n\n return render(request, 'news/detail.html', {\n 'object': item,\n })", "def show(self, req, id):\n db_api.create_one_net()\n aa = db_api.get_one_net()\n return {\"show\":\"show\"}", "def product_view(request, product):\n product = Products.objects.get(product=product)\n\n context = {\n \"product\": product,\n }\n\n return render(request, \"products/product_detail.html\", context)", "def explore_view(request):\r\n # explore items\r\n user = request.user.userprofile\r\n items = Item.objects.explore(user)\r\n context = {'items':items}\r\n return render(request, 'explore/explore.html', context)", "def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:", "def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:", "def index(self, action=None, item_id=None, item_path=None, reload=None):\n item = self.plugin.get_sh().return_item(item_path)\n\n tmpl = self.tplenv.get_template('index.html')\n # add values to be passed to the Jinja2 template eg: tmpl.render(p=self.plugin, interface=interface, ...)\n return tmpl.render(p=self.plugin,\n language=self.plugin._sh.get_defaultlanguage(), now=self.plugin.shtime.now())" ]
[ "0.7974844", "0.69679177", "0.69007516", "0.68516976", "0.68477976", "0.6822715", "0.6821025", "0.6692358", "0.6668874", "0.662971", "0.65989023", "0.65882397", "0.6572797", "0.6545979", "0.6537464", "0.652906", "0.6496628", "0.646886", "0.646886", "0.646886", "0.64111596", "0.6410683", "0.6380855", "0.6380855", "0.6380855", "0.6380855", "0.6380855", "0.6380855", "0.6380855", "0.6380855", "0.63606846", "0.6352916", "0.6333146", "0.6312308", "0.6292688", "0.62916034", "0.62087697", "0.62087697", "0.61509556", "0.61496055", "0.61426723", "0.61351025", "0.6128823", "0.6115406", "0.60743874", "0.60502976", "0.601328", "0.5994859", "0.59577477", "0.59520406", "0.5940685", "0.5940685", "0.5915664", "0.591331", "0.58979136", "0.58905303", "0.58711314", "0.5869418", "0.584627", "0.5836406", "0.5835801", "0.58218145", "0.5812046", "0.57570267", "0.57550085", "0.57538205", "0.57500345", "0.57423985", "0.5731268", "0.5726156", "0.5701094", "0.5699358", "0.5697887", "0.5694018", "0.56896037", "0.56789917", "0.5661179", "0.5661043", "0.56469005", "0.564101", "0.5613678", "0.5611185", "0.5595938", "0.5595503", "0.5594313", "0.55935115", "0.55935115", "0.55935115", "0.55935115", "0.55935115", "0.5592969", "0.5584352", "0.55802006", "0.55747324", "0.55662596", "0.55654895", "0.55465305", "0.55250126", "0.55250126", "0.55237764" ]
0.56846815
75
Endpoint to display create item page.
def create_item_page(): catagories = [c.name for c in Catagory.fetch_all()] return render_template('add_item.html', catagories=catagories, values={})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 'success')\n return redirect(url_for('showCatalog'))\n return render_template('new_item.html', categories=db.getAllCategories())", "def create_item():\n name = request.form['name']\n catagory = request.form['catagory']\n description = request.form['description']\n errors = form_errors(request.form)\n if errors:\n catagories = [c.name for c in Catagory.fetch_all()]\n values = {\n 'name': name, 'catagory': catagory, 'description': description\n }\n return render_template(\n 'add_item.html',\n catagories=catagories,\n values=values,\n errors=errors\n )\n Item.create(name, catagory_name=catagory, description=description)\n return redirect(url_for(\n 'read_item', catagory_name=catagory, item_name=name\n ))", "def new_item():\n if request.method == 'POST':\n new_item = Item(\n category_id=int(request.form['category']),\n name=request.form['name'],\n description=request.form['description'],\n created_date=datetime.datetime.now(),\n user_id=login_session['user_id'])\n session.add(new_item)\n session.commit()\n return redirect(\n url_for(\n 'item_details',\n category_id=new_item.category_id,\n item_id=new_item.id))\n else:\n categories = session.query(\n Category).all()\n return render_template(\n 'views/add.html',\n categories=categories)", "def new_item():\n form = ItemForm()\n user = current_user\n\n # If the form is validated, add its data to the database\n if form.validate_on_submit():\n\n # Check that an item with the same name and sport does not\n # already exist, or send a flash message and do not add the\n # new item to the database\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n\n # If the item does not yet exist, add all details to the\n # database, send a flash message, and redirect to 'home'\n else:\n name = form.name.data\n sport = form.sport.data\n category = form.category.data\n description = form.description.data\n private = form.private.data\n item = Item(name=name, sport=sport, category=category,\n description=description, private=private,\n user_id=user.id)\n db.session.add(item)\n db.session.commit()\n flash(f'\"{name}\" has been added!', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('new_item.html', form=form, title='New Item')", "def insert_item_page(request):\n validate(instance=request.body, schema=item_schema)\n body = json.loads(request.body)\n item = Item.new_item(body['cart_id'], body['food_id'], body['count'])\n return JsonResponse(model_to_json(item))", "def create_item(self, user: User, **kwargs) -> None:", "def newItem():\n if request.method == 'POST':\n if not checkLogin():\n return requests(url_for('catelog'))\n\n if request.form['name'].strip() == '':\n flash('item create failed: name is empty!')\n return redirect(url_for('newItem'))\n\n category = session.query(\n Category).filter_by(\n name=request.form['category']).one()\n\n ifCategory = session.query(Category).filter_by(\n name=request.form['category']).one()\n ifItem = session.query(Item).filter_by(\n category_id=ifCategory.id,\n name=request.form['name']).all()\n if (len(ifItem) > 0):\n flash('item create failed: item(%s) \\\n is already exist in category(%s)' % (\n ifItem[0].name,\n ifCategory.name))\n return 
redirect(url_for('catelog'))\n\n newItem = Item(\n name=request.form['name'],\n description=request.form['description'],\n category=category,\n auth=getLoginUser(),\n time=getIntTime())\n session.add(newItem)\n session.commit()\n\n flash('new item created: %s' % newItem.name)\n\n return redirect(url_for(\n 'itemDetail',\n category_name=category.name,\n item_name=newItem.name))\n else:\n all_category = session.query(Category).all()\n return render_template(\n 'new-item.html',\n all_category=all_category,\n isLogin=checkLogin())", "def createItem(category_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n if request.method == 'POST':\r\n session = DBSession()\r\n item = Item(name=request.form['name'],\r\n description=request.form['description'],\r\n category_id=category_id,\r\n user_id=login_session['user_id'])\r\n session.add(item)\r\n session.commit()\r\n return redirect(url_for('showCategoryItems', category_id=category_id))\r\n else:\r\n return render_template('newitem.html', category_id=category_id)", "def create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))", "def createNewItem(request):\n newItem = ItemSerializer(data=request.data)\n if newItem.is_valid():\n newItem.save()\n return Response(newItem.data, status=status.HTTP_201_CREATED)\n\n fail = {\n \"item\" : \"item is not valid\"\n }\n return JsonResponse(fail)", "def 
add_new_item():\n\n lst = item_list()\n return render_template('index.html', sell_flag=1, items=lst)", "def todos_create_page():\n todo = Todo()\n if todo.form_submit():\n todo.update(mongo.db)\n print('Created new TODO: {text}'.format(**todo.doc))\n return redirect('/')\n else:\n return render_template(\n template_name_or_list='todo.html',\n todo=todo,\n handle='Create')", "def issueCreate(request):\n args = { 'statusForm' : forms.itemStatusForm(), }\n return render_to_string('issueCreate.html', args,\n context_instance=RequestContext(request))", "def add_item():\n\n form = ItemForm()\n # Query for select field\n form.category_id.query = Category.query.filter(\n Category.user_id == current_user.id).all()\n\n if form.validate_on_submit():\n new_item = Item(\n category_id=form.category_id.data.id,\n name=form.name.data.capitalize(),\n description=form.description.data,\n user_id=current_user.id)\n db.session.add(new_item)\n db.session.commit()\n flash(\"New item '{}' was successfully created\".format(\n form.name.data.capitalize()), category='success')\n return redirect(url_for('url.index'))\n\n return render_template(\n 'forms/form.html',\n form_title='Add Item',\n form=form,\n form_name='item',\n action=url_for('url.add_item'))", "def create():\r\n form = ArticleForm(request.form)\r\n\r\n # Check request method and validate form\r\n if request.method == 'POST' and form.validate():\r\n data = {}\r\n data['article_id'] = uuid.uuid4().hex\r\n data['title'] = form.title.data\r\n data['description'] = form.description.data\r\n\r\n data = dict((k, v) for k, v in data.items() if v)\r\n\r\n # Save data in DynamoDb table\r\n response = table.put_item(Item=data)\r\n\r\n if response:\r\n flash('Article is successfully added')\r\n return redirect(url_for('article.list'))\r\n\r\n return render_template('article/form.html', add_article=True,\r\n form=form, title='Add Article')", "def insert_item():\n if 'userinfo' not in session.keys():\n session['target'] = url_for('insert_item')\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n creator_email = session['userinfo']['email']\n sqlsession = SQLSESSION()\n user = sqlsession.query(User).filter_by(email=creator_email).first()\n item = Item(name=request.form['name'],\n description=request.form['description'],\n category_id=int(request.form['category']),\n creator_id=user.id)\n sqlsession.add(item)\n sqlsession.commit()\n return redirect(\"/\")\n sqlsession = SQLSESSION()\n categories = sqlsession.query(Category).all()\n return render_template(\"new_item.html\",\n categories=categories)", "def create(request):\n if request.method == \"POST\":\n form = InitialInvoice(data=request.POST)\n if form.is_valid():\n data = form.cleaned_data\n return render(request,\n \"invoice/invoice_create.html\",\n {\n \"form\": ItemForm(),\n \"stage\": \"2\",\n \"initial_data\": data\n })\n\n return render(request,\n \"invoice/invoice_create.html\",\n {\n \"form\": InitialInvoice(),\n \"stage\": \"1\"\n })", "def createItem(self, item):\r\n try:\r\n self.feed_handler.createItem(item.link, item.title, item.descr,\r\n item.source, item.channelURL)\r\n self.feed_passed = self.feed_passed + 1\r\n except Exception, ex: \r\n # Remove comment for detailed information on feed item created\r\n #print ex\r\n pass", "def goto_create(self):\n\n self.create.click()", "def new(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, \"tipositems\")\n url_action = \"./\"\n\n pp = PoseePermiso('redefinir tipo item', id_tipo_item=id_tipo_item)\n if not 
pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(atras)\n tmpl_context.widget = self.new_form\n return dict(value=kw, \n page=u\"Nuevo Atributo\", \n action=url_action, \n atras=url_action)", "def admincreate(object):\n if request.method == \"POST\":\n\n db = get_db()\n execute_string = 'INSERT INTO ' + object.title()\n\n if object == 'post':\n execute_string += '(title, content, authorId, categoryId) VALUES (\"' + request.form['title'] + '\", \"' + request.form[\"content\"] + '\", \"' + request.form[\"authorid\"] + '\", \"' + request.form[\"categoryid\"] + '\")'\n elif object == 'author':\n execute_string += '(name) VALUES (\"' + request.form['name'] + '\")'\n elif object == 'category':\n execute_string += '(name, description) VALUES (\"' + request.form['name'] + '\", \"' + request.form[\"description\"] + '\")'\n\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n return render_template(\"new.html\", object=object, item={})", "def test_create_item(self):\n\n url = reverse('stock-item-create')\n\n response = self.client.get(url, {'part': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n response = self.client.get(url, {'part': 999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n # Copy from a valid item, valid location\n response = self.client.get(url, {'location': 1, 'copy': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n # Copy from an invalid item, invalid location\n response = self.client.get(url, {'location': 999, 'copy': 9999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)", "def _create_item(request):\r\n usage_key = UsageKey.from_string(request.json['parent_locator'])\r\n category = request.json['category']\r\n\r\n display_name = request.json.get('display_name')\r\n\r\n if not has_course_access(request.user, usage_key.course_key):\r\n raise PermissionDenied()\r\n\r\n parent = get_modulestore(category).get_item(usage_key)\r\n dest_usage_key = usage_key.replace(category=category, name=uuid4().hex)\r\n\r\n # get the metadata, display_name, and definition from the request\r\n metadata = {}\r\n data = None\r\n template_id = request.json.get('boilerplate')\r\n if template_id:\r\n clz = parent.runtime.load_block_type(category)\r\n if clz is not None:\r\n template = clz.get_template(template_id)\r\n if template is not None:\r\n metadata = template.get('metadata', {})\r\n data = template.get('data')\r\n\r\n if display_name is not None:\r\n metadata['display_name'] = display_name\r\n\r\n get_modulestore(category).create_and_save_xmodule(\r\n dest_usage_key,\r\n definition_data=data,\r\n metadata=metadata,\r\n system=parent.runtime,\r\n )\r\n\r\n # TODO replace w/ nicer accessor\r\n if not 'detached' in parent.runtime.load_block_type(category)._class_tags:\r\n parent.children.append(dest_usage_key)\r\n get_modulestore(parent.location).update_item(parent, request.user.id)\r\n\r\n return JsonResponse({\"locator\": unicode(dest_usage_key), \"courseKey\": unicode(dest_usage_key.course_key)})", "def new():\n session = current_app.config['db']\n if request.method == \"POST\":\n new_name = request.form['itemname']\n try:\n item = WineABV(name=new_name)\n session.add(item)\n session.commit()\n except exc.IntegrityError:\n session.rollback()\n flash(\"Duplicate values!\", 'danger')\n item = WineABV(name=new_name)\n return 
render_template(template_prefix+'/new_form.html', item=item)\n\n flash(\"Successfully Added '%s'\" % (new_name,), 'success')\n return redirect(url_for('.show'))\n else:\n item = WineABV(name=\"\")\n return render_template(template_prefix+'new_form.html', item=item)", "def go_to_create_tag():\n\n posts = Post.query.all()\n return render_template('tags/new.html', posts=posts)", "def newItem(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n if request.method == 'POST':\n newItem = Item(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'], category_id=category.id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Item %s Successfully Created' % (newItem.name))\n return redirect(url_for('showItem', category_id=category.id))\n else:\n return render_template('newitem.html', category_id=category.id)", "def add_item():\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = Item()\n # First we populate the new item.\n item.category_id = request.form['categoryId']\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n # Now let's pull its category.\n category = session.query(Category).filter_by(id=item.category_id).one()\n # And make sure they're properly linked.\n item.category = category\n session.add(item)\n session.flush()\n id = item.id\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state'], 'id': id}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def create():\n if request.method == 'POST':\n if request.form.get('title') and request.form.get('content'):\n entry = Entry.create(\n title = request.form.get('title'),\n content = request.form.get('content'),\n published = request.form.get('published') or False)\n flash('Entry created successfully!', 'success')\n if entry.published:\n return redirect(url_for('detail', slug=entry.slug))\n else:\n return redirect(url_for('edit', slug=entry.slug))\n else:\n flash('Title and Content are required!', 'danger')\n return render_template('create.html')", "def add_items_handler():\n rq = request.get_json()\n name = rq['name']\n picture = rq['picture']\n description = rq['description']\n category_id = rq['category_id']\n item = addItem(name, picture, description, category_id, g.user.id)\n return jsonify(item=item.serialize)", "def new_item(request):\n try:\n \"\"\"this logic here is for filling from the view_date view's\n output. 
If a new item needs to be made from there, it\n will have these GET parameters\n \"\"\"\n date_to_publish = datetime.datetime.strptime(request.GET['date_to_publish'], \"%Y-%m-%d\").date()\n position = int(request.GET['position'])\n new_item = NewsItem(date_to_publish=date_to_publish, position=position)\n except:\n \"\"\"in case someone url hacks here, or if we want to just make\n a 'create new' button somewhere\n \"\"\"\n new_item = NewsItem()\n new_item.save()\n return HttpResponseRedirect('%s/newsletter/%d/edit/' % (SUBSITE, new_item.pk))", "def newMenuItemPage(restaurant_id):\n restaurant = db_methods.searchResByID(restaurant_id)\n res_id = restaurant_id\n user_id = login_session['user_id']\n if request.method == 'POST':\n item_name = request.form['item_name']\n item_price = request.form['item_price']\n item_desc = request.form['item_desc']\n item_course = request.form['item_course']\n if item_name and item_price and item_desc and item_course:\n db_methods.addNewMenuItem(user_id, item_name, item_price, \n item_desc, item_course, res_id)\n time.sleep(0.1)\n return redirect(\"/restaurants/%s/menu/\" % res_id)\n else:\n error = \"Please be sure to fill out all required fields.\"\n return render_template('newmenuitem.html', error = error)\n else:\n return render_template('newmenuitem.html', res_id = res_id)", "def create(self, *args, **kwargs):\n pass", "def _create_item(self, parent_location, category, display_name, **kwargs):\n return ItemFactory.create(\n parent_location=parent_location,\n category=category,\n display_name=display_name,\n publish_item=False,\n user_id=self.user.id,\n **kwargs\n )", "def newListItem(category_id):\n\n if 'username' not in login_session:\n return redirect('/login')\n\n category = session.query(Category).filter_by(id=category_id).one()\n\n if category.user_id != login_session['user_id']:\n flash('You are not the creator of %s category, and cannot modify it' %\n category.name)\n return redirect(url_for('showItems', category_id=category.id))\n if request.method == 'POST':\n if \"btn_new\" in request.form:\n newItem = ListItem(name=request.form['name'],\n description=request.form['description'],\n category_id=category_id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Catalog Item: %s Successfully Created' % (newItem.name))\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return render_template('newitem.html',\n category_id=category_id,\n user=getUserInfo(login_session['user_id']))", "def post(self):\n data = request.json\n create_entry(data)\n return None, 201", "def restaurantMenuItemNew(restaurant_id):\n try:\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n if request.method == 'POST':\n if request.form['name']:\n newItem = MenuItem(name=request.form['name'], description=request.form[\n 'description'], price=request.form['price'], course=request.form['course'], restaurant_id=restaurant_id)\n session.add(newItem)\n session.commit()\n\n flash('Menu Item Created', 'menu')\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template('menuItemNew.html', restaurant=restaurant)\n\n except exc.NoResultFound:\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))", "def create_item(item: Item):\n coll_users = data_access.get_user_collection()\n coll_items = data_access.get_items_collection()\n\n if not item.users:\n raise 
HTTPException(status.HTTP_400_BAD_REQUEST,\n \"Empty user list not allowed.\")\n\n if not item.content:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"No description / content given.\")\n\n for user_name in item.users:\n if coll_users.find_one({\"name\": user_name}) is None:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n f\"User {user_name} not exists in the user list.\")\n\n item_dict = item.dict()\n item_dict[\"item_id\"] = uuid.uuid4()\n\n tm_now = datetime.datetime.now().isoformat()\n item_dict[\"status_change_date\"] = tm_now\n\n coll_items.insert_one(item_dict)", "def add_view(self, request):\r\n instance_form = self.get_minimal_add_form()\r\n form = instance_form(request.POST, request.FILES, prefix=self.base_url())\r\n\r\n new_instance = None\r\n if form.is_valid():\r\n new_instance = form.save()\r\n template = select_template(self.item_add_template)\r\n context = RequestContext(request)\r\n context.update({\r\n \"insert\": self,\r\n \"form\": form,\r\n \"object\": new_instance\r\n })\r\n response = HttpResponse(template.render(context))\r\n response.status_code = 201\r\n return response\r\n response = HttpResponse(form.errors)\r\n response.status_code = 400\r\n return response", "def new(): \n pages_object = Pages()\n page = pages_object.page\n \n language_name = languages_object.get_languages(3)\n \n # Creation new page\n if request.method == 'POST':\n if pages_object.new():\n return redirect(url_for('pages.overview'))\n \n # Come back a message when there is an error\t\n if not pages_object.message is None:\n message = pages_object.message\n status = pages_object.status\n \n return render_template('{}/new.html'.format(MODULE_DIR), **locals())", "def get(self):\n\n self.render(\"newpost.html\", user=self.user)", "def add_new_item(self, request, *a, **kw):\n item_def = request.data\n cpdoc = self.get_object()\n item_def['calendar_plan'] = cpdoc.id\n\n item_ser = self.get_serializer(data=item_def)\n item_ser.is_valid(raise_exception=True)\n item_obj = item_ser.save()\n\n headers = self.get_success_headers(item_ser.data)\n return response.Response(item_ser.data, headers=headers)", "def post(self, item):\n\n db.session.add(item)\n\n return item", "def create_page(self):", "def create(owner):\n data = request_content(request)\n resource = logic.resource.create(owner, data)\n return redirect(url_for('.get', owner=owner, \n resource=resource.name))", "def create():\n pass", "def create():\r\n if request.method == 'POST':\r\n title = request.form['title']\r\n body = request.form['body']\r\n error = None\r\n db = get_db()\r\n cur = db.cursor()\r\n\r\n cur.execute('SELECT title FROM novel.post WHERE title = %s', title)\r\n newTitle = cur.fetchone()\r\n\r\n if not title:\r\n error = 'Title is required.'\r\n\r\n if newTitle and newTitle['title'] == title:\r\n error = 'Title is repeated.'\r\n\r\n if error is not None:\r\n flash(error)\r\n else:\r\n db = get_db()\r\n db.cursor().execute(\r\n 'INSERT INTO novel.post (title, body, author_id) VALUES (\"{0}\", \"{1}\", \"{2}\")'\r\n .format(title, body, g.user['id'])\r\n )\r\n db.commit()\r\n return redirect(url_for('novel.index'))\r\n\r\n return render_template('novel/create.html')", "def test_create_view(self):\n supplement = SupplementFactory(user=self.user_1)\n time = get_utc_now()\n\n post_data = {\n \"supplement_uuid\": str(supplement.uuid),\n \"time\": time.isoformat(),\n \"quantity\": 5,\n }\n\n response = self.client_1.post(self.url, data=post_data)\n self.assertEqual(response.status_code, 200, response.data)\n\n data = 
response.data\n supplement_name = data[\"supplement\"][\"name\"]\n self.assertEqual(supplement.name, supplement_name)\n self.assertIsNotNone(data[\"display_name\"])", "def create(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"create\"), kwargs)", "def create(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"create\"), kwargs)", "def create():\n form = request.form\n try:\n # create a new BancBox client from the input form\n resp = api.create_client(form)\n except Exception, e:\n logger.error('Error creating new client: %s', e)\n return render_template('created.html', error=e.message)\n\n if resp.status == 1:\n # If the create request was successful, let's render a success\n # message with some data about the new client and a link to the\n # detail page\n new_client = {\n 'firstName': form['firstName'],\n 'lastName': form['lastName'],\n 'clientId': resp.clientId\n }\n return render_template('created.html', new_client=new_client)\n else:\n # If an error was returned by BancBox, let's render it\n if hasattr(resp, 'errors') and hasattr(resp.errors, 'message'):\n message = resp.errors.message\n else:\n message = \"Error creating new client.\"\n return render_template('created.html', error=message)", "def createItem(name, description, category_id, image, user_id):\n i = Item(name=name, description=description, category_id=category_id,\n image=image, user_id=user_id, pub_date=datetime.utcnow())\n db_session.add(i)\n db_session.commit()\n return i", "def create_view(request, title, modelform, **kwargs):\n instance_form = modelform(request.POST or None)\n if instance_form.is_valid():\n instance = instance_form.save(commit=False)\n for default in kwargs.keys():\n setattr(instance, default, kwargs[default])\n instance.save()\n messages.success(request, _(\"%s was created.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Create\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )", "def new():\n \n\n default_room_areas = [ \n x['room_area_name']\n for x in libroom.get_default_room_areas()\n ]\n\n if request.method == 'GET':\n # If this is a get request, these will be query parameters\n user_id = request.args['user_id']\n rci_id = request.args['rci_id']\n\n return render_template('damage/new.html', \n room_areas=default_room_areas,\n user_id=user_id,\n rci_id=rci_id)\n\n rci_id = request.form['rci_id']\n item = request.form['item']\n text = request.form['text']\n logged_in_user = g.user\n\n libdamage.create_damage(logged_in_user=logged_in_user,\n rci_id=rci_id,\n item=item,\n text=text,\n image_url=None)\n\n return redirect(url_for('rci.edit', rci_id=rci_id))", "def add_item():\n # Verify user login. 
If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n if request.method == 'POST':\n # Get form fields\n name = request.form['name']\n url = request.form['url']\n photo_url = request.form['photo_url']\n description = request.form['description']\n category = request.form['item_category']\n # Retrieve the database ID of the selected category\n category_id = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n # Retrieve user's database ID for the item's database entry\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print('Database ID of category is {}.'.format(category_id.id))\n # Flash messages for incomplete item info\n if not request.form['name']:\n flash('Please add item name')\n return redirect(url_for('add_item'))\n if not request.form['url']:\n flash('Please add item URL')\n return redirect(url_for('add_item'))\n if not request.form['photo_url']:\n flash('Please add item photo URL')\n return redirect(url_for('add_item'))\n if not request.form['description']:\n flash('Please add a description')\n return redirect(url_for('add_item'))\n # Query database for item name\n item_name_in_db = (session.query(Items.name)\n .filter_by(name=name)\n .all())\n # If the item name is already in the database, don't add\n if item_name_in_db:\n print('Item name \"{}\" already in database.'.format(name))\n flash('Item name \"{}\" already in database.'.format(name))\n return redirect(url_for('add_item'))\n # Create object with form field info to add to database\n new_item = Items(name=name,\n url=url,\n photo_url=photo_url,\n description=description,\n category_id=category_id.id,\n creator_db_id=user_db_id)\n session.add(new_item)\n session.commit()\n print('Item \"{}\" created.'.format(new_item.name))\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Query database with SQLAlchemy to display categories on page\n categories = session.query(Categories).all()\n # Render webpage\n return render_template('add_item.html',\n categories=categories,\n login_status=login_status)", "def add_item(request):\n if request.user.is_superuser:\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES)\n if form.is_valid():\n new_item = form.save()\n messages.success(request, 'Your product was added to the '\n 'store successfully.')\n return redirect(reverse('item_info', args=[new_item.id]))\n else:\n messages.error(request, 'There was an issue adding the '\n 'product. 
Please ensure the form is valid.')\n else:\n form = ProductForm()\n else:\n messages.error(request, 'Sorry, you do not have permission to access '\n 'this page.')\n return redirect(reverse('home'))\n\n template = 'shop/add_item.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def test_add_item_using_post(self):\n pass", "def create(self):\n ...", "def render_create_user_page():\n\n return render_template(\"create_user.html\")", "def get(self):\n return render_template('add.html')", "def post(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, \"tipositems\")\n url_action = \"./\"\n\n pp = PoseePermiso('redefinir tipo item',id_tipo_item=id_tipo_item)\n \n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(url_action)\n \n if kw.has_key(\"sprox_id\"):\n del kw[\"sprox_id\"]\n\n tipo = TipoItem.por_id(id_tipo_item)\n try:\n tipo.agregar_atributo(**kw)\n except NombreDeAtributoError, err:\n flash(unicode(err), \"warning\")\n\n redirect(url_action)", "def post(self):\n data = request.json\n create_ue(data)\n return None, 201", "def menu_item_new(restaurant_id):\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n img_id = 0\n if request.method == 'POST':\n if 'file' in request.files:\n print(\"File found\")\n img_id = helper.create_new_image_if_not_exists(file=request.files['file'],\n title=request.form['img_name'])\n new_item = MenuItem(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n course=request.form['course'],\n likes=0,\n dislikes=0,\n restaurant_id=restaurant_id,\n user_id=login_session['user_id'],\n image_id=img_id)\n session.add(new_item)\n session.commit()\n flash(\"New Menu Item {} created!\".format(new_item.name))\n return redirect(url_for('restaurant_menu', restaurant_id=restaurant_id))\n else:\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newmenuitem.html', restaurant=restaurant, user_info=user_info)", "def newMenuItem(restaurant_id):\n\n if 'access_token' not in flask_session:\n return logInRedirect()\n restaurant = session.query(Restaurant).filter_by(id = restaurant_id).first()\n user_id = getUserId(flask_session['email'],flask_session['google_plus_id'])\n if not restaurant.user_id == user_id:\n flash(\"Only restaurant owners can add new items.\")\n return redirect(url_for(\"publicMenu\",restaurant_id = restaurant_id))\n\n if request.method == \"POST\":\n new_name = request.form['new_name']\n print \"\\nnewMenuItem POST triggered, name is: \", new_name\n newMenuItem = MenuItem( name=new_name,\n restaurant_id=restaurant.id )\n session.add(newMenuItem)\n session.commit()\n flash( \"new item '\" + new_name + \"' created!\")\n print \"POST worked!\"\n return redirect(url_for(\"showMenu\", restaurant_id=restaurant.id))\n\n else:\n return render_template('newMenuItem.html', restaurant = restaurant)", "def createchore():\n return render_template(\"newchore.html\")", "def createItem(self, parentFolderId, name, description) :\n path = 'item'\n params = { 'folderId': parentFolderId,\n 'name': name,\n 'description': description }\n obj = self.sendRestRequest('POST', path, params)\n if '_id' in obj :\n return obj['_id']\n else :\n raise Exception('Error, expected the returned item object to have an \"_id\" field')", "def add_new_user():\n 
return render_template('new.html')", "def goto_make_new_user():\n\n return render_template('users/new.html')", "def test_create_item(self):\n item = self.item\n\n self.assertTrue(isinstance(item, Item))\n self.assertEqual(item.name, \"Test Item\")", "def createItem(name, category, price, user_id):\n try:\n description = wikipedia.summary(name)\n except wikipedia.exceptions.DisambiguationError as e:\n description = wikipedia.summary(name + \" \" + category.name)\n\n i = Item(name=name, description=description,\n category_id=category.id, price=price, user_id=user_id)\n session.add(i)\n session.commit()\n print 'Item \"' + name + '\" added.'\n return i", "def handle_add(self, controller):\n \n controller.customer.CreatePizza()\n controller.show_frame(PageOne)", "def post_create_item_with_http_info(self, name, **kwargs):\n\n all_params = ['name', '_from', 'mode', 'body', 'jenkins_crumb', 'content_type']\n all_params.append('async')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method post_create_item\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `post_create_item`\")\n\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'name' in params:\n query_params.append(('name', params['name']))\n if '_from' in params:\n query_params.append(('from', params['_from']))\n if 'mode' in params:\n query_params.append(('mode', params['mode']))\n\n header_params = {}\n if 'jenkins_crumb' in params:\n header_params['Jenkins-Crumb'] = params['jenkins_crumb']\n if 'content_type' in params:\n header_params['Content-Type'] = params['content_type']\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['text/html'])\n\n # Authentication setting\n auth_settings = ['jenkins_auth']\n\n return self.api_client.call_api('/createItem', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n async=params.get('async'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def show_new(thing):\n add_template_variable('thing', thing)\n return my_render_template('generic/create.html')", "def create_item(obj: endpoint_model):\n # should this error if exists?\n new_obj = db.save(obj)\n return new_obj", "def post(self, request, *args, **kwargs):\n return super().create(request, *args, **kwargs)", "def create(cls, payload: dict) -> 'Item':\n payload['slug'] = create_order_slug()\n return super().create(payload)", "def post(self, request, *args, **kwargs):\n return super().create(*args, **kwargs)", "def create_user():\n\n return render_template(\"users/create_user.html\")", "def test_create_view_adds_to_db(testapp):\n post_params = {\n 'title': 'Some Title.',\n 'body': 'Some Body.'\n }\n response = testapp.post('/journal/new-entry', 
post_params, status=302)\n full_response = response.follow()\n assert full_response.html.find(class_='entryListItem').a.text == post_params[\"title\"]", "def create_account():\n\n return render_template('account.html')", "def newItem(category_id):\n editedCategory = session.query(Category). \\\n filter_by(id=category_id).one()\n if editedCategory.user_id != login_session['user_id']:\n flash('You are not authorized to edit this Category.\\\n Please create your own Category in order to edit.')\n return redirect(url_for('showCategory', category_id=category_id))\n if request.method == 'POST':\n newItem = Item(\n name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n category_id=category_id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Item created')\n return redirect(url_for('showCategory',\n category_id=category_id))\n else:\n return render_template('newItem.html', category_id=category_id)", "def create_item_command(cog_href: str, destination: str) -> None:\n item = stac.create_item(cog_href)\n\n item.save_object(dest_href=destination)", "def addCatalogItem(sport_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n if request.method == 'POST':\n newCatalogItem = Item(\n name=request.form['itemName'],\n description=request.form['itemDescription'],\n sport_id=sport_id,\n user_id=login_session['user_id'])\n session.add(newCatalogItem)\n session.commit()\n return redirect(url_for('showCatalog', sport_id=sport_id))\n else:\n return render_template('newcatalogitem.html', sport_id=sport_id)", "def create_work_item(self):", "def get(self, user):\n return self.render(\"post-new.html\", user=user)", "def create():", "def create():", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "async def post(self) -> JSONResponse:\n\n identifier = self.data[\"id\"]\n action_name = self.data[\"action_name\"]\n item = self.core.item_manager.items.get(identifier)\n if not item:\n return self.error(\n ERROR_ITEM_NOT_FOUND,\n f\"No item found with identifier {identifier}\", status_code=404)\n\n try:\n content = (await self.request.content.read()).decode()\n kwargs = json.loads(content) if content else {}\n except json.JSONDecodeError as e:\n return self.error(e)\n\n if action_name not in item.actions:\n return self.error(\n ITEM_ACTION_NOT_FOUND,\n f\"Item {identifier} of type {item.type} \"\n f\"does not have an action {action_name}\")\n\n try:\n return self.json({\n \"item\": item.identifier,\n \"action\": action_name,\n \"result\": await item.run_action(action_name, kwargs)\n })\n # pylint: disable=broad-except\n except Exception as e:\n LOGGER.error(\"An error occured executing an action\", exc_info=True)\n return self.error(e)", "def test_shoppingitems_creation(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n # make a post request to add an item\n res = self.app.post(\n '/shoppingitems/Easter', data={'item-name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n response = self.item_class_obj.add_item(\n 'Easter', 'Bread', '[email protected]')\n self.assertIsInstance(response, list)\n # check if item was successfully created\n self.assertIn(\"Bread\", str(res.data))", "def test_handler_create(self, superuser, client):\n request = create_request(\"POST\", \"/create\",\n 
data=dict(title=\"Test\",\n slug=\"test\",\n language=\"en\",\n type=Type1.get_name()))\n request.user = superuser\n\n handler = MainHandler()\n res = handler.dispatch(request, nodepath=\"\", handlerpath=\"create\")\n assert res.status_code == 302\n\n node = Node.get(\"/test\")\n assert node.content().title == \"Test\"\n assert node.content().owner == superuser", "def show_new_user_page():\n\n return render_template(\"new_user.html\")", "def add():\n if request.method == \"POST\":\n result = add_post(\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n return render_template(\"add.html\")", "def create(self):\n\n pass", "def show(self, item_id):\n pass", "def test_create_item_good(test_client, item):\n\n response = test_client.post(BASE_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 201\n assert data['item']['name'] == item['name']\n assert data['item']['value'] == item['value']\n assert data['item']['id'] > 0", "def test_get_create_page(self):\n\n url = reverse('create-notification')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def test_get_create_page(self):\n\n url = reverse('create-notification')\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)" ]
[ "0.74668145", "0.71456575", "0.6886429", "0.6816857", "0.68146276", "0.6764137", "0.65232", "0.6477503", "0.6445127", "0.6436602", "0.6388984", "0.6381548", "0.634037", "0.6285156", "0.6273723", "0.624713", "0.62171143", "0.6188557", "0.6186302", "0.61858845", "0.61284363", "0.61182874", "0.61075354", "0.6101788", "0.6090157", "0.6083825", "0.6071801", "0.6023216", "0.59993005", "0.5965686", "0.5962984", "0.59554565", "0.5923534", "0.5912299", "0.59082913", "0.59017867", "0.5896042", "0.5894627", "0.5879213", "0.58387053", "0.5833262", "0.5809291", "0.57911706", "0.5786822", "0.5777832", "0.5764679", "0.5744932", "0.5732828", "0.57279354", "0.57279354", "0.5714395", "0.57015705", "0.56901395", "0.5669704", "0.56594324", "0.56418574", "0.56412923", "0.56399643", "0.562397", "0.5623321", "0.56224775", "0.5611692", "0.56088436", "0.56033236", "0.55914956", "0.5589297", "0.55793446", "0.5578879", "0.55779815", "0.55678177", "0.5565862", "0.5564605", "0.55632484", "0.55621725", "0.5555469", "0.5554958", "0.5547795", "0.5545792", "0.5531986", "0.5531693", "0.55306154", "0.5524167", "0.55222887", "0.55099565", "0.5509136", "0.55069035", "0.55069035", "0.5502411", "0.5502411", "0.5502411", "0.5496167", "0.5494714", "0.54883283", "0.54832757", "0.5480173", "0.54779494", "0.54741156", "0.54737103", "0.5472652", "0.5472652" ]
0.7436075
1
Post endpoint to create an item. If the form is invalid, it returns the create item page with errors displayed; otherwise it creates the item and redirects to the item page.
def create_item():
    name = request.form['name']
    catagory = request.form['catagory']
    description = request.form['description']
    errors = form_errors(request.form)
    if errors:
        catagories = [c.name for c in Catagory.fetch_all()]
        values = {
            'name': name,
            'catagory': catagory,
            'description': description
        }
        return render_template(
            'add_item.html',
            catagories=catagories,
            values=values,
            errors=errors
        )
    Item.create(name, catagory_name=catagory, description=description)
    return redirect(url_for(
        'read_item', catagory_name=catagory, item_name=name
    ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_item():\n form = ItemForm()\n user = current_user\n\n # If the form is validated, add its data to the database\n if form.validate_on_submit():\n\n # Check that an item with the same name and sport does not\n # already exist, or send a flash message and do not add the\n # new item to the database\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n\n # If the item does not yet exist, add all details to the\n # database, send a flash message, and redirect to 'home'\n else:\n name = form.name.data\n sport = form.sport.data\n category = form.category.data\n description = form.description.data\n private = form.private.data\n item = Item(name=name, sport=sport, category=category,\n description=description, private=private,\n user_id=user.id)\n db.session.add(item)\n db.session.commit()\n flash(f'\"{name}\" has been added!', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('new_item.html', form=form, title='New Item')", "def new_item():\n if request.method == 'POST':\n new_item = Item(\n category_id=int(request.form['category']),\n name=request.form['name'],\n description=request.form['description'],\n created_date=datetime.datetime.now(),\n user_id=login_session['user_id'])\n session.add(new_item)\n session.commit()\n return redirect(\n url_for(\n 'item_details',\n category_id=new_item.category_id,\n item_id=new_item.id))\n else:\n categories = session.query(\n Category).all()\n return render_template(\n 'views/add.html',\n categories=categories)", "def add_item():\n\n form = ItemForm()\n # Query for select field\n form.category_id.query = Category.query.filter(\n Category.user_id == current_user.id).all()\n\n if form.validate_on_submit():\n new_item = Item(\n category_id=form.category_id.data.id,\n name=form.name.data.capitalize(),\n description=form.description.data,\n user_id=current_user.id)\n db.session.add(new_item)\n db.session.commit()\n flash(\"New item '{}' was successfully created\".format(\n form.name.data.capitalize()), category='success')\n return redirect(url_for('url.index'))\n\n return render_template(\n 'forms/form.html',\n form_title='Add Item',\n form=form,\n form_name='item',\n action=url_for('url.add_item'))", "def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 'success')\n return redirect(url_for('showCatalog'))\n return render_template('new_item.html', categories=db.getAllCategories())", "def createItem(category_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n if request.method == 'POST':\r\n session = DBSession()\r\n item = Item(name=request.form['name'],\r\n description=request.form['description'],\r\n category_id=category_id,\r\n user_id=login_session['user_id'])\r\n session.add(item)\r\n session.commit()\r\n return redirect(url_for('showCategoryItems', category_id=category_id))\r\n else:\r\n return render_template('newitem.html', category_id=category_id)", "def newItem():\n if request.method == 'POST':\n if not checkLogin():\n return requests(url_for('catelog'))\n\n if request.form['name'].strip() == '':\n flash('item create failed: name is empty!')\n return redirect(url_for('newItem'))\n\n category = session.query(\n Category).filter_by(\n name=request.form['category']).one()\n\n 
ifCategory = session.query(Category).filter_by(\n name=request.form['category']).one()\n ifItem = session.query(Item).filter_by(\n category_id=ifCategory.id,\n name=request.form['name']).all()\n if (len(ifItem) > 0):\n flash('item create failed: item(%s) \\\n is already exist in category(%s)' % (\n ifItem[0].name,\n ifCategory.name))\n return redirect(url_for('catelog'))\n\n newItem = Item(\n name=request.form['name'],\n description=request.form['description'],\n category=category,\n auth=getLoginUser(),\n time=getIntTime())\n session.add(newItem)\n session.commit()\n\n flash('new item created: %s' % newItem.name)\n\n return redirect(url_for(\n 'itemDetail',\n category_name=category.name,\n item_name=newItem.name))\n else:\n all_category = session.query(Category).all()\n return render_template(\n 'new-item.html',\n all_category=all_category,\n isLogin=checkLogin())", "def insert_item():\n if 'userinfo' not in session.keys():\n session['target'] = url_for('insert_item')\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n creator_email = session['userinfo']['email']\n sqlsession = SQLSESSION()\n user = sqlsession.query(User).filter_by(email=creator_email).first()\n item = Item(name=request.form['name'],\n description=request.form['description'],\n category_id=int(request.form['category']),\n creator_id=user.id)\n sqlsession.add(item)\n sqlsession.commit()\n return redirect(\"/\")\n sqlsession = SQLSESSION()\n categories = sqlsession.query(Category).all()\n return render_template(\"new_item.html\",\n categories=categories)", "def add_item():\n # Verify user login. If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n if request.method == 'POST':\n # Get form fields\n name = request.form['name']\n url = request.form['url']\n photo_url = request.form['photo_url']\n description = request.form['description']\n category = request.form['item_category']\n # Retrieve the database ID of the selected category\n category_id = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n # Retrieve user's database ID for the item's database entry\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print('Database ID of category is {}.'.format(category_id.id))\n # Flash messages for incomplete item info\n if not request.form['name']:\n flash('Please add item name')\n return redirect(url_for('add_item'))\n if not request.form['url']:\n flash('Please add item URL')\n return redirect(url_for('add_item'))\n if not request.form['photo_url']:\n flash('Please add item photo URL')\n return redirect(url_for('add_item'))\n if not request.form['description']:\n flash('Please add a description')\n return redirect(url_for('add_item'))\n # Query database for item name\n item_name_in_db = (session.query(Items.name)\n .filter_by(name=name)\n .all())\n # If the item name is already in the database, don't add\n if item_name_in_db:\n print('Item name \"{}\" already in database.'.format(name))\n flash('Item name \"{}\" already in database.'.format(name))\n return redirect(url_for('add_item'))\n # Create object with form field info to add to database\n new_item = Items(name=name,\n url=url,\n photo_url=photo_url,\n description=description,\n category_id=category_id.id,\n creator_db_id=user_db_id)\n session.add(new_item)\n session.commit()\n 
print('Item \"{}\" created.'.format(new_item.name))\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Query database with SQLAlchemy to display categories on page\n categories = session.query(Categories).all()\n # Render webpage\n return render_template('add_item.html',\n categories=categories,\n login_status=login_status)", "def createNewItem(request):\n newItem = ItemSerializer(data=request.data)\n if newItem.is_valid():\n newItem.save()\n return Response(newItem.data, status=status.HTTP_201_CREATED)\n\n fail = {\n \"item\" : \"item is not valid\"\n }\n return JsonResponse(fail)", "def insert_item_page(request):\n validate(instance=request.body, schema=item_schema)\n body = json.loads(request.body)\n item = Item.new_item(body['cart_id'], body['food_id'], body['count'])\n return JsonResponse(model_to_json(item))", "def add_item(request):\n \n if not request.user.is_superuser:\n messages.error(request, 'Sorry, you are not permitted to do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = ProductForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request, 'New item added successfully!')\n return redirect(reverse('add_item'))\n else:\n messages.error(request, 'Failed to add item. Please check the form.')\n else:\n form = ProductForm()\n \n template = 'products/add_item.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def create_item():\n #if not request.json:\n # abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=False, help=\"Item code missing\")\n parser.add_argument('item_name', type=str, required=True, help=\"Item name missing\")\n parser.add_argument('size', type=str, required=True, help=\"Size missing\")\n parser.add_argument('color', type=str, required=True, help=\"Color missing\")\n parser.add_argument('quality', type=str, required=True, help=\"Quality missing\")\n parser.add_argument('username', type=str, required=True, help=\"Username missing\")\n args = parser.parse_args(strict=True)\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_item = dict(\n item_code = args['item_code'],\n item_name = args['item_name'],\n size_code = get_size_code( args['size']),\n color_code = get_color_code( args['color']),\n quality_code = get_quality_code( args['quality'])\n )\n try:\n u = models.Items(**new_item)\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'item code already exists.'}), 400)\n\n return make_response(jsonify({'success': True}))", "def add_item(request):\n if request.user.is_superuser:\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES)\n if form.is_valid():\n new_item = form.save()\n messages.success(request, 'Your product was added to the '\n 'store successfully.')\n return redirect(reverse('item_info', args=[new_item.id]))\n else:\n messages.error(request, 'There was an issue adding the '\n 'product. 
Please ensure the form is valid.')\n else:\n form = ProductForm()\n else:\n messages.error(request, 'Sorry, you do not have permission to access '\n 'this page.')\n return redirect(reverse('home'))\n\n template = 'shop/add_item.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def add_item():\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = Item()\n # First we populate the new item.\n item.category_id = request.form['categoryId']\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n # Now let's pull its category.\n category = session.query(Category).filter_by(id=item.category_id).one()\n # And make sure they're properly linked.\n item.category = category\n session.add(item)\n session.flush()\n id = item.id\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state'], 'id': id}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def new():\n session = current_app.config['db']\n if request.method == \"POST\":\n new_name = request.form['itemname']\n try:\n item = WineABV(name=new_name)\n session.add(item)\n session.commit()\n except exc.IntegrityError:\n session.rollback()\n flash(\"Duplicate values!\", 'danger')\n item = WineABV(name=new_name)\n return render_template(template_prefix+'/new_form.html', item=item)\n\n flash(\"Successfully Added '%s'\" % (new_name,), 'success')\n return redirect(url_for('.show'))\n else:\n item = WineABV(name=\"\")\n return render_template(template_prefix+'new_form.html', item=item)", "def newItem(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n if request.method == 'POST':\n newItem = Item(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'], category_id=category.id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Item %s Successfully Created' % (newItem.name))\n return redirect(url_for('showItem', category_id=category.id))\n else:\n return render_template('newitem.html', category_id=category.id)", "def post():\n\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(title=form.title.data,\n date=form.date.data,\n time_spent=form.time_spent.data,\n details=form.details.data,\n remember=form.remember.data)\n return redirect(url_for('index'))\n return render_template('new.html', form=form)", "def post(self):\n try:\n new_form = FORM_SCHEMA.load(request.json).data\n except ValidationError as err:\n APP.logger.error(err.args)\n return err.messages, status.HTTP_400_BAD_REQUEST\n\n add_new_form = Form(**new_form)\n DB.session.add(add_new_form)\n\n try:\n DB.session.commit()\n except IntegrityError as err:\n APP.logger.error(err.args)\n DB.session.rollback()\n return {'error': 'Already exists.'}, status.HTTP_400_BAD_REQUEST\n return Response(status=status.HTTP_201_CREATED)", "def post(self, item):\n\n db.session.add(item)\n\n return item", "def addItem(category_id):\r\n # authentication\r\n if 'username' not in login_session:\r\n flash('Please login to add 
item')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n # validation\r\n category = session.query(Category).filter_by(id=category_id).first()\r\n if not category:\r\n flash('Attempted operation on non-existent category')\r\n return redirect(url_for('showCategories'))\r\n\r\n if request.method == 'POST':\r\n # create operation\r\n name = request.form['name']\r\n description = request.form['description']\r\n if not name:\r\n flash('Add ItemError: Name can\\'t be empty')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n newItem = Item(name=name, description=description,\r\n category_id=category_id, user_id=category.user_id)\r\n session.add(newItem)\r\n session.commit()\r\n flash('Added Item \\'{}\\' Successfully!'.format(newItem.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n else:\r\n # serve GET requests with the form\r\n return render_template(\"addItem.html\", category=category)", "def create():\r\n form = ArticleForm(request.form)\r\n\r\n # Check request method and validate form\r\n if request.method == 'POST' and form.validate():\r\n data = {}\r\n data['article_id'] = uuid.uuid4().hex\r\n data['title'] = form.title.data\r\n data['description'] = form.description.data\r\n\r\n data = dict((k, v) for k, v in data.items() if v)\r\n\r\n # Save data in DynamoDb table\r\n response = table.put_item(Item=data)\r\n\r\n if response:\r\n flash('Article is successfully added')\r\n return redirect(url_for('article.list'))\r\n\r\n return render_template('article/form.html', add_article=True,\r\n form=form, title='Add Article')", "def create_item(self, user: User, **kwargs) -> None:", "def add():\n if request.method == \"POST\":\n result = add_post(\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n return render_template(\"add.html\")", "def add_item(request, shoppinglist_id, category_id=False, product_id=False):\n if request.method == 'POST':\n form = ItemForm(request.POST)\n if form.is_valid():\n shoppinglist = get_object_or_404(\n Shoppinglist,\n pk=shoppinglist_id,\n pantry__owner=request.user\n )\n product = get_object_or_404(Product, pk=product_id)\n try:\n item = Item.objects.get(shoppinglist=shoppinglist,\n product=product)\n item.amount += form.cleaned_data['amount']\n except ObjectDoesNotExist:\n item = Item(shoppinglist=shoppinglist,\n product=product,\n amount=form.cleaned_data['amount'],\n bought=False)\n item.save()\n return redirect('shoppinglists.views.detail', shoppinglist_id)\n\n response_dict = {'shoppinglist_id': shoppinglist_id,\n 'categories': Category.objects.all(),\n 'logged': False}\n if category_id:\n response_dict.update(\n {'category_id': category_id,\n 'category': Category.objects.get(pk=category_id),\n 'products': Product.objects.filter(categories__pk=category_id)}\n )\n if product_id:\n response_dict.update(\n {'form': ItemForm(),\n 'product': Product.objects.get(pk=product_id),\n 'product_id': product_id}\n )\n return render_to_response('shoppinglists/item_form.html',\n response_dict,\n context_instance=RequestContext(request))", "def save_item(item, item_id):\n # User is modifying an EXISTING item in the database\n if item_id > 0:\n item.Item.name = request.form['title']\n item.Item.description = request.form['description']\n item.Item.category_id = request.form['category']\n session.add(item.Item)\n session.commit()\n flash(\"Updated \" + item.Item.name)\n return render_template('item_details.html', item=item, 
login_session=login_session)\n\n # User is creating a NEW item\n else:\n new_item = Item(name=request.form.get('title'), description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['userid'])\n session.add(new_item)\n session.commit()\n flash(\"Created \" + new_item.name)\n created_item = session.query(Item, User).filter(Item.id == new_item.id).join(User).first()\n return render_template('item_details.html', item=created_item, login_session=login_session)", "def add_view(self, request):\r\n instance_form = self.get_minimal_add_form()\r\n form = instance_form(request.POST, request.FILES, prefix=self.base_url())\r\n\r\n new_instance = None\r\n if form.is_valid():\r\n new_instance = form.save()\r\n template = select_template(self.item_add_template)\r\n context = RequestContext(request)\r\n context.update({\r\n \"insert\": self,\r\n \"form\": form,\r\n \"object\": new_instance\r\n })\r\n response = HttpResponse(template.render(context))\r\n response.status_code = 201\r\n return response\r\n response = HttpResponse(form.errors)\r\n response.status_code = 400\r\n return response", "def newMenuItemPage(restaurant_id):\n restaurant = db_methods.searchResByID(restaurant_id)\n res_id = restaurant_id\n user_id = login_session['user_id']\n if request.method == 'POST':\n item_name = request.form['item_name']\n item_price = request.form['item_price']\n item_desc = request.form['item_desc']\n item_course = request.form['item_course']\n if item_name and item_price and item_desc and item_course:\n db_methods.addNewMenuItem(user_id, item_name, item_price, \n item_desc, item_course, res_id)\n time.sleep(0.1)\n return redirect(\"/restaurants/%s/menu/\" % res_id)\n else:\n error = \"Please be sure to fill out all required fields.\"\n return render_template('newmenuitem.html', error = error)\n else:\n return render_template('newmenuitem.html', res_id = res_id)", "def restaurantMenuItemNew(restaurant_id):\n try:\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n if request.method == 'POST':\n if request.form['name']:\n newItem = MenuItem(name=request.form['name'], description=request.form[\n 'description'], price=request.form['price'], course=request.form['course'], restaurant_id=restaurant_id)\n session.add(newItem)\n session.commit()\n\n flash('Menu Item Created', 'menu')\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template('menuItemNew.html', restaurant=restaurant)\n\n except exc.NoResultFound:\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))", "def test_add_item_using_post(self):\n pass", "def new_post():\n form = PostForm()\n if form.validate_on_submit():\n post = Post(pub_date=datetime.date.today())\n post.title = form.title.data\n post.content = form.content.data\n post.slug = slugify(post.title)\n db.session.add(post)\n db.session.commit()\n return flask.redirect(flask.url_for(\n 'view_post',\n year=post.pub_date.year,\n month=post.pub_date.month,\n day=post.pub_date.day,\n slug=post.slug\n ))\n return flask.render_template('new.html', form=form)", "def post(self, request):\n try:\n data = json.loads(request.body)\n except ValueError:\n return HttpResponseBadRequest('Not valid JSON!')\n\n form = MobileForm(data)\n\n if form.is_valid():\n mobile = form.save()\n response = HttpResponse(status=201)\n response['Location'] = '/mobiles/' + str(mobile.id)\n return response\n else:\n return HttpResponseBadRequest('Invalid data!')", "def add_item():\n\n form = 
AddOrEditItemForm(Category.query.order_by(Category.name).all())\n img_upload_name = None\n if form.validate_on_submit():\n img_upload_name = secure_filename(form.img_upload.data.filename)\n img_deletehash = None\n img_url = None\n\n # Upload image to Imgur if FileField is specified\n if img_upload_name != '':\n img_url, img_deletehash = upload_image(form.img_upload.data)\n if img_url is None or img_deletehash is None:\n flash(\"Failed to upload image.\")\n return redirect(url_for('.index'))\n elif form.img_url.data != '':\n img_url = form.img_url.data\n\n new_item = Item(name=form.name.data, description=form.description.data,\n category=Category.query.get(form.category.data),\n img_url=img_url, img_deletehash=img_deletehash,\n owner=current_user._get_current_object())\n\n try:\n db.session.add(new_item)\n db.session.commit()\n except:\n flash(\n (\"Failed to add item \\\"%s\\\".\"\n \" Make sure that the item name is unique.\") % new_item.name)\n else:\n flash(\"A new item \\\"%s\\\" has been added.\" % new_item.name)\n finally:\n return redirect(url_for('.index'))\n\n # Set SelectField's default value\n category_name = request.args.get('category_name')\n if category_name is not None:\n default_category = Category.query.filter_by(name=category_name).first()\n if default_category is None:\n flash(\"Wrong parameter(s).\")\n return redirect(url_for('.index'))\n form.category.data = default_category.id\n\n return render_template('add_or_edit.html',\n form=form, filename=img_upload_name)", "def put_on_sale():\n\n item = {\n \"status\": 'for_sale',\n \"category\": request.form['item-type'],\n \"name\": request.form['item-name'],\n \"price\": request.form['item-price'],\n \"description\": request.form['item-description'],\n \"mail\": request.form['seller-email']\n }\n\n put_item(item)\n\n return redirect('/')", "def post(self):\r\n data = request.form\r\n return create(data=data)", "def newListItem(category_id):\n\n if 'username' not in login_session:\n return redirect('/login')\n\n category = session.query(Category).filter_by(id=category_id).one()\n\n if category.user_id != login_session['user_id']:\n flash('You are not the creator of %s category, and cannot modify it' %\n category.name)\n return redirect(url_for('showItems', category_id=category.id))\n if request.method == 'POST':\n if \"btn_new\" in request.form:\n newItem = ListItem(name=request.form['name'],\n description=request.form['description'],\n category_id=category_id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Catalog Item: %s Successfully Created' % (newItem.name))\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return render_template('newitem.html',\n category_id=category_id,\n user=getUserInfo(login_session['user_id']))", "def test_create_item_good(test_client, item):\n\n response = test_client.post(BASE_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 201\n assert data['item']['name'] == item['name']\n assert data['item']['value'] == item['value']\n assert data['item']['id'] > 0", "def new_pet_form():\n form = PetFormNew()\n if form.validate_on_submit():\n form_data = {k: v for k, v in form.data.items() if k != \"csrf_token\"}\n new_pet = Pet(**form_data)\n \n db.session.add(new_pet)\n db.session.commit()\n return redirect('/')\n else:\n return render_template('pet_new.html', form=form)", 
"def create():\n if request.method == 'POST':\n if request.form.get('title') and request.form.get('content'):\n entry = Entry.create(\n title = request.form.get('title'),\n content = request.form.get('content'),\n published = request.form.get('published') or False)\n flash('Entry created successfully!', 'success')\n if entry.published:\n return redirect(url_for('detail', slug=entry.slug))\n else:\n return redirect(url_for('edit', slug=entry.slug))\n else:\n flash('Title and Content are required!', 'danger')\n return render_template('create.html')", "def do_POST(self):\n try:\n if self.path.endswith(\"/restaurant/new\"):\n ctype, pdict = cgi.parse_header(self.headers.getheader('Content-type'))\n if ctype == 'multipart/form-data':\n fields = cgi.parse_multipart(self.rfile, pdict)\n restaurantArray = fields.get('restaurant')\n\n # create a new Restaurant\n newRestaurantObject = Restaurant()\n newRestaurantObject.save(restaurantArray[0])\n\n self.send_response(301)\n self.send_header('Content-Type', 'text/html')\n self.send_header('Location', '/restaurants')\n self.end_headers()\n return\n except:\n pass", "def create_product():\n form = ProductForm(request.form)\n if form.validate():\n product = Product()\n product.name = form.name.data\n product.price = form.price.data\n product.quantity = form.quantity.data\n product.description = form.description.data\n product.category = form.category.data\n product.unique_tag = form.unique_tag.data\n db.session.add(product)\n db.session.commit()\n flash(f\"Product {product.name} created!\")\n return redirect(url_for('get_products'))\n\n flash(\"Invalid data\")\n return redirect(url_for('get_products'))", "def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))", "def create_view(request, title, modelform, **kwargs):\n instance_form = modelform(request.POST or None)\n if instance_form.is_valid():\n instance = instance_form.save(commit=False)\n for default in kwargs.keys():\n setattr(instance, default, kwargs[default])\n instance.save()\n messages.success(request, _(\"%s was created.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Create\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )", "def issueCreateSave(request, item=None, group=None):\n\n if item == None and group == None:\n return True\n\n data = request.POST.copy()\n\n # update the statuses for the issue\n # instance passed is an issue, so we need to get the Machine out of it\n form = forms.itemStatusForm(data)\n\n if form.is_valid():\n return form.save(machine=item, group=group)\n\n return False", "def newItem(category_id):\n 
editedCategory = session.query(Category). \\\n filter_by(id=category_id).one()\n if editedCategory.user_id != login_session['user_id']:\n flash('You are not authorized to edit this Category.\\\n Please create your own Category in order to edit.')\n return redirect(url_for('showCategory', category_id=category_id))\n if request.method == 'POST':\n newItem = Item(\n name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n category_id=category_id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Item created')\n return redirect(url_for('showCategory',\n category_id=category_id))\n else:\n return render_template('newItem.html', category_id=category_id)", "def post(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, \"tipositems\")\n url_action = \"./\"\n\n pp = PoseePermiso('redefinir tipo item',id_tipo_item=id_tipo_item)\n \n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(url_action)\n \n if kw.has_key(\"sprox_id\"):\n del kw[\"sprox_id\"]\n\n tipo = TipoItem.por_id(id_tipo_item)\n try:\n tipo.agregar_atributo(**kw)\n except NombreDeAtributoError, err:\n flash(unicode(err), \"warning\")\n\n redirect(url_action)", "def create_appointment():\n\n form = AppointmentForm()\n\n if form.validate_on_submit():\n\n appointment = Appointment(\n title = form.title.data,\n description = form.description.data,\n location = form.location.data,\n start = form.start.data,\n client = form.client.data,\n user = current_user\n )\n\n try:\n db.session.add(appointment)\n db.session.commit()\n\n flash('Successfully created the appointment.')\n\n return redirect(url_for('appointment.read_appointments'))\n except:\n flash('Error creating the appointment')\n\n return render_template('appointments/form.html.j2', form=form, title='Create appointment')", "def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n self.form_invalid_init(form=form)\n self.form_invalid_add_global_errormessages(form=form)\n return self.form_invalid(form)", "def create_todo_list_view(request: HttpRequest) -> Union[HttpResponse, HttpResponseRedirect]:\n if request.method == 'GET':\n form = TodoListForm()\n\n return render(request, 'todo/create_todo_list.html', {'form': form})\n elif request.method == 'POST':\n form = TodoListForm(data=deepcopy(request.POST))\n\n if form.is_valid():\n todo_list = form.save()\n\n return redirect(todo_list.get_absolute_url())\n else:\n return render(request, 'todo/create_todo_list.html', {'form': form})", "def create_item(item: Item):\n coll_users = data_access.get_user_collection()\n coll_items = data_access.get_items_collection()\n\n if not item.users:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"Empty user list not allowed.\")\n\n if not item.content:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n \"No description / content given.\")\n\n for user_name in item.users:\n if coll_users.find_one({\"name\": user_name}) is None:\n raise HTTPException(status.HTTP_400_BAD_REQUEST,\n f\"User {user_name} not exists in the user list.\")\n\n item_dict = item.dict()\n item_dict[\"item_id\"] = uuid.uuid4()\n\n tm_now = datetime.datetime.now().isoformat()\n item_dict[\"status_change_date\"] = tm_now\n\n coll_items.insert_one(item_dict)", "def addCatalogItem(sport_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n if request.method == 'POST':\n newCatalogItem = Item(\n 
name=request.form['itemName'],\n description=request.form['itemDescription'],\n sport_id=sport_id,\n user_id=login_session['user_id'])\n session.add(newCatalogItem)\n session.commit()\n return redirect(url_for('showCatalog', sport_id=sport_id))\n else:\n return render_template('newcatalogitem.html', sport_id=sport_id)", "def post(self):\n data = request.json\n create_entry(data)\n return None, 201", "def todos_create_page():\n todo = Todo()\n if todo.form_submit():\n todo.update(mongo.db)\n print('Created new TODO: {text}'.format(**todo.doc))\n return redirect('/')\n else:\n return render_template(\n template_name_or_list='todo.html',\n todo=todo,\n handle='Create')", "def post_create(request):\n\tform = PostForm(request.POST or None, request.FILES or None)\n\tif request.POST:\n\t\tif form.is_valid():\n\t\t\tinstance = form.save(commit=False)\n\t\t\tinstance.user = request.user\n\t\t\tinstance.save()\n\t\t\tmessages.success(request, \"Post created!\")\n\t\t\treturn HttpResponseRedirect(instance.get_absolute_url())\n\t\telse:\n\t\t\tmessages.error(request, \"Sorry! Something went wrong.\", extra_tags=\"\")\n\tcontext = {\n\t\t'title': \"Create Post\",\n\t\t'form' : form,\n\t}\n\treturn render(request, 'post/create.html', context)", "def newMenuItem(restaurant_id):\n\n if 'access_token' not in flask_session:\n return logInRedirect()\n restaurant = session.query(Restaurant).filter_by(id = restaurant_id).first()\n user_id = getUserId(flask_session['email'],flask_session['google_plus_id'])\n if not restaurant.user_id == user_id:\n flash(\"Only restaurant owners can add new items.\")\n return redirect(url_for(\"publicMenu\",restaurant_id = restaurant_id))\n\n if request.method == \"POST\":\n new_name = request.form['new_name']\n print \"\\nnewMenuItem POST triggered, name is: \", new_name\n newMenuItem = MenuItem( name=new_name,\n restaurant_id=restaurant.id )\n session.add(newMenuItem)\n session.commit()\n flash( \"new item '\" + new_name + \"' created!\")\n print \"POST worked!\"\n return redirect(url_for(\"showMenu\", restaurant_id=restaurant.id))\n\n else:\n return render_template('newMenuItem.html', restaurant = restaurant)", "def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.validate():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)", "def create_post(bid):\n form = PostForm(request.form)\n if request.method == 'POST':\n if form.validate():\n DB.session.add(\n Post(\n bid,\n current_user.uid,\n form.name.data,\n form.desc.data))\n DB.session.commit()\n flash('Post ({}) successfully created!'.format(form.name.data))\n else:\n flash(constants.DEFAULT_SUBMISSION_ERR)\n return redirect(request.referrer)", "def create(thing):\n fields = {}\n errors = []\n\n for col in thing.cols:\n new[col.field_name] = request.form.get(col.field_name)\n if col.required and not new[col.field_name]:\n errors.append('%s cannot be empty' % col.human_name)\n\n if errors:\n for e in errors:\n flash(e)\n add_template_variable('thing', thing)\n add_template_variable('fields', fields)\n return my_render_template('generic/create_post.html')\n\n # insert into database\n\n db = get_db()\n cursor = db.cursor()\n\n # create the two strings we use in the query\n field_names = \"'\" + \"', '\".join(thing.field_names) + \"'\"\n question_marks = \", \".join(map(lambda x: '?', thing.field_names.count() ))\n\n cursor.execute(\"insert into posts (%s) values (%s)\" % (field_names, question_marks), (title, body))\n db.commit()\n new_id = cursor.lastrowid\n\n # show new post to the 
user\n flash(\"You made a new %s\" % thing.human_name)\n return redirect(url_for('show_one', id_=new_id))", "def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n self.use_template(self.render_template())\n return self.form_valid(form)\n else:\n return self.form_invalid(form)", "def add_product(request):\n\n if request.method == 'POST':\n form = ProductPostForm(request.POST, request.FILES)\n if form.is_valid():\n product = form.save()\n return redirect('product_details', product.id)\n else:\n form = ProductPostForm()\n return render(request, 'addproductform.html', {'form': form})", "def add_to_bag(request, item_id):\n car = get_object_or_404(Car, pk=item_id)\n insurance = car.insurance\n support = car.support\n\n item = request.POST.get('item')\n bag = request.session.get('bag', {})\n\n if item == 'car':\n bag[\"car_id\"] = item_id\n request.session['bag'] = bag\n return redirect(reverse(\"car_insurance\", kwargs={\"id\": car.id}))\n\n elif item == 'insurance':\n bag[\"insurance\"] = insurance\n request.session['bag'] = bag\n return redirect(reverse(\"car_support\", kwargs={\"id\": car.id}))\n\n elif item == 'support':\n bag[\"support\"] = support\n request.session['bag'] = bag\n return redirect(reverse(\"checkout\"))", "def admincreate(object):\n if request.method == \"POST\":\n\n db = get_db()\n execute_string = 'INSERT INTO ' + object.title()\n\n if object == 'post':\n execute_string += '(title, content, authorId, categoryId) VALUES (\"' + request.form['title'] + '\", \"' + request.form[\"content\"] + '\", \"' + request.form[\"authorid\"] + '\", \"' + request.form[\"categoryid\"] + '\")'\n elif object == 'author':\n execute_string += '(name) VALUES (\"' + request.form['name'] + '\")'\n elif object == 'category':\n execute_string += '(name, description) VALUES (\"' + request.form['name'] + '\", \"' + request.form[\"description\"] + '\")'\n\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n return render_template(\"new.html\", object=object, item={})", "def post(self, request, *args, **kwargs):\n form = self.get_form(self.form_class)\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)", "def newRestaurantPage():\n if 'username' not in login_session:\n return redirect('/login')\n if request.method == 'POST':\n res_name = request.form['res_name']\n user_id = login_session['user_id']\n if res_name:\n db_methods.addNewRestaurant(res_name, user_id)\n time.sleep(0.1)\n return redirect(\"/restaurants\")\n else:\n error = \"You need to enter the name of the restaurant you want to add.\"\n return render_template('newrestaurant.html', error = error)\n else:\n return render_template('newrestaurant.html')", "def add_items(list_id):\n\n item_title = request.form[\"item_title\"]\n item_description = request.form[\"item_description\"]\n user_id = session.get(\"user_id\")\n\n if not user_id:\n raise Exception(\"No user logged in.\")\n\n to_do_list = ToDoList.query.get(list_id)\n\n new_item = ToDoItem(item_title=item_title,\n item_description=item_description)\n to_do_list.to_do_items.append(new_item)\n db.session.add(new_item)\n db.session.commit()\n\n return redirect(f\"/lists/{list_id}\")", "def test_create_item_missing_name(test_client, item_without_name):\n\n response = test_client.post(BASE_URL,\n data=json.dumps(item_without_name),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 400\n assert data['error'] 
== app.BAD_REQUEST", "def create_listing(request):\n \n # if data is submitted\n if request.method == 'POST':\n # populate a form variable with user data\n form = Auction_listingForm(request.POST)\n \n # make sure form is valid and that user provides a minimum required price\n if form.is_valid():\n instance = form.save(commit=False)\n if instance.price >= 0.01:\n instance.user = request.user\n instance.save()\n return HttpResponseRedirect(reverse(\"index\"))\n # return an error message if user tries to bypass HTML verification\n else:\n return render(request, 'auctions/apology.html', {\n 'message': \"Looks you tried to bypass the HTML verification. Unfortunately, your hacker level is too low to break this site.\"\n })\n # return error message if form is not valid\n else:\n return render(request, 'auctions/apology.html', {\n 'message': \"Form is invalid.\"\n }) \n # if reached via URL\n else:\n form = Auction_listingForm()\n \n return render(request, 'auctions/create_listing.html', {'form': form})", "def post_create_item(self, name, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async'):\n return self.post_create_item_with_http_info(name, **kwargs)\n else:\n (data) = self.post_create_item_with_http_info(name, **kwargs)\n return data", "def create():\r\n if request.method == 'POST':\r\n title = request.form['title']\r\n body = request.form['body']\r\n error = None\r\n db = get_db()\r\n cur = db.cursor()\r\n\r\n cur.execute('SELECT title FROM novel.post WHERE title = %s', title)\r\n newTitle = cur.fetchone()\r\n\r\n if not title:\r\n error = 'Title is required.'\r\n\r\n if newTitle and newTitle['title'] == title:\r\n error = 'Title is repeated.'\r\n\r\n if error is not None:\r\n flash(error)\r\n else:\r\n db = get_db()\r\n db.cursor().execute(\r\n 'INSERT INTO novel.post (title, body, author_id) VALUES (\"{0}\", \"{1}\", \"{2}\")'\r\n .format(title, body, g.user['id'])\r\n )\r\n db.commit()\r\n return redirect(url_for('novel.index'))\r\n\r\n return render_template('novel/create.html')", "async def post(self) -> JSONResponse:\n\n identifier = self.data[\"id\"]\n action_name = self.data[\"action_name\"]\n item = self.core.item_manager.items.get(identifier)\n if not item:\n return self.error(\n ERROR_ITEM_NOT_FOUND,\n f\"No item found with identifier {identifier}\", status_code=404)\n\n try:\n content = (await self.request.content.read()).decode()\n kwargs = json.loads(content) if content else {}\n except json.JSONDecodeError as e:\n return self.error(e)\n\n if action_name not in item.actions:\n return self.error(\n ITEM_ACTION_NOT_FOUND,\n f\"Item {identifier} of type {item.type} \"\n f\"does not have an action {action_name}\")\n\n try:\n return self.json({\n \"item\": item.identifier,\n \"action\": action_name,\n \"result\": await item.run_action(action_name, kwargs)\n })\n # pylint: disable=broad-except\n except Exception as e:\n LOGGER.error(\"An error occured executing an action\", exc_info=True)\n return self.error(e)", "def drinks_submit():\n drink = {\n 'name': request.form.get('name'),\n 'price': request.form.get('price'),\n 'description': request.form.get('description'),\n 'images': request.form.get('images').split()\n }\n drink_id = drinks_collection.insert_one(drink).inserted_id\n return redirect(url_for('drinks_show', drink_id=drink_id))", "def post_project():\n\n title = request.form.get('title')\n description = request.form.get('description')\n max_grade = request.form.get('max_grade')\n\n hackbright.make_new_project(title, description, max_grade)\n\n 
flash(\"Successfully added new project.\")\n\n return redirect(\"/project?title={}\".format(title))", "def createItem(self, item):\r\n try:\r\n self.feed_handler.createItem(item.link, item.title, item.descr,\r\n item.source, item.channelURL)\r\n self.feed_passed = self.feed_passed + 1\r\n except Exception, ex: \r\n # Remove comment for detailed information on feed item created\r\n #print ex\r\n pass", "def pet_add_form():\n\n form = PetAddForm()\n\n if form.validate_on_submit():\n name = form.name.data\n species = form.species.data\n photo_url = form.photo_url.data\n age = form.age.data\n notes = form.notes.data\n\n flash(f\"Added a {species} called {name}\")\n\n pet = Pet(\n name=name,\n species=species,\n photo_url=photo_url if photo_url != '' else None,\n age=age,\n notes=notes\n )\n\n db.session.add(pet)\n db.session.commit()\n\n return redirect(\"/\")\n\n else:\n return render_template(\"pet_add_form.html\", form=form)", "def issueCreate(request):\n args = { 'statusForm' : forms.itemStatusForm(), }\n return render_to_string('issueCreate.html', args,\n context_instance=RequestContext(request))", "def menu_item_new(restaurant_id):\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n img_id = 0\n if request.method == 'POST':\n if 'file' in request.files:\n print(\"File found\")\n img_id = helper.create_new_image_if_not_exists(file=request.files['file'],\n title=request.form['img_name'])\n new_item = MenuItem(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'],\n course=request.form['course'],\n likes=0,\n dislikes=0,\n restaurant_id=restaurant_id,\n user_id=login_session['user_id'],\n image_id=img_id)\n session.add(new_item)\n session.commit()\n flash(\"New Menu Item {} created!\".format(new_item.name))\n return redirect(url_for('restaurant_menu', restaurant_id=restaurant_id))\n else:\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newmenuitem.html', restaurant=restaurant, user_info=user_info)", "def create_post():\n if \"image\" not in request.files:\n return {\"errors\": \"image required\"}, 400\n\n image = request.files[\"image\"]\n\n if not allowed_file(image.filename):\n return {\"errors\": \"file type not permitted\"}, 400\n\n image.filename = get_unique_filename(image.filename)\n\n upload = upload_file_to_s3(image)\n\n if \"url\" not in upload:\n\n return upload, 400\n\n url = upload[\"url\"]\n\n\n form = ShopForm()\n form['csrf_token'].data = request.cookies['csrf_token']\n if form.validate_on_submit():\n shop = Store(\n name=form.data[\"name\"],\n address=form.data[\"address\"],\n user_id=current_user.id,\n description=form.data[\"description\"],\n photo_url=url\n )\n db.session.add(shop)\n db.session.commit()\n return shop.to_dict()\n return {'errors': validation_errors_to_error_messages(form.errors)}, 401", "def new(request):\n if request.method == 'POST':\n form = ShoppinglistForm(request.POST, my_user=False)\n if form.is_valid():\n pantry = get_object_or_404(Pantry,\n pk=form.cleaned_data['pantry'].id)\n list = Shoppinglist(name=form.cleaned_data['name'],\n pantry=pantry)\n list.save()\n return redirect('blackem.users.views.home')\n else:\n form = ShoppinglistForm(my_user=request.user)\n return render_to_response('shoppinglists/shoppinglist_form.html',\n {'form': form,\n 'logged': True},\n context_instance=RequestContext(request))", "def 
create(request):\n if request.method == \"POST\":\n form = InitialInvoice(data=request.POST)\n if form.is_valid():\n data = form.cleaned_data\n return render(request,\n \"invoice/invoice_create.html\",\n {\n \"form\": ItemForm(),\n \"stage\": \"2\",\n \"initial_data\": data\n })\n\n return render(request,\n \"invoice/invoice_create.html\",\n {\n \"form\": InitialInvoice(),\n \"stage\": \"1\"\n })", "def edit_item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to edit the item because\n # the item was created by a different user, send a 403\n elif current_user != item.user:\n abort(403)\n\n form = ItemForm()\n\n # If the form is validated, update the item with its data to the\n # database\n if form.validate_on_submit():\n\n # If the item name or sport has been modified, check that an\n # item with the same name and sport does not already exist, or\n # send a flash message and do not add the new item to the\n # database\n if form.name.data != item.name or form.sport.data != item.sport:\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n return redirect(url_for('items.edit_item',\n item_name=item_name))\n\n # If the item name or sport has not been modified, update all\n # details to the database, send a flash message, and redirect\n # to 'home'\n else:\n item.name = form.name.data\n item.sport = form.sport.data\n item.category = form.category.data\n item.description = form.description.data\n item.private = form.private.data\n db.session.commit()\n flash(f'\"{item.name}\" has been updated!', 'good')\n return redirect(url_for('items.item', item_name=item_name))\n\n # If the form is being requested, not submitted, pre-fill the form\n # with existing item data\n elif request.method == 'GET':\n form.name.data = item.name\n form.sport.data = item.sport\n form.category.data = item.category\n form.description.data = item.description\n form.private.data = item.private\n\n return render_template('edit_item.html', item=item, form=form)", "def test_post_get_item(self):\n item = {'brand': 'apple',\n 'name': 'iPhone7',\n 'description': 'The latest iphone'}\n\n def check_item(result):\n item['id'] = result['id']\n resp = self.make_request('get', '/store/items/{}'.format(item['id']))\n self.assertEqual(resp.json()['id'], item['id'])\n self.assertEqual(resp.json()['brand'], item['brand'])\n self.assertEqual(resp.json()['name'], item['name'])\n self.assertEqual(resp.json()['description'], item['description'])\n\n self.assert_request('post',\n '/store/items',\n data=item,\n expected_code=201,\n expected_json=check_item)", "def post(self, request, **kwargs):\n player = get_player_from_request(request)\n if player is None or player.room is not None:\n return redirect(\"rooms:redirect\")\n\n form = RoomCreationForm(request.POST)\n if form.is_valid():\n room_name = form.cleaned_data.get(\"room_name\")\n if Room.objects.filter(name=room_name).exists():\n return render(request, self.template_name, {\"form\": form, \"error\": True})\n else:\n room = Room.objects.create(name=room_name)\n player.room = room\n player.save()\n return redirect(\"rooms:room\", room=room)\n else:\n return render(request, self.template_name, {\"form\": form})", "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "def post(self, request, *args, 
**kwargs):\n return self.create(request, *args, **kwargs)", "def post(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "def posts_post():\n data = request.json\n\n try:\n validate(data, post_schema)\n except ValidationError as error:\n data = {\"message\": error.message}\n return Response(json.dumps(data), 422, mimetype=\"application/json\")\n\n post = Post(title=data[\"title\"], body=data[\"body\"])\n session.add(post)\n session.commit()\n\n data = json.dumps(post.as_dictionary())\n headers = {\"Location\": url_for(\"post_get\", id=post.id)}\n\n return Response(data, 201, headers=headers, mimetype=\"application/json\")", "def add_pet_form():\n\n form = AddPetForm()\n \n if form.validate_on_submit():\n name = form.name.data\n species = form.species.data\n photo_url = form.photo_url.data\n age = form.age.data\n notes = form.notes.data\n available = form.available.data\n\n new_pet = Pet(name=name, species=species, photo_url=photo_url, age=age, notes=notes, available=available)\n db.session.add(new_pet)\n db.session.commit()\n\n flash(f\"Added {name}, species: {species}, age: {age}, notes: {notes}, photo_url={photo_url}, available={available}\")\n \n return redirect(\"/add\")\n\n else:\n return render_template(\n \"pet_add_form.html\", form=form)", "def post(self, request, *args, **kwargs):\n try:\n form = self.get_form()\n except RedirectNeeded as exc:\n messages.add_message(request, messages.SUCCESS, \"Payment redirects to %s\" % exc.args[0])\n return HttpResponseRedirect(exc.args[0])\n #except Exception as exc:\n # return HttpResponseBadRequest(exc, content_type=\"text/plain\")\n\n if form.validate():\n messages.add_message(request, messages.SUCCESS, \"Payment succeeded\")\n return self.form_valid(form)\n else:\n messages.add_message(request, messages.ERROR, \"Payment failed\")\n return self.form_invalid(form)", "def place_new():\n if g.is_logged == False:\n flash (\"You need to be logged in\")\n return redirect(url_for('index'))\n\n if request.method == 'POST':\n db = get_db()\n db.execute('''insert into places (name, address, city, zipcode) values (?, ?, ?, ?)''', [request.form['name'], request.form['address'], request.form['city'], request.form['zipcode']])\n db.commit()\n\n flash('The restaurant was succesfully added')\n return redirect(url_for('index'))\n else:\n\n return render_template('newplace.html')", "def create_place():\n form = CreatePlacesForm(request.form)\n if form.validate_on_submit():\n # set the collection\n places_db = mongo.db.places\n # insert the new recipe\n places_db.insert_one({\n 'name': request.form['name'],\n 'city': request.form['city'],\n 'added_by': session['username'],\n 'description': request.form['description'],\n 'tags': request.form['tags'],\n 'image': request.form['image'],\n 'views': 0\n })\n return redirect(url_for('home'))\n return render_template('create_restaurant.html', form=form)", "def save_add(request):\n if request.method == \"POST\":\n initial_data, data = process_request(request)\n return render(request,\n \"invoice/invoice_create.html\",\n {\n \"form\": ItemForm(),\n \"stage\": \"2\",\n \"prev_data\": data,\n \"initial_data\": initial_data\n })", "def create_item_page():\n catagories = [c.name for c in Catagory.fetch_all()]\n return render_template('add_item.html', catagories=catagories, values={})", "def post(self, request):\n self.createform = forms.QuotationForm(request.POST)\n if self.createform.is_valid():\n req = {\n \"name\": self.createform.cleaned_data['name'],\n \"email\": 
self.createform.cleaned_data['email'],\n \"phone\": self.createform.cleaned_data['phone'],\n \"vehiculeModel\": self.createform.cleaned_data['vehiculeModel'],\n \"vehiculeYearMake\": self.createform.cleaned_data['vehiculeYearMake'],\n \"vehiculeNumber\": self.createform.cleaned_data['vehiculeNumber'],\n \"vehiculePrice\": self.createform.cleaned_data['vehiculePrice'],\n \"covWind\": self.createform.cleaned_data['covWind'],\n \"covPass\": self.createform.cleaned_data['covPass'],\n \"covFlood\": self.createform.cleaned_data['covFlood']\n }\n response = requests.post(settings.QUOTATION_API_BASE_URL + 'create/',\n data=json.dumps(req, cls=DecimalEncoder),\n headers={'Content-type': 'Application/json'})\n if response.status_code == 201:\n reponseJson = response.json()\n quotationCreatedId = reponseJson['id']\n userFromJson = reponseJson['customer']\n user = User.objects.filter(username=userFromJson['username'])\n login(self.request, user.first())\n return HttpResponseRedirect('/quotation/' +\n str(quotationCreatedId))\n\n return render(request, self.template_name, {'form': self.createform})", "def add():\n user_id = session[\"user_id\"]\n if not user_id:\n session.clear()\n redirect(\"/\")\n database = db.db_connect()\n user = database.execute(\"SELECT * FROM Users where id = ?\", (user_id,)).fetchone()\n if not user:\n session.clear()\n redirect(\"/\")\n\n if request.method == \"POST\":\n title = request.form.get(\"title\")\n deadline = request.form.get(\"deadline\")\n importance = request.form.get(\"importance\")\n try:\n print((int(user_id), title, deadline, importance))\n database = db.db_connect()\n database.execute(\"INSERT INTO Tasks (author, description, deadline, importance) VALUES\"\n \"(?, ?, ?, ?)\", (int(user_id), title, deadline, importance))\n database.commit()\n resp = redirect(\"/menu/\")\n except Exception as e:\n resp = render_template(\"add_item.html\", warn=True, warnmessage=e)\n return resp\n return render_template(\"add_item.html\", warn=False, warnmessage=\"\")", "def post(self):\n created = post_tool(request.json)\n return created, 201", "def add_pet():\n\n form = AddPetForm()\n\n if form.validate_on_submit():\n name = form.name.data\n species = form.species.data\n photo_url = form.photo_url.data\n age = form.age.data\n notes = form.notes.data\n\n pet = Pet(name=name, \n species=species, \n photo_url=photo_url, \n age=age, \n notes=notes)\n db.session.add(pet)\n db.session.commit()\n \n return redirect('/')\n\n else:\n return render_template(\n \"add_pet_form.html\", form=form)", "def restaurants_new():\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n if request.method == 'POST':\n if len(request.form['name']) > 0:\n new_restaurant = Restaurant(name=request.form['name'],\n address=request.form['address'],\n phone=request.form['phone'],\n web=helper.check_restaurant_URL(request.form['web']),\n description=request.form['description'],\n user_id=login_session['user_id'])\n session.add(new_restaurant)\n session.commit()\n flash(\"New restaurant created - {}\".format(new_restaurant.name))\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, new_restaurant.id)\n return redirect(url_for('restaurants_page'))\n else:\n flash(\"Incorrect Restaurant details - Please include a name!\")\n\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newrestaurant.html', user_info=user_info)", "def 
post(self, request, *args, **kwargs):\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n if form.is_valid():\n return self.form_valid(form, request)\n else:\n return self.form_invalid(form, request)", "def dispatch(self, request, *args, **kwargs):\n item_id = request.POST.get('item') or kwargs.get('pk') or 1\n try:\n self.object = LabelingAnswer.objects.get(rater=self.rater,\n item__pk=item_id)\n self.item = Item.objects.get(pk=item_id)\n return HttpResponseRedirect(self.get_success_url())\n except LabelingAnswer.DoesNotExist:\n pass\n\n self.item = self.get_item()\n if not self.item:\n messages.error(request,\n f'Item id {item_id} is missing or has already '\n f'been labeled by rater id {self.rater.id}')\n return HttpResponseRedirect(reverse('workflow:error'))\n\n return super().dispatch(request, *args, **kwargs)", "def test_create_item(self):\n\n url = reverse('stock-item-create')\n\n response = self.client.get(url, {'part': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n response = self.client.get(url, {'part': 999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n # Copy from a valid item, valid location\n response = self.client.get(url, {'location': 1, 'copy': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n\n # Copy from an invalid item, invalid location\n response = self.client.get(url, {'location': 999, 'copy': 9999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)", "def post(self, request, *args, **kwargs):\n return super().create(request, *args, **kwargs)" ]
[ "0.7512342", "0.74192584", "0.7168803", "0.7162308", "0.6992464", "0.68776226", "0.6828334", "0.67595434", "0.6719401", "0.67075944", "0.66212463", "0.66060036", "0.65833145", "0.65363026", "0.6523961", "0.6484986", "0.6414084", "0.6341097", "0.6312691", "0.6282003", "0.62772465", "0.6200028", "0.61978626", "0.61482304", "0.60983765", "0.6088804", "0.6073651", "0.60616446", "0.6033742", "0.6027761", "0.6025253", "0.6017064", "0.6015371", "0.59488904", "0.5920941", "0.5916769", "0.5915642", "0.5909973", "0.59088105", "0.5886886", "0.5863686", "0.5858988", "0.5848875", "0.5848667", "0.5839242", "0.58359087", "0.58298", "0.58222336", "0.58144104", "0.5800826", "0.57951814", "0.5783722", "0.5763624", "0.57633364", "0.57630056", "0.5749538", "0.5747365", "0.57417405", "0.57212687", "0.572021", "0.57085913", "0.5692659", "0.56879556", "0.56843245", "0.5684245", "0.56661904", "0.5662788", "0.56518614", "0.5648102", "0.56447923", "0.5633399", "0.56307197", "0.5619421", "0.5607822", "0.5604964", "0.5603537", "0.5601445", "0.5593285", "0.5591518", "0.55833346", "0.55802834", "0.55788696", "0.55788696", "0.55788696", "0.5574326", "0.55741245", "0.5568076", "0.5567535", "0.55594707", "0.55537254", "0.553985", "0.55358624", "0.55219704", "0.551661", "0.551587", "0.55075586", "0.5503181", "0.550121", "0.54964566", "0.54957855" ]
0.7602898
0
Endpoint to display update item page.
def update_item_page(item_name, catagory_name): item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name) catagories = [c.name for c in Catagory.fetch_all()] return render_template( 'edit_item.html', catagories=catagories, values={ 'name': item.name, 'catagory': item.catagory_name, 'description': item.description }, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def item_update(request):\n if request.method == 'POST':\n item_to_update = get_object_or_404(StockItem, pk=request.POST['id'])\n item_to_update.name = request.POST['name']\n item_to_update.count = int(request.POST['count'])\n item_to_update.date_of_expiration = request.POST['exp']\n item_to_update.fk_category = Category.objects.get(name=request.POST['cat'])\n item_to_update.fk_subcategory = SubCategory.objects.get(name=request.POST['subcat'])\n item_to_update.notes = request.POST['notes']\n item_to_update.save()\n return HttpResponse(status=200)", "def update_item(item_id):\n edited_item = session.query(Item).filter_by(id=item_id).one()\n\n # redirect to details page if current user does not own item\n if edited_item.user_id != login_session['user_id']:\n return redirect(\n url_for(\n 'item_details',\n category_id=edited_item.category_id,\n item_id=edited_item.id))\n\n if request.method == 'POST':\n if request.form['category']:\n edited_item.category_id = request.form['category']\n if request.form['name']:\n edited_item.name = request.form['name']\n if request.form['description']:\n edited_item.description = request.form['description']\n edited_item.updated_date = datetime.datetime.now()\n session.add(edited_item)\n session.commit()\n return redirect(\n url_for(\n 'item_details',\n category_id=edited_item.category_id,\n item_id=edited_item.id))\n else:\n categories = session.query(Category).all()\n return render_template(\n 'views/edit.html',\n edited_item=edited_item,\n categories=categories)", "def update_item(self, id: str, user: User, **kwargs) -> None:", "def update():\n return 'update api in put'", "def item_edit(context, request, render=None):\n if render is None:\n render = request.params.get('render', True)\n properties = request.validated\n # This *sets* the property sheet\n request.registry.notify(BeforeModified(context, request))\n context.update(properties)\n request.registry.notify(AfterModified(context, request))\n if render == 'uuid':\n item_uri = '/%s' % context.uuid\n else:\n item_uri = request.resource_path(context)\n if asbool(render) is True:\n rendered = embed(request, item_uri + '?embed=false')\n else:\n rendered = item_uri\n request.response.status = 200\n result = {\n 'status': 'success',\n '@type': ['result'],\n '@graph': [rendered],\n }\n return result", "def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})", "def edit_item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to edit the item because\n # the item was created by a different user, send a 403\n elif current_user != item.user:\n abort(403)\n\n form = ItemForm()\n\n # If the form is validated, update the item with its data to the\n # database\n if form.validate_on_submit():\n\n # If the item name or sport has been modified, check that an\n # item with the same name and sport does not already exist, or\n # send a flash message and do not add the new item to the\n # database\n if form.name.data != item.name or form.sport.data != item.sport:\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n return redirect(url_for('items.edit_item',\n item_name=item_name))\n\n # If the item name or sport has not been modified, update all\n # details to the database, send a flash message, and redirect\n # to 'home'\n 
else:\n item.name = form.name.data\n item.sport = form.sport.data\n item.category = form.category.data\n item.description = form.description.data\n item.private = form.private.data\n db.session.commit()\n flash(f'\"{item.name}\" has been updated!', 'good')\n return redirect(url_for('items.item', item_name=item_name))\n\n # If the form is being requested, not submitted, pre-fill the form\n # with existing item data\n elif request.method == 'GET':\n form.name.data = item.name\n form.sport.data = item.sport\n form.category.data = item.category\n form.description.data = item.description\n form.private.data = item.private\n\n return render_template('edit_item.html', item=item, form=form)", "def edit_item(request, item_id):\n if request.user.is_superuser:\n item = get_object_or_404(Product, pk=item_id)\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES, instance=item)\n if form.is_valid():\n form.save()\n messages.success(request, 'Item was successfully updated.')\n return redirect(reverse('item_info', args=[item.id]))\n else:\n messages.error(request, 'There was an issue updating the '\n 'item. Please make sure the form is valid.')\n else:\n form = ProductForm(instance=item)\n else:\n messages.error(request, 'Sorry, you do not have permission to access '\n 'this page.')\n return redirect(reverse('home'))\n\n template = 'shop/edit_item.html'\n context = {\n 'form': form,\n 'item': item,\n }\n\n return render(request, template, context)", "def edit(item_id):\n session = current_app.config['db']\n item = session.query(WineABV).filter_by(id=item_id).one()\n if request.method == \"POST\":\n new_name = request.form['itemname']\n item.name = new_name\n try:\n session.commit()\n except exc.IntegrityError:\n session.rollback()\n flash(\"Duplicate values!\", 'danger')\n return render_template('edit_form.html', item=item)\n\n flash(\"Successfully Edited '%s'\" % (new_name,), 'success')\n return redirect(url_for('.show'))\n else:\n return render_template(template_prefix+'edit_form.html', item=item)", "def update(self,request,pk = None):\n return Response({'http_method':'PUT'})", "def edit_item(item_id):\n if 'userinfo' not in session.keys():\n session['target'] = url_for('edit_item', item_id=item_id)\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n item.name = request.form['name']\n item.category_id = request.form['category']\n item.description = request.form['description']\n sqlsession.commit()\n return redirect(url_for('view_item', item_id=item_id))\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n categories = sqlsession.query(Category).all()\n return render_template(\"edit_item.html\",\n item=item,\n categories=categories)", "def partial_update(self,request,pk = None):\r\n\r\n return Response({'HTTP method':'PATCH'})", "def edit_item(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n Item.user_id == current_user.id\n ).first()\n\n if not item:\n flash(\"Couldn't find a item with that id\", category='warning')\n return redirect(request.referrer)\n\n form = ItemForm()\n form.editting_item_id = item_id\n # Query for select field\n form.category_id.query = Category.query.filter(\n Category.user_id == current_user.id).all()\n\n if form.validate_on_submit():\n item.category_id = form.category_id.data.id\n item.name = form.name.data.capitalize()\n item.description = form.description.data\n db.session.commit()\n 
flash('Successfully updated Item', 'success')\n return redirect(url_for('url.index'))\n\n elif request.method == 'GET':\n form.name.data = item.name\n form.description.data = item.description\n\n return render_template(\n 'forms/form.html',\n form_title='Edit Item',\n form=form,\n form_name='item',\n action=url_for('url.edit_item', item_id=item_id))", "def issueUpdateView(context, issue):\n\n user = context.get('user')\n\n if not user.has_perm('IssueTracker.can_change'):\n return \"\"\n\n if issue.item:\n item = issue.item.item\n \n args = {\n \"form\": forms.UpdateMachineForm(instance=item),\n }\n\n return render_to_string('issueUpdate.html', args, context)\n\n return \"\"", "def partial_update(self, request, pk=None): #partial update a specific object\n return Response({'http_method': 'PATCH'})", "def editItem(sport_id, item_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n editedItem = session.query(Item).filter_by(id=item_id).one()\n if request.method == 'POST':\n if request.form['name']:\n editedItem.name = request.form['name']\n if request.form['description']:\n editedItem.description = request.form['description']\n editedItem.user_id = login_session['user_id']\n session.add(editedItem)\n session.commit()\n return redirect(url_for('showCatalog', sport_id=sport_id))\n else:\n return render_template('edititem.html', sport_id=sport_id,\n item_id=item_id, sport=sport, item=editedItem)", "def update(request):\n return 0", "def updateItem(self, object):\n pass", "def editItem(category_item_id):\n editedItem = db.findItem(id=category_item_id)\n if editedItem.user_id != login_session['user_id']:\n return not_authorized()\n if request.method == 'POST':\n db.updateItem(editedItem, request.form)\n return redirect(url_for('showCatalog'))\n return render_template(\n 'edit_item.html', categories=db.getAllCategories(), item=editedItem)", "def update(_id): \n pages_object = Pages(_id)\n page = pages_object.page\n \n language_name = languages_object.get_languages(3)\n \n # Update page\n if request.method == 'POST':\n if pages_object.update():\n return redirect(url_for('pages.overview'))\n \n len_of_label = len(page['label'])\n \n # Come back a message when there is an error\t\n if not pages_object.message is None:\n message = pages_object.message\n status = pages_object.status\n \n return render_template('{}/update.html'.format(MODULE_DIR), **locals())", "def edit_item(item_id):\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. 
This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = session.query(Item).filter_by(id=item_id).one()\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n session.add(item)\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "def update(self, request, pk=None):\n\n return Response({'http_method':'PUT'})", "def adminedit(object, id):\n\n db = get_db()\n\n if request.method == \"POST\":\n execute_string = 'UPDATE ' + object.title() + \" SET \"\n\n if object == 'post':\n execute_string += 'title = \"' + request.form['title'] + '\", content = \"' + request.form['content'] + '\", authorId = ' + request.form[\"authorid\"] + ', categoryId = ' + request.form[\"categoryid\"] + ''\n elif object == 'author':\n execute_string += 'name = \"' + request.form['name'] + '\"'\n elif object == 'category':\n execute_string += 'name = \"' + request.form['name'] + '\", description = \"' + request.form['description'] + '\"'\n\n execute_string += \" WHERE id = \" + str(id)\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n execute_string = \"SELECT * FROM \" + object.title() + \" WHERE id = \" + str(id)\n item = db.execute(execute_string).fetchone()\n\n return render_template(\"new.html\", object=object, item=item)", "def partial_update(self, request, pk=None):\n return Response({'http_method':'PATCH'})", "def update_item(self, table, item):", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def update(id):\n if request.method == \"POST\":\n result = update_post(\n id,\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n post = get_post(id)\n return render_template(\"edit.html\", **post)", "def edit_items(request):\n token = getToken(request)\n superUser = isSuperUser(token)\n if superUser == True:\n id = request.data['id']\n try:\n items = Items.objects.get(id=id)\n except:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = ItemsSerializer(items, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(status=status.HTTP_401_UNAUTHORIZED)", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def partial_update(self, request, pk=None):\n\n return Response({'http_method':'PATCH'})", "def partial_update(self,request,pk= None):\n return Response({'http_method':'PATCH'})", "def editListItem(category_id, item_id):\n\n if 'username' not in login_session:\n return redirect('/login')\n\n editedItem = session.query(ListItem).filter_by(id=item_id).one()\n category = session.query(Category).filter_by(id=category_id).one()\n\n if category.user_id != login_session['user_id']:\n flash('You are not the creator of %s category, and cannot modify it' %\n category.name)\n return redirect(url_for('showItems', 
category_id=category.id))\n\n if request.method == 'POST':\n if \"btn_edit\" in request.form:\n if request.form['name']:\n editedItem.name = request.form['name']\n if request.form['description']:\n editedItem.description = request.form['description']\n session.add(editedItem)\n session.commit()\n flash('Catalog Item Successfully Edited')\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return render_template('edititem.html',\n item=editedItem,\n user=getUserInfo(login_session['user_id']))", "def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})", "def update_items(self, request, *a, **kw):\n item_def = request.data\n cpdoc = self.get_object()\n item_def['id'] = cpdoc.id\n\n item_ser = self.get_serializer(instance=obj_cp, data=item_def)\n item_ser.is_valid(raise_exception=True)\n item_obj = item_ser.save()\n headers = self.get_success_headers(item_ser.data)\n return response.Response(item_ser.data, headers=headers)", "def test_update_item_using_post(self):\n pass", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def editItem(category_id, item_id):\r\n # authentication\r\n if 'username' not in login_session:\r\n flash('Please login to edit item')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n # validation\r\n category = session.query(Category).filter_by(id=category_id).first()\r\n if not category:\r\n flash('Attempted operation on non-existent category')\r\n return redirect(url_for('showCategories'))\r\n\r\n editedItem = session.query(Item).filter_by(id=item_id,\r\n category_id=category_id).first()\r\n if not editedItem:\r\n flash('Attempt to edit non-existent item')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n # authorization\r\n if login_session['user_id'] != editedItem.user_id:\r\n flash('Sorry, you are not authorized to edit the item \\'{}\\''\r\n .format(editedItem.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n if request.method == 'POST':\r\n # update operation\r\n if request.form['name']:\r\n editedItem.name = request.form['name']\r\n\r\n if request.form['description']:\r\n editedItem.description = request.form['description']\r\n else:\r\n editedItem.description = ''\r\n session.add(editedItem)\r\n session.commit()\r\n flash('Edited Item \\'{}\\' Successfully'.format(editedItem.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n else:\r\n # serve GET requests with the form\r\n return render_template(\"editItem.html\",\r\n category=category, item=editedItem)", "def patch(self, item_id):\n\n try:\n\n data = request.json\n\n if data is None:\n raise NotImplementedError(\"No data\")\n\n controller = self.controller()\n data[\"id\"] = item_id\n data = controller.date_time_parser(data)\n schema = self.schema(many=False)\n raw_data = controller.update(**data)\n data = schema.dump(raw_data)\n\n return ResponseHandler.render_response(data=data)\n\n except Exception as ex:\n\n return ResponseHandler.render_response(status=ERR, message=traceback.format_exc())", "def pp_update_item(edit_url, **kwargs):\n # build Requests session\n pp = requests.Session()\n pp.auth = (udata.pp2['user'], udata.pp2['pass'])\n pp.cookies.update(get_cookies('secure1.inmotionhosting.com'))\n\n # retrieve existing data\n fdata = pp_get_item(edit_url)\n\n # update form data with kwargs\n fdata.update(kwargs)\n\n # then post update\n 
bpost = pp.post('https://secure1.inmotionhosting.com%s' % (edit_url), data=fdata)\n\n return bpost", "def update(entry_id):\n entry = models.Journal.select().where(\n models.Journal.id == entry_id).get()\n form = forms.JournalForm() # if the form validates\n if form.validate_on_submit(): # if click update button\n entry.title = form.title.data\n entry.date = form.date.data\n entry.time_spent = form.time_spent.data\n entry.learnt = form.learnt.data\n entry.resources = form.resources.data\n entry.save() # commit the changes\n flash('Entry has been updated', 'success')\n return redirect(url_for('detail', entry_id=entry.id))\n elif request.method == 'GET': # fill the form with current data\n form.title.data = entry.title\n form.date.data = entry.date\n form.time_spent.data = entry.time_spent\n form.learnt.data = entry.learnt\n form.resources.data = entry.resources\n return render_template('update.html', form=form)", "def editItem(category_id, item_id):\n editedItem = session.query(Item).filter_by(id=item_id).one()\n if editedItem.user_id != login_session['user_id']:\n flash('You are not authorized to edit this Item.\\\n Please create own Item in order to edit.')\n return redirect(url_for('showallCategories'))\n if request.method == 'POST':\n if request.form['name']:\n editedItem.name = request.form['name']\n if request.form['description']:\n editedItem.description = request.form['description']\n if request.form['price']:\n editedItem.price = request.form['price']\n session.add(editedItem)\n session.commit()\n flash('Item Edit successfull')\n return redirect(url_for('showCategory', category_id=category_id))\n else:\n return render_template(\n 'editItem.html', category_id=category_id,\n item_id=item_id, item=editedItem)", "def item_handler(id):\n if request.method == 'PUT':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to update a item\n rq = request.get_json()\n name = rq['name']\n picture = rq['picture']\n description = rq['description']\n item = updateItem(id, name, picture, description)\n return jsonify(item=item.serialize)\n elif request.method == 'DELETE':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to remove a item\n item = deleteItem(id)\n return jsonify(item=item.serialize)", "def render_edit_item_page(\r\n self, client_id, state, user_id, user_name, item_id):\r\n categories = self._db_manager.get_category_list(user_id)\r\n if len(categories) == 0:\r\n flash(\"You have created no categories to add items to.\")\r\n return\r\n item = self._db_manager.get_item(item_id)\r\n if item is None:\r\n flash(\"Invalid item.\")\r\n return\r\n if item[\"user_id\"] != user_id:\r\n flash(\"Only the original creator can edit an item.\")\r\n return\r\n return render_template(\r\n \"item_edit.html\",\r\n client_id=client_id,\r\n state=state,\r\n is_logged_in=True,\r\n user_name=user_name,\r\n categories=categories,\r\n item=item\r\n )", "def editItem(category_id, item_id):\n editedItem = session.query(Item).filter_by(id=item_id).one()\n category = session.query(Category).filter_by(id=category_id).one()\n\n if editedItem.user_id != login_session['user_id']:\n flash(\"You are authorised to edit items created by you!\")\n return redirect(url_for(\"showCatalog\"))\n\n if request.method == 'POST':\n if request.form['name']:\n editedItem.name = request.form['name']\n if 
request.form['description']:\n editedItem.description = request.form['description']\n if request.form['price']:\n editedItem.price = request.form['price']\n session.add(editedItem)\n session.commit()\n flash('%s Item Successfully Edited' % (editedItem.name))\n return redirect(url_for('showItem',\n category_id=editedItem.category_id))\n else:\n return render_template('edititem.html', category=category,\n item=editedItem)", "def put(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PUT'})", "def edit(self, *args, **kw):\n id_tipo_item = UrlParser.parse_id(request.url, \"tipositems\")\n url_action = \"../\"\n \n pp = PoseePermiso('redefinir tipo item',\n id_tipo_item=id_tipo_item)\n if not pp.is_met(request.environ):\n flash(pp.message % pp.nombre_permiso, 'warning')\n redirect(url_action)\n tmpl_context.widget = self.edit_form\n value = self.edit_filler.get_value( \\\n values={'id_atributos_por_tipo_item': int(args[0])})\n value['_method'] = 'PUT'\n page = \"Atributo {nombre}\".format(nombre=value[\"nombre\"])\n return dict(value=value, \n page=page, \n atras=url_action)", "def update_item(item_name, catagory_name):\n try:\n item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name)\n except NoResultFound:\n abort(404)\n errors = form_errors(request.form)\n new_item_name = request.form.get('name')\n new_catagory_name = request.form.get('catagory')\n new_description = request.form.get('description')\n if errors:\n values = {\n 'name': new_item_name,\n 'catagory': new_catagory_name,\n 'description': new_description\n }\n catagories = [c.name for c in Catagory.fetch_all()]\n return render_template(\n 'add_item.html',\n catagories=catagories,\n values=values,\n errors=errors\n )\n item.update(\n name=new_item_name,\n catagory_name=new_catagory_name,\n description=new_description\n )\n return redirect(url_for(\n 'read_item', item_name=new_item_name, catagory_name=new_catagory_name\n ))", "def edit(self):\n if not self.context.model.is_editable():\n raise Unauthorized(\"Editing is not allowed\")\n\n title = self.request.get('title')\n if not title:\n return JSONResponse(self.request).error(\n _('agenda_item_update_empty_string',\n default=u\"Agenda Item title must not be empty.\")).proceed().dump()\n\n title = title.decode('utf-8')\n if self.agenda_item.has_proposal:\n if len(title) > ISubmittedProposal['title'].max_length:\n return JSONResponse(self.request).error(\n _('agenda_item_update_too_long_title',\n default=u\"Agenda Item title is too long.\")\n ).proceed().dump()\n\n self.agenda_item.set_title(title)\n return JSONResponse(self.request).info(\n _('agenda_item_updated',\n default=u\"Agenda Item updated.\")).proceed().dump()", "def editMenuItemPage(restaurant_id, item_id):\n item = db_methods.searchItemByID(item_id)\n res_id = restaurant_id\n item_id = item_id\n if request.method == 'POST':\n item_name = request.form['item_name']\n item_price = request.form['item_price']\n item_desc = request.form['item_desc']\n item_course = request.form['item_course']\n if item_name and item_price and item_desc and item_course:\n db_methods.editMenuItem(item_name, item_price, item_desc, item_course, item_id)\n time.sleep(0.1)\n return redirect('/restaurants/%s/menu' % res_id)\n else:\n error = \"Please fill out all required fields.\"\n return render_template(\"editmenuitem.html\", error = error)\n else:\n return render_template('editmenuitem.html', item=item, res_id=res_id)", "def edit_item_details(item_id):\n category_id = None\n if 'category_id' in 
request.args:\n category_id = int(request.args['category_id'])\n if 'userid' not in login_session:\n flash('Unfortunately you need to be logged in to make changes', 'error')\n return redirect(url_for('show_homepage'))\n\n item = None\n if item_id != 0:\n item = is_user_the_creator(item_id)\n if request.method == 'GET':\n categories = session.query(Category).order_by(asc(Category.name)).all()\n return display_item(categories, item, item_id, category_id)\n else:\n return save_item(item, item_id)", "def update_item(self, item):\n try:\n index = self.ui.listItemList.model().index_of(item)\n # TODO: missing a way to insert row, don't know how to add data with insertRows\n # see https://svn.enthought.com/svn/enthought/TraitsBackendQt/trunk/enthought/traits/ui/qt4/list_str_model.py\n #if item.isRead() and self.show_updated_only():\n # self.ui.listItemList.model().removeRow(index.row())\n #else:\n self.ui.listItemList.update(index)\n except:\n pass\n self.update_title()", "def edit_item(request, product_id):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, you are not permitted to do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n if request.method == 'POST':\n form = ProductForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n messages.success(request, 'You have successfully updated store item!')\n return redirect(reverse('home'))\n else:\n messages.error(request, 'Failed to update item. Please check the form.')\n else:\n form = ProductForm(instance=product)\n messages.info(request, f'You are editing {product.name}')\n\n template = 'products/edit_item.html'\n context = {\n 'form': form,\n 'product': product,\n }\n\n return render(request, template, context)", "def update(self, request, *args, **kwargs):\n response = super(ProductViewSet, self).update(request, *args, **kwargs)\n response.data['message'] = \"Producto ha sido editado\"", "def edit_item(Task_id):\n\n if request.method == 'POST':\n #get the parameters from html form\n Description = request.form['Description']\n status = request.form['status']\n Due_date = request.form['Due_date']\n\n #if status of task is open then it will set the value=1\n if status == 'open':\n status = 1\n else:\n status = 0\n\n #conncetion to the database\n conn = sqlite3.connect('todo.db')\n c = conn.cursor()\n\n #update query to update fields of task\n c.execute(\"UPDATE task SET Description = ?, Due_date = ?, Modified_date = Date('now'), status = ? 
WHERE Task_id LIKE ?\", (Description, Due_date, status, Task_id))\n conn.commit()\n return redirect(\"/todo\")\n else:\n Task_id = str(Task_id)\n #conncetion to the database\n conn = sqlite3.connect('todo.db')\n c = conn.cursor()\n\n #select query to get the Description of particular task\n c.execute(\"SELECT Description,Due_date FROM task WHERE Task_id = ?\",[Task_id])\n cur_data = c.fetchone()\n conn.commit()\n return render_template('update_task.html',Task_id= Task_id,old= cur_data)", "def put(self, request, pk=None):\n return Response({'method': 'patch'})", "def management_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n management_reference = get_object_or_404(Management, id=id,company=company)\n management_form = ManagementForm(instance=management_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('management_form.html',{'form':management_form, 'info': management_reference},context_instance=RequestContext(request))\n else:\n management_form = ManagementForm(request.POST, instance=management_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if management_form.is_valid():\n management_form.save()\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('management_form.html', \n {'form': management_form, 'form_errors': management_form.errors, 'info': management_reference},\n context_instance=RequestContext(request))", "def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj", "def api_item_details(item_id):\n if request.method == 'GET':\n item = session.query(Item, User).join(User).filter(Item.id == item_id).first()\n return jsonify(item.Item.to_json())\n # TODO - Add a POST method + HTTP Auth to allow a RESTful item modification", "def updateItem(request):\n # Getting the data when you add to cart. Body of JSON\n data = json.loads(request.body)\n # Getting values we sent to body as JSON. prodID and Action\n productId = data['prodId']\n action = data['action']\n\n # Get curr customer\n customer = request.user.customer\n product = BobaProduct.objects.get(id=productId)\n\n # get order associated with customer\n order, created = CustomerOrder.objects.get_or_create(customer=customer)\n\n # Get value of curr order. 
If it exist, want to just change it\n orderItem, created = OrderItem.objects.get_or_create(order=order, product=product)\n\n if action == 'add':\n orderItem.quantity += 1\n elif action == 'remove':\n orderItem.quantity -= 1\n orderItem.save() #saving this order item\n\n # If the quantity of the order goes below 1, delete the orderItem\n\n if orderItem.quantity < 1:\n orderItem.delete()\n return JsonResponse('Item was added', safe=False)", "def edit_form():\n return template (\"edit\")", "def update(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"update\"), kwargs)", "def update(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"update\"), kwargs)", "def update(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"update\"), kwargs)", "def edit_task_page(request):\n data = {}\n try:\n tasklist = request.GET.get(\"tasklist\")\n task = request.GET.get(\"task\")\n data[\"tasklist\"] = tasklist\n\n task_obj = Todo.objects.get(title=task)\n data[\"data\"] = task_obj\n\n return render(request, \"pages/update-task.html\", data)\n except Exception as ex:\n return HttpResponse(ex)", "def edit(self, *args, **kw):\n\t\t\ttmpl_context.widget = self.edit_form\n\t\t\tpks \t\t= self.provider.get_primary_fields(self.model)\n\t\t\tkw \t\t\t= {}\n\n\t\t\tfor i, pk in enumerate(pks):\n\t\t\t\tkw[pk] \t\t= args[i]\n\n\t\t\tvalue \t\t= self.edit_filler.get_value(kw)\n\t\t\tvalue['_method'] \t= 'PUT'\n\n\t\t\treturn dict(value = value, model = self.model.__name__, pk_count = len(pks))", "def update(self) -> requests.request:\n # Check if id is set\n if self.args.id is None:\n raise Exception('Provide id of asset you want to update')\n\n # Check URL validity\n if self.args.url is not None and self.check_url_invalidity():\n raise Exception('Provided URL is not valid')\n\n # Send PUT request\n return requests.put(\n self.REQUEST_URL + str(self.args.id),\n {'title': self.args.title, 'label': self.args.label, 'url': self.args.url}\n )", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n #pks = self.provider.get_primary_fields(self.model)\n \n log.debug(\"soyRomperLB= %s\" %kw)\n\n ###########################################\n pks = self.provider.get_primary_fields(self.model)\n \n ###########################################\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n return dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def show(self, item_id):\n pass", "def updateItem(self, item, values):\n print ('Updating item: ' + unicode(item))\n item = int(item) #Importante: Para evitar que se caiga la api de PODIO más adelante\n message = self._client.Item.update(item, {'fields':values})\n return message", "def edit_from_list(id=None,item_id=None):\n setExits()\n #import pdb;pdb.set_trace()\n \n item_id=cleanRecordID(item_id)\n item_rec = None\n rec = None\n warehouses = Warehouse(g.db).select()\n trx_types = get_site_config().get('trx_types',['Add','Remove',])\n transaction = Transaction(g.db)\n trx_id = cleanRecordID(id)\n if trx_id > 0:\n rec = transaction.get(trx_id)\n \n if rec:\n item_id = rec.item_id\n else:\n rec = transaction.new()\n rec.created = local_datetime_now()\n if 'last_trx' in session:\n transaction.update(rec,session['last_trx'])\n \n # Handle Response?\n if request.form:\n #import pdb;pdb.set_trace()\n error_list=[]\n transaction.update(rec,request.form)\n if save_record(rec,error_list):\n return \"success\" # the 
success function looks for this...\n else:\n pass\n \n \n if item_id > 0:\n item_rec = Item(g.db).get(item_id)\n \n if not item_rec:\n flash(\"This is not a valid item id\")\n return \"failure: This is not a valid item id.\"\n else:\n rec.item_id=item_id\n \n \n return render_template('trx_edit_from_list.html',rec=rec,current_item=item_rec,warehouses=warehouses,trx_types=trx_types)", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n pks = self.provider.get_primary_fields(self.model)\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n \n return dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def edit_item(category, item):\n # Verify user login. If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n # Query database with SQLAlchemy to display categories on page\n categories = session.query(Categories).all()\n if request.method == 'POST':\n # Query database with SQLAlchemy and store queries as objects\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n item = (session.query(Items)\n .filter_by(name=item.replace('-', ' '))\n .one())\n # Get form fields submitted by user, or retain item info\n name = request.form['name'] if request.form['name'] else item.name\n url = request.form['url'] if request.form['url'] else item.url\n if request.form['photo_url']:\n photo_url = request.form['photo_url']\n else:\n photo_url = item.photo_url\n if request.form['description']:\n description = request.form['description']\n else:\n description = item.description\n category = request.form['item_category']\n # Retrieve the database ID of the item's category\n category_id = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n # Get user's database ID\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n # Get database ID of creator\n creator_db_id = item.creator_db_id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print(\"Item creator's database primary key id is {}.\"\n .format(creator_db_id))\n print('Item to edit is \"{}\".'.format(item.name))\n # Only allow creator to edit. If not, redirect to login.\n if user_db_id != creator_db_id:\n flash('Only the creator can edit. 
Please log in as creator.')\n return redirect(url_for('home'))\n # Store edits in an object\n edited_item = Items(name=name,\n url=url,\n photo_url=photo_url,\n description=description,\n category_id=category_id.id,\n creator_db_id=user_db_id)\n # Overwrite item object with new info from edited_item object\n item.name = edited_item.name\n item.url = edited_item.url\n item.photo_url = edited_item.photo_url\n item.description = edited_item.description\n item.category_id = edited_item.category_id\n session.add(item)\n session.commit()\n print('Item \"{}\" edited.'.format(edited_item.name))\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Query database with SQLAlchemy to display categories on page\n categories = session.query(Categories).all()\n # Render webpage\n return render_template('edit_item.html',\n categories=categories,\n item=item,\n login_status=login_status)", "def editItem(category_id, item_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n session = DBSession()\r\n editedItem = session.query(Item).filter_by(id=item_id).one()\r\n if editedItem.user_id != login_session['user_id']:\r\n return \"<script>function myFunction() {alert('You are not authorized to edit this item.');}</script><body onload='myFunction()''>\"\r\n if request.method == 'POST':\r\n editedItem.name = request.form['name']\r\n session.add(editedItem)\r\n session.commit()\r\n return redirect(url_for('showCategoryItems', category_id=category_id))\r\n else:\r\n return render_template('edititem.html', category_id=category_id, item=editedItem)", "def collection_update(request, *args, **kwargs):\n patch_data = request.data\n # Extract form data and validate\n form = CollectionForm(patch_data)\n if not form.is_valid():\n data = json.dumps({\"errors\": form.errors})\n return HttpResponse(content=data, content_type=\"application/json\", status=status.HTTP_400_BAD_REQUEST)\n # Update the collection\n collection = Collection.objects.get(id=int(kwargs['pk']))\n if \"title\" in patch_data:\n collection.title = patch_data[\"title\"]\n if \"permission\" in patch_data:\n collection.public = patch_data[\"permission\"] == \"Public\"\n if \"comment\" in patch_data:\n collection.comment = patch_data[\"comment\"]\n collection.save()\n # Prepare a response\n data = json.dumps({'success': True, 'id': collection.id, 'url': \"/collection/{0}\".format(collection.id)})\n return HttpResponse(data, content_type=\"json\")", "def modify(request, invoice_number):\n if request.method == \"POST\":\n n = Invoice.objects.filter(number=request.POST[\"invoice_number\"])\n items = n.get().items\n\n for i, item in enumerate(items):\n item[\"short_description\"] = request.POST[str(i + 1) + \"_short_description\"]\n item[\"particulars\"] = request.POST[str(i + 1) + \"_particulars\"]\n item[\"quantity\"] = request.POST[str(i + 1) + \"_quantity\"]\n item[\"unit\"] = request.POST[str(i + 1) + \"_unit\"]\n item[\"unit_price\"] = request.POST[str(i + 1) + \"_unit_price\"]\n item[\"total_cost\"] = request.POST[str(i + 1) + \"_total_cost\"]\n\n n.update(\n number=request.POST[\"invoice_number\"],\n invoice_date=request.POST[\"invoice_date\"],\n reference_number=request.POST[\"reference_number\"],\n reference_date=request.POST[\"reference_date\"],\n addressed_to=request.POST[\"addressed_to\"],\n party_gst=request.POST[\"party_gst\"],\n items=items,\n c_gst=request.POST[\"c_gst\"],\n s_gst=request.POST[\"s_gst\"],\n other_charges=request.POST[\"other_charges\"],\n 
notes=request.POST[\"additional_notes\"],\n total=request.POST[\"total\"],\n modified_at=datetime.datetime.now()\n )\n return redirect(\"index_page\")\n\n data = Invoice.objects.get(number=invoice_number)\n\n sub_total = sum([a.get(\"total_cost\") for a in data.items])\n\n return render(request,\n \"invoice/invoice_modify.html\",\n {\n \"invoice\": data,\n \"sub_total\": sub_total\n })", "def item_detail(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n current_user.id == Item.user_id\n ).first()\n if not item:\n flash(\"Couldn't find this item\", category='warning')\n return redirect(url_for('url.index'))\n return render_template('detail.html', item=item)", "def update_todo_list_view(request: HttpRequest, pk: int) -> Union[HttpResponse, HttpResponseRedirect]:\n todo_list = TodoListModel.objects.get(id=pk)\n\n context = {\n 'todo_list': todo_list\n }\n\n if request.method == 'GET':\n context['form'] = TodoListForm(instance=todo_list)\n\n return render(request, 'todo/update_todo_list.html', context)\n elif request.method == 'POST':\n form = TodoListForm(data=deepcopy(request.POST), instance=todo_list)\n\n if form.is_valid():\n todo_list = form.save()\n\n return redirect(todo_list.get_absolute_url())\n else:\n context['form'] = form\n return render(request, 'todo/update_todo_list.html', {'form': form})", "def Update(self, controller):\n pass", "def taco_test_put_update(self):\n body = '{ \"id\": 400, \"name\": \"item4\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item/4', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))", "def companylink_update(request, slug):\n\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n companylink_reference = get_object_or_404(CompanyLink, company=company)\n companylink_form = CompanyLinkForm(instance=companylink_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('companylink_form.html',{'form':companylink_form, 'info': companylink_reference},context_instance=RequestContext(request))\n else:\n companylink_form = CompanyLinkForm(request.POST, instance=companylink_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if companylink_form.is_valid():\n companylink_form.save()\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('companylink_form.html', \n {'form': companylink_form, 'form_errors': companylink_form.errors, 'info': companylink_reference},\n context_instance=RequestContext(request))", "def edit(self, **kwargs):\n ...", "def put(self, request, pk):\n return self.update(request, pk)", "def patch(self, request, pk=None): #pk of id of objects to be updated (DB)\n return Response({'method':'PATCH'})", "def edit_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n ssid = decrypt_book_record(request.form['ssid'])\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n edited_entry = Entries.query.filter_by(\n id=ssid, title=title, category=category, \\\n buydate=buydate).first()\n\n if 
edited_entry is not None :\n edited_entry.introduction = request.form['introduction']\n if db.session.is_modified(edited_entry) :\n # commit only if something is modified\n try :\n db.session.commit()\n except IntegrityError as e :\n log_error('error when edit:')\n log_error(e.message)\n flash(u'数据库操作失败导致更新失败!请看后台日志')\n flash(u'成功更新条目')\n\n return redirect(url_for('show_entries_admin'))", "def cmd_update(self):\n self.update_repository()\n results = self.results.getvalue()\n if results:\n print('---')\n print(results, end='')", "def acquisition_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n acquisition_reference = get_object_or_404(Acquisition, id=id,company=company)\n acquisition_form = AcquisitionForm(instance=acquisition_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('acquisition_form.html',{'form':acquisition_form, 'info': acquisition_reference},context_instance=RequestContext(request))\n else:\n acquisition_form = AcquisitionForm(request.POST, instance=acqusition_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if acquisition_form.is_valid():\n acquisition_form.save()\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('acquisition_form.html', \n {'form': acquisition_form, 'form_errors': acquisition_form.errors, 'info': acquisition_reference},\n context_instance=RequestContext(request))", "def post_update():\n\n\n user_id = session['user_id']\n post = request.form.get('post')\n\n Update.add_update(user_id, post)\n\n return \"Updated Post\"", "def office_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n office_reference = get_object_or_404(Office, id=id,company=company)\n office_form = OfficeForm(instance=office_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('office_form.html',{'form':office_form, 'info': office_reference},context_instance=RequestContext(request))\n else:\n office_form = OfficeForm(request.POST, instance=office_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if office_form.is_valid():\n office_form.save(commit = False)\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'info': office_reference},\n context_instance=RequestContext(request))", "def updatebasketview(request, item_id):\n product = Product.objects.get(pk=item_id)\n amount = int(request.POST.get('amount'))\n redirectpage = request.POST.get('redirectpage')\n basket = request.session.get('basket', {})\n\n if item_id in list(basket.keys()):\n basket[item_id] += amount\n messages.success(\n request, f'Updated {product.name} amount to {basket[item_id]}')\n else:\n basket[item_id] = amount\n 
messages.success(request, f'Added {product.name} to your bag')\n\n request.session['basket'] = basket\n print(request.session['basket'])\n return redirect(redirectpage)", "def update():\n if request.method == 'POST':\n # TODO: Use utils.update_user()\n if 'field' not in request.form:\n table_name = request.form['table']\n table = get_table_by_name(table_name)\n i = inspect(table)\n fields = i.columns.keys()\n for f in fields:\n if i.columns[f].primary_key or i.columns[f].unique:\n fields.remove(f)\n\n return render_template(\n 'update.html',\n fields=fields,\n table_name=table_name,\n )\n\n table = get_table_by_name(request.form['table'])\n if table is None:\n return 'Table not chosen?'\n\n user = get_user(table, request.form['key'])\n success, reason = update_row_in_table(\n user, request.form['field'], request.form['value']\n )\n\n if not success:\n return f'Error occurred trying to update - {reason}'\n\n log(\n f\"<code>{current_user.name}</code> has updated <code>{request.form['field']}</code> of <code>{user}</code> to <code>{request.form['value']}</code>\"\n )\n return 'User has been updated!'\n return render_template('events.html', events=get_accessible_tables())", "def test_modify_item_successfully(self):\n self.client.post('/buckets',\n content_type='application/json',\n data=self.bucket, headers=self.header)\n self.client.post('/buckets/1/items',\n content_type='application/json',\n data=self.item,\n headers=self.header)\n response = self.client.put('/buckets/1/items/1',\n content_type='application/json',\n data=self.item_edit,\n headers=self.header)\n self.assertEquals(response.status_code, 200)\n self.assertIn('Item successfully updated',\n response.data.decode())", "def award_update(request, slug,id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n award_reference = get_object_or_404(Award, id=id,company=company)\n award_form = AwardForm(instance=award_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('award_form.html',{'form':award_form, 'info': award_reference},context_instance=RequestContext(request))\n else:\n award_form = AwardForm(request.POST, instance=award_reference)\n if award_form.is_valid():\n award_form.save()\n \n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('award_form.html', \n {'form': award_form, 'form_errors': award_form.errors, 'info': award_reference},\n context_instance=RequestContext(request))", "def put(self,request, pk =None):\n return Response({'method': 'PUT'})", "def task_update(request, id=None):\n instance = get_object_or_404(Todo, id=id)\n print(instance)\n print(instance)\n form = TaskForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n \n return redirect('lists:alllist')\n\n context = {\n \"desription\": instance.description,\n \"instance\": instance,\n \"form\":form,\n }\n return render(request, \"lists/update_task.html\", context)", "def put(self ,request, pk = None):\r\n\r\n return Response({'method ': 'put'})", "def funding_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n funding_reference = 
get_object_or_404(Funding, id=id,company=company)\n funding_form = FundingForm(instance=funding_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('funding_form.html',{'form':funding_form, 'info': funding_reference},context_instance=RequestContext(request))\n else:\n funding_form = FundingForm(request.POST, instance=funding_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if funding_form.is_valid():\n funding_form.save(commit = False)\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('funding_form.html', \n {'form': funding_form, 'form_errors': funding_form.errors, 'info': funding_reference},\n context_instance=RequestContext(request))" ]
[ "0.6676208", "0.64609563", "0.64174616", "0.63904625", "0.63857603", "0.6339873", "0.6314984", "0.62889534", "0.6269479", "0.6255692", "0.6253654", "0.624584", "0.62428105", "0.6238067", "0.62355477", "0.6230332", "0.62169313", "0.6211939", "0.62091535", "0.61984015", "0.618539", "0.6185296", "0.6174242", "0.6163913", "0.61522734", "0.61456543", "0.6123437", "0.6117977", "0.6110631", "0.6098802", "0.6083665", "0.60807365", "0.6043047", "0.6042451", "0.603365", "0.60328925", "0.6032766", "0.59845966", "0.5971495", "0.595734", "0.5951673", "0.5938952", "0.593828", "0.5915417", "0.591526", "0.590019", "0.5893721", "0.5879184", "0.58779234", "0.58736515", "0.5849608", "0.5814847", "0.5812691", "0.58118004", "0.5804155", "0.5802293", "0.57778656", "0.5776359", "0.57690126", "0.5753223", "0.5725017", "0.57242775", "0.57242775", "0.57242775", "0.57031494", "0.5700161", "0.56918246", "0.567438", "0.56709033", "0.56685036", "0.5663534", "0.5661673", "0.5645203", "0.5645203", "0.5645203", "0.563276", "0.5632493", "0.5620571", "0.5602397", "0.56001705", "0.55869895", "0.5582619", "0.55641514", "0.556171", "0.5561108", "0.5555685", "0.55535156", "0.5553023", "0.55384845", "0.55339295", "0.5529717", "0.55295855", "0.5515686", "0.5511505", "0.54965246", "0.5487424", "0.54844433", "0.54842496", "0.5483798", "0.54768044" ]
0.675567
0
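For context on the adjacent records: the document fields here are bare Flask view functions, shown without the imports, route registrations, or model layer they depend on. Below is a minimal, self-contained sketch of the same GET-edit-page / POST-update flow. Only Flask itself is assumed to be real; the URL rules, the in-memory CATAGORIES and ITEMS stand-ins (replacing Item.fetch_by_name_and_catagory_name, Catagory.fetch_all and item.update), and the stub read_item view are illustrative assumptions, while the edit_item.html and add_item.html template names are taken from the records and assumed to exist in the source project.

# Sketch of the edit flow the surrounding records implement: GET renders the
# form pre-filled with the item's current values; POST validates the submitted
# form, re-rendering it with errors on failure or updating the item and
# redirecting to its detail page on success.
from flask import Flask, abort, redirect, render_template, request, url_for

app = Flask(__name__)

# Illustrative in-memory stand-ins for the ORM helpers used in the records.
CATAGORIES = ["Soccer", "Snowboarding"]
ITEMS = {("Ball", "Soccer"): {"description": "A standard soccer ball"}}


@app.route("/catalog/<catagory_name>/<item_name>")
def read_item(item_name, catagory_name):
    # Stub detail page so url_for("read_item", ...) resolves in this sketch.
    item = ITEMS.get((item_name, catagory_name))
    if item is None:
        abort(404)
    return f"{item_name} ({catagory_name}): {item['description']}"


@app.route("/catalog/<catagory_name>/<item_name>/edit", methods=["GET"])
def update_item_page(item_name, catagory_name):
    item = ITEMS.get((item_name, catagory_name))
    if item is None:
        abort(404)
    values = {"name": item_name, "catagory": catagory_name,
              "description": item["description"]}
    return render_template("edit_item.html", catagories=CATAGORIES, values=values)


@app.route("/catalog/<catagory_name>/<item_name>/edit", methods=["POST"])
def update_item(item_name, catagory_name):
    if (item_name, catagory_name) not in ITEMS:
        abort(404)
    name = request.form.get("name", "").strip()
    catagory = request.form.get("catagory", "").strip()
    description = request.form.get("description", "")
    errors = {}
    if not name:
        errors["name"] = "Name is required."
    if catagory not in CATAGORIES:
        errors["catagory"] = "Unknown catagory."
    if errors:
        # Invalid form: re-render the add/edit form with the submitted values
        # and the errors so the template can display them next to the fields.
        values = {"name": name, "catagory": catagory, "description": description}
        return render_template("add_item.html", catagories=CATAGORIES,
                               values=values, errors=errors)
    # Valid form: apply the update, then redirect to the item's page.
    ITEMS.pop((item_name, catagory_name))
    ITEMS[(name, catagory)] = {"description": description}
    return redirect(url_for("read_item", item_name=name, catagory_name=catagory))

Only the successful branch follows post/redirect/get; the invalid branch deliberately re-renders the form in place, which is what the next record's query means by returning the create item page with errors displayed.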
Post endpoint to update an item. If the form is invalid, return the create item page with errors displayed; otherwise update the item and redirect to the item page.
def update_item(item_name, catagory_name): try: item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name) except NoResultFound: abort(404) errors = form_errors(request.form) new_item_name = request.form.get('name') new_catagory_name = request.form.get('catagory') new_description = request.form.get('description') if errors: values = { 'name': new_item_name, 'catagory': new_catagory_name, 'description': new_description } catagories = [c.name for c in Catagory.fetch_all()] return render_template( 'add_item.html', catagories=catagories, values=values, errors=errors ) item.update( name=new_item_name, catagory_name=new_catagory_name, description=new_description ) return redirect(url_for( 'read_item', item_name=new_item_name, catagory_name=new_catagory_name ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to edit the item because\n # the item was created by a different user, send a 403\n elif current_user != item.user:\n abort(403)\n\n form = ItemForm()\n\n # If the form is validated, update the item with its data to the\n # database\n if form.validate_on_submit():\n\n # If the item name or sport has been modified, check that an\n # item with the same name and sport does not already exist, or\n # send a flash message and do not add the new item to the\n # database\n if form.name.data != item.name or form.sport.data != item.sport:\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n return redirect(url_for('items.edit_item',\n item_name=item_name))\n\n # If the item name or sport has not been modified, update all\n # details to the database, send a flash message, and redirect\n # to 'home'\n else:\n item.name = form.name.data\n item.sport = form.sport.data\n item.category = form.category.data\n item.description = form.description.data\n item.private = form.private.data\n db.session.commit()\n flash(f'\"{item.name}\" has been updated!', 'good')\n return redirect(url_for('items.item', item_name=item_name))\n\n # If the form is being requested, not submitted, pre-fill the form\n # with existing item data\n elif request.method == 'GET':\n form.name.data = item.name\n form.sport.data = item.sport\n form.category.data = item.category\n form.description.data = item.description\n form.private.data = item.private\n\n return render_template('edit_item.html', item=item, form=form)", "def update_item(item_id):\n edited_item = session.query(Item).filter_by(id=item_id).one()\n\n # redirect to details page if current user does not own item\n if edited_item.user_id != login_session['user_id']:\n return redirect(\n url_for(\n 'item_details',\n category_id=edited_item.category_id,\n item_id=edited_item.id))\n\n if request.method == 'POST':\n if request.form['category']:\n edited_item.category_id = request.form['category']\n if request.form['name']:\n edited_item.name = request.form['name']\n if request.form['description']:\n edited_item.description = request.form['description']\n edited_item.updated_date = datetime.datetime.now()\n session.add(edited_item)\n session.commit()\n return redirect(\n url_for(\n 'item_details',\n category_id=edited_item.category_id,\n item_id=edited_item.id))\n else:\n categories = session.query(Category).all()\n return render_template(\n 'views/edit.html',\n edited_item=edited_item,\n categories=categories)", "def edit_item(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n Item.user_id == current_user.id\n ).first()\n\n if not item:\n flash(\"Couldn't find a item with that id\", category='warning')\n return redirect(request.referrer)\n\n form = ItemForm()\n form.editting_item_id = item_id\n # Query for select field\n form.category_id.query = Category.query.filter(\n Category.user_id == current_user.id).all()\n\n if form.validate_on_submit():\n item.category_id = form.category_id.data.id\n item.name = form.name.data.capitalize()\n item.description = form.description.data\n db.session.commit()\n flash('Successfully updated Item', 'success')\n return redirect(url_for('url.index'))\n\n elif request.method == 'GET':\n form.name.data = item.name\n 
form.description.data = item.description\n\n return render_template(\n 'forms/form.html',\n form_title='Edit Item',\n form=form,\n form_name='item',\n action=url_for('url.edit_item', item_id=item_id))", "def editItem(category_id, item_id):\n editedItem = session.query(Item).filter_by(id=item_id).one()\n if editedItem.user_id != login_session['user_id']:\n flash('You are not authorized to edit this Item.\\\n Please create own Item in order to edit.')\n return redirect(url_for('showallCategories'))\n if request.method == 'POST':\n if request.form['name']:\n editedItem.name = request.form['name']\n if request.form['description']:\n editedItem.description = request.form['description']\n if request.form['price']:\n editedItem.price = request.form['price']\n session.add(editedItem)\n session.commit()\n flash('Item Edit successfull')\n return redirect(url_for('showCategory', category_id=category_id))\n else:\n return render_template(\n 'editItem.html', category_id=category_id,\n item_id=item_id, item=editedItem)", "def editItem(category_id, item_id):\n editedItem = session.query(Item).filter_by(id=item_id).one()\n category = session.query(Category).filter_by(id=category_id).one()\n\n if editedItem.user_id != login_session['user_id']:\n flash(\"You are authorised to edit items created by you!\")\n return redirect(url_for(\"showCatalog\"))\n\n if request.method == 'POST':\n if request.form['name']:\n editedItem.name = request.form['name']\n if request.form['description']:\n editedItem.description = request.form['description']\n if request.form['price']:\n editedItem.price = request.form['price']\n session.add(editedItem)\n session.commit()\n flash('%s Item Successfully Edited' % (editedItem.name))\n return redirect(url_for('showItem',\n category_id=editedItem.category_id))\n else:\n return render_template('edititem.html', category=category,\n item=editedItem)", "def edit_item(item_id):\n if 'userinfo' not in session.keys():\n session['target'] = url_for('edit_item', item_id=item_id)\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n item.name = request.form['name']\n item.category_id = request.form['category']\n item.description = request.form['description']\n sqlsession.commit()\n return redirect(url_for('view_item', item_id=item_id))\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n categories = sqlsession.query(Category).all()\n return render_template(\"edit_item.html\",\n item=item,\n categories=categories)", "def editItem(category_id, item_id):\r\n # authentication\r\n if 'username' not in login_session:\r\n flash('Please login to edit item')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n # validation\r\n category = session.query(Category).filter_by(id=category_id).first()\r\n if not category:\r\n flash('Attempted operation on non-existent category')\r\n return redirect(url_for('showCategories'))\r\n\r\n editedItem = session.query(Item).filter_by(id=item_id,\r\n category_id=category_id).first()\r\n if not editedItem:\r\n flash('Attempt to edit non-existent item')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n # authorization\r\n if login_session['user_id'] != editedItem.user_id:\r\n flash('Sorry, you are not authorized to edit the item \\'{}\\''\r\n .format(editedItem.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n if request.method == 'POST':\r\n # 
update operation\r\n if request.form['name']:\r\n editedItem.name = request.form['name']\r\n\r\n if request.form['description']:\r\n editedItem.description = request.form['description']\r\n else:\r\n editedItem.description = ''\r\n session.add(editedItem)\r\n session.commit()\r\n flash('Edited Item \\'{}\\' Successfully'.format(editedItem.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n else:\r\n # serve GET requests with the form\r\n return render_template(\"editItem.html\",\r\n category=category, item=editedItem)", "def edit_item(item_id):\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'POST':\n item = session.query(Item).filter_by(id=item_id).one()\n item.picture = request.form['picture']\n item.name = request.form['name']\n item.price = request.form['price']\n item.description = request.form['description']\n item.user_id = login_session['user_id']\n session.add(item)\n session.commit()\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def editItem(sport_id, item_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n editedItem = session.query(Item).filter_by(id=item_id).one()\n if request.method == 'POST':\n if request.form['name']:\n editedItem.name = request.form['name']\n if request.form['description']:\n editedItem.description = request.form['description']\n editedItem.user_id = login_session['user_id']\n session.add(editedItem)\n session.commit()\n return redirect(url_for('showCatalog', sport_id=sport_id))\n else:\n return render_template('edititem.html', sport_id=sport_id,\n item_id=item_id, sport=sport, item=editedItem)", "def item_update(request):\n if request.method == 'POST':\n item_to_update = get_object_or_404(StockItem, pk=request.POST['id'])\n item_to_update.name = request.POST['name']\n item_to_update.count = int(request.POST['count'])\n item_to_update.date_of_expiration = request.POST['exp']\n item_to_update.fk_category = Category.objects.get(name=request.POST['cat'])\n item_to_update.fk_subcategory = SubCategory.objects.get(name=request.POST['subcat'])\n item_to_update.notes = request.POST['notes']\n item_to_update.save()\n return HttpResponse(status=200)", "def edit_item(item_name):\n\n item = Item.query.filter_by(name=item_name).first_or_404()\n\n if item.owner != current_user:\n flash(\"Failed to edit item %s since you are not the owner.\" %\n item.name)\n return redirect(url_for('.index'))\n\n form = AddOrEditItemForm(Category.query.order_by(Category.name).all())\n if form.validate_on_submit():\n\n img_upload_name = secure_filename(form.img_upload.data.filename)\n img_deletehash = None\n img_url = None\n\n # Delete uploaded image on Imgur\n if item.img_deletehash is not None \\\n and not delete_image(item.img_deletehash):\n flash(\"Failed to edit item \\\"%s\\\".\" % item.name)\n return redirect(url_for('.index'))\n\n # Upload new image on Imgur\n if img_upload_name != '':\n img_url, img_deletehash = upload_image(form.img_upload.data)\n print \"img_url: \" + img_url\n print \"img_deletehash: \" + img_deletehash\n if img_url is None or img_deletehash is None:\n flash(\"Failed to upload image.\")\n return 
redirect(url_for('.index'))\n\n elif form.img_url.data != '':\n img_url = form.img_url.data\n\n item.name = form.name.data\n item.description = form.description.data\n item.category = Category.query.get(form.category.data)\n item.img_url = img_url\n item.img_deletehash = img_deletehash\n\n try:\n db.session.commit()\n except:\n flash(\n (\"Failed to edit item \\\"%s\\\".\"\n \" Make sure that the item name is unique.\") % item.name)\n else:\n flash(\"Item \\\"%s\\\" has been edited.\" % item.name)\n finally:\n return redirect(url_for('.index'))\n\n form.name.data = item.name\n form.description.data = item.description\n form.category.data = item.category.id\n form.img_url.data = item.img_url\n\n return render_template('add_or_edit.html', form=form)", "def edit_item(request, item_id):\n if request.user.is_superuser:\n item = get_object_or_404(Product, pk=item_id)\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES, instance=item)\n if form.is_valid():\n form.save()\n messages.success(request, 'Item was successfully updated.')\n return redirect(reverse('item_info', args=[item.id]))\n else:\n messages.error(request, 'There was an issue updating the '\n 'item. Please make sure the form is valid.')\n else:\n form = ProductForm(instance=item)\n else:\n messages.error(request, 'Sorry, you do not have permission to access '\n 'this page.')\n return redirect(reverse('home'))\n\n template = 'shop/edit_item.html'\n context = {\n 'form': form,\n 'item': item,\n }\n\n return render(request, template, context)", "def editListItem(category_id, item_id):\n\n if 'username' not in login_session:\n return redirect('/login')\n\n editedItem = session.query(ListItem).filter_by(id=item_id).one()\n category = session.query(Category).filter_by(id=category_id).one()\n\n if category.user_id != login_session['user_id']:\n flash('You are not the creator of %s category, and cannot modify it' %\n category.name)\n return redirect(url_for('showItems', category_id=category.id))\n\n if request.method == 'POST':\n if \"btn_edit\" in request.form:\n if request.form['name']:\n editedItem.name = request.form['name']\n if request.form['description']:\n editedItem.description = request.form['description']\n session.add(editedItem)\n session.commit()\n flash('Catalog Item Successfully Edited')\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return render_template('edititem.html',\n item=editedItem,\n user=getUserInfo(login_session['user_id']))", "def edit_item(category, item):\n # Verify user login. 
If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n # Query database with SQLAlchemy to display categories on page\n categories = session.query(Categories).all()\n if request.method == 'POST':\n # Query database with SQLAlchemy and store queries as objects\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n item = (session.query(Items)\n .filter_by(name=item.replace('-', ' '))\n .one())\n # Get form fields submitted by user, or retain item info\n name = request.form['name'] if request.form['name'] else item.name\n url = request.form['url'] if request.form['url'] else item.url\n if request.form['photo_url']:\n photo_url = request.form['photo_url']\n else:\n photo_url = item.photo_url\n if request.form['description']:\n description = request.form['description']\n else:\n description = item.description\n category = request.form['item_category']\n # Retrieve the database ID of the item's category\n category_id = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n # Get user's database ID\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n # Get database ID of creator\n creator_db_id = item.creator_db_id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print(\"Item creator's database primary key id is {}.\"\n .format(creator_db_id))\n print('Item to edit is \"{}\".'.format(item.name))\n # Only allow creator to edit. If not, redirect to login.\n if user_db_id != creator_db_id:\n flash('Only the creator can edit. Please log in as creator.')\n return redirect(url_for('home'))\n # Store edits in an object\n edited_item = Items(name=name,\n url=url,\n photo_url=photo_url,\n description=description,\n category_id=category_id.id,\n creator_db_id=user_db_id)\n # Overwrite item object with new info from edited_item object\n item.name = edited_item.name\n item.url = edited_item.url\n item.photo_url = edited_item.photo_url\n item.description = edited_item.description\n item.category_id = edited_item.category_id\n session.add(item)\n session.commit()\n print('Item \"{}\" edited.'.format(edited_item.name))\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Query database with SQLAlchemy to display categories on page\n categories = session.query(Categories).all()\n # Render webpage\n return render_template('edit_item.html',\n categories=categories,\n item=item,\n login_status=login_status)", "def edit(item_id):\n session = current_app.config['db']\n item = session.query(WineABV).filter_by(id=item_id).one()\n if request.method == \"POST\":\n new_name = request.form['itemname']\n item.name = new_name\n try:\n session.commit()\n except exc.IntegrityError:\n session.rollback()\n flash(\"Duplicate values!\", 'danger')\n return render_template('edit_form.html', item=item)\n\n flash(\"Successfully Edited '%s'\" % (new_name,), 'success')\n return redirect(url_for('.show'))\n else:\n return render_template(template_prefix+'edit_form.html', item=item)", "def test_update_item_using_post(self):\n pass", "def editItem(category_item_id):\n editedItem = db.findItem(id=category_item_id)\n if editedItem.user_id != login_session['user_id']:\n return not_authorized()\n if request.method == 'POST':\n db.updateItem(editedItem, request.form)\n return redirect(url_for('showCatalog'))\n return render_template(\n 'edit_item.html', 
categories=db.getAllCategories(), item=editedItem)", "def put_on_sale():\n\n item = {\n \"status\": 'for_sale',\n \"category\": request.form['item-type'],\n \"name\": request.form['item-name'],\n \"price\": request.form['item-price'],\n \"description\": request.form['item-description'],\n \"mail\": request.form['seller-email']\n }\n\n put_item(item)\n\n return redirect('/')", "def editMenuItemPage(restaurant_id, item_id):\n item = db_methods.searchItemByID(item_id)\n res_id = restaurant_id\n item_id = item_id\n if request.method == 'POST':\n item_name = request.form['item_name']\n item_price = request.form['item_price']\n item_desc = request.form['item_desc']\n item_course = request.form['item_course']\n if item_name and item_price and item_desc and item_course:\n db_methods.editMenuItem(item_name, item_price, item_desc, item_course, item_id)\n time.sleep(0.1)\n return redirect('/restaurants/%s/menu' % res_id)\n else:\n error = \"Please fill out all required fields.\"\n return render_template(\"editmenuitem.html\", error = error)\n else:\n return render_template('editmenuitem.html', item=item, res_id=res_id)", "def save_item(item, item_id):\n # User is modifying an EXISTING item in the database\n if item_id > 0:\n item.Item.name = request.form['title']\n item.Item.description = request.form['description']\n item.Item.category_id = request.form['category']\n session.add(item.Item)\n session.commit()\n flash(\"Updated \" + item.Item.name)\n return render_template('item_details.html', item=item, login_session=login_session)\n\n # User is creating a NEW item\n else:\n new_item = Item(name=request.form.get('title'), description=request.form['description'],\n category_id=request.form['category'],\n user_id=login_session['userid'])\n session.add(new_item)\n session.commit()\n flash(\"Created \" + new_item.name)\n created_item = session.query(Item, User).filter(Item.id == new_item.id).join(User).first()\n return render_template('item_details.html', item=created_item, login_session=login_session)", "def update(id):\n if request.method == \"POST\":\n result = update_post(\n id,\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n post = get_post(id)\n return render_template(\"edit.html\", **post)", "def post(self, request, *args, **kwargs):\n form = self.get_form()\n\n if form.validate():\n messages.add_message(request, messages.SUCCESS, \"Payment update successfull\")\n return self.form_valid(form)\n else:\n messages.add_message(request, messages.ERROR, \"Payment update failed\")\n return self.form_invalid(form)", "def update_todo_list_view(request: HttpRequest, pk: int) -> Union[HttpResponse, HttpResponseRedirect]:\n todo_list = TodoListModel.objects.get(id=pk)\n\n context = {\n 'todo_list': todo_list\n }\n\n if request.method == 'GET':\n context['form'] = TodoListForm(instance=todo_list)\n\n return render(request, 'todo/update_todo_list.html', context)\n elif request.method == 'POST':\n form = TodoListForm(data=deepcopy(request.POST), instance=todo_list)\n\n if form.is_valid():\n todo_list = form.save()\n\n return redirect(todo_list.get_absolute_url())\n else:\n context['form'] = form\n return render(request, 'todo/update_todo_list.html', {'form': form})", "def pp_update_item(edit_url, **kwargs):\n # build Requests session\n pp = requests.Session()\n pp.auth = (udata.pp2['user'], udata.pp2['pass'])\n pp.cookies.update(get_cookies('secure1.inmotionhosting.com'))\n\n # retrieve existing data\n fdata = pp_get_item(edit_url)\n\n # 
update form data with kwargs\n fdata.update(kwargs)\n\n # then post update\n bpost = pp.post('https://secure1.inmotionhosting.com%s' % (edit_url), data=fdata)\n\n return bpost", "def new_item():\n form = ItemForm()\n user = current_user\n\n # If the form is validated, add its data to the database\n if form.validate_on_submit():\n\n # Check that an item with the same name and sport does not\n # already exist, or send a flash message and do not add the\n # new item to the database\n query = Item.query.filter_by(name=form.name.data,\n sport=form.sport.data).first()\n if query:\n flash('This sport already has an item with that name.', 'bad')\n\n # If the item does not yet exist, add all details to the\n # database, send a flash message, and redirect to 'home'\n else:\n name = form.name.data\n sport = form.sport.data\n category = form.category.data\n description = form.description.data\n private = form.private.data\n item = Item(name=name, sport=sport, category=category,\n description=description, private=private,\n user_id=user.id)\n db.session.add(item)\n db.session.commit()\n flash(f'\"{name}\" has been added!', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('new_item.html', form=form, title='New Item')", "def edit_item(request, product_id):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, you are not permitted to do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n if request.method == 'POST':\n form = ProductForm(request.POST, request.FILES, instance=product)\n if form.is_valid():\n form.save()\n messages.success(request, 'You have successfully updated store item!')\n return redirect(reverse('home'))\n else:\n messages.error(request, 'Failed to update item. 
Please check the form.')\n else:\n form = ProductForm(instance=product)\n messages.info(request, f'You are editing {product.name}')\n\n template = 'products/edit_item.html'\n context = {\n 'form': form,\n 'product': product,\n }\n\n return render(request, template, context)", "def save(self, request, item, linked_item, linked_item_form):\n\t\tlinked_item.url = linked_item_form.cleaned_data['url']\n\t\titem.description = linked_item_form.cleaned_data['subject']\n\t\titem.priority = Priority.objects.get(id = linked_item_form.cleaned_data['priority'])\n\t\tlinked_item.delivery_notes = linked_item_form.cleaned_data['delivery_notes']\n\t\t#\n\t\t# Look for actions other than save\n\t\t#\n\t\tif request.POST.get('update', '') == 'Completed':\n\t\t\titem.fixed = True\n\t\t\titem.validated = False\n\t\tif request.POST.get('update', '') == 'Failed':\n\t\t\titem.fixed = False\n\t\t\titem.validated = True\n\t\t\titem.location = Location.objects.get(name = 'Production')\n\t\tif request.POST.get('update', '') == 'Verified':\n\t\t\titem.fixed = True\n\t\t\titem.validated = True\n\n\t\titem.save()\n\t\tlinked_item.save()\n\n\t\t#\n\t\t# Check for comments and add\n\t\t#\n\t\tcomment_text = linked_item_form.cleaned_data['comments'].strip()\n\t\tif comment_text != '':\n\t\t\tpost_comment(request, item, comment_text)", "def update_appointment(request,pk):\n appointment = AppointmentRequests.objects.get(id=pk)\n form = AppointmentUpdate(instance=appointment)\n if request.method == \"POST\":\n form = AppointmentUpdate(request.POST,instance=appointment)\n if form.is_valid():\n form.save()\n return redirect(\"dashboard\")\n else:\n messages.info(request,\"Invalid Data sent, Make sure you provided right data.\")\n return redirect(\"update_appointment\",pk=pk)\n else:\n return render(request,\"update_appointment.html\",{\"form\":form})", "def management_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n management_reference = get_object_or_404(Management, id=id,company=company)\n management_form = ManagementForm(instance=management_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('management_form.html',{'form':management_form, 'info': management_reference},context_instance=RequestContext(request))\n else:\n management_form = ManagementForm(request.POST, instance=management_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if management_form.is_valid():\n management_form.save()\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('management_form.html', \n {'form': management_form, 'form_errors': management_form.errors, 'info': management_reference},\n context_instance=RequestContext(request))", "async def post(self):\n def parse(param):\n parts = param.split('=')\n value = int(parts[1]) if parts[1].isnumeric() else 0\n return parts[0], value\n\n try:\n params_str = self.request.body.decode()\n data = dict([parse(p) for p in params_str.split('&')])\n except Exception as e:\n logging.error(f'Error receiving update form: {e}')\n return\n\n data['user_id'] = self.user.user_id\n data['icu_id'] = self.icu.icu_id\n await self.queue.put(data)\n\n 
self.redirect(home.HomeHandler.ROUTE)", "def editItem(category_id, item_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n session = DBSession()\r\n editedItem = session.query(Item).filter_by(id=item_id).one()\r\n if editedItem.user_id != login_session['user_id']:\r\n return \"<script>function myFunction() {alert('You are not authorized to edit this item.');}</script><body onload='myFunction()''>\"\r\n if request.method == 'POST':\r\n editedItem.name = request.form['name']\r\n session.add(editedItem)\r\n session.commit()\r\n return redirect(url_for('showCategoryItems', category_id=category_id))\r\n else:\r\n return render_template('edititem.html', category_id=category_id, item=editedItem)", "def update_item(self, id: str, user: User, **kwargs) -> None:", "def add_item():\n\n form = ItemForm()\n # Query for select field\n form.category_id.query = Category.query.filter(\n Category.user_id == current_user.id).all()\n\n if form.validate_on_submit():\n new_item = Item(\n category_id=form.category_id.data.id,\n name=form.name.data.capitalize(),\n description=form.description.data,\n user_id=current_user.id)\n db.session.add(new_item)\n db.session.commit()\n flash(\"New item '{}' was successfully created\".format(\n form.name.data.capitalize()), category='success')\n return redirect(url_for('url.index'))\n\n return render_template(\n 'forms/form.html',\n form_title='Add Item',\n form=form,\n form_name='item',\n action=url_for('url.add_item'))", "def new_item():\n if request.method == 'POST':\n new_item = Item(\n category_id=int(request.form['category']),\n name=request.form['name'],\n description=request.form['description'],\n created_date=datetime.datetime.now(),\n user_id=login_session['user_id'])\n session.add(new_item)\n session.commit()\n return redirect(\n url_for(\n 'item_details',\n category_id=new_item.category_id,\n item_id=new_item.id))\n else:\n categories = session.query(\n Category).all()\n return render_template(\n 'views/add.html',\n categories=categories)", "def funding_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n funding_reference = get_object_or_404(Funding, id=id,company=company)\n funding_form = FundingForm(instance=funding_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('funding_form.html',{'form':funding_form, 'info': funding_reference},context_instance=RequestContext(request))\n else:\n funding_form = FundingForm(request.POST, instance=funding_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if funding_form.is_valid():\n funding_form.save(commit = False)\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('funding_form.html', \n {'form': funding_form, 'form_errors': funding_form.errors, 'info': funding_reference},\n context_instance=RequestContext(request))", "def post_update():\n\n\n user_id = session['user_id']\n post = request.form.get('post')\n\n Update.add_update(user_id, post)\n\n return \"Updated Post\"", "def add_item():\n # Verify user login. 
If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n if request.method == 'POST':\n # Get form fields\n name = request.form['name']\n url = request.form['url']\n photo_url = request.form['photo_url']\n description = request.form['description']\n category = request.form['item_category']\n # Retrieve the database ID of the selected category\n category_id = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n # Retrieve user's database ID for the item's database entry\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print('Database ID of category is {}.'.format(category_id.id))\n # Flash messages for incomplete item info\n if not request.form['name']:\n flash('Please add item name')\n return redirect(url_for('add_item'))\n if not request.form['url']:\n flash('Please add item URL')\n return redirect(url_for('add_item'))\n if not request.form['photo_url']:\n flash('Please add item photo URL')\n return redirect(url_for('add_item'))\n if not request.form['description']:\n flash('Please add a description')\n return redirect(url_for('add_item'))\n # Query database for item name\n item_name_in_db = (session.query(Items.name)\n .filter_by(name=name)\n .all())\n # If the item name is already in the database, don't add\n if item_name_in_db:\n print('Item name \"{}\" already in database.'.format(name))\n flash('Item name \"{}\" already in database.'.format(name))\n return redirect(url_for('add_item'))\n # Create object with form field info to add to database\n new_item = Items(name=name,\n url=url,\n photo_url=photo_url,\n description=description,\n category_id=category_id.id,\n creator_db_id=user_db_id)\n session.add(new_item)\n session.commit()\n print('Item \"{}\" created.'.format(new_item.name))\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Query database with SQLAlchemy to display categories on page\n categories = session.query(Categories).all()\n # Render webpage\n return render_template('add_item.html',\n categories=categories,\n login_status=login_status)", "def issueUpdateViewSubmit(request, issue):\n\n data = request.POST.copy()\n user = request.user\n\n if not user.has_perm('IssueTracker.can_change'):\n # don't process\n return True\n\n if issue.item:\n item = issue.item.item\n \n form = forms.UpdateMachineForm(data, instance=item)\n \n if form.is_valid():\n form.save()\n return True\n return False\n\n # didn't do any processing, proceed\n return True", "def edit_item_details(item_id):\n category_id = None\n if 'category_id' in request.args:\n category_id = int(request.args['category_id'])\n if 'userid' not in login_session:\n flash('Unfortunately you need to be logged in to make changes', 'error')\n return redirect(url_for('show_homepage'))\n\n item = None\n if item_id != 0:\n item = is_user_the_creator(item_id)\n if request.method == 'GET':\n categories = session.query(Category).order_by(asc(Category.name)).all()\n return display_item(categories, item, item_id, category_id)\n else:\n return save_item(item, item_id)", "def office_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n office_reference = get_object_or_404(Office, id=id,company=company)\n office_form = OfficeForm(instance=office_reference)\n\n 
#verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('office_form.html',{'form':office_form, 'info': office_reference},context_instance=RequestContext(request))\n else:\n office_form = OfficeForm(request.POST, instance=office_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if office_form.is_valid():\n office_form.save(commit = False)\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'info': office_reference},\n context_instance=RequestContext(request))", "def companylink_update(request, slug):\n\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n companylink_reference = get_object_or_404(CompanyLink, company=company)\n companylink_form = CompanyLinkForm(instance=companylink_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('companylink_form.html',{'form':companylink_form, 'info': companylink_reference},context_instance=RequestContext(request))\n else:\n companylink_form = CompanyLinkForm(request.POST, instance=companylink_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if companylink_form.is_valid():\n companylink_form.save()\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('companylink_form.html', \n {'form': companylink_form, 'form_errors': companylink_form.errors, 'info': companylink_reference},\n context_instance=RequestContext(request))", "def dispatch(self, request, *args, **kwargs):\n item_id = request.POST.get('item') or kwargs.get('pk') or 1\n try:\n self.object = LabelingAnswer.objects.get(rater=self.rater,\n item__pk=item_id)\n self.item = Item.objects.get(pk=item_id)\n return HttpResponseRedirect(self.get_success_url())\n except LabelingAnswer.DoesNotExist:\n pass\n\n self.item = self.get_item()\n if not self.item:\n messages.error(request,\n f'Item id {item_id} is missing or has already '\n f'been labeled by rater id {self.rater.id}')\n return HttpResponseRedirect(reverse('workflow:error'))\n\n return super().dispatch(request, *args, **kwargs)", "def updatebasketview(request, item_id):\n product = Product.objects.get(pk=item_id)\n amount = int(request.POST.get('amount'))\n redirectpage = request.POST.get('redirectpage')\n basket = request.session.get('basket', {})\n\n if item_id in list(basket.keys()):\n basket[item_id] += amount\n messages.success(\n request, f'Updated {product.name} amount to {basket[item_id]}')\n else:\n basket[item_id] = amount\n messages.success(request, f'Added {product.name} to your bag')\n\n request.session['basket'] = basket\n print(request.session['basket'])\n return redirect(redirectpage)", "def update_items(self, request, *a, **kw):\n item_def = request.data\n cpdoc = self.get_object()\n item_def['id'] = cpdoc.id\n\n item_ser = self.get_serializer(instance=obj_cp, 
data=item_def)\n item_ser.is_valid(raise_exception=True)\n item_obj = item_ser.save()\n headers = self.get_success_headers(item_ser.data)\n return response.Response(item_ser.data, headers=headers)", "def save(self, request, item, linked_item, linked_item_form):\n\t\titem.description = linked_item_form.cleaned_data['subject']\n\t\tlinked_item.text = linked_item_form.cleaned_data['text']\n\t\tlinked_item.delivery_notes = linked_item_form.cleaned_data['delivery_notes']\n\t\titem.priority = Priority.objects.get(id = linked_item_form.cleaned_data['priority'])\n\t\t#\n\t\t# Look for actions other than save\n\t\t#\n\t\tif request.POST.get('update', '') == 'Completed':\n\t\t\titem.fixed = True\n\t\tif request.POST.get('update', '') == 'Failed':\n\t\t\titem.fixed = False\n\t\t\titem.validated = True\n\t\tif request.POST.get('update', '') == 'Verified':\n\t\t\titem.fixed = True\n\t\t\titem.validated = True\n\n\t\titem.save()\n\t\tlinked_item.save()\n\n\t\t#\n\t\t# Check for comments and add\n\t\t#\n\t\tcomment_text = linked_item_form.cleaned_data['comments'].strip()\n\t\tif comment_text != '':\n\t\t\tpost_comment(request, item, comment_text)", "def update_item_page(item_name, catagory_name):\n item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name)\n catagories = [c.name for c in Catagory.fetch_all()]\n return render_template(\n 'edit_item.html',\n catagories=catagories,\n values={\n 'name': item.name,\n 'catagory': item.catagory_name,\n 'description': item.description\n },\n )", "def updateItem(request):\n # Getting the data when you add to cart. Body of JSON\n data = json.loads(request.body)\n # Getting values we sent to body as JSON. prodID and Action\n productId = data['prodId']\n action = data['action']\n\n # Get curr customer\n customer = request.user.customer\n product = BobaProduct.objects.get(id=productId)\n\n # get order associated with customer\n order, created = CustomerOrder.objects.get_or_create(customer=customer)\n\n # Get value of curr order. If it exist, want to just change it\n orderItem, created = OrderItem.objects.get_or_create(order=order, product=product)\n\n if action == 'add':\n orderItem.quantity += 1\n elif action == 'remove':\n orderItem.quantity -= 1\n orderItem.save() #saving this order item\n\n # If the quantity of the order goes below 1, delete the orderItem\n\n if orderItem.quantity < 1:\n orderItem.delete()\n return JsonResponse('Item was added', safe=False)", "def add_item(request):\n \n if not request.user.is_superuser:\n messages.error(request, 'Sorry, you are not permitted to do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = ProductForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request, 'New item added successfully!')\n return redirect(reverse('add_item'))\n else:\n messages.error(request, 'Failed to add item. 
Please check the form.')\n else:\n form = ProductForm()\n \n template = 'products/add_item.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def update_cart(request):\n post_data = request.POST.copy()\n item_id = post_data['item_id']\n quantity = post_data['quantity']\n cart_item = get_single_item(request, item_id)\n if cart_item:\n try:\n if int(quantity) > 0:\n cart_item.quantity = int(quantity)\n cart_item.save()\n else:\n remove_from_cart(request)\n except ValueError: # por si la entrada en el input Update es una letra\n pass", "def edit(self,item=None):\r\n raise AbstractError\r\n return False", "def add_item(request, shoppinglist_id, category_id=False, product_id=False):\n if request.method == 'POST':\n form = ItemForm(request.POST)\n if form.is_valid():\n shoppinglist = get_object_or_404(\n Shoppinglist,\n pk=shoppinglist_id,\n pantry__owner=request.user\n )\n product = get_object_or_404(Product, pk=product_id)\n try:\n item = Item.objects.get(shoppinglist=shoppinglist,\n product=product)\n item.amount += form.cleaned_data['amount']\n except ObjectDoesNotExist:\n item = Item(shoppinglist=shoppinglist,\n product=product,\n amount=form.cleaned_data['amount'],\n bought=False)\n item.save()\n return redirect('shoppinglists.views.detail', shoppinglist_id)\n\n response_dict = {'shoppinglist_id': shoppinglist_id,\n 'categories': Category.objects.all(),\n 'logged': False}\n if category_id:\n response_dict.update(\n {'category_id': category_id,\n 'category': Category.objects.get(pk=category_id),\n 'products': Product.objects.filter(categories__pk=category_id)}\n )\n if product_id:\n response_dict.update(\n {'form': ItemForm(),\n 'product': Product.objects.get(pk=product_id),\n 'product_id': product_id}\n )\n return render_to_response('shoppinglists/item_form.html',\n response_dict,\n context_instance=RequestContext(request))", "def test_modify_item_successfully(self):\n self.client.post('/buckets',\n content_type='application/json',\n data=self.bucket, headers=self.header)\n self.client.post('/buckets/1/items',\n content_type='application/json',\n data=self.item,\n headers=self.header)\n response = self.client.put('/buckets/1/items/1',\n content_type='application/json',\n data=self.item_edit,\n headers=self.header)\n self.assertEquals(response.status_code, 200)\n self.assertIn('Item successfully updated',\n response.data.decode())", "def add_item(request):\n if request.user.is_superuser:\n if request.method == \"POST\":\n form = ProductForm(request.POST, request.FILES)\n if form.is_valid():\n new_item = form.save()\n messages.success(request, 'Your product was added to the '\n 'store successfully.')\n return redirect(reverse('item_info', args=[new_item.id]))\n else:\n messages.error(request, 'There was an issue adding the '\n 'product. 
Please ensure the form is valid.')\n else:\n form = ProductForm()\n else:\n messages.error(request, 'Sorry, you do not have permission to access '\n 'this page.')\n return redirect(reverse('home'))\n\n template = 'shop/add_item.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)", "def add_item():\n\n form = AddOrEditItemForm(Category.query.order_by(Category.name).all())\n img_upload_name = None\n if form.validate_on_submit():\n img_upload_name = secure_filename(form.img_upload.data.filename)\n img_deletehash = None\n img_url = None\n\n # Upload image to Imgur if FileField is specified\n if img_upload_name != '':\n img_url, img_deletehash = upload_image(form.img_upload.data)\n if img_url is None or img_deletehash is None:\n flash(\"Failed to upload image.\")\n return redirect(url_for('.index'))\n elif form.img_url.data != '':\n img_url = form.img_url.data\n\n new_item = Item(name=form.name.data, description=form.description.data,\n category=Category.query.get(form.category.data),\n img_url=img_url, img_deletehash=img_deletehash,\n owner=current_user._get_current_object())\n\n try:\n db.session.add(new_item)\n db.session.commit()\n except:\n flash(\n (\"Failed to add item \\\"%s\\\".\"\n \" Make sure that the item name is unique.\") % new_item.name)\n else:\n flash(\"A new item \\\"%s\\\" has been added.\" % new_item.name)\n finally:\n return redirect(url_for('.index'))\n\n # Set SelectField's default value\n category_name = request.args.get('category_name')\n if category_name is not None:\n default_category = Category.query.filter_by(name=category_name).first()\n if default_category is None:\n flash(\"Wrong parameter(s).\")\n return redirect(url_for('.index'))\n form.category.data = default_category.id\n\n return render_template('add_or_edit.html',\n form=form, filename=img_upload_name)", "def acquisition_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n acquisition_reference = get_object_or_404(Acquisition, id=id,company=company)\n acquisition_form = AcquisitionForm(instance=acquisition_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('acquisition_form.html',{'form':acquisition_form, 'info': acquisition_reference},context_instance=RequestContext(request))\n else:\n acquisition_form = AcquisitionForm(request.POST, instance=acqusition_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if acquisition_form.is_valid():\n acquisition_form.save()\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('acquisition_form.html', \n {'form': acquisition_form, 'form_errors': acquisition_form.errors, 'info': acquisition_reference},\n context_instance=RequestContext(request))", "def test_update_item_good(test_client, item):\n\n response = test_client.put(GOOD_ITEM_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 200\n assert data['item']['name'] == item['name']\n assert data['item']['value'] == item['value']", "def update(id):\r\n post = get_post(id)\r\n db = get_db()\r\n cur = db.cursor()\r\n\r\n if request.method == 
'POST':\r\n title = request.form['title']\r\n body = request.form['body']\r\n error = None\r\n\r\n cur.execute('SELECT id FROM novel.post WHERE title = %s', title)\r\n newId = cur.fetchone()\r\n\r\n\r\n\r\n if not title:\r\n error = 'Title is required.'\r\n\r\n if newId and newId['id'] != id:\r\n error = 'Title is repeated.'\r\n\r\n if error is not None:\r\n flash(error)\r\n else:\r\n\r\n cur.execute(\r\n 'UPDATE novel.post SET title = \"{0}\", body = \"{1}\" WHERE id = {2}'\r\n .format(title, body, id)\r\n )\r\n db.commit()\r\n return redirect(url_for('novel.index'))\r\n\r\n return render_template('novel/update.html', post=post)", "def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n self.form_invalid_init(form=form)\n self.form_invalid_add_global_errormessages(form=form)\n return self.form_invalid(form)", "def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.validate():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)", "def post(self, request, *args, **kwargs):\n form = self.get_form(self.form_class)\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)", "def post(self, request, *args, **kwargs):\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n if form.is_valid():\n return self.form_valid(form, request)\n else:\n return self.form_invalid(form, request)", "def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj", "def update(request, todo_id):\n context = {}\n todo_query = Todo.objects.filter(id=todo_id).first()\n\n if request.method == \"POST\":\n forms = TodoForm(request.POST,instance=todo_query)\n if forms.is_valid():\n task = forms.save()\n task.user = request.user\n task.save()\n return redirect(\"index\")\n else:\n context['form'] = forms\n return render(request, \"todos/update.html\", context)\n\n forms = TodoForm(instance=todo_query)\n context['form'] = forms\n context['task_id'] = todo_query\n return render(request, \"todos/update.html\", context)", "def award_update(request, slug,id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n award_reference = get_object_or_404(Award, id=id,company=company)\n award_form = AwardForm(instance=award_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('award_form.html',{'form':award_form, 'info': award_reference},context_instance=RequestContext(request))\n else:\n award_form = AwardForm(request.POST, instance=award_reference)\n if award_form.is_valid():\n award_form.save()\n \n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('award_form.html', \n {'form': award_form, 'form_errors': award_form.errors, 'info': award_reference},\n context_instance=RequestContext(request))", "def adjust_basket(request, item_id):\n\n product = get_object_or_404(Product, pk=item_id)\n quantity = int(request.POST.get('quantity'))\n basket = request.session.get('basket', {})\n 
basket[item_id] = quantity\n messages.success(request, f'{product.name} quantity updated to \\\n {basket[item_id]}')\n request.session['basket'] = basket\n\n return redirect(reverse('view_basket'))", "def community_post_update_view(request, slug):\n task = \"Update\"\n post = CommunityPostModel.objects.get(slug=slug) # Get the post\n\n form = AddEditPostForm(instance=post) # An unbound form\n if request.method == 'POST': # If the form has been submitted...\n form = AddEditPostForm(request.POST, request.FILES, instance=post) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n post = form.save() # Save the object to the database\n slug_str = \"%s %s\" % (post.title, post.date_posted) # Create a slug from the title and date\n post.slug = slugify(slug_str) # Create the slug\n post.save() # Save the object to the database\n return redirect('community-post-detail', slug=post.slug) # Redirect to the detail page\n\n context = { # Pass the variables to the template\n 'task': task,\n 'post': post,\n 'form': form,\n }\n return render(request,\n 'pages/patient-community/community-create-update-post.html',\n context) # render the patient community update page", "def save():\n form_data = request.form.to_dict()\n if (form_data['release-at'] == 'Never'):\n form_data['release-at'] = None\n\n if not 'id' in form_data:\n r = requests.post(API_ROUTE, headers={'Auth': _auth()}, json=form_data)\n if r.status_code != requests.codes.created:\n return r.text, r.status_code\n else:\n r = requests.put(API_ROUTE + '/' + str(request.form['id']), headers={'Auth': _auth()}, json=form_data)\n if r.status_code != requests.codes.ok:\n return r.text, r.status_code\n\n return redirect(url_for('index'), code=278)", "def customer_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n customer_reference = get_object_or_404(Customer, id=id,company=company)\n customer_form = CustomerForm(instance=customer_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('customer_form.html',{'form':customer_form, 'info': customer_reference},context_instance=RequestContext(request))\n else:\n customer_form = CustomerForm(request.POST, instance=customer_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if customer_form.is_valid():\n customer_form.save()\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('customer_form.html', \n {'form': customer_form, 'form_errors': customer_form.errors, 'info': customer_reference},\n context_instance=RequestContext(request))", "def edit_product(request, pk):\n\n products = get_object_or_404(Product, pk=pk)\n if request.method == 'POST':\n form = ProductPostForm(request.POST, instance=products)\n if form.is_valid():\n product = form.save()\n return redirect(product_details, product.pk)\n else:\n form = ProductPostForm(instance=products)\n return render(request, 'editproduct.html', {'form': form})", "def test_shoppingitems_editing(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n 
self.shopping_class_obj.create_list(\n 'Easter', '[email protected]')\n # make a post request with the edit name and original name\n res = self.app.post(\n '/edit-item',\n data={'item_name_org': 'Bread', 'item_name': 'Juice', 'list_name': 'Easter'})\n self.assertEqual(res.status_code, 200)\n response = self.item_class_obj.edit_item(\n 'Juice', 'Bread', 'Easter', '[email protected]')\n # test response from shoppingitems class\n self.assertIsInstance(response, list)\n # check if edit was successful by looking for the edited name\n self.assertIn(\"Juice\", str(res.data))", "def item_handler(id):\n if request.method == 'PUT':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to update a item\n rq = request.get_json()\n name = rq['name']\n picture = rq['picture']\n description = rq['description']\n item = updateItem(id, name, picture, description)\n return jsonify(item=item.serialize)\n elif request.method == 'DELETE':\n # authorization\n if not checkAuthorization('Item', id, g.user.id):\n return (jsonify({'data': 'Unauthorized', 'error': '401'}), 401)\n # Call the method to remove a item\n item = deleteItem(id)\n return jsonify(item=item.serialize)", "def patch(self, item_id):\n\n try:\n\n data = request.json\n\n if data is None:\n raise NotImplementedError(\"No data\")\n\n controller = self.controller()\n data[\"id\"] = item_id\n data = controller.date_time_parser(data)\n schema = self.schema(many=False)\n raw_data = controller.update(**data)\n data = schema.dump(raw_data)\n\n return ResponseHandler.render_response(data=data)\n\n except Exception as ex:\n\n return ResponseHandler.render_response(status=ERR, message=traceback.format_exc())", "def create_item():\n name = request.form['name']\n catagory = request.form['catagory']\n description = request.form['description']\n errors = form_errors(request.form)\n if errors:\n catagories = [c.name for c in Catagory.fetch_all()]\n values = {\n 'name': name, 'catagory': catagory, 'description': description\n }\n return render_template(\n 'add_item.html',\n catagories=catagories,\n values=values,\n errors=errors\n )\n Item.create(name, catagory_name=catagory, description=description)\n return redirect(url_for(\n 'read_item', catagory_name=catagory, item_name=name\n ))", "def issueUpdateView(context, issue):\n\n user = context.get('user')\n\n if not user.has_perm('IssueTracker.can_change'):\n return \"\"\n\n if issue.item:\n item = issue.item.item\n \n args = {\n \"form\": forms.UpdateMachineForm(instance=item),\n }\n\n return render_to_string('issueUpdate.html', args, context)\n\n return \"\"", "def restaurantMenuItemEdit(restaurant_id, menu_id):\n try:\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n menuItem = session.query(MenuItem).filter_by(id=menu_id).one()\n if request.method == 'POST':\n if request.form['name']:\n menuItem.name = request.form['name']\n if request.form['description']:\n menuItem.description = request.form['description']\n if request.form['price']:\n menuItem.price = request.form['price']\n if request.form['course']:\n menuItem.course = request.form['course']\n\n session.add(menuItem)\n session.commit()\n\n flash('Menu Item Successfully Edited', 'menu')\n return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))\n else:\n return render_template('menuItemEdit.html', menuItem=menuItem, restaurant=restaurant)\n\n except exc.NoResultFound:\n return redirect(url_for('restaurantMenu', 
restaurant_id=restaurant_id))", "def test_update_item_incorrect_id(test_client, item):\n\n response = test_client.put(BAD_ITEM_URL,\n data=json.dumps(item),\n content_type='application/json')\n\n data = json.loads(response.get_data())\n\n assert response.status_code == 404\n assert data['error'] == app.NOT_FOUND", "def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n self.use_template(self.render_template())\n return self.form_valid(form)\n else:\n return self.form_invalid(form)", "def newItem():\n if request.method == 'POST':\n if not checkLogin():\n return requests(url_for('catelog'))\n\n if request.form['name'].strip() == '':\n flash('item create failed: name is empty!')\n return redirect(url_for('newItem'))\n\n category = session.query(\n Category).filter_by(\n name=request.form['category']).one()\n\n ifCategory = session.query(Category).filter_by(\n name=request.form['category']).one()\n ifItem = session.query(Item).filter_by(\n category_id=ifCategory.id,\n name=request.form['name']).all()\n if (len(ifItem) > 0):\n flash('item create failed: item(%s) \\\n is already exist in category(%s)' % (\n ifItem[0].name,\n ifCategory.name))\n return redirect(url_for('catelog'))\n\n newItem = Item(\n name=request.form['name'],\n description=request.form['description'],\n category=category,\n auth=getLoginUser(),\n time=getIntTime())\n session.add(newItem)\n session.commit()\n\n flash('new item created: %s' % newItem.name)\n\n return redirect(url_for(\n 'itemDetail',\n category_name=category.name,\n item_name=newItem.name))\n else:\n all_category = session.query(Category).all()\n return render_template(\n 'new-item.html',\n all_category=all_category,\n isLogin=checkLogin())", "def update(entry_id):\n entry = models.Journal.select().where(\n models.Journal.id == entry_id).get()\n form = forms.JournalForm() # if the form validates\n if form.validate_on_submit(): # if click update button\n entry.title = form.title.data\n entry.date = form.date.data\n entry.time_spent = form.time_spent.data\n entry.learnt = form.learnt.data\n entry.resources = form.resources.data\n entry.save() # commit the changes\n flash('Entry has been updated', 'success')\n return redirect(url_for('detail', entry_id=entry.id))\n elif request.method == 'GET': # fill the form with current data\n form.title.data = entry.title\n form.date.data = entry.date\n form.time_spent.data = entry.time_spent\n form.learnt.data = entry.learnt\n form.resources.data = entry.resources\n return render_template('update.html', form=form)", "def edit_from_list(id=None,item_id=None):\n setExits()\n #import pdb;pdb.set_trace()\n \n item_id=cleanRecordID(item_id)\n item_rec = None\n rec = None\n warehouses = Warehouse(g.db).select()\n trx_types = get_site_config().get('trx_types',['Add','Remove',])\n transaction = Transaction(g.db)\n trx_id = cleanRecordID(id)\n if trx_id > 0:\n rec = transaction.get(trx_id)\n \n if rec:\n item_id = rec.item_id\n else:\n rec = transaction.new()\n rec.created = local_datetime_now()\n if 'last_trx' in session:\n transaction.update(rec,session['last_trx'])\n \n # Handle Response?\n if request.form:\n #import pdb;pdb.set_trace()\n error_list=[]\n transaction.update(rec,request.form)\n if save_record(rec,error_list):\n return \"success\" # the success function looks for this...\n else:\n pass\n \n \n if item_id > 0:\n item_rec = Item(g.db).get(item_id)\n \n if not item_rec:\n flash(\"This is not a valid item id\")\n return \"failure: This is not a valid item id.\"\n else:\n 
rec.item_id=item_id\n \n \n return render_template('trx_edit_from_list.html',rec=rec,current_item=item_rec,warehouses=warehouses,trx_types=trx_types)", "def adminedit(object, id):\n\n db = get_db()\n\n if request.method == \"POST\":\n execute_string = 'UPDATE ' + object.title() + \" SET \"\n\n if object == 'post':\n execute_string += 'title = \"' + request.form['title'] + '\", content = \"' + request.form['content'] + '\", authorId = ' + request.form[\"authorid\"] + ', categoryId = ' + request.form[\"categoryid\"] + ''\n elif object == 'author':\n execute_string += 'name = \"' + request.form['name'] + '\"'\n elif object == 'category':\n execute_string += 'name = \"' + request.form['name'] + '\", description = \"' + request.form['description'] + '\"'\n\n execute_string += \" WHERE id = \" + str(id)\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n execute_string = \"SELECT * FROM \" + object.title() + \" WHERE id = \" + str(id)\n item = db.execute(execute_string).fetchone()\n\n return render_template(\"new.html\", object=object, item=item)", "def item_status(item_id):\n\n item_completed = request.form.get(\"item_completed\", \"off\")\n list_id = request.form[\"list_id\"]\n\n item_completed = item_completed == \"on\"\n\n to_do_item = ToDoItem.query.get(item_id)\n to_do_item.completed = item_completed\n db.session.commit()\n\n return redirect(f\"/lists/{list_id}\")", "def update_product_form(productId, name=None, status=None): # noqa: E501\n return 'do some magic!'", "def certification_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n certification_reference = get_object_or_404(Certification, id=id,company=company)\n certification_form = CertificationForm(instance=certification_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('certification_form.html',{'form':certification_form, 'info': certification_reference},context_instance=RequestContext(request))\n else:\n certification_form = CertificationForm(request.POST, instance=certification_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if certification_form.is_valid():\n certification_form.save()\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('certification_form.html', \n {'form': certification_form, 'form_errors': certification_form.errors, 'info': certification_reference},\n context_instance=RequestContext(request))", "def updateItem(self, item, values):\n print ('Updating item: ' + unicode(item))\n item = int(item) #Importante: Para evitar que se caiga la api de PODIO más adelante\n message = self._client.Item.update(item, {'fields':values})\n return message", "def process_post_edit(user_id, post_id):\n\n title = request.form.get('title')\n content = request.form.get('content')\n\n post = Post.query.get_or_404(post_id)\n\n post.title = title\n post.content = content\n\n db.session.add(post)\n db.session.commit()\n\n return redirect(f'/users/{user_id}/posts/{post_id}')", "def newItem():\n if request.method == 'POST':\n db.createItem(\n title=request.form['title'],\n description=request.form['description'],\n 
category_id=request.form['category'],\n user_id=login_session['user_id'])\n flash(\"New catalog item created!\", 'success')\n return redirect(url_for('showCatalog'))\n return render_template('new_item.html', categories=db.getAllCategories())", "def task_update(request, id=None):\n instance = get_object_or_404(Todo, id=id)\n print(instance)\n print(instance)\n form = TaskForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n \n return redirect('lists:alllist')\n\n context = {\n \"desription\": instance.description,\n \"instance\": instance,\n \"form\":form,\n }\n return render(request, \"lists/update_task.html\", context)", "def update_swms(request, pk):\n swms = Swms.objects.get(id=pk)\n swms_form = SwmsForm(instance=swms)\n\n context = {\n 'swm_form': swms_form\n }\n\n if request.method == 'POST':\n form = SwmsForm(request.POST)\n\n if form.is_valid():\n print(\"Valid Form\")\n form.save()\n\n return render(request, 'swms_form/swms_form.html', {'swm_form': form})\n else:\n print(\"Invalid Form\")\n print(form.errors)\n\n return render(request, 'swms_form/swms_form.html', {'swm_form': form})\n\n return render(request, 'swms_form/swms_form.html', context)", "def delete_item(item_name):\n\n item = Item.query.filter_by(name=item_name).first_or_404()\n if item.owner != current_user:\n flash(\"Failed to delete item %s since you are not the owner.\" %\n item.name)\n return redirect(url_for('.index'))\n\n form = DeleteForm()\n if form.validate_on_submit():\n try:\n db.session.delete(item)\n db.session.commit()\n except:\n flash((\"Failed to delete item \\\"%s\\\".\") % item.name)\n else:\n flash(\"Item \\\"%s\\\" has been deleted.\" % item.name)\n finally:\n return redirect(url_for('.index'))\n return render_template('delete.html', form=form, name=item_name)", "def put(self, form_id):\n updated_form = Form.query.get(form_id)\n if not updated_form:\n return {'error': 'Does not exist.'}, status.HTTP_404_NOT_FOUND\n try:\n updated_data = FORM_SCHEMA.load(request.json).data\n except ValidationError as err:\n APP.logger.error(err.args)\n return err.messages, status.HTTP_400_BAD_REQUEST\n\n for key, value in updated_data.items():\n setattr(updated_form, key, value)\n DB.session.commit()\n return Response(status=status.HTTP_200_OK)", "def update_category_item(catalog_item_id):\n edited_item = session.query(CatalogItem). 
\\\n filter_by(id=catalog_item_id).one()\n if request.form['name']:\n edited_item.name = request.form['name']\n if request.form['description']:\n edited_item.description = request.form['description']\n if request.form['price']:\n edited_item.price = request.form['price']\n session.add(edited_item)\n session.commit()", "def form_valid(self, form):\n\n ai = form.save(\n token=self.request.session.get('token', False),\n aiid=self.kwargs.get('aiid', '')\n )\n\n # Check if save was successful\n if ai['status']['code'] in [200, 201]:\n level = messages.SUCCESS\n redirect_url = HttpResponseRedirect(\n reverse_lazy(\n self.request.GET.get('next', self.success_url),\n kwargs={'aiid': ai.get('aiid', self.kwargs.get('aiid'))}\n )\n )\n else:\n level = messages.ERROR\n redirect_url = self.render_to_response(\n self.get_context_data(form=form)\n )\n\n messages.add_message(self.request, level, ai['status']['info'])\n\n return redirect_url", "def edit_view(request, title, modelform, instance=None, **kwargs):\n instance_form = modelform(request.POST or None, instance=instance)\n if instance_form.is_valid():\n instance = instance_form.save()\n messages.success(request, _(\"%s was edited.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Edit\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )", "def edit(article_id):\r\n response = table.get_item(\r\n Key={'article_id': article_id}\r\n )\r\n data = response.get('Item')\r\n\r\n if data is None:\r\n flash('Unable to get Article')\r\n return redirect(url_for('article.list'))\r\n\r\n form = ArticleForm(title=data.get('title'), description=data.get('description'))\r\n\r\n # Check request method and validate form\r\n if request.method == 'POST' and form.validate():\r\n data = {}\r\n data['article_id'] = article_id\r\n data['title'] = form.title.data\r\n data['description'] = form.description.data\r\n\r\n data = dict((k, v) for k, v in data.items() if v)\r\n\r\n # Save data in DynamoDb to update table\r\n response = table.put_item(Item=data)\r\n\r\n if response:\r\n flash('Article is successfully updated')\r\n return redirect(url_for('article.list'))\r\n \r\n return render_template('article/form.html', add_article=False,\r\n form=form, title='Edit Article', article_id=article_id)", "def pet_detail_edit(pet_id):\n\n pet = Pet.query.get_or_404(pet_id)\n form = PetEditForm(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n flash(f\"Pet{pet_id} updated!\")\n return redirect(f\"/{pet_id}\")\n\n else:\n return render_template(\"pet_detail.html\", form=form, pet=pet)", "def product_image_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n image_reference = get_object_or_404(Product, id=id,company=company)\n product_image_form = ProductImageForm(instance=image_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('pictures.html',{'form':product_image_form },context_instance=RequestContext(request))\n else:\n product_image_form = ProductImageForm(request.POST, request.FILES, instance=image_reference)\n #if is POST Validates the form is well filled and save it 
redirecting to the company page \n if product_image_form.is_valid():\n product_image_form.save()\n\n\n # To FIX\n return HttpResponseRedirect('/company/%s/edit/' % str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('pictures.html', \n {'form': product_image_form, 'form_errors': product_image_update.errors},\n context_instance=RequestContext(request))", "async def updateAppProduct(self, item_id=None, body=\"\"):\n payload = {}\n \n if item_id:\n payload[\"item_id\"] = item_id\n \n\n # Parameter validation\n schema = CatalogValidator.updateAppProduct()\n schema.dump(schema.load(payload))\n \n # Body validation\n from .models import ApplicationItemMeta\n schema = ApplicationItemMeta()\n schema.dump(schema.load(body))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/product/{item_id}/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"Id of the company associated to custom meta.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"application id for which the custom_meta is associated.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"item_id\",\"description\":\"product id for which the custom_meta is associated.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"Id of the company associated to custom meta.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"application id for which the custom_meta is associated.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"item_id\",\"description\":\"product id for which the custom_meta is associated.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", item_id=item_id)\n query_string = await create_query_string(item_id=item_id)\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"PATCH\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"patch\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/product/{item_id}/\", item_id=item_id), query_string, headers, body, exclude_headers=exclude_headers), data=body)", "def update_recipe(db_id):\r\n\r\n # validates request form\r\n form = request.form\r\n appliance_list = request.form.getlist('appliance_categories')\r\n error_list = validate_form(form, 'recipe')\r\n\r\n if error_list == []:\r\n # validates image URL\r\n image_URL = validate_image(form['img_link'])\r\n\r\n # keeps the old category name in case of change\r\n previous_category = mongo.db.recipes.find_one(\r\n {'_id': ObjectId(db_id)})['category']\r\n\r\n # updates recipe\r\n mongo.db.recipes.update(\r\n {'_id': ObjectId(db_id)}, {'$set': {\r\n 'title': request.form.get('title'),\r\n 'category': request.form.get('category'),\r\n 'ingredients': request.form.get('ingredients').split('\\n'),\r\n 'method': request.form.get('method').split('\\n'),\r\n 
'appliances': request.form.getlist('appliance_categories'),\r\n 'img_link': image_URL,\r\n 'servings': int(request.form.get('servings'))}\r\n }\r\n )\r\n\r\n # updates counter in the old category (the recipe was taken from)\r\n update_quantity_in_category(previous_category)\r\n\r\n # updates counter in the new category (the recipe was moved to)\r\n update_quantity_in_category(request.form.get('category'))\r\n\r\n # redirects to the recipes in the same category\r\n return redirect(url_for(\r\n 'search',\r\n collection='recipes',\r\n find=request.form.get('category'))\r\n )\r\n\r\n else:\r\n # initializes page title and header\r\n page_title = 'Update recipe'\r\n page_header = 'Update a recipe:'\r\n\r\n # sends error list back to the form to correct mistakes\r\n return render_template(\r\n 'edit_form.html',\r\n collection=mongo.db.recipe_categories.find().sort('name'),\r\n recipe=mongo.db.recipes.find_one({'_id': ObjectId(db_id)}),\r\n categories=mongo.db.appliance_categories.find().sort('name'),\r\n errors=error_list,\r\n form=form,\r\n appliance_list=appliance_list,\r\n page_title=page_title,\r\n page_header=page_header\r\n )", "def newItem(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n if request.method == 'POST':\n newItem = Item(name=request.form['name'],\n description=request.form['description'],\n price=request.form['price'], category_id=category.id,\n user_id=login_session['user_id'])\n session.add(newItem)\n session.commit()\n flash('New Item %s Successfully Created' % (newItem.name))\n return redirect(url_for('showItem', category_id=category.id))\n else:\n return render_template('newitem.html', category_id=category.id)" ]
[ "0.72878253", "0.71200603", "0.67693913", "0.6749517", "0.6684326", "0.66837543", "0.66720814", "0.66635346", "0.66288364", "0.65429723", "0.6519543", "0.6503763", "0.6453958", "0.6416763", "0.63206726", "0.63055027", "0.62442046", "0.6199904", "0.6198838", "0.6180717", "0.6051199", "0.6028449", "0.60173297", "0.59917617", "0.5947581", "0.59177977", "0.5820349", "0.5790988", "0.5761132", "0.5759848", "0.57189065", "0.57116216", "0.56872755", "0.5680337", "0.5678305", "0.5671047", "0.5671016", "0.56678516", "0.56287783", "0.56281185", "0.5621123", "0.56096077", "0.56086993", "0.5587554", "0.5570709", "0.55634123", "0.55630565", "0.55583435", "0.5551427", "0.5546342", "0.551975", "0.5508549", "0.5506222", "0.5505318", "0.55036396", "0.5496683", "0.548787", "0.5485128", "0.54702353", "0.5466879", "0.5466874", "0.54629683", "0.54569304", "0.54556924", "0.54555386", "0.5452014", "0.5434994", "0.5428074", "0.5426756", "0.5422215", "0.5421053", "0.5417757", "0.5401799", "0.53926104", "0.53914964", "0.53776526", "0.5366452", "0.5362755", "0.536041", "0.5339486", "0.5336684", "0.5332823", "0.5331314", "0.5325906", "0.53136337", "0.53110933", "0.53056604", "0.52990943", "0.52967334", "0.5295031", "0.5293318", "0.52885705", "0.5280977", "0.5270959", "0.52682", "0.52657104", "0.5248907", "0.5245879", "0.5227819", "0.5220166" ]
0.6661015
8
Endpoint to display confirm delete item page.
def delete_item_page(item_name, catagory_name): return render_template( 'delete_item.html', item_name=item_name, catagory_name=catagory_name )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_item_details(item_id):\n item = is_user_the_creator(item_id)\n item_name = item.Item.name\n if request.method == 'GET':\n return render_template('item_delete_confirm.html', item_name=item_name, item_id=item_id,\n login_session=login_session,\n csrf_token=generate_csrf_token())\n else:\n session.delete(item.Item)\n session.commit()\n flash(item_name + \" deleted\")\n return redirect(url_for('show_homepage'))", "def delete(self, item):\n self._createAction(item, \"delete\")", "def get_delete_confirmation_form(self, data):\n self.add_success(data)\n rv = self.get((data[self.id_field], self.delete_url))\n assert not is_404(rv)\n assert in_response(rv, 'Delete {}'.format(data[self.name_field]))\n return rv", "def render_delete_item_page(\r\n self, client_id, state, user_id, user_name, item_id):\r\n item = self._db_manager.get_item(item_id)\r\n if item is None:\r\n flash(\"Invalid item.\")\r\n return\r\n if item[\"user_id\"] != user_id:\r\n flash(\"Only the original creator can delete an item.\")\r\n return\r\n category = self._db_manager.get_category(item[\"category_id\"])\r\n if category is None:\r\n flash(\"Sorry, something went wrong.\")\r\n return\r\n return render_template(\r\n \"item_delete.html\",\r\n client_id=client_id,\r\n state=state,\r\n is_logged_in=True,\r\n user_name=user_name,\r\n category=category[\"name\"],\r\n item=item\r\n )", "def delete(request, itemID):\n\ttry:\n\t\titem = get_object_or_404(Item, itemID = itemID)\n\n\t# Handle when the given itemID is not a UUID\n\texcept ValidationError:\n\t\traise Http404\n\n\tif item.seller.user != request.user:\n\t\traise Http404\n\t\n\titem.delete()\n\treturn HttpResponseRedirect(reverse('tailored:index'))", "def delete():\n click.echo('delete was called.')", "def delete(self, request, *args, **kwargs):\r\n self.object = self.get_object()\r\n success_url = self.get_success_url()\r\n self.object.delete()\r\n messages.success(self.request, self.success_message)\r\n return HttpResponseRedirect(success_url)", "def delete_view(self, request, object_id):\r\n obj = self.get_object(unquote(object_id))\r\n obj.delete()\r\n return HttpResponse(\"Deleted\")", "def delete(self, request , pk=None): \n return Response({'message':'DELETE'})", "def delete(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:\n self.object = self.get_object()\n success_url = self.get_success_url()\n success_message = _(f'Successfully deleted todo list: {self.object}')\n\n self.object.delete()\n\n messages.success(request=request, message=success_message)\n\n return redirect(success_url)", "def view_delete():\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\", \"json\")\n )", "def delete_item(id):\n return '', 201", "def delete_item(request, product_id):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, you are not permitted to do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'You have deleted the item!')\n return redirect(reverse('items'))", "def delete(self, request, *args, **kwargs):\n\t\ttask_object = self.get_object()\n\t\tsuccess_url = self.get_success_url()\n\t\ttask_object.is_deleted =1\n\t\ttask_object.save()\n\t\treturn HttpResponseRedirect(success_url)", "def deleteItem(category_item_id):\n itemToDelete = db.findItem(id=category_item_id)\n if itemToDelete.user_id != login_session['user_id']:\n return not_authorized()\n if request.method == 'POST':\n 
db.deleteItem(itemToDelete)\n flash('%s Successfully Deleted' % itemToDelete.title, 'success')\n return redirect(url_for('showCatalog'))\n return render_template('delete_item.html', item=itemToDelete)", "def delete(item_id):\n session = current_app.config['db']\n if request.method == \"POST\":\n used = session.query(func.count(WineType.id).label('count'))\\\n .filter_by(abv_id=item_id).scalar()\n item = session.query(WineABV).filter_by(id=item_id).one()\n c_name = item.name\n if used == 0:\n session.delete(item)\n session.commit()\n flash(\"Successfully Deleted '%s'\" % (c_name,), 'success')\n else:\n flash(\"'%s' is still in use and cannot be deleted.\" % (c_name,),\n 'danger')\n return redirect(url_for('.show'))\n\n else:\n item = session.query(WineABV).filter_by(id=item_id).one()\n return render_template(template_prefix+'delete_form.html', item=item)", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def confirm_delete(self):\n self.language = LANGUAGE.get(self.lang)\n message = Message(self.language[\"del_user\"], self.language[\"del_info\"])\n delete_message = message.create_question_message(self.language[\"yes\"])\n response = delete_message.exec()\n\n if response == QMessageBox.Yes:\n self.delete_user()\n elif response == QMessageBox.No:\n delete_message.close()", "def delete(request):\n return render(request, 'modify.html')", "def test_commentary_view_delete(self):\n \n test_response = self.client.get('/papers/commentary/1/delete')\n self.assertEqual(test_response.status_code, 200)\n self.assertTrue('object' in test_response.context) \n self.assertTemplateUsed(test_response, 'base.html')\n self.assertTemplateUsed(test_response, 'confirm_delete.html')", "def delete():\n return render_template('layout.html')", "def delete(request, message_id):\n return HttpResponse(\"error\")", "def _deleteItemMsgBox(self, files: List[QModelIndex]) -> int:\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Confirm delete\")\n msgBox.setIcon(QMessageBox.Warning)\n msgBox.setWindowIcon(QIcon(':delete.png'))\n filename = files[0].data() if len(files) == 1 else None\n msgText = f\"Are you sure to delete '{filename}'?\" if filename else f\"Are you sure to delete {len(files)} items?\"\n msgBox.setText(msgText)\n msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n\n return msgBox.exec()", "def post_delete(self, *args, **kw):\n #obtenemos el id de la fase para hacer el filtrado despues de la redireccion\n item_to_del = DBSession.query(Item).filter_by(id_item=args[0]).one()\n fid = item_to_del.id_fase_fk\n pks = self.provider.get_primary_fields(self.model)\n d = {}\n for i, arg in enumerate(args):\n d[pks[i]] = arg\n self.provider.delete(self.model, d)\n\n path = './' + '../' * (len(pks) - 1) + '?fid=' + str(fid)\n\n redirect(path)", "def delete_product_view(request, id):\n Product.objects.get(id=id).delete()\n messages.success(request, \"Product deleted successfully.\")\n return redirect(\"products\")", "def delete_item(item_name):\n\n item = Item.query.filter_by(name=item_name).first_or_404()\n if item.owner != current_user:\n flash(\"Failed to delete item %s since you are not the owner.\" %\n item.name)\n return redirect(url_for('.index'))\n\n form = DeleteForm()\n if form.validate_on_submit():\n try:\n db.session.delete(item)\n db.session.commit()\n except:\n flash((\"Failed to delete item \\\"%s\\\".\") % item.name)\n else:\n flash(\"Item \\\"%s\\\" has been deleted.\" % item.name)\n finally:\n return redirect(url_for('.index'))\n return 
render_template('delete.html', form=form, name=item_name)", "def delete(self, *args, **kwargs):\n return self.handle_delete_request()", "def deleteMenuItemPage(restaurant_id, item_id):\n item = db_methods.searchItemByID(item_id)\n res_id = item.restaurant_id\n error = item.name + \" has been deleted from the restaurant menu.\"\n db_methods.deleteMenuItem(item_id)\n return render_template('deleteitem.html', error = error, res_id = res_id, \n item = item)", "def admindelete(object, id):\n db = get_db()\n execute_str = 'DELETE FROM ' + object + ' WHERE id = ' + str(id)\n db.execute(execute_str)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))", "def delete_item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to delete the item because\n # the item was created by a different user, send a 403\n elif current_user != item.user:\n abort(403)\n\n form = DeleteItemForm()\n\n # If the form is submitted, delete the item from the database,\n # send a flash message, and redirect home\n if form.validate_on_submit():\n db.session.delete(item)\n db.session.commit()\n flash(f'\"{item.name}\" has been deleted.', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('delete_item.html', item=item, form=form)", "def action_delete():\n try:\n deleted = delete_notification()\n except:\n raise HTTPResponse(body=\"Unexpected error\", status=400)\n \n if deleted:\n return dict(msg=\"Notification deleted\")\n else:\n return dict(msg=\"No notification to delete\")", "def delete(self, request, *args, **kwargs):\n # We need to get the object ID before deleting the object as\n # it's needed in success template.\n self.object = self.get_object()\n context = self.get_success_context_data()\n\n # This should delete the object and return a HttpResponseRedirect\n response = super().delete(request, *args, **kwargs)\n\n success_template_name = self.get_success_template_name()\n if request.is_turbo and success_template_name:\n return TemplateResponse(request, success_template_name, context)\n\n return response", "def transaction_delete(request, transaction_id, model_class=Transaction, template_name='budget/transactions/delete.html'):\n transaction = get_object_or_404(Transaction.active.all(), pk=transaction_id)\n if request.POST:\n if request.POST.get('confirmed'):\n transaction.delete()\n return HttpResponseRedirect(reverse('budget_transaction_list'))\n return render_to_response(template_name, {\n 'transaction': transaction,\n }, context_instance=RequestContext(request))", "def delete_item(request, shoppinglist_id, item_id):\n Item.objects.filter(pk=item_id,\n shoppinglist__pantry__owner=request.user).delete()\n return redirect('shoppinglists.views.detail', shoppinglist_id)", "def delete(self):\n self.request().delete()", "def delete(request, todo_id):\n\n todo = get_object_or_404(Todo, pk=todo_id)\n todo.delete()\n\n return redirect('index')", "def collection_delete_confirm_btn(self):\n collection_delete_confirm_btn_sitem = self.locator_finder_by_xpath(self.collection_delete_confirm_btn_id)\n collection_delete_confirm_btn_sitem.click()\n time.sleep(1)", "def delete_template(self):\n return '{}/{}.html'.format(self.object_name, self.delete_endpoint)", "def help_delete(self):\n print(DELETE)", "def test_delete(admin_client):\n book = BookFactory()\n url = reverse(\"admin:books_book_delete\", args=(book.pk,))\n\n response = 
admin_client.get(url)\n templates_used = [t.name for t in response.templates]\n\n assert response.status_code == 200\n render_counts = {x: templates_used.count(x) for x in set(templates_used)}\n\n # The number of times each template was rendered\n assert render_counts == {\n \"admin/delete_confirmation.html\": 1,\n \"admin/base_site.html\": 1,\n \"admin/base.html\": 1,\n \"admin/includes/object_delete_summary.html\": 1,\n \"jazzmin/includes/ui_builder_panel.html\": 1,\n }\n\n # The templates that were used\n assert set(templates_used) == {\n \"admin/delete_confirmation.html\",\n \"admin/base_site.html\",\n \"admin/base.html\",\n \"admin/includes/object_delete_summary.html\",\n \"jazzmin/includes/ui_builder_panel.html\",\n }\n\n response = admin_client.post(url, data={\"post\": \"yes\"}, follow=True)\n\n # We deleted our object, and are now back on the changelist\n assert not Book.objects.all().exists()\n assert response.resolver_match.url_name == \"books_book_changelist\"", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)", "def test_publication_view_delete(self):\n \n test_response = self.client.get('/papers/14-3-3-proteins-a-number-of-functions-for-a-numbered-protein/delete/')\n self.assertEqual(test_response.status_code, 200)\n self.assertTrue('publication' in test_response.context) \n self.assertTemplateUsed(test_response, 'confirm_delete.html')\n self.assertEqual(test_response.context['publication'].pk, 1)\n self.assertEqual(test_response.context['publication'].title, u'14-3-3 proteins: a number of functions for a numbered protein.')\n\n #verifies that a non-existent object returns a 404 error.\n null_response = self.client.get('/papers/not-a-real-paper/delete/')\n self.assertEqual(null_response.status_code, 404)", "def get_absolute_url(self):\n return reverse('order_items:order_item_delete', args=[str(self.id)])", "def task_delete(request, tasklist_id):\n tasklist = get_object_or_404(Todo, pk=tasklist_id)\n tasklist.delete()\n print(tasklist)\n messages.success(request, \"Successfully deleted\")\n return redirect('lists:alllist')", "def delete(request):\n paste = Paste.get(request.matchdict['idContent'])\n\n password = _buildPassword(paste.username,\n paste.created,\n request.POST['password'])\n\n if password == paste.password:\n\n paste.delete()\n\n request.session.flash(u\"Deleted\") # TODO translatoion\n\n return HTTPFound(request.route_path('home', ))\n\n request.session.flash(u\"Wrong password\") # TODO translatoion\n\n return HTTPFound(request.route_path('deleteConfirm', idContent=paste._id))", "def todo_delete(request, todo_id):\n todo = get_object_or_404(TodoItem, pk=todo_id)\n event_ident = todo.event.get_ident()\n todo.delete()\n\n messages.success(request, 'TODO was deleted successfully.',\n extra_tags='todos')\n\n return redirect(event_details, event_ident)", "def deleteItem(category_id, item_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n session = DBSession()\r\n deletedItem = session.query(Item).filter_by(id=item_id).one()\r\n if deletedItem.user_id != login_session['user_id']:\r\n return \"<script>function myFunction() {alert('You are not authorized to delete this item.');}</script><body onload='myFunction()''>\"\r\n if request.method == 'POST':\r\n session.delete(deletedItem)\r\n session.commit()\r\n return redirect(url_for('showCategoryItems', category_id=category_id))\r\n else:\r\n return 
render_template('deleteitem.html', item=deletedItem)", "def delete_item(item_name, catagory_name):\n try:\n item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name)\n except NoResultFound:\n abort(404)\n item.delete()\n return redirect(url_for('home'))", "def delete_product(request, id):\n\n return render(request, \"core/delete_product.html\", {\n \"object\": Product.objects.get(id=id)\n })", "def delete_item(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n Item.user_id == current_user.id\n ).first()\n\n if not item:\n flash(\"Couldn't find the item\", category='warning')\n return redirect(request.referrer)\n\n item_name = item.name\n db.session.delete(item)\n db.session.commit()\n flash(\n \"Successfully deleted item '{}'\".format(item_name),\n \"success\")\n\n return redirect(url_for('url.index'))", "def delete(id):\n result = delete_post(id)\n flash(result)\n return redirect(url_for(\"show\"))", "def deleteConfirm(request):\n paste = Paste.get(request.matchdict['idContent'])\n\n if not(paste.username and paste.password):\n return HTTPFound(request.route_path('oneContent', idContent=paste._id))\n\n lexer = get_lexer_by_name(paste.typeContent, stripall=True)\n\n result = highlight(paste['content'], lexer, formatter)\n\n return {'paste': paste,\n 'content': result,}", "def delete(self, source_index):\r\n click_css(self, 'a.delete-button', source_index, require_notification=False)\r\n # Click the confirmation dialog button\r\n click_css(self, 'a.button.action-primary', 0)", "def _delete(self, pk, user=None):\n request = self.factory.delete(self.detail_url(pk), format='json')\n force_authenticate(request, user)\n resp = self.detail_view(request, pk=pk)\n resp.render()\n return resp", "def remove(self, obj, **kwargs):\n request = kwargs.pop('request')\n if self.has_delete_permission(request, obj):\n info = obj._meta.app_label, obj._meta.module_name\n delete_url = reverse('admin:%s_%s_delete' % info,\n args=(quote(obj.pk),\n quote(self.prescription.pk)))\n return ('<div><a href=\"%s\" class=\"inline-deletelink\"'\n 'title=\"Delete\"></a></div>') % delete_url\n else:\n return \"\"", "def delete_item(item_id):\n if 'userinfo' not in session.keys():\n session['target'] = url_for('delete_item', item_id=item_id)\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n sqlsession.delete(item)\n sqlsession.commit()\n return redirect(\"/\")\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item, Category).join(Category)\\\n .filter(Item.id == item_id).first()\n return render_template(\"delete_item.html\", item=item)", "def delete(self, *args, **kwargs):\n return 0", "def DELETE(self):\n ids = self.context.objectIds()\n self.context.manage_delObjects(ids)\n self.context.createTemplate()\n return self.request.response.setStatus(200)", "def delete_todo_list_view(request: HttpRequest, pk: int) -> HttpResponseRedirect:\n todo_list = TodoListModel.objects.get(id=pk)\n\n if request.method == 'GET':\n return render(request, 'todo/delete_todo_list.html', {'todo_list': todo_list})\n elif request.method == 'POST':\n success_message = _(f'Successfully deleted todo list: {todo_list}')\n\n todo_list.delete()\n\n messages.success(request=request, message=success_message)\n\n return redirect(reverse_lazy('todo:list_todo_lists'))", "def delete(self):\n\n if not self.context.model.is_editable():\n raise Unauthorized(\"Editing is not allowed\")\n\n # the agenda_item is ad hoc if it has 
a document but no proposal\n if self.agenda_item.has_document and not self.agenda_item.has_proposal:\n document = self.agenda_item.resolve_document()\n trasher = ITrashable(document)\n trasher.trash()\n\n self.agenda_item.remove()\n\n return JSONResponse(self.request).info(\n _(u'agenda_item_deleted',\n default=u'Agenda Item Successfully deleted')).dump()", "def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "async def delete(self):\n return await self.set_message(text='')", "def deleteProduct(request,productId):\n deleteObj = Collection()\n deleteObj.id=productId\n productBll.deleteProducts(deleteObj)\n return HttpResponseRedirect('/admin/product/list/')", "def delete():\n if request.method == 'POST':\n table_name = request.form['table']\n table = get_table_by_name(table_name)\n if 'id' not in request.form:\n user_data = get_data_from_table(table)\n\n return render_template(\n 'delete.html', table_name=table_name, user_data=user_data\n )\n\n if table is None:\n return 'Table not chosen?'\n\n success, reason = utils.delete_user(request.form['id'], table_name)\n\n if not success:\n return f'Error occurred trying to delete - {reason}'\n\n log(\n f\"<code>{current_user.name}</code> has deleted <code>{request.form['id']}</code> from {table_name}\"\n )\n return (\n f\"<code>{request.form['id']}</code> has been deleted from from {table_name}\"\n )\n return render_template('events.html', events=get_accessible_tables())", "def delete(id):\r\n\r\n db = get_db()\r\n b_id = session.get(\"user_id\")\r\n query = \"UPDATE product SET quantity = 0 WHERE product_id = ? 
AND for_business = ?\"\r\n db.execute(query, (id, b_id,))\r\n db.commit()\r\n return redirect(url_for(\"main.products\"))", "def event_delete(request, event_ident):\n try:\n event = Event.get_by_ident(event_ident)\n event.delete()\n\n messages.success(request,\n 'Event and its tasks were deleted successfully.')\n return redirect(reverse('all_events'))\n except ObjectDoesNotExist:\n raise Http404(\"No event found matching the query.\")\n except ProtectedError as e:\n return _failed_to_delete(request, event, e.protected_objects)", "def delete_place(place_id):\n place_db = mongo.db.places.find_one_or_404({'_id': ObjectId(place_id)})\n if request.method == 'GET':\n form = ConfirmDelete(data=place_db)\n return render_template('delete_restaurant.html', title=\"Delete Restaurant\", form=form)\n form = ConfirmDelete(request.form)\n if form.validate_on_submit():\n places_db = mongo.db.places\n places_db.delete_one({\n '_id': ObjectId(place_id),\n })\n return redirect(url_for('home'))\n return render_template('delete_restaurant.html', place=place_db, form=form)", "def delete_item(request):\n if request.json_body[u'type'] == u'post':\n if DBSession.query(Post).filter(Post.name==request.json_body[u'name']).delete() == 1:\n return {\"deletion_status\":\"success\"}\n import ipdb; impdb.set_trace()\n return {\"deletion_status\":\"error\"}", "def deleteItem(request, itemid):\n try:\n item = ItemSerializer(Item.objects.get(id=itemid))\n Item.objects.get(id=itemid).delete()\n return Response(item.data)\n\n except Item.DoesNotExist:\n fail = {\n \"item\":\"item does not exist\"\n }\n return JsonResponse(fail)", "def cmd_delete_employee():\r\n id = request.form.get('id', \"\")\r\n confirm = request.form.get(\"confirm\", \"\")\r\n if confirm != \"DELETE\":\r\n flash(f\"Contact '{id}' NOT deleted. 
Please enter DELETE in the confirm field.\")\r\n return redirect(url_for('main.jobs'))\r\n \r\n index = get_employee_by_id(id)\r\n User.query.filter(User.id == id).delete()\r\n db.session.commit()\r\n\r\n\r\n if index != None:\r\n flash(f\"Employee '{id}' was succesfully deleted!\")\r\n return redirect(url_for('main.employees'))\r\n else:\r\n flash(f\"Employee '{id}' was not found\")\r\n return redirect(url_for('main.employees'))", "def deleteItem(category_id, item_id):\n category = session.query(Category).filter_by(id=category_id).first()\n item = session.query(Item).filter_by(id=item_id).first()\n if item.user_id != login_session['user_id']:\n flash(\"You are authorised to delete items created by you!\")\n return redirect(url_for(\"showCatalog\"))\n if request.method == \"POST\":\n session.delete(item)\n session.commit()\n flash('%s Item Successfully Deleted' % (item.name))\n return redirect(url_for('showItem', category_id=item.category_id))\n else:\n return render_template(\"deleteitem.html\", item=item,\n category=category)", "def delete(self, item, request):\n\n assert (\n isinstance(item, Election)\n or isinstance(item, ElectionCompound)\n or isinstance(item, Vote)\n )\n\n url = request.link(item)\n url = replace_url(url, request.app.principal.official_host)\n for result in self.query().filter_by(url=url):\n self.session.delete(result)\n\n self.session.delete(item)\n self.session.flush()", "def delete_employee():\r\n id = request.args.get('id', \"\")\r\n return render_template(\"delete_employee.html\", id=id)", "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n user = request.user\n success_url = reverse_lazy('muxic:user', kwargs={'username': user.username})\n self.object.delete()\n return HttpResponseRedirect(success_url)", "def cmd_delete_job():\r\n id = request.form.get('id', \"\")\r\n confirm = request.form.get(\"confirm\", \"\")\r\n if confirm != \"DELETE\":\r\n flash(f\"Contact '{id}' NOT deleted. 
Please enter DELETE in the confirm field.\")\r\n return redirect(url_for('main.jobs'))\r\n \r\n index = get_job_by_id(id)\r\n Job.query.filter(Job.id == id).delete()\r\n db.session.commit()\r\n\r\n\r\n if index != None:\r\n flash(f\"Job '{id}' was succesfully deleted!\")\r\n return redirect(url_for('main.jobs'))\r\n else:\r\n flash(f\"Job '{id}' was not found\")\r\n return redirect(url_for('main.jobs'))", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete(device):\n delete_subject(device)\n return redirect_back('index')", "def delete(self):\n self.method = \"DELETE\"\n self.send()", "def delete(self):\n # type: () -> BoundAction\n return self._client.delete(self)", "def item_delete(request):\n if request.method == 'POST':\n item_to_delete = get_object_or_404(StockItem, pk=request.POST['id'])\n item_to_delete.active = False\n item_to_delete.save()\n return HttpResponse(status=200)", "def delete(self):\r\n return http.Request('DELETE', '{0}'.format(\r\n self.get_url())), parsers.parse_json", "def deleteIngredient():\n \n if request.method == \"POST\":\n \n id = request.form.get('id')\n\n con = db_connect()\n cur = con.cursor()\n cur.execute(\"DELETE FROM ingredients WHERE id = :id \", {'id': id})\n con.commit()\n con.close()\n\n flash('Ingredient deleted')\n return redirect(url_for('ingredients.showIngredients'))", "def delete_selected_tree(self, modeladmin, request, queryset):\n # If this is True, the confirmation page has been displayed\n if request.POST:\n n = 0\n with queryset.model._tree_manager.delay_mptt_updates():\n for obj in queryset:\n if self.has_delete_permission(request, obj):\n obj.delete()\n n += 1\n obj_display = force_text(obj)\n self.log_deletion(request, obj, obj_display)\n self.message_user(\n request,\n _('Successfully deleted %(count)d items.') % {'count': n})\n # Return None to display the change list page again\n return None\n else:\n # (ab)using the built-in action to display the confirmation page\n return delete_selected(self, request, queryset)", "def delete(request):\n if request.method == \"POST\":\n Books.objects.get(isbn=request.POST['delete_book']).delete()\n return redirect('libros:home')", "def deleteListItem(category_id, item_id):\n\n if 'username' not in login_session:\n return redirect('/login')\n\n category = session.query(Category).filter_by(id=category_id).one()\n itemToDelete = session.query(ListItem).filter_by(id=item_id).one()\n\n if category.user_id != login_session['user_id']:\n flash('You are not the creator of %s category, and cannot modify it' %\n category.name)\n return redirect(url_for('showItems', category_id=category.id))\n\n if request.method == 'POST':\n if \"btn_delete\" in request.form:\n session.delete(itemToDelete)\n session.commit()\n flash('Catalog Item Successfully Deleted')\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return render_template('deleteitem.html',\n item=itemToDelete,\n user=getUserInfo(login_session['user_id']))", "def confirm(request):\n return render(request,\"confirm.html\")", "def test_delete_item_using_delete(self):\n pass", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def delete(self):\n response = settings.database.delete_item(Key={'id': str(self.id)})\n raise_for_response(response)", "def deleteItem(category_id, item_id):\n editedItem = session.query(Item).filter_by(id=item_id).one()\n if editedItem.user_id 
!= login_session['user_id']:\n flash('You are not authorized to delete this Item.\\\n Please create own Category with items in order to \\\n delete items.')\n return redirect(url_for('showallCategories'))\n if request.method == 'POST':\n session.delete(editedItem)\n session.commit()\n flash('Item Deletion successfull')\n return redirect(url_for('showCategory', category_id=category_id))\n else:\n return render_template(\n 'deleteItem.html', category_id=category_id,\n item_id=item_id, item=editedItem)", "def confirm():\n if request.method == 'POST':\n user_type = session.get('type', None)\n if user_type == 'Admin':\n return redirect('/index')\n elif user_type == 'Client':\n return redirect('/clients/' + session.get('name'))\n else:\n return redirect('/')\n\n confirmed = request.values['confirmed']\n \n return render_template('confirm.html', confirmed=confirmed)", "def remove_item_page(request):\n validate(instance=request.body, schema=item_schema_remove)\n body = json.loads(request.body)\n Item.remove_item(body['item_id'])\n return HttpResponse('success')", "def delete(self, name):\n global items\n items = _Helper.all_item_except_searching_for(name)\n return {\"message\": f\"Item {name} deleted successfully\"}, 204", "def delete_item(self, id: str, user: User) -> bool:", "def delete(request, shoppinglist_id):\n Shoppinglist.objects.filter(pk=shoppinglist_id,\n pantry__owner=request.user).delete()\n return redirect('blackem.users.views.home')", "def test_deletion_requires_confirmation(client, contributor):\n\n aid = AidFactory(status='published', author=contributor)\n client.force_login(contributor)\n delete_url = reverse('aid_delete_view', args=[aid.slug])\n res = client.post(delete_url)\n assert res.status_code == 302\n\n aid.refresh_from_db()\n assert aid.status == 'published'", "def covid_delete(request, id, covid_one=0):\n print(\"Process to delete an object at id = {}\".format(id))\n covid = CovidCase.objects.get(pk=id)\n result = covid.delete()\n print(\"Number of rows has been deleted = {}\".format(result[0]))\n\n # If this is coming from covid_one page\n if covid_one == 0:\n place = redirect('/covid/covid_one')\n # otherwise process as normal list\n else:\n place = redirect('/covid/list')\n return place", "def test_delete_confirmation_template(self):\n self.login()\n\n # BlogIndex needs translated pages before child pages can be translated\n self.fr_blog_index = self.en_blog_index.copy_for_translation(self.fr_locale)\n # Create a copy of the en_blog_post object as a translated page\n self.fr_blog_post = self.en_blog_post.copy_for_translation(self.fr_locale)\n\n # Create an alias page to test the `translations_to_move_count`\n # in the template context\n new_page = CreatePageAliasAction(\n self.en_blog_post,\n recursive=False,\n parent=self.en_blog_index,\n update_slug=\"alias-page-slug\",\n user=None,\n )\n new_page.execute(skip_permission_checks=True)\n\n response = self.client.get(\n reverse(\n \"wagtailadmin_pages:delete\",\n args=(self.en_blog_post.id,),\n ),\n follow=True,\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context[\"translation_count\"], 1)\n self.assertEqual(response.context[\"translation_descendant_count\"], 0)\n self.assertIn(\n \"Deleting this page will also delete 1 translation of this page.\",\n response.content.decode(\"utf-8\"),\n )" ]
[ "0.7169478", "0.6802842", "0.67705566", "0.66972566", "0.6590073", "0.6553968", "0.65416104", "0.65388113", "0.6523968", "0.6497277", "0.6489816", "0.64742243", "0.6455277", "0.643404", "0.6403796", "0.6396889", "0.6391679", "0.63716215", "0.63338363", "0.63180286", "0.6317787", "0.6316755", "0.6314079", "0.6312515", "0.6308978", "0.6304924", "0.6298722", "0.6251707", "0.6223464", "0.6194382", "0.61933094", "0.61869913", "0.6169203", "0.6167188", "0.61424303", "0.6136266", "0.61221695", "0.6116978", "0.61054677", "0.6105222", "0.6099558", "0.6099441", "0.6061474", "0.60590965", "0.605702", "0.6052625", "0.6033176", "0.6032193", "0.6028187", "0.60262984", "0.6020548", "0.60160434", "0.6013003", "0.5992162", "0.5987648", "0.59867513", "0.5981909", "0.5963748", "0.5962482", "0.59618014", "0.5961734", "0.5929103", "0.5925158", "0.59156203", "0.5909525", "0.59047896", "0.5904102", "0.59033334", "0.58966076", "0.589217", "0.58919185", "0.5889101", "0.5878165", "0.5877823", "0.58655393", "0.5864326", "0.585936", "0.585936", "0.5858991", "0.5855433", "0.5854791", "0.5851803", "0.5843391", "0.58342206", "0.58227515", "0.5822616", "0.5819104", "0.58100843", "0.58100736", "0.5787039", "0.57858676", "0.5770127", "0.5761974", "0.5756234", "0.57551765", "0.5752688", "0.5750176", "0.5748864", "0.57448727", "0.5744382" ]
0.6615197
4
Post endpoint to delete item. Redirects to home.
def delete_item(item_name, catagory_name): try: item = Item.fetch_by_name_and_catagory_name(item_name, catagory_name) except NoResultFound: abort(404) item.delete() return redirect(url_for('home'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_item(request):\n if request.json_body[u'type'] == u'post':\n if DBSession.query(Post).filter(Post.name==request.json_body[u'name']).delete() == 1:\n return {\"deletion_status\":\"success\"}\n import ipdb; impdb.set_trace()\n return {\"deletion_status\":\"error\"}", "def post_delete(self, *args, **kw):\n #obtenemos el id de la fase para hacer el filtrado despues de la redireccion\n item_to_del = DBSession.query(Item).filter_by(id_item=args[0]).one()\n fid = item_to_del.id_fase_fk\n pks = self.provider.get_primary_fields(self.model)\n d = {}\n for i, arg in enumerate(args):\n d[pks[i]] = arg\n self.provider.delete(self.model, d)\n\n path = './' + '../' * (len(pks) - 1) + '?fid=' + str(fid)\n\n redirect(path)", "def delete(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:\n self.object = self.get_object()\n success_url = self.get_success_url()\n success_message = _(f'Successfully deleted todo list: {self.object}')\n\n self.object.delete()\n\n messages.success(request=request, message=success_message)\n\n return redirect(success_url)", "def delete(id):\n result = delete_post(id)\n flash(result)\n return redirect(url_for(\"show\"))", "def delete(self, request, *args, **kwargs):\r\n self.object = self.get_object()\r\n success_url = self.get_success_url()\r\n self.object.delete()\r\n messages.success(self.request, self.success_message)\r\n return HttpResponseRedirect(success_url)", "def delete_todo_list_view(request: HttpRequest, pk: int) -> HttpResponseRedirect:\n todo_list = TodoListModel.objects.get(id=pk)\n\n if request.method == 'GET':\n return render(request, 'todo/delete_todo_list.html', {'todo_list': todo_list})\n elif request.method == 'POST':\n success_message = _(f'Successfully deleted todo list: {todo_list}')\n\n todo_list.delete()\n\n messages.success(request=request, message=success_message)\n\n return redirect(reverse_lazy('todo:list_todo_lists'))", "def delete(self):\n self.request().delete()", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def delete(id):\r\n get_post(id)\r\n db = get_db()\r\n db.cursor().execute('DELETE FROM novel.post WHERE id = %s', id)\r\n db.commit()\r\n return redirect(url_for('novel.index'))", "def delete(self, request , pk=None): \n return Response({'message':'DELETE'})", "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.object.delete()\n return JsonResponse({'status': 'ok'})", "def delete(request, itemID):\n\ttry:\n\t\titem = get_object_or_404(Item, itemID = itemID)\n\n\t# Handle when the given itemID is not a UUID\n\texcept ValidationError:\n\t\traise Http404\n\n\tif item.seller.user != request.user:\n\t\traise Http404\n\t\n\titem.delete()\n\treturn HttpResponseRedirect(reverse('tailored:index'))", "def post_delete_view(request):\n\n if request.method == 'DELETE':\n token_type, token = request.META.get('HTTP_AUTHORIZATION').split()\n if(token_type != 'JWT'):\n return Response({'detail': 'No JWT Authentication Token Found'}, status=status.HTTP_400_BAD_REQUEST)\n\n token_data = {'token': token}\n\n try:\n valid_data = VerifyJSONWebTokenSerializer().validate(token_data)\n logged_in_user = valid_data.get('user')\n except:\n return Response({'detail': 'Invalid Token'}, status.HTTP_400_BAD_REQUEST)\n\n instance = Post.objects.get(slug=request.data.get('slug'))\n admin_user = User.objects.get(pk=1) # PK Of Admin User Is 1\n\n if(instance.author == logged_in_user or logged_in_user == admin_user):\n instance.delete()\n return Response({}, 
status=status.HTTP_200_OK)\n else:\n return Response({'detail': 'Something Went Wrong.'}, status=status.HTTP_400_BAD_REQUEST)\n\n else:\n return Response({'detail': 'You Are Not Authorised To Edit This Post'}, status.HTTP_403_FORBIDDEN)", "def delete_item(request, shoppinglist_id, item_id):\n Item.objects.filter(pk=item_id,\n shoppinglist__pantry__owner=request.user).delete()\n return redirect('shoppinglists.views.detail', shoppinglist_id)", "def delete_item(item_id):\n if 'userinfo' not in session.keys():\n session['target'] = url_for('delete_item', item_id=item_id)\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n sqlsession.delete(item)\n sqlsession.commit()\n return redirect(\"/\")\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item, Category).join(Category)\\\n .filter(Item.id == item_id).first()\n return render_template(\"delete_item.html\", item=item)", "def remove_item_page(request):\n validate(instance=request.body, schema=item_schema_remove)\n body = json.loads(request.body)\n Item.remove_item(body['item_id'])\n return HttpResponse('success')", "def delete(self, *args, **kwargs):\n return self.handle_delete_request()", "def delete(item_id):\n session = current_app.config['db']\n if request.method == \"POST\":\n used = session.query(func.count(WineType.id).label('count'))\\\n .filter_by(abv_id=item_id).scalar()\n item = session.query(WineABV).filter_by(id=item_id).one()\n c_name = item.name\n if used == 0:\n session.delete(item)\n session.commit()\n flash(\"Successfully Deleted '%s'\" % (c_name,), 'success')\n else:\n flash(\"'%s' is still in use and cannot be deleted.\" % (c_name,),\n 'danger')\n return redirect(url_for('.show'))\n\n else:\n item = session.query(WineABV).filter_by(id=item_id).one()\n return render_template(template_prefix+'delete_form.html', item=item)", "def delete_item(id):\n return '', 201", "def delete(request, post, **kwargs):\n user = request.user\n url, msg = delete_post(post=post, user=user)\n messages.info(request, mark_safe(msg))\n db_logger(user=user, text=f\"{msg} ; post.uid={post.uid}.\")\n\n return url", "def delete(device):\n delete_subject(device)\n return redirect_back('index')", "def delete(self, item):\n self._createAction(item, \"delete\")", "def delete(request, todo_id):\n\n todo = get_object_or_404(Todo, pk=todo_id)\n todo.delete()\n\n return redirect('index')", "def delete_item(item_name):\n\n item = Item.query.filter_by(name=item_name).first_or_404()\n if item.owner != current_user:\n flash(\"Failed to delete item %s since you are not the owner.\" %\n item.name)\n return redirect(url_for('.index'))\n\n form = DeleteForm()\n if form.validate_on_submit():\n try:\n db.session.delete(item)\n db.session.commit()\n except:\n flash((\"Failed to delete item \\\"%s\\\".\") % item.name)\n else:\n flash(\"Item \\\"%s\\\" has been deleted.\" % item.name)\n finally:\n return redirect(url_for('.index'))\n return render_template('delete.html', form=form, name=item_name)", "def deleteItem(sport_id, item_id):\n\n sport = session.query(Sport).filter_by(id=sport_id).one()\n deletedItem = session.query(Item).filter_by(id=item_id).one()\n if request.method == 'POST':\n session.delete(deletedItem)\n session.commit()\n return redirect(url_for('showSports'))\n else:\n return render_template('deleteitem.html', sport_id=sport_id,\n item_id=item_id, sport=sport, item=deletedItem)", "def deleteItem(category_item_id):\n itemToDelete = 
db.findItem(id=category_item_id)\n if itemToDelete.user_id != login_session['user_id']:\n return not_authorized()\n if request.method == 'POST':\n db.deleteItem(itemToDelete)\n flash('%s Successfully Deleted' % itemToDelete.title, 'success')\n return redirect(url_for('showCatalog'))\n return render_template('delete_item.html', item=itemToDelete)", "def delete_item(item_name):\n item = Item.query.filter_by(name=item_name).first()\n\n # If the URL contains a bad item name, send a 404\n if not item:\n abort(404)\n\n # If the current user is not authorized to delete the item because\n # the item was created by a different user, send a 403\n elif current_user != item.user:\n abort(403)\n\n form = DeleteItemForm()\n\n # If the form is submitted, delete the item from the database,\n # send a flash message, and redirect home\n if form.validate_on_submit():\n db.session.delete(item)\n db.session.commit()\n flash(f'\"{item.name}\" has been deleted.', 'good')\n return redirect(url_for('main.home'))\n\n return render_template('delete_item.html', item=item, form=form)", "def view_delete():\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\", \"json\")\n )", "def delete_item(item_id):\n\n item = Item.query.filter(\n Item.id == item_id,\n Item.user_id == current_user.id\n ).first()\n\n if not item:\n flash(\"Couldn't find the item\", category='warning')\n return redirect(request.referrer)\n\n item_name = item.name\n db.session.delete(item)\n db.session.commit()\n flash(\n \"Successfully deleted item '{}'\".format(item_name),\n \"success\")\n\n return redirect(url_for('url.index'))", "def deleteItem(request, itemid):\n try:\n item = ItemSerializer(Item.objects.get(id=itemid))\n Item.objects.get(id=itemid).delete()\n return Response(item.data)\n\n except Item.DoesNotExist:\n fail = {\n \"item\":\"item does not exist\"\n }\n return JsonResponse(fail)", "def delete_item(request, product_id):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, you are not permitted to do that.')\n return redirect(reverse('home'))\n\n product = get_object_or_404(Product, pk=product_id)\n product.delete()\n messages.success(request, 'You have deleted the item!')\n return redirect(reverse('items'))", "def deleteItem(category_id, item_id):\n category = session.query(Category).filter_by(id=category_id).first()\n item = session.query(Item).filter_by(id=item_id).first()\n if item.user_id != login_session['user_id']:\n flash(\"You are authorised to delete items created by you!\")\n return redirect(url_for(\"showCatalog\"))\n if request.method == \"POST\":\n session.delete(item)\n session.commit()\n flash('%s Item Successfully Deleted' % (item.name))\n return redirect(url_for('showItem', category_id=item.category_id))\n else:\n return render_template(\"deleteitem.html\", item=item,\n category=category)", "def delete(request, shoppinglist_id):\n Shoppinglist.objects.filter(pk=shoppinglist_id,\n pantry__owner=request.user).delete()\n return redirect('blackem.users.views.home')", "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n user = request.user\n success_url = reverse_lazy('muxic:user', kwargs={'username': user.username})\n self.object.delete()\n return HttpResponseRedirect(success_url)", "def item_delete(request):\n if request.method == 'POST':\n item_to_delete = get_object_or_404(StockItem, pk=request.POST['id'])\n item_to_delete.active = False\n item_to_delete.save()\n return HttpResponse(status=200)", "def 
clerk_delete_appointment():\n if request.method == 'POST':\n appointment_id = request.form['appointment_id']\n\n response_clerk_delete_appointment = requests.post(server_url + 'medical_clerk/delete_appointment', json={\n 'appointment_id': appointment_id\n })\n response_clerk_delete_appointment = response_clerk_delete_appointment.json()\n\n if response_clerk_delete_appointment.get('Status') == \"SUCCESS\":\n referer = request.referrer\n return redirect(referer, code=302)\n else:\n return \"An error occurred deleting the appointment\"", "def post(self):\n user = users.get_current_user()\n if user:\n user_obj = utils.get_user(user)\n args = self.request.arguments()\n feeds = [x for x in args if x != 'delete' and x != 'modify']\n delete = self.request.get('delete')\n if delete:\n utils.delete_feeds(feeds, user_obj)\n self.redirect('/')", "def delete(self, request, *args, **kwargs):\n\t\ttask_object = self.get_object()\n\t\tsuccess_url = self.get_success_url()\n\t\ttask_object.is_deleted =1\n\t\ttask_object.save()\n\t\treturn HttpResponseRedirect(success_url)", "def post(self, request, *args, **kwargs) -> Union[\n HttpResponsePermanentRedirect,\n HttpResponseRedirect,\n ]:\n try:\n response = self.delete(request, *args, **kwargs)\n messages.success(self.request, self.success_message)\n return response\n except ProtectedError:\n messages.error(self.request, _('CannotDeleteLabel'))\n return redirect('labels')", "def delete_item_details(item_id):\n item = is_user_the_creator(item_id)\n item_name = item.Item.name\n if request.method == 'GET':\n return render_template('item_delete_confirm.html', item_name=item_name, item_id=item_id,\n login_session=login_session,\n csrf_token=generate_csrf_token())\n else:\n session.delete(item.Item)\n session.commit()\n flash(item_name + \" deleted\")\n return redirect(url_for('show_homepage'))", "def delete(id):\r\n\r\n db = get_db()\r\n b_id = session.get(\"user_id\")\r\n query = \"UPDATE product SET quantity = 0 WHERE product_id = ? 
AND for_business = ?\"\r\n db.execute(query, (id, b_id,))\r\n db.commit()\r\n return redirect(url_for(\"main.products\"))", "def post(self):\n post_id = int(self.request.get('post_id'))\n post = Posts.get_by_id(post_id)\n if post.submitter_id == self.get_active_user().key().id():\n post.delete()\n else:\n self.error(403)\n self.redirect('/')", "def deleteListItem(category_id, item_id):\n\n if 'username' not in login_session:\n return redirect('/login')\n\n category = session.query(Category).filter_by(id=category_id).one()\n itemToDelete = session.query(ListItem).filter_by(id=item_id).one()\n\n if category.user_id != login_session['user_id']:\n flash('You are not the creator of %s category, and cannot modify it' %\n category.name)\n return redirect(url_for('showItems', category_id=category.id))\n\n if request.method == 'POST':\n if \"btn_delete\" in request.form:\n session.delete(itemToDelete)\n session.commit()\n flash('Catalog Item Successfully Deleted')\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return redirect(url_for('showItems', category_id=category_id))\n else:\n return render_template('deleteitem.html',\n item=itemToDelete,\n user=getUserInfo(login_session['user_id']))", "def delete(request):\n # user_name == user_id\n required_fields = ['user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # check for not allowed characters\n if check_special_characters(str(data['user_id'])) or check_special_characters(str(data['token'])):\n return Response({'error': str('Unaccepted character passed!')},\n status=status.HTTP_400_BAD_REQUEST)\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Here remove the user's account from the database\n if not db.remove_user(data['user_id']):\n return Response({'error': str('Error when removing the user account!')}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def delete_item(category, item):\n # Verify user login. If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n if request.method == 'POST':\n # Query database with SQLAlchemy and store queries as objects\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n item = (session.query(Items)\n .filter_by(name=item\n .replace('-', ' '), category_id=category.id)\n .one())\n # Get user's database ID\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n # Get database ID of creator\n creator_db_id = item.creator_db_id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print(\"Item creator's database primary key id is {}.\"\n .format(creator_db_id))\n print('Item to delete is \"{}\".'.format(item.name))\n # Only allow creator to edit. If not, redirect to login.\n if user_db_id != creator_db_id:\n flash('Only the creator can edit. 
Please log in as creator.')\n return redirect(url_for('home'))\n session.delete(item)\n session.commit()\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Render webpage\n return render_template('delete_item.html',\n item=item,\n login_status=login_status)", "def _delete(self, pk, user=None):\n request = self.factory.delete(self.detail_url(pk), format='json')\n force_authenticate(request, user)\n resp = self.detail_view(request, pk=pk)\n resp.render()\n return resp", "def delete(self):\r\n return http.Request('DELETE', '{0}'.format(\r\n self.get_url())), parsers.parse_json", "def delete_post(user_id, post_id):\n\n post = Post.query.get_or_404(post_id)\n\n db.session.delete(post)\n db.session.commit()\n\n return redirect(f'/users/{user_id}')", "def delete_post(post_id):\n\n posts = models.Post.select().where(models.Post.id == post_id)\n if posts.count() == 0:\n abort(404)\n models.Post.get(models.Post.id == post_id).delete_instance()\n return redirect(url_for('index'))", "def delete(self, path):\n params = request.args.to_dict()\n if params.get(\"instances\"):\n int_list = params.get(\"instances\")\n return items_delete_response(path, int_list)\n abort(405)", "def delete(request):\n if request.method == \"POST\":\n Books.objects.get(isbn=request.POST['delete_book']).delete()\n return redirect('libros:home')", "def deleteItem(category_id, item_id):\n editedItem = session.query(Item).filter_by(id=item_id).one()\n if editedItem.user_id != login_session['user_id']:\n flash('You are not authorized to delete this Item.\\\n Please create own Category with items in order to \\\n delete items.')\n return redirect(url_for('showallCategories'))\n if request.method == 'POST':\n session.delete(editedItem)\n session.commit()\n flash('Item Deletion successfull')\n return redirect(url_for('showCategory', category_id=category_id))\n else:\n return render_template(\n 'deleteItem.html', category_id=category_id,\n item_id=item_id, item=editedItem)", "def delete_view(self, request, object_id):\r\n obj = self.get_object(unquote(object_id))\r\n obj.delete()\r\n return HttpResponse(\"Deleted\")", "def del_post(request, post_id):\n if not request.user.is_authenticated():\n return redirect('/login/?next=%s' % request.path)\n else:\n Post.objects.filter(id = post_id).delete()\n return redirect('/home/')", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def delete_item(item_id):\n if request.args.get('state') != login_session['state']:\n response = make_response(\n json.dumps({'error': 'Invalid state parameter.'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n if 'username' not in login_session:\n response = make_response(\n json.dumps({'error': 'User is logged out. 
This should not happen'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n try:\n if request.method == 'DELETE':\n item = session.query(Item).filter_by(id=item_id).one()\n session.delete(item)\n session.commit()\n state = get_new_state()\n login_session['state'] = state\n response = make_response(\n json.dumps({'success': '', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)", "def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)", "def delete(id):\n r = requests.delete(API_ROUTE + '/' + str(id), headers={'Auth': _auth()})\n if r.status_code != requests.codes.no_content:\n return r.text, r.status_code\n return redirect(url_for('index'), code=278)", "def delete(item_number):\n qry = db_session.query(Price).filter(\n Price.item_number == item_number)\n price = qry.first()\n\n if price:\n form = PriceForm(formdata=request.form, obj=price)\n if request.method == 'POST' and form.validate():\n # delete the item from the database\n db_session.delete(price)\n db_session.commit()\n\n flash('Price deleted successfully!')\n return redirect('/')\n return render_template('delete_album.html', form=form)\n else:\n return 'Error deleting #{item_number}'.format(item_number=item_number)", "def deleteItem(category_id, item_id):\r\n if 'username' not in login_session:\r\n return redirect(url_for('showLogin'))\r\n session = DBSession()\r\n deletedItem = session.query(Item).filter_by(id=item_id).one()\r\n if deletedItem.user_id != login_session['user_id']:\r\n return \"<script>function myFunction() {alert('You are not authorized to delete this item.');}</script><body onload='myFunction()''>\"\r\n if request.method == 'POST':\r\n session.delete(deletedItem)\r\n session.commit()\r\n return redirect(url_for('showCategoryItems', category_id=category_id))\r\n else:\r\n return render_template('deleteitem.html', item=deletedItem)", "def delete_post_process(post_id):\n\n db_post = Post.query.get_or_404(post_id)\n user_id = db_post.user_id\n\n db.session.delete(db_post)\n db.session.commit()\n\n return redirect(f\"/users/{user_id}\")", "def deleteUser(user):\n delete_user(user)\n return redirect(url_for('login'))", "def deleteIngredient():\n \n if request.method == \"POST\":\n \n id = request.form.get('id')\n\n con = db_connect()\n cur = con.cursor()\n cur.execute(\"DELETE FROM ingredients WHERE id = :id \", {'id': id})\n con.commit()\n con.close()\n\n flash('Ingredient deleted')\n return redirect(url_for('ingredients.showIngredients'))", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def delete_record(uuid):\n\n collection[uuid].delete()\n return redirect('/')", "def delete_patient_appointment():\n if request.method == 'POST':\n appointment_id = request.form['appointment_id']\n response_delete_patient_appointment = requests.post(server_url + 'patient/delete_appointment', json={\n 'appointment_id': appointment_id\n })\n response_delete_patient_appointment = response_delete_patient_appointment.json()\n if response_delete_patient_appointment.get('Status') == 'SUCCESS':\n referer = request.referrer\n return redirect(referer, code=302)\n else:\n return \"An error occurred deleting the appointment\"", "def delete(self, url_pattern):\n return self.route(url_pattern, methods=['DELETE'])", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, 
**kwargs):\n pass", "def delete_item_page(item_name, catagory_name):\n return render_template(\n 'delete_item.html', item_name=item_name, catagory_name=catagory_name\n )", "def delete_post(post_id):\n post = Post.query.get_or_404(post_id)\n db.session.delete(post)\n db.session.commit()\n\n return redirect(f\"/users/{post.user_id}\")", "def delete(self, request, *args, **kwargs):\n # We need to get the object ID before deleting the object as\n # it's needed in success template.\n self.object = self.get_object()\n context = self.get_success_context_data()\n\n # This should delete the object and return a HttpResponseRedirect\n response = super().delete(request, *args, **kwargs)\n\n success_template_name = self.get_success_template_name()\n if request.is_turbo and success_template_name:\n return TemplateResponse(request, success_template_name, context)\n\n return response", "def delete(self, url):\n return self.request(url, \"DELETE\")", "def delete_post(request, post_id):\n post = Post.objects.get(id=post_id)\n\n check_post_owner(request, post)\n post.delete()\n\n return redirect('/posts')", "def delete(self, item, request):\n\n assert (\n isinstance(item, Election)\n or isinstance(item, ElectionCompound)\n or isinstance(item, Vote)\n )\n\n url = request.link(item)\n url = replace_url(url, request.app.principal.official_host)\n for result in self.query().filter_by(url=url):\n self.session.delete(result)\n\n self.session.delete(item)\n self.session.flush()", "def funding_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n funding_reference = get_object_or_404(Funding, id=id,company=company)\n\n #deletes the view and redirects to the page.\n funding_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def delete(id):\n\tget_post(id)\n\tdb = get_db()\n\tget_post(id)\n\tdb = get_db()\n\tdb.execute('DELETE FROM post WHERE id = ?', (id,))\n\tdb.commit()\n\treturn redirect(url_for('blog.index'))", "def delete(id):\n get_autor(id)\n try:\n db.insert_bd('DELETE FROM autor WHERE id = %d' % id)\n return redirect(url_for('autor.index'))\n except:\n return render_template('404.html')", "def delete_post(post_id):\n\n post = Post.query.get_or_404(post_id)\n user = post.user\n db.session.delete(post)\n db.session.commit()\n\n return redirect(f\"/users/{user.id}\")", "def ng_delete(self, request, *args, **kwargs):\r\n if 'pk' not in request.GET:\r\n raise NgMissingParameterError(\"Object id is required to delete.\")\r\n\r\n obj = self.get_object()\r\n obj.delete()\r\n return self.build_json_response(obj)", "def destroy(self, request, pk=None): #delete a specific object\n return Response({'http_method': 'DELETE'})", "def get(self):\n\t\t\n\t\tself.SessionObj.delete()\n\t\t\n\t\tself.redirect(\"/\")", "def test_post(self):\n self.response = self.client.delete(self.url)\n self.assertEqual(\n status.HTTP_405_METHOD_NOT_ALLOWED, self.response.status_code)", "def delete(self):\r\n self.domain.delete_item(self)", "def delete_post(bid, pid):\n # pylint: disable=unused-argument\n if current_user.is_admin or current_user == Post.query.get(int(pid)).uid:\n Post.query.get(pid).deleted = True\n DB.session.commit()\n return redirect(request.referrer)", "def delete(self, app_prefix, path):\n return self.handle_request('delete', app_prefix, path)", "def 
delete_product_view(request, id):\n Product.objects.get(id=id).delete()\n messages.success(request, \"Product deleted successfully.\")\n return redirect(\"products\")", "def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.object.delete()\n if request.is_ajax():\n response = JSONResponse(True, {}, response_mimetype(self.request))\n response['Content-Disposition'] = 'inline; filename=files.json'\n return response\n else:\n return HttpResponseRedirect('/upload/new')", "def user_delete(self, request):\n\n try:\n if request.method == \"POST\":\n flash(\"Be careful you are about to delete all of your data\")\n self._student_handler.delete_students(current_user.scheme_id, current_user.k_number)\n return redirect(url_for(\"user.user\"))\n else:\n return render_template(\"user/delete_page.html\")\n\n except Exception as e:\n self._log.exception(\"Could not delete student\")\n return abort(500)", "def delete(self, id):\t\t\n\t\ttry:\n\t\t\tpost_service.delete(id)\n\t\texcept AssertionError as e:\n\t\t\tpost_space.abort(400, e.args[0], status = \"Could not delete post\", statusCode = \"400\")\n\t\texcept Exception as e:\n\t\t\tpost_space.abort(500, e.args[0], status = \"Could not delete post\", statusCode = \"500\")", "def community_post_delete_view(request, slug):\n post = CommunityPostModel.objects.get(slug=slug) # Get the post\n\n if request.method == 'POST': # If the form has been submitted...\n post.delete() # Delete the object from the database\n return redirect('community-home') # Redirect to the home page\n\n context = {'post': post} # Pass the variables to the template\n return render(request,\n 'pages/patient-community/community-delete-post.html',\n context) # render the patient community delete page", "def todo_delete(request, todo_id):\n todo = get_object_or_404(TodoItem, pk=todo_id)\n event_ident = todo.event.get_ident()\n todo.delete()\n\n messages.success(request, 'TODO was deleted successfully.',\n extra_tags='todos')\n\n return redirect(event_details, event_ident)", "def deleteItem(category_id, item_id):\r\n # authentication\r\n if 'username' not in login_session:\r\n flash('Please login to add item')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n # validation\r\n category = session.query(Category).filter_by(id=category_id).first()\r\n if not category:\r\n flash('Attempted operation on non-existent category')\r\n return redirect(url_for('showCategories'))\r\n\r\n deletedItem = session.query(Item).\\\r\n filter_by(id=item_id, category_id=category_id).first()\r\n if not deletedItem:\r\n flash('Attempt to delete non-existent item')\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n # authorization\r\n if login_session['user_id'] != deletedItem.user_id:\r\n flash('Sorry, you are not authorized to delete the item \\'{}\\''\r\n .format(deletedItem.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n\r\n if request.method == 'POST':\r\n # delete operation\r\n session.delete(deletedItem)\r\n session.commit()\r\n flash('Deleted Item \\'{}\\' Successfully'.format(deletedItem.name))\r\n return redirect(url_for('showItems', category_id=category_id))\r\n else:\r\n # serve GET requests with confirmation form\r\n return render_template(\"deleteItem.html\",\r\n category=deletedItem.category, item=deletedItem)", "def delete(request, message_id):\n return HttpResponse(\"error\")", "def task_delete(request, tasklist_id):\n tasklist = get_object_or_404(Todo, pk=tasklist_id)\n tasklist.delete()\n 
print(tasklist)\n messages.success(request, \"Successfully deleted\")\n return redirect('lists:alllist')", "def post_delete():\n req_data = request.get_json()\n print('This is the request itself \\n', req_data)\n print(req_data['name'])\n flask_wms.delete_entry(req_data['name'])\n return 'Request recieved, delete method'", "def delete_employee():\r\n id = request.args.get('id', \"\")\r\n return render_template(\"delete_employee.html\", id=id)", "def post(self, request, aiid, *args, **kwargs):\n form = ProxyDeleteAIForm(request.POST)\n\n if form.is_valid():\n status = form.save(\n token=self.request.session.get('token', False)\n )\n\n message = status['status']['info']\n\n if status['status']['code'] in [200, 201]:\n level = messages.SUCCESS\n else:\n level = messages.ERROR\n else:\n level = messages.ERROR\n message = 'Something went wrong'\n\n messages.add_message(self.request, level, message)\n return redirect('studio:summary')", "def delete_post(post_id):\n mongo.db.blog.delete_one({'_id': ObjectId(post_id)})\n return redirect(url_for('admin.management'))", "def delete(self, id):\n return self.app.post('/delete/' + str(id), data=dict(id=id),\n follow_redirects=True)" ]
[ "0.7321518", "0.7296745", "0.7259978", "0.7202219", "0.70292133", "0.70238733", "0.70196456", "0.6984477", "0.69794494", "0.6974444", "0.6911067", "0.68968356", "0.6894365", "0.68761015", "0.6870689", "0.6862545", "0.6816911", "0.6768796", "0.6750122", "0.674466", "0.6742463", "0.6737714", "0.67345595", "0.673014", "0.6707191", "0.66929615", "0.66919005", "0.66614676", "0.66572005", "0.66311973", "0.6628432", "0.6619518", "0.66099894", "0.65693265", "0.6558324", "0.65573084", "0.65565336", "0.6546704", "0.65446734", "0.65288293", "0.6528369", "0.65205884", "0.65179855", "0.65164584", "0.65112805", "0.6508358", "0.650348", "0.64950114", "0.64933664", "0.6488106", "0.6479021", "0.64790064", "0.64759445", "0.64727014", "0.64527833", "0.64496875", "0.6436936", "0.64318216", "0.64206666", "0.6419378", "0.6391595", "0.6383604", "0.63708174", "0.6367812", "0.6354243", "0.6351055", "0.6346227", "0.6344292", "0.6344292", "0.6341705", "0.6338648", "0.6328949", "0.63287616", "0.63280725", "0.6319955", "0.63191414", "0.6289798", "0.6287845", "0.62853", "0.6284442", "0.6282402", "0.62779754", "0.6272873", "0.6259535", "0.62560976", "0.62513644", "0.62368065", "0.6230263", "0.6222639", "0.6211585", "0.6211226", "0.6210252", "0.62039727", "0.6202692", "0.6200363", "0.61847967", "0.6181909", "0.6173032", "0.61704975", "0.6170233" ]
0.680767
17
Return dict containing form validation errors for create / update item.
def form_errors(form):
    errors = {}
    max_name_length = Item.name.property.columns[0].type.length
    if not form.get('name', None):
        errors['name'] = 'Please enter a name.'
    elif len(form['name']) > max_name_length:
        errors['name'] = (
            'Name must be less than %s characters.' % max_name_length
        )
    if not Catagory.exists(form.get('catagory', None)):
        errors['catagory'] = 'Not a valid catagory.'
    if not form.get('description', None):
        errors['description'] = 'Please enter a description.'
    return errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_form_error(self):\n errors = {}\n if self._form_error:\n errors[\"base\"] = self._form_error\n self._form_error = None\n return errors", "def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items():\n if field.errors:\n _errors[name] = field.errors.pop()\n\n return _errors", "def render_errors(form):\n return {\n \"form\": form\n }", "def get_validation_errors(self):\n return [err.to_dict() for err in self._schema.validator.validation_errors]", "def form_invalid_add_global_errormessages(self, form):\n if self.get_selected_items_form_attribute() in form.errors:\n errormessages = form.errors[self.get_selected_items_form_attribute()]\n for errormessage in errormessages:\n messages.error(self.request, errormessage)", "def _generate_for_errors_object_when_updating(user_request):\n err_dict = {}\n for field in ['location', 'destination']:\n if field in user_request:\n err_dict[field] = \\\n [serialization_errors['cannot_update_flight_field_with_bookings'].format(field)]\n\n return err_dict", "def describe_invalid_form(form):\n return dict((i.name, i.note) for i in form.inputs if i.note is not None)", "def _validate_error(cls, item):\n if item.error and item.status_code not in [\n job_models.STATUS_CODE_FAILED, job_models.STATUS_CODE_CANCELED]:\n cls._add_error(\n base_model_validators.ERROR_CATEGORY_ERROR_CHECK,\n 'Entity id %s: error: %s for job is not empty but '\n 'job status is %s' % (item.id, item.error, item.status_code))\n\n if not item.error and item.status_code in [\n job_models.STATUS_CODE_FAILED, job_models.STATUS_CODE_CANCELED]:\n cls._add_error(\n base_model_validators.ERROR_CATEGORY_ERROR_CHECK,\n 'Entity id %s: error for job is empty but '\n 'job status is %s' % (item.id, item.status_code))", "def get_field_errors(self, field):\r\n identifier = format_html('{0}.{1}', self.form_name, field.name)\r\n return self.error_class([SafeTuple((identifier, '$pristine', '$pristine', 'invalid', e))\r\n for e in self.errors.get(field.name, [])])", "def field_errors(bound_field):\n seen = []\n errors = {}\n if hasattr(bound_field.field, \"fields\"):\n for idx, subfield in enumerate(bound_field.field.fields):\n key = \"%s_%d\" % (bound_field.auto_id, idx)\n subfield_errors = getattr(subfield.widget, \"errors\", [])\n errors[key] = subfield_errors\n seen.extend(subfield_errors)\n for error in bound_field.errors:\n if error not in seen:\n errors.setdefault(bound_field.auto_id, [])\n errors[bound_field.auto_id].append(error)\n return errors.items()", "def validate():\n if request.method != 'POST':\n abort(400)\n\n is_update = True if request.args.get('is_update') == 'True' else False\n data = request.json or MultiDict({})\n formdata = MultiDict(data or {})\n form = AuthorUpdateForm(formdata=formdata, is_update=is_update)\n form.validate()\n\n result = {}\n changed_msgs = dict(\n (name, messages) for name, messages in form.messages.items()\n if name in formdata.keys()\n )\n result['messages'] = changed_msgs\n\n return jsonify(result)", "def form_invalid(self, form, request):\n if request.is_ajax():\n errors_dict = {}\n if form.errors:\n for error in form.errors:\n e = form.errors[error]\n errors_dict[error] = unicode(e)\n return HttpResponseBadRequest(json.dumps(errors_dict))\n else:\n return self.render_to_response(self.get_context_data(form=form))", "def errors(self):\r\n if not hasattr(self, '_errors_cache'):\r\n self._errors_cache = self.form.get_field_errors(self)\r\n return self._errors_cache", "def failure(self, validation_failure):\n \n 
self.request.response.status_int = 400\n return validation_failure.error.asdict()", "def errors(self):\n\n dict = {\"Stellar Mass Error\":[self.st_masserr1,self.st_masserr2],\n \"Stellar Radius Error\":[self.st_raderr1,self.st_raderr2]}\n\n return dict", "def get_field_errors(self, bound_field):\r\n errors = super(NgFormValidationMixin, self).get_field_errors(bound_field)\r\n identifier = format_html('{0}.{1}', self.form_name, self.add_prefix(bound_field.name))\r\n errors_function = '{0}_angular_errors'.format(bound_field.field.__class__.__name__)\r\n try:\r\n errors_function = getattr(VALIDATION_MAPPING_MODULE, errors_function)\r\n potential_errors = types.MethodType(errors_function, bound_field.field)()\r\n except (TypeError, AttributeError):\r\n errors_function = getattr(VALIDATION_MAPPING_MODULE, 'Default_angular_errors')\r\n potential_errors = types.MethodType(errors_function, bound_field.field)()\r\n errors.append(SafeTuple((identifier, '$dirty', '$valid', 'valid', ''))) # for valid fields\r\n errors.extend([SafeTuple((identifier, '$dirty', pe[0], 'invalid', force_text(pe[1])))\r\n for pe in potential_errors])\r\n return errors", "def validation_errors(self):\n return self._validation_errors", "def validate(self):\n\n form = CallEventForm(self.data)\n if not form.is_valid():\n self.errors = form.errors\n map_dict_fields(self.errors, const.DB_FIELDS, const.API_FIELDS)", "def v_err(flaw):\n error_messages = {\n 'no_season': _(\n \"Season must contain at least 4 alphanumeric characters.\"\n ),\n 'no_items': _(\n \"Menu must contain at least 1 item.\"\n ),\n 'no_name': _(\n \"Name field must contain at least 4 alphanumeric characters.\"\n ),\n 'no_desc': _(\n \"Description must contain at least 10 characters.\"\n ),\n 'no_chef': _(\n \"Item must belong to a chef.\"\n ),\n 'no_ing': _(\n \"Item must contain at least 1 ingredient.\"\n ),\n 'elapsed': _(\n \"This date has elapsed.\"\n )\n }\n raise forms.ValidationError(\n error_messages[flaw],\n code=flaw,\n )", "def _post_clean(self):\r\n super(NgModelFormMixin, self)._post_clean()\r\n if self._errors and self.prefix:\r\n self._errors = ErrorDict((self.add_prefix(name), value) for name, value in self._errors.items())", "def request_validation_error(error):\n message = str(error)\n app.logger.error(message)\n return {\n 'status_code': status.HTTP_400_BAD_REQUEST,\n 'error': 'Bad Request',\n 'message': message\n }, status.HTTP_400_BAD_REQUEST", "def validate(self):\n errors = {}\n for typ, items in self._items.iteritems():\n for name, spec in items.iteritems():\n assert hasattr(spec, 'validate'), 'Does %s:%s descend from FrodoBase?' 
% (name, spec)\n spec_errors = spec.validate()\n if spec_errors:\n errors[name] = spec_errors\n return errors\n\n # sys.modules[__name__] = Configuration()", "def format_error(invalid, doc_type):\n # using string for checking is probably not ideal,\n # but voluptuous does not have specific sub error\n # types for these errors\n if invalid.error_message == 'extra keys not allowed':\n msg = \"Key '{}' is not allowed\".format(invalid.path[0])\n elif invalid.error_message == 'required key not provided':\n msg = \"{} '{}' is missing\".format(doc_type, invalid.path[0])\n else:\n msg = invalid.message\n return {'message': msg, 'field': str(invalid.path[0])}", "def test_form_errors(self):\n form = self.response.context.get('form')\n self.assertTrue(form.errors)", "def handle_validation_error(self, error, bundle_errors):\n \n error_str = six.text_type(error)\n error_msg = self.help.format(error_msg=error_str) if self.help else error_str\n msg = {self.name: error_msg}\n\n if bundle_errors:\n return error, msg\n flask_restful.abort(400, message=msg)", "def form_invalid(self, form, request):\n return", "def form_invalid(self, form, request):\n return", "def get_form_errors(form):\n all_errors = []\n for field in form.errors:\n all_errors += form.errors[field]\n return all_errors", "def security_errors(self):\n errors = ErrorDict()\n for f in [\"honeypot\", \"timestamp\", \"security_hash\"]:\n if f in self.errors:\n errors[f] = self.errors[f]\n return errors", "def validate_form(form, collection):\r\n\r\n # variable initialization\r\n max_title = 50\r\n max_ingredients = 500\r\n max_method = 1500\r\n max_recipe_URL = 250\r\n max_servings = 100\r\n max_category_name = 50\r\n max_category_URL = 250\r\n max_review = 250\r\n error_list = []\r\n\r\n # validates recipe form\r\n if collection == 'recipe':\r\n if not form['title'] or len(form['title']) > max_title:\r\n error_list.append(\r\n 'Title must not be empty or more than {} characters!'\r\n .format(max_title)\r\n )\r\n\r\n ingredient = form['ingredients']\r\n if not ingredient or len(ingredient) > max_ingredients:\r\n error_list.append(\r\n 'Ingredients must not be empty or more than {} characters!'\r\n .format(max_ingredients)\r\n )\r\n\r\n if not form['method'] or len(form['method']) > max_method:\r\n error_list.append(\r\n 'Method must not be empty or more than {} characters!'\r\n .format(max_method)\r\n )\r\n\r\n if 'appliance_categories' not in form:\r\n error_list.append(\r\n 'At least one of the appliances should be checked!'\r\n )\r\n\r\n if not form['img_link'] or len(form['img_link']) > max_recipe_URL:\r\n error_list.append(\r\n 'Image URL must not be empty or more than {} characters!!'\r\n .format(max_recipe_URL)\r\n )\r\n\r\n try:\r\n if not form['servings'] or int(form['servings']) > max_servings:\r\n error_list.append(\r\n 'Servings must not be empty or more than {}!'\r\n .format(max_servings)\r\n )\r\n\r\n except ValueError:\r\n error_list.append('Servings is not a number!')\r\n\r\n # validates recipe category form\r\n elif collection == 'recipe_category':\r\n if not form['name'] or len(form['name']) > max_category_name:\r\n error_list.append(\r\n 'Category name must not be empty or more than {} characters!'\r\n .format(max_category_name)\r\n )\r\n\r\n if not form['img_link'] or len(form['img_link']) > max_category_URL:\r\n error_list.append(\r\n 'Image URL must not be empty or more than {} characters!'\r\n .format(max_category_URL)\r\n )\r\n\r\n # validates review form\r\n elif collection == 'review':\r\n if not form['review'] or 
len(form['review']) > max_review:\r\n error_list.append(\r\n 'Review must not be empty or more than {} characters!'\r\n .format(max_review)\r\n )\r\n\r\n # returns errors on an empty list\r\n return error_list", "def validate_fields(row,fila,new_values):\n # \"Definiciones iniciales del diccionario de errores\n # y status de error.\n dict_error={}\n error_count = False\n\n \"\"\"Validaciones de cada campo segun el modelo Job.\"\"\"\n #Validate Job.id\n if row[0] == None:\n new_values['id'] =row[0]\n # dict_error['id'] = ''\n else:\n try:\n if isinstance(int(row[0]),int):\n new_values['id'] =row[0]\n # dict_error['id'] = ''\n except ValueError:\n error_count=True\n dict_error['id'] = ValueError('Error en id')\n\n #Validate Job.company_ruc\n if len(row[1])==11:\n new_values['company_ruc'] =row[1]\n # dict_error['company_ruc'] = ''\n else:\n error_count=True\n dict_error['company_ruc'] = ValidationError('Error en company_ruc')\n\n #Validate Job.company_name\n if len(row[2])<=100 and row[2] != None:\n new_values['company_name'] =row[2]\n # dict_error['company_name'] = ''\n else:\n error_count=True\n dict_error['company_name'] = ValidationError('Error en company_name')\n\n #Validate Job.title\n if len(row[3])<=100 and row[3] != None:\n new_values['title'] =row[3]\n # dict_error['title'] = ''\n else:\n error_count=True\n dict_error['title'] = ValidationError('Error en title')\n\n #Validate Job.description\n if isinstance(row[4],str) and row[4] != None:\n new_values['description'] =row[4]\n # dict_error['description'] = ''\n else:\n error_count=True\n dict_error['description'] = ValidationError('Error en description')\n\n #Validate Job.requeriments\n if isinstance(row[5],str) and row[5] != None:\n new_values['requeriments'] =row[5]\n # dict_error['requeriments'] = ''\n else:\n error_count=True\n dict_error['requeriments'] = ValidationError('Error en requeriments')\n\n #Validate Job.contact_email\n if row[6] != None:\n new_values['contact_email'] =row[6]\n # dict_error['contact_email'] = ''\n else:\n error_count=True\n dict_error['contact_email'] = ValidationError('Error en contact email')\n\n #Validate Job.location\n if len(row[7])<=50 and row[7] != None:\n new_values['location'] =row[7]\n # dict_error['location'] = ''\n else:\n error_count=True\n dict_error['location'] = ValidationError('Error en Location')\n\n #Validate Job.is_active\n if row[8] == None:\n new_values['is_active'] =row[8]\n # dict_error['is_active'] = ''\n else:\n try:\n if int(row[8])==1 or int(row[8])==0:\n new_values['is_active'] =row[8]\n # dict_error['is_active'] = ''\n except Exception as e:\n error_count=True\n dict_error['is_active'] = ValidationError('Error en el is_active')\n\n #Validate Job.is_verified\n if row[9] == None:\n new_values['is_verified'] =row[9]\n # dict_error['is_verified'] = ''\n else:\n try:\n if int(row[9])==1 or int(row[9])==0:\n new_values['is_verified'] =row[9]\n # dict_error['is_verified'] = ''\n except Exception as e:\n error_count=True\n dict_error['is_verified'] = ValidationError('Error en el is_verified')\n\n #Validate Job.is_public\n if row[10] == None:\n new_values['is_public'] =row[10]\n # dict_error['is_public'] = ''\n else:\n try:\n if int(row[10])==1 or int(row[10])==0:\n new_values['is_public'] =row[10]\n # dict_error['is_public'] = ''\n except Exception as e:\n error_count=True\n dict_error['is_public'] = ValidationError('Error en el is_public')\n\n #Validate Job.show_recruiter\n if row[11] == None:\n new_values['show_recruiter'] =row[11]\n # dict_error['show_recruiter'] = ''\n else:\n 
try:\n if int(row[11])==1 or int(row[11])==0:\n new_values['show_recruiter'] =row[11]\n # dict_error['show_recruiter'] = ''\n except Exception as e:\n error_count=True\n dict_error['show_recruiter'] = ValidationError('Error en el show_recruiter')\n\n #Validate Job.website_url\n if row[12] == None:\n new_values['website_url'] =row[12]\n # dict_error['website_url'] = ''\n else:\n if len(row[12])>4:\n new_values['website_url'] =row[12]\n # dict_error['website_url'] = ''\n else:\n error_count=True\n dict_error['website_url'] = ValidationError('Error en website_url')\n\n #Validate Job.benefits\n if isinstance(row[13],str) or row[13] == None:\n new_values['benefits'] =row[13]\n # dict_error['benefits'] = ''\n else:\n error_count=True\n dict_error['benefits'] = ValidationError('Error en benefits')\n\n #Validate Job.urgency\n if isinstance(row[14],str) or row[14] == None:\n new_values['urgency'] =row[14]\n # dict_error['urgency'] = ''\n else:\n error_count=True\n dict_error['urgency'] = ValidationError('Error en urgency')\n\n #Validate Job.schedule\n if isinstance(row[15],str) or row[15] == None:\n new_values['work_schedule'] =row[15]\n # dict_error['work_schedule'] = ''\n else:\n error_count=True\n dict_error['work_schedule'] = ValidationError('Error en work_schedule')\n\n #Validate Job.comment\n if isinstance(row[16],str) or row[16] == None:\n new_values['comment'] =row[16]\n # dict_error['comment'] = ''\n else:\n error_count=True\n dict_error['comment'] = ValidationError('Error en comment')\n\n #Validate Job.min_salary\n if isinstance(int(row[17]),int) or row[17] == None:\n new_values['min_salary'] =row[17]\n # dict_error['min_salary'] = ''\n else:\n error_count=True\n dict_error['min_salary'] = ValidationError('Error en min_salary')\n\n #Validate Job.max_salary\n if isinstance(int(row[18]),int) or row[18] == None:\n new_values['max_salary'] =row[18]\n # dict_error['max_salary'] = ''\n else:\n error_count=True\n dict_error['max_salary'] = ValidationError('Error en max_salary')\n\n #Validate Job.pay_range_period\n if row[19]=='annual' or row[19]=='monthly' or row[19] == None:\n new_values['pay_range_period'] =row[19]\n # dict_error['pay_range_period'] = ''\n else:\n error_count=True\n dict_error['pay_range_period'] = ValidationError('Error en pay_range_period')\n\n return {\"errors\": dict_error,\"fila\":fila,\"new_values\":new_values,\"error_status\":error_count}", "def validate(self, data):\n if data.has_key('site'):\n if FieldSightXF.objects.filter(\n xf__id=data['xf'], is_staged=False, is_scheduled=True, site=data['site']).exists():\n raise serializers.ValidationError(\"Form Already Exists, Duplicate Forms Not Allowded\")\n elif data.has_key('project'):\n if FieldSightXF.objects.filter(\n xf__id=data['xf'], is_staged=False, is_scheduled=True, project=data['project']).exists():\n raise serializers.ValidationError(\"Form Already Exists, Duplicate Forms Not Allowded\")\n return data", "def validate(self, value):\n errors = {}\n if self.field:\n if hasattr(value, \"items\"):\n sequence = value.items()\n else:\n sequence = enumerate(value)\n for k, v in sequence:\n try:\n self.field._validate(v)\n except ValidationError as error:\n errors[k] = error.errors or error\n except (ValueError, AssertionError) as error:\n errors[k] = error\n\n if errors:\n field_class = self.field.__class__.__name__\n self.error(f\"Invalid {field_class} item ({value})\", errors=errors)\n # Don't allow empty values if required\n if self.required and not value:\n self.error(\"Field is required and cannot be empty\")", "def 
form_invalid(self, form):\n response = super().form_invalid(form)\n if self.is_ajax():\n return JsonResponse(form.errors, status=400)\n else:\n return response", "def invalid_item(upload_items: List[JSONDict]) -> JSONDict:\n altered = upload_items[0]\n altered[\"language\"] = \"engl\"\n altered[\"date\"] = \"02-2031-01\"\n altered[\"url\"] = \"incorrect.com\"\n return altered", "def form_invalid(self, form):\n return self.render_to_response(\n self.get_context_data(result=form.errors.as_text()))", "def _validation_errors(self):\n feed_dict = dict()\n feed_dict[self.model.get_layer('input')] = self.x_validate\n for id_ in self.task_ids.keys():\n feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_validate[id_]\n errors = {}\n for task_id, loss_type in self.task_ids.iteritems():\n if loss_type is LossTypes.mse:\n errors[task_id] = np.sqrt(self.model.get_layer(task_id + '-loss')\n .eval(session=self.sess, feed_dict=feed_dict))\n elif loss_type is LossTypes.cross_entropy:\n predictions = tf.argmax(self.model.get_layer(task_id + '-prediction'), 1)\n targets = tf.argmax(self.model.get_layer(task_id + '-ground-truth'), 1)\n correct_predictions = tf.equal(predictions, targets)\n accuracy_tensor = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))\n accuracy = accuracy_tensor.eval(session=self.sess, feed_dict=feed_dict)\n errors[task_id] = 1. - accuracy\n return errors", "def clean(self):\n\n\t\tfor form in self.forms:\n\t\t\tstatus = form.cleaned_data.get('status')\n\t\t\tif not status:\n\t\t\t\traise ValidationError('Keinen Status', 'error')", "def form_invalid(self, form):\n response = super().form_invalid(form)\n if self.request.is_turbo:\n response.status_code = 422 # Unprocessable Entity\n return response", "def clean(self):\n start_date = self.cleaned_data.get('start_date', None)\n end_date = self.cleaned_data.get('end_date', None)\n\n if start_date is None:\n self._errors['start_date'] = self.error_class(['This field is required.'])\n raise forms.ValidationError('Please choose a start date')\n\n if end_date is None:\n self._errors['end_date'] = self.error_class(['This field is required.'])\n raise forms.ValidationError('Please choose an end date')\n\n if start_date > end_date:\n self._errors['end_date'] = self.error_class(['This field is required.'])\n raise forms.ValidationError('The end date must be AFTER the start date')\n\n # Limit to 10 days\n days_limit = 7\n time_diff = end_date - start_date\n \n if time_diff.days > days_limit:\n self._errors['end_date'] = self.error_class(['This field is required.'])\n raise forms.ValidationError('You may only block off %s days at a time' % days_limit)\n \n \n \n start_time = datetime.combine(start_date, time.min)\n end_time = datetime.combine(end_date, time.max)\n\n conflict_checker = ConflictChecker()\n if conflict_checker.does_timeslot_conflict(TimeSlot(start_time, end_time)):\n self._errors['end_date'] = self.error_class(['Please choose a different date.'])\n raise forms.ValidationError('Sorry! The start/end dates conflict with another calendar event! 
Please choose other dates.')\n\n return self.cleaned_data", "def validations(self):\n return self.container['validations']", "def validate_update(cls, document: dict) -> dict:\n if document is None:\n return {\"\": [\"No data provided.\"]}\n\n if not isinstance(document, dict):\n return {\"\": [\"Must be a dictionary.\"]}\n\n new_document = copy.deepcopy(document)\n\n errors = {}\n\n updated_field_names = [\n field.name for field in cls.__fields__ if field.name in new_document\n ]\n unknown_fields = [\n field_name\n for field_name in new_document\n if field_name not in updated_field_names\n ]\n for unknown_field in unknown_fields:\n known_field, field_value = cls._to_known_field(\n unknown_field, new_document[unknown_field]\n )\n if known_field:\n new_document.setdefault(known_field.name, {}).update(field_value)\n elif not cls._skip_unknown_fields:\n errors.update({unknown_field: [\"Unknown field\"]})\n\n # Also ensure that primary keys will contain a valid value\n updated_fields = [\n field\n for field in cls.__fields__\n if field.name in new_document or field.is_primary_key\n ]\n for field in updated_fields:\n errors.update(field.validate_update(new_document))\n\n return errors", "def form_invalid(self, form):\n response = super(AjaxableResponseMixin, self).form_invalid(form)\n\n if self.is_ajax():\n return JsonResponse(form.errors, status=422)\n else:\n return response", "def form_invalid(self, form, prefix=None):\n response = super(FormAjaxMixin, self).form_invalid(form)\n if self.request.is_ajax():\n data = {\n \"errors_list\": self.add_prefix(form.errors, prefix),\n }\n return self.json_to_response(status=400, json_data=data,\n json_status=AjaxResponseStatus.ERROR)\n return response", "def validate(self):\n for name, field in self:\n try:\n field.validate_for_object(self)\n except ValidationError as error:\n raise ValidationError(\n \"Error for field '{name}'.\".format(name=name),\n error,\n )", "def form_invalid(self, form, formsets):\n return self.render_to_response(\n self.get_context_data(form=form, formsets=formsets)\n )", "def clean(self):\r\n cleaned_data = super(RPEventCreateForm, self).clean()\r\n self.check_risk()\r\n self.check_costs()\r\n self.check_location_or_plotroom()\r\n return cleaned_data", "def clean(self):\n start_time = self.cleaned_data.get('start_time', None)\n end_time = self.cleaned_data.get('end_time', None)\n\n if start_time is None:\n self._errors['start_time'] = self.error_class(['This field is required.'])\n raise forms.ValidationError('Please choose a start time')\n\n if end_time is None:\n self._errors['end_time'] = self.error_class(['This field is required.'])\n raise forms.ValidationError('Please choose an end time')\n\n if end_time <= start_time:\n self._errors['end_time'] = self.error_class(['This field is required.'])\n raise forms.ValidationError('The end time must be AFTER the start time')\n \n conflict_checker = ConflictChecker()\n if conflict_checker.does_timeslot_conflict(TimeSlot(start_time, end_time)):\n self._errors['end_time'] = self.error_class(['Please choose a different time.'])\n raise forms.ValidationError('Sorry! That time conflicts with another event reservation or message! 
Please choose another one.')\n \n return self.cleaned_data", "def _validatePayload(request):\n return {\n 'name': _validateField(request, 'name'),\n 'email': _validateField(request, 'email'),\n 'password': _validateField(request, 'password'),\n }", "def _custom_validate_fields(self, issues):\n\n common_section = 'basicinfo'\n\n if self.address is None or self.address == '':\n issues.create(section=common_section,\n field='address',\n code='required')\n\n if self.phone is None or self.phone == '':\n issues.create(section=common_section,\n field='phone',\n code='required')\n elif not phonenumber.to_python(self.phone).is_valid():\n issues.create(section=common_section,\n field='phone',\n code='invalid')\n\n if self.psu_email is None or self.psu_email == '':\n issues.create(section=common_section,\n field='psu_email',\n code='required')\n else:\n try:\n EmailValidator()(self.psu_email)\n if not self.psu_email.endswith('@psu.edu'):\n issues.create(section=common_section,\n field='psu_email',\n code='prohibited')\n except ValidationError:\n issues.create(section=common_section,\n field='psu_email',\n code='invalid')\n\n if self.preferred_email == '':\n # preferred_email is assumed to be psu_email if blank\n pass\n else:\n try:\n EmailValidator()(self.preferred_email)\n except ValidationError:\n issues.create(section=common_section,\n field='preferred_email',\n code='invalid')\n\n if self.psu_id is None or self.psu_id == '':\n issues.create(section=common_section,\n field='psu_id',\n code='required')\n elif not re.match(r'^9\\d{8}$', self.psu_id):\n issues.create(section=common_section,\n field='psu_id',\n code='invalid')\n\n if self.major is None or self.major == '':\n issues.create(section=common_section,\n field='major',\n code='required')\n\n if self.semester_initiated is None:\n issues.create(section=common_section,\n field='semester_initiated',\n code='required')\n elif self.semester_initiated > Semester(self.due_at.date()):\n issues.create(section=common_section,\n field='semester_initiated',\n code='invalid')\n elif self.semester_initiated < Semester(('Spring', 1928)):\n issues.create(section=common_section,\n field='semester_initiated',\n code='invalid')\n\n if self.semester_graduating is None:\n issues.create(section=common_section,\n field='semester_graduating',\n code='required')\n elif self.semester_graduating < Semester(self.due_at.date()):\n issues.create(section=common_section,\n field='semester_graduating',\n code='invalid')\n elif self.semester_graduating > Semester(('Fall', 2099)):\n issues.create(section=common_section,\n field='semester_graduating',\n code='invalid')\n\n if self.cumulative_gpa == None:\n issues.create(section=common_section,\n field='cumulative_gpa',\n code='required')\n elif (self.cumulative_gpa < 0.0 or self.cumulative_gpa > 4.0):\n issues.create(section=common_section,\n field='cumulative_gpa',\n code='invalid')\n\n if self.semester_gpa == None:\n issues.create(section=common_section,\n field='semester_gpa',\n code='required')\n elif (self.semester_gpa < 0.0 or self.semester_gpa > 4.0):\n issues.create(section=common_section,\n field='semester_gpa',\n code='invalid')", "def dict_alert_msg(form_is_valid, alert_title, alert_msg, alert_type):\n data = {\n 'form_is_valid': form_is_valid,\n 'alert_title': alert_title,\n 'alert_msg': alert_msg,\n 'alert_type': alert_type\n }\n return data", "def display_form_errors(form):\n\n for fieldname, errors in form.errors.items():\n for error in errors:\n err_str = 'Error in field <' + fieldname + '>: ' + error\n 
flash(err_str, 'error')", "def extract_form_fields(item):\n # Strip off any trailing \\r\\n\n formitems = item.value.rstrip('\\r\\n')\n # Split the items by newline, this gives us a list of either 1, 3, 4\n # or 5 items long\n itemlist = formitems.split(\"\\n\")\n # Setup some regular expressions to parse the items\n re_list = [\n re.compile(\n '^[0-1][0-9]:[0-5][0-9]:[0-5][0-9] DEBUG - $'),\n re.compile('^(payload)({\".*)$'),\n re.compile('^([a-z]+): (.*)$'),\n ]\n itemdict = {}\n # Go through the 1, 3, 4 or 5 items list\n for anitem in itemlist:\n # Compare each item to the regular expressions\n for a_re in re_list:\n match = re.search(a_re, anitem)\n if match:\n if len(match.groups()) == 0:\n # We have a match but no groups, must be\n # the preamble.\n itemdict['preamble'] = match.group(0)\n elif len(match.groups()) == 2:\n # All other re's should have 2 matches\n itemdict[match.group(1)] = match.group(2)\n # We already have a match, skip other regular expressions.\n continue\n return itemdict", "def clean(self):\n\n # First, check that something hasn't gone wrong already.\n if self.errors:\n return {}\n\n # Cleaning IDs\n category_name = self.cleaned_data['category']\n if category_name != u'all':\n matching_category_ids = get_all_matching_category_ids(\n category_name\n )\n self.cleaned_data['category_id'] = matching_category_ids[0]\n else:\n self.cleaned_data['category_id'] = 0\n\n location_name = self.cleaned_data['location']\n matching_location_ids = get_all_matching_location_ids(\n location_name)\n self.cleaned_data['location_id'] = matching_location_ids[0]\n location = Location.objects.get(id=matching_location_ids[0])\n\n # Cleaning date\n t = self.cleaned_data['date']\n now = datetime.now(pytz.timezone(location.timezone)).date()\n if t < now:\n raise forms.ValidationError(\"It looks like the date you searched for already happened...\")\n\n return self.cleaned_data", "def messages(obj):\n if isinstance(obj, BaseForm):\n return obj.non_field_errors()\n return get_messages(obj)", "def validate(self, attrs):\n\n unknown = set(self.initial_data) - set(self.fields)\n if unknown:\n raise ValidationError('Unknown field(s): {}'.format('', ''.join(unknown)))\n return attrs", "def clean_items(self):\n items = self.cleaned_data['items']\n if len(items) < 1:\n v_err('no_items')\n return items", "def get_validation_errors(response, field, index=0):\n assert response.status_code == 400\n i = 0\n for error in response.data[\"invalid_params\"]:\n if error[\"name\"] != field:\n continue\n\n if i == index:\n return error\n\n i += 1", "def request_validation_error(error):\n return bad_request(error)", "def request_validation_error(error):\n return bad_request(error)", "def request_validation_error(error):\n return bad_request(error)", "def test_wrong_upload_item(invalid_item: JSONDict) -> None:\n\n with pytest.raises(ValidationError) as e:\n invalid = UploadItem(**invalid_item) # noqa: F841\n assert e.value.errors() == [\n {\n \"ctx\": {\"limit_value\": 2},\n \"loc\": (\"language\",),\n \"msg\": \"ensure this value has at most 2 characters\",\n \"type\": \"value_error.any_str.max_length\",\n },\n {\n \"loc\": (\"date\",),\n \"msg\": \"Could not validate format '02-2031-01'. 
Must be YYYY-MM-DD or iso-formatted time stamp\",\n \"type\": \"value_error\",\n },\n {\n \"loc\": (\"url\",),\n \"msg\": \"invalid or missing URL scheme\",\n \"type\": \"value_error.url.scheme\",\n },\n ]", "def create_return_dict_validator(self):\n return {\n 'count': {'type': 'integer', 'required': True, 'empty': False},\n 'rows': {'type': 'list', 'required': True, 'schema': {'type': 'dict'}}\n }", "def get_form_properties(self):\n if self.REQUEST.has_key('properties_with_failures'):\n return self.REQUEST['properties_with_failures']\n properties = self.get_editable_properties()\n for property in properties:\n if not property.has_key('value'):\n property['value'] = getattr(self.aq_base, property['id'], utilities.get_type_default(property['type']))\n property['widget'] = getattr(widgets, property['type'])(property['id'], property['value'])\n property['failure_message'] = ''\n return properties", "def clean(self):\n cleaned_data = super().clean()\n variant = cleaned_data.get('variant')\n quantity = cleaned_data.get('quantity')\n if variant and quantity is not None:\n try:\n variant.check_quantity(quantity)\n except InsufficientStock as e:\n error = forms.ValidationError(\n pgettext_lazy(\n 'Add item form error',\n 'Could not add item. '\n 'Only %(remaining)d remaining in stock.' %\n {'remaining': e.item.quantity_available}))\n self.add_error('quantity', error)\n return cleaned_data", "def validate(self, request):\n values = {\n 'robot_match_comments':request.POST['robot_match_comments'],\n 'did_foul':'did_foul' in request.POST,\n 'did_technical_foul':'did_technical_foul' in request.POST,\n 'foul_description':request.POST['foul_description'],\n 'did_shoot':'did_shoot' in request.POST,\n 'auto_1':request.POST['auto_1'],\n 'auto_2':request.POST['auto_2'],\n 'auto_3':request.POST['auto_3'],\n 'auto_miss':request.POST['auto_miss'],\n 'teleop_1':request.POST['teleop_1'],\n 'teleop_2':request.POST['teleop_2'],\n 'teleop_3':request.POST['teleop_3'],\n 'teleop_5':request.POST['teleop_5'],\n 'teleop_miss':request.POST['teleop_miss'],\n 'shooting_description':request.POST['shooting_description'],\n 'did_climb':'did_climb' in request.POST,\n 'climb_start':request.POST['climb_start'],\n 'climb_finish':request.POST['climb_finish'],\n 'level_reached':request.POST.get('level_reached'),\n 'frisbees_dumped':request.POST['frisbees_dumped'],\n 'climbing_description':request.POST['climbing_description'],\n 'did_human_load':'did_human_load' in request.POST,\n 'did_ground_load':'did_ground_load' in request.POST,\n 'auto_frisbees_ground_loaded':\\\n request.POST['auto_frisbees_ground_loaded'],\n 'loading_description':request.POST['loading_description'],\n }\n if ((values['did_foul'] or values['did_technical_foul']) and\n not values['foul_description']):\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'Please enter a description of the foul(s) the robot committed',\n new_values\n )\n if values['did_shoot']:\n try:\n values['auto_1'] = int(values['auto_1'])\n values['auto_2'] = int(values['auto_2'])\n values['auto_3'] = int(values['auto_3'])\n values['auto_miss'] = int(values['auto_miss'])\n values['teleop_1'] = int(values['teleop_1'])\n values['teleop_2'] = int(values['teleop_2'])\n values['teleop_3'] = int(values['teleop_3'])\n values['teleop_5'] = int(values['teleop_5'])\n values['teleop_miss'] = int(values['teleop_miss'])\n except ValueError:\n raise ValidationError(\n 'You must enter a number for all of the shooting numbers',\n self.__dict__.copy().update(values)\n 
)\n if values['did_climb']:\n try:\n values['climb_start'] = int(values['climb_start'])\n values['climb_finish'] = int(values['climb_finish'])\n try:\n values['level_reached'] = int(values['level_reached'])\n except TypeError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'You must select a level the robot climbed too',\n new_values\n )\n values['frisbees_dumped'] = int(values['frisbees_dumped'])\n except ValueError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'All climbing related numbers must be numbers',\n new_values\n )\n if values['did_ground_load']:\n try:\n values['auto_frisbees_ground_loaded'] = int(\n values['auto_frisbees_ground_loaded'])\n except ValueError:\n new_values = self.__dict__.copy()\n new_values.update(values)\n raise ValidationError(\n 'All numbers of frisbees ground loaded must be numbers',\n new_values\n )\n return values", "def get_errors(self, request):\n\n value = request._get_parameter_value(self)\n return value.errors", "def clean(self):\n if any(self.errors):\n return\n\n if len(set([r.id for r in self.instance.references.all()]\n + [f.instance.id for f in self.forms])) > self.max_forms:\n raise forms.ValidationError('Maximum number of allowed items exceeded.')\n\n descriptions = []\n for form in self.forms:\n # This is to allow empty unsaved form\n if 'description' in form.cleaned_data:\n description = form.cleaned_data['description']\n if description in descriptions:\n raise forms.ValidationError('References must be unique.')\n descriptions.append(description)", "def on_request_validation_error(err):\n print(err)\n return jsonify(message='Bad request'), 400", "def form_invalid(self, form, cruiseday_form, participant_form, document_form, equipment_form, invoice_form):\r\n\t\tprint(cruiseday_form)\r\n\t\tprint(document_form)\r\n\t\tprint(equipment_form)\r\n\t\tprint(invoice_form)\r\n\t\treturn self.render_to_response(\r\n\t\t\tself.get_context_data(\r\n\t\t\t\tform=form,\r\n\t\t\t\tcruiseday_form=cruiseday_form,\r\n\t\t\t\tparticipant_form=participant_form,\r\n\t\t\t\tdocument_form=document_form,\r\n\t\t\t\tequipment_form=equipment_form,\r\n\t\t\t\tinvoice_form=invoice_form,\r\n\t\t\t\tis_NTNU=self.request.user.userdata.organization.is_NTNU,\r\n\t\t\t\tbilling_type=\"auto\",\r\n\t\t\t\tis_invalid=True,\r\n\t\t\t)\r\n\t\t)", "def clean_form_with_field_errors(original_function, self):\n \n from django.core.exceptions import ValidationError\n try:\n self.cleaned_data = self.clean()\n except ValidationError, e:\n if hasattr(e, 'message_dict'):\n for field, error_strings in e.message_dict.items():\n self._errors[field] = self.error_class(error_strings)\n else:\n self._errors[NON_FIELD_ERRORS] = self.error_class(e.messages)", "def errors(self) -> Tuple[MqexsErrorInfo, ...]:\n return self.__errors", "def semantic_validate(instance):\n unknown_templates = {}\n for name, requires in instance[\"application\"][\"requires\"].items():\n if name in instance[\"application\"][\"services\"]:\n raise ValidationError(errors=[\n \"/application/requires/{}: the name {} conflicts with service\"\n \" /application/services/{}\".format(name,\n repr(name),\n name),\n ])\n if requires[\"template\"] not in instance[\"local\"][\"templates\"]:\n unknown_templates[\"/application/requires/{}/template\".format(\n name)] = requires[\"template\"]\n for service_name, service in instance[\"application\"][\"services\"].items():\n for name, requires in service[\"requires\"].items():\n if name in 
instance[\"application\"][\"requires\"]:\n raise ValidationError(errors=[\n \"/application/services/{}/requires/{}: the name {}\"\n \" conflicts with /application/requires/{}\".format(\n service_name, name, repr(name), name)\n ])\n if requires[\"template\"] not in instance[\"local\"][\"templates\"]:\n unknown_templates[\n \"/application/services/{}/requires/{}/template\".\n format(service_name, name)] = requires[\"template\"]\n if unknown_templates:\n raise ValidationError(errors=[\n \"{}: the template {} does not exist \"\n \"in /local/templates\".format(path, repr(name))\n for (path, name) in unknown_templates.items()\n ])", "def filter_validation_errors(errors):\n error_messages = []\n for field, msgs in errors.items():\n if isinstance(msgs, dict):\n for f, m in msgs.items():\n error_messages.append(dict(\n field=f,\n message=m,\n code=error_codes['validation_error'],\n ))\n else:\n error_messages.append(dict(\n field=field,\n message=msgs,\n code=error_codes['validation_error'],\n ))\n return error_messages", "def form_invalid(self, form):\n return self.render_to_response(self.get_context_data(form=form))", "def form_invalid(self, form):\n return self.render_to_response(self.get_context_data(form=form))", "def _validate_post_fields(self, value, name, result):\n state = result.get(\"state\")\n persistent_state = result.get(\"persistent_state\")\n\n # minimal settings not related to runtime changes\n valid_fields = [\"actions\", \"ignore_errors\", \"name\", \"persistent_state\", \"state\"]\n\n # when type is present, a profile is completely specified (using\n # defaults or other settings)\n if \"type\" in result:\n valid_fields += list(self.nested.keys())\n\n # If there are no runtime changes, \"wait\" and \"force_state_change\" do\n # not make sense\n # FIXME: Maybe this restriction can be removed. 
Need to make sure that\n # defaults for wait or force_state_change do not interfer\n if not state:\n while \"wait\" in valid_fields:\n valid_fields.remove(\"wait\")\n while \"force_state_change\" in valid_fields:\n valid_fields.remove(\"force_state_change\")\n else:\n valid_fields += [\"force_state_change\", \"wait\"]\n\n # FIXME: Maybe just accept all values, even if they are not\n # needed/meaningful in the respective context\n valid_fields = set(valid_fields)\n for k in result:\n if k not in valid_fields:\n raise ValidationError(\n name + \".\" + k,\n \"property is not allowed for state '%s' and persistent_state '%s'\"\n % (state, persistent_state),\n )\n\n if \"name\" not in result:\n if persistent_state == \"absent\":\n result[\"name\"] = \"\" # set to empty string to mean *absent all others*\n else:\n raise ValidationError(name, \"missing 'name'\")\n\n # FIXME: Seems to be a duplicate check since \"wait\" will be removed from\n # valid_keys when state is considered to be not True\n if \"wait\" in result and not state:\n raise ValidationError(\n name + \".wait\",\n \"'wait' is not allowed for state '%s'\" % (result[\"state\"]),\n )\n\n result[\"state\"] = state\n result[\"persistent_state\"] = persistent_state\n\n self.VALID_FIELDS = valid_fields\n return result", "def _validate_post_fields(self, value, name, result):\n state = result.get(\"state\")\n persistent_state = result.get(\"persistent_state\")\n\n # minimal settings not related to runtime changes\n valid_fields = [\"actions\", \"ignore_errors\", \"name\", \"persistent_state\", \"state\"]\n\n # when type is present, a profile is completely specified (using\n # defaults or other settings)\n if \"type\" in result:\n valid_fields += list(self.nested.keys())\n\n # If there are no runtime changes, \"wait\" and \"force_state_change\" do\n # not make sense\n # FIXME: Maybe this restriction can be removed. 
Need to make sure that\n # defaults for wait or force_state_change do not interfer\n if not state:\n while \"wait\" in valid_fields:\n valid_fields.remove(\"wait\")\n while \"force_state_change\" in valid_fields:\n valid_fields.remove(\"force_state_change\")\n else:\n valid_fields += [\"force_state_change\", \"wait\"]\n\n # FIXME: Maybe just accept all values, even if they are not\n # needed/meaningful in the respective context\n valid_fields = set(valid_fields)\n for k in result:\n if k not in valid_fields:\n raise ValidationError(\n name + \".\" + k,\n \"property is not allowed for state '%s' and persistent_state '%s'\"\n % (state, persistent_state),\n )\n\n if \"name\" not in result:\n if persistent_state == \"absent\":\n result[\"name\"] = \"\" # set to empty string to mean *absent all others*\n else:\n raise ValidationError(name, \"missing 'name'\")\n\n # FIXME: Seems to be a duplicate check since \"wait\" will be removed from\n # valid_keys when state is considered to be not True\n if \"wait\" in result and not state:\n raise ValidationError(\n name + \".wait\",\n \"'wait' is not allowed for state '%s'\" % (result[\"state\"]),\n )\n\n result[\"state\"] = state\n result[\"persistent_state\"] = persistent_state\n\n self.VALID_FIELDS = valid_fields\n return result", "def _validate(self):\n REQUIRED_KEYS = [ 'name', 'year', 'artist_id', 'genre_ids', 'sources' ]\n\n missing_keys = get_missing_keys(self.request.data, REQUIRED_KEYS)\n if len(missing_keys) > 0:\n return f\"Request body is missing the following required properties: {', '.join(missing_keys)}.\"\n\n artist_id = self.request.data['artist_id']\n\n try:\n Artist.objects.get(pk=artist_id)\n except Artist.DoesNotExist:\n return \"`artistId` supplied does not match an existing artist.\" \n\n genre_ids = self.request.data['genre_ids']\n if len(genre_ids) == 0:\n return \"You must specify at least one genre id in `genreIds` array.\"\n\n for genre_id in genre_ids:\n try:\n Genre.objects.get(pk=genre_id)\n except Genre.DoesNotExist:\n return f\"The genre id {genre_id} does not match an existing genre.\"\n\n sources = self.request.data['sources']\n if len(sources) == 0:\n return \"You must specify at least one source in `sources` array.\"\n\n for source in sources:\n if 'service' not in source or 'url' not in source or 'is_primary' not in source:\n return \"All sources must contain `service`, `url`, and `is_primary` properties.\"\n\n primary_sources = [ source for source in sources if source['is_primary'] == True ]\n if len(primary_sources) != 1:\n return \"There must be one and only one primary source.\"\n\n return False", "def clean(self):\n super(ContentAdminForm, self).clean()\n cleaned_data = self.cleaned_data\n isDuplicate = False\n # check for duplicate slug\n title = cleaned_data.get('title')\n slug = cleaned_data.get('slug')\n pub_date = cleaned_data.get('published_date')\n body = cleaned_data.get('body_html')\n pubstatus = cleaned_data.get('status')\n story_status = cleaned_data.get('story_status')\n if pubstatus == 2 and story_status != 0:\n self._errors['story_status'] = ErrorList([mark_safe(\"Can not publish with this status.\")])\n if pubstatus == 2 and not pub_date:\n self._errors['published_date'] = ErrorList([mark_safe(\"published date can not be null.\")])\n s = len(body.split('.'))\n if s < 8 or len(body) < 300:\n self._errors['body_html'] = ErrorList([mark_safe(\"Word count minimum error\")])\n if not slug:\n slug = slugify(title)\n storyQs = Content.objects.only('id', 'slug').filter(slug=slug)\n if self.instance:\n 
storyQs = storyQs.exclude(id=self.instance.id)\n if storyQs:\n isDuplicate = True\n self._errors['title'] = ErrorList([mark_safe(\n \"\"\"<p><a href=\"/admin/content_management_system/content/%d/\" target=\"_blank\">Potential Duplicate: story with same title already exists.</a></p>\"\"\" % (\n storyQs.values_list('id', flat=True)[0]))])\n if pub_date:\n pub_date = cleaned_data.get('published_date').replace(tzinfo=None)\n d = datetime.datetime.now().replace(hour=23, minute=59, second=59, microsecond=0)\n print pub_date, d\n if pub_date:\n if pub_date > d:\n self._errors['published_date'] = ErrorList([mark_safe(\"published date can't be future date\")])\n return cleaned_data", "def clean(self):\n if any(self.errors):\n return\n\n if len(set([r.id for r in self.instance.languages.all()]\n + [f.instance.id for f in self.forms])) > self.max_forms:\n raise forms.ValidationError('Maximum number of allowed items exceeded.')\n\n names = []\n for form in self.forms:\n # This is to allow empty unsaved form\n if 'name' in form.cleaned_data:\n name = form.cleaned_data['name']\n if name in names:\n raise forms.ValidationError('Languages must be unique.')\n names.append(name)", "def ajax_form_invalid(self, form):\n\n return core_utils.respond_with_json({\n 'success': False,\n 'reason': str(form.errors)\n })", "def form_invalid(self, form):\n return self.get(self.request, form=form)", "def errors(self):\n return self.__errors", "def clean(self):\n cleaned_data = super(UserUpdateForm, self).clean()\n if cleaned_data.get(\"password\") != cleaned_data.get(\"password2\"):\n raise forms.ValidationError(\"The new passwords do not match.\")\n elif cleaned_data.get(\"password\") == cleaned_data.get(\"oldpassword\"):\n raise forms.ValidationError(\n \"The new password needs to be different from the old one.\"\n )\n\n return cleaned_data", "def test_that_view_return_errors_in_json(self):\n\n self.client.login(username='admin', password='admin')\n url = reverse(\"to_form\", args=str(self.my_instance.id))\n response = self.client.post(url, data={'name': 'Oleg'}, format='json')\n self.assertEqual(response.status_code, 200)\n for c in json.loads(response.content):\n self.assertEqual(['This field is required.'], json.loads(response.content)[c])", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages", "def get_validator_kwargs(self):\n return {\n 'schema': self.get_validation_schema(),\n }", "def validate_entries(self):\n message = ''\n if 
self.class_name.text() == '' or \\\n self.title.text() == '' or \\\n self.description.text() == '' or \\\n self.module_name.text() == '' or \\\n self.plugin_version.text() == '' or \\\n self.qgis_minimum_version.text() == '' or \\\n self.author.text() == '' or \\\n self.email_address.text() == '':\n message = (\n 'Some required fields are missing. '\n 'Please complete the form.\\n')\n try:\n # Assigning to _ is python sugar for a variable that will be unused\n _ = float(str(self.plugin_version.text()))\n _ = float(str(self.qgis_minimum_version.text()))\n except ValueError:\n message += 'Version numbers must be numeric.\\n'\n # validate plugin name\n # check that we have only ascii char in class name\n try:\n unicode(self.class_name.text()).decode('ascii')\n except UnicodeEncodeError:\n self.class_name.setText(\n unicode(\n self.class_name.text()).encode('ascii', 'ignore'))\n message += (\n 'The Class name must be ASCII characters only, '\n 'the name has been modified for you. \\n')\n # check space and force CamelCase\n if str(self.class_name.text()).find(' ') > -1:\n class_name = capwords(str(self.class_name.text()))\n self.class_name.setText(class_name.replace(' ', ''))\n message += (\n 'The Class name must use CamelCase. '\n 'No spaces are allowed; the name has been modified for you.')\n # noinspection PyArgumentList\n if message != '':\n QMessageBox.warning(\n self, 'Information missing or invalid', message)\n else:\n return True", "def to_dict_impl(cls, self: 'ErrorsAndWarnings') -> Dict[str, Any]:\n # See comment above.\n return {'errors': [e.to_dict() for e in self._errors.values() # pylint: disable=protected-access\n if e.is_persistant]}", "def validation_errors_to_error_messages(validation_errors):\n error_messages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n error_messages.append(f\"{field}: {error}\")\n return error_messages", "def handle_marshmallow_validaton(err): # except ValidationError as err\n return jsonify(err.messages), 400 # bad request", "def form_invalid(self, form, factura_form, remito_form, ot_linea_form):\n return self.render_to_response(\n self.get_context_data(form=form,\n factura_form=factura_form,\n remito_form=remito_form,\n ot_linea_form=ot_linea_form))", "def form_invalid(self, form, factura_form, remito_form, ot_linea_form):\n return self.render_to_response(\n self.get_context_data(form=form,\n factura_form=factura_form,\n remito_form=remito_form,\n ot_linea_form=ot_linea_form))", "def test_create_valiation(self):\n with self.assertRaises(ValidationError) as test:\n contact = Contact.objects.create(email='3232131')\n contact.full_clean()\n the_exception = dict(test.exception)\n self.assertEqual(the_exception['name'], ['This field cannot be blank.'])\n self.assertEqual(the_exception['email'], ['Enter a valid email address.'])", "def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f'{field} : {error}')\n return errorMessages" ]
[ "0.7247966", "0.65767854", "0.65024495", "0.6369957", "0.62522954", "0.6153304", "0.6130591", "0.6103089", "0.6074817", "0.6054684", "0.5964537", "0.5951978", "0.594963", "0.59282666", "0.59004563", "0.58859175", "0.5877393", "0.5859856", "0.58356667", "0.5771657", "0.5703528", "0.5685815", "0.56731915", "0.56563056", "0.5656086", "0.5612645", "0.5612645", "0.5612308", "0.5612098", "0.56110543", "0.5543907", "0.5539298", "0.55343205", "0.5519178", "0.55034506", "0.5487031", "0.54813516", "0.54740566", "0.545879", "0.5458027", "0.5456374", "0.54512", "0.54451174", "0.5424579", "0.5417492", "0.54068816", "0.54049325", "0.539753", "0.5389857", "0.5383416", "0.53779763", "0.53504086", "0.53465056", "0.53349286", "0.53326494", "0.53311133", "0.53230935", "0.53096855", "0.5307079", "0.5307079", "0.5307079", "0.52931935", "0.52881557", "0.52844256", "0.52796984", "0.52692395", "0.52598655", "0.5257097", "0.5245495", "0.52420753", "0.5239761", "0.52262586", "0.5225697", "0.52064145", "0.5205708", "0.5205708", "0.51998955", "0.51998955", "0.5194873", "0.51939785", "0.518704", "0.51807445", "0.51750445", "0.5170927", "0.51594216", "0.51592815", "0.515476", "0.515476", "0.515476", "0.515476", "0.515476", "0.5147836", "0.5144893", "0.51437366", "0.5143701", "0.5136677", "0.51341426", "0.51341426", "0.5130663", "0.5126403" ]
0.7545022
0
This method splits each document in the batch into chunks with the maximal length of max_chunk_len
def __call__(self, docs_batch: List[str]) -> Tuple[List[List[str]], List[List[int]]]:
    text_batch_list = []
    text_batch = []
    nums_batch_list = []
    nums_batch = []
    count_texts = 0
    text = ""
    curr_doc = 0
    for n, doc in enumerate(docs_batch):
        sentences = sent_tokenize(doc)
        for sentence in sentences:
            if len(text) + len(sentence) < self.max_chunk_len and n == curr_doc:
                text += f"{sentence} "
            else:
                if count_texts < self.batch_size:
                    text_batch.append(text.strip())
                    if n == curr_doc:
                        nums_batch.append(n)
                    else:
                        nums_batch.append(n - 1)
                    count_texts += 1
                else:
                    text_batch_list.append(text_batch)
                    text_batch = []
                    nums_batch_list.append(nums_batch)
                    nums_batch = [n]
                    count_texts = 0
                curr_doc = n
                text = f"{sentence} "
    if text:
        text_batch.append(text.strip())
        text_batch_list.append(text_batch)
        nums_batch.append(len(docs_batch) - 1)
        nums_batch_list.append(nums_batch)
    return text_batch_list, nums_batch_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_split(self, batch_text, threads=8):\n pass", "def chunks(self, list_to_chunk, size):\n for i in range(0, len(list_to_chunk), size):\n yield list_to_chunk[i:i + size]", "def split_chunk(chunk, sizes, max_iter=1000, rng=None):\n assert len(chunk) == sum(sizes), f\"{len(chunk)} != {sum(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # Precompute neighbors for each cube in the chunk\n neighbors = dict()\n for c in chunk:\n neighbors[c] = set(c.neighbors()) & set(chunk)\n for i in range(max_iter):\n result = split_chunk_iter(chunk, sizes, neighbors, rng)\n if result != None:\n return result\n raise SplitChunkMaxIterationExceeded(\"Ran out of iterations trying to split chunk\")", "def split_into_batches_of_size(self, batch_size: int) -> Iterator[List]:\n if batch_size >= len(self):\n yield type(self)(self)\n else:\n for run in range(0, len(self), batch_size):\n yield self[run:run + batch_size]", "def _split_on_chunks(self, iterable, size):\n return utils.split_on_chunks(iterable, size)", "def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]", "def __call__(self, batch_docs: List[Union[str, List[str]]]) -> \\\n List[Union[List[str], List[List[str]]]]:\n\n result = []\n\n for docs in batch_docs:\n batch_chunks = []\n if isinstance(docs, str):\n docs = [docs]\n for doc in docs:\n if self.paragraphs:\n split_doc = doc.split('\\n\\n')\n split_doc = [sd.strip() for sd in split_doc]\n split_doc = list(filter(lambda x: len(x) > 40, split_doc))\n batch_chunks.append(split_doc)\n else:\n doc_chunks = []\n if self.keep_sentences:\n sentences = sent_tokenize(doc)\n n_tokens = 0\n keep = []\n for s in sentences:\n n_tokens += len(s.split())\n if n_tokens > self.tokens_limit:\n if keep:\n doc_chunks.append(' '.join(keep))\n n_tokens = 0\n keep.clear()\n keep.append(s)\n if keep:\n doc_chunks.append(' '.join(keep))\n batch_chunks.append(doc_chunks)\n else:\n split_doc = doc.split()\n doc_chunks = [split_doc[i:i + self.tokens_limit] for i in\n range(0, len(split_doc), self.tokens_limit)]\n batch_chunks.append(doc_chunks)\n result.append(batch_chunks)\n\n if self.flatten_result:\n if isinstance(result[0][0], list):\n for i in range(len(result)):\n flattened = list(chain.from_iterable(result[i]))\n result[i] = flattened\n\n return result", "def pack_data_into_batches(self, ids):\n\n # create buckets sorted by the number of src tokens\n # each bucket is also sorted by the number of tgt tokens\n buckets = {}\n for i, line_ids in enumerate(ids):\n len_ = len(line_ids)\n if len_ not in buckets:\n buckets[len_] = [i]\n else:\n buckets[len_].append(i)\n\n for b_idx in buckets:\n buckets[b_idx] = sorted(buckets[b_idx])\n\n buckets = OrderedDict(sorted(buckets.items()))\n\n batches = []\n batch_elem_lengths = []\n curr_batch = []\n len_of_longest_sent = 0\n for sent_len, bucket in buckets.items():\n for sent_i in bucket:\n if sent_len * (len(curr_batch) + 1) > self.tokens_in_batch:\n if not curr_batch:\n raise ValueError(\n f\"The limitation on number of tokens in batch {self.tokens_in_batch} is too strong.\"\n f\"Several sentences contain {sent_len} tokens.\"\n )\n batches.append(curr_batch)\n batch_elem_lengths.append(sent_len)\n curr_batch = []\n curr_batch.append(sent_i)\n len_of_longest_sent = sent_len\n if curr_batch:\n batches.append(curr_batch)\n batch_elem_lengths.append(len_of_longest_sent)\n return batches, batch_elem_lengths", "def batches(data, batch_size) -> list:\n rv = []\n for idx, line in enumerate(data):\n if idx 
!= 0 and idx % batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv", "def chunks(sequence, chunk_size):\r\n\r\n # YOUR CODE HERE\r", "def batch_by_size(\r\n self,\r\n indices,\r\n max_tokens=None,\r\n max_sentences=None,\r\n required_batch_size_multiple=1,\r\n ):\r\n from fairseq.data import data_utils\r\n\r\n fixed_shapes = self.get_batch_shapes()\r\n if fixed_shapes is not None:\r\n\r\n def adjust_bsz(bsz, num_tokens):\r\n if bsz is None:\r\n assert max_tokens is not None, \"Must specify --max-tokens\"\r\n bsz = max_tokens // num_tokens\r\n if max_sentences is not None:\r\n bsz = min(bsz, max_sentences)\r\n elif (\r\n bsz >= required_batch_size_multiple\r\n and bsz % required_batch_size_multiple != 0\r\n ):\r\n bsz -= bsz % required_batch_size_multiple\r\n return bsz\r\n\r\n fixed_shapes = np.array(\r\n [\r\n [adjust_bsz(bsz, num_tokens), num_tokens]\r\n for (bsz, num_tokens) in fixed_shapes\r\n ]\r\n )\r\n\r\n try:\r\n num_tokens_vec = self.num_tokens_vec(indices).astype('int64')\r\n except NotImplementedError:\r\n num_tokens_vec = None\r\n\r\n return data_utils.batch_by_size(\r\n indices,\r\n num_tokens_fn=self.num_tokens,\r\n num_tokens_vec=num_tokens_vec,\r\n max_tokens=max_tokens,\r\n max_sentences=max_sentences,\r\n required_batch_size_multiple=required_batch_size_multiple,\r\n fixed_shapes=fixed_shapes,\r\n )", "def split_to_chunks(of_list, chunk_size):\n assert of_list is not None\n\n for i in range(0, len(of_list), chunk_size):\n yield of_list[i:i + chunk_size]", "def split_and_batch(data_loader, \n batch_size, \n doclength,\n h5_path,\n rng_seed=888,\n normalizer_fun=data_utils.normalize,\n transformer_fun=data_utils.to_one_hot,\n balance_labels=False,\n max_records=None):\n data_batches = batch_data(data_loader, batch_size,\n normalizer_fun=normalizer_fun,\n transformer_fun=None,\n max_records=max_records,\n balance_labels=balance_labels,\n nlabels=2)\n (_, _), (train_size, test_size) = split_data(data_batches, \n h5_path, overwrite_previous=False, rng_seed=rng_seed)\n def train_batcher():\n (a,b),(a_size,b_size)=split_data(None, h5_path=h5_path, overwrite_previous=False, shuffle=True)\n return batch_data(a,\n normalizer_fun=lambda x: x,\n transformer_fun=transformer_fun,\n flatten=True,\n batch_size=batch_size)\n def test_batcher():\n (a,b),(a_size,b_size)=split_data(None, h5_path, overwrite_previous=False,shuffle=False)\n return batch_data(b,\n normalizer_fun=lambda x: x,\n transformer_fun=transformer_fun,\n flatten=True,\n batch_size=batch_size)\n\n return (train_batcher, test_batcher), (train_size, test_size)", "def chunks(lst, chunk_size=MAX_BATCH_SIZE):\n for i in range(0, len(lst), chunk_size):\n yield lst[i : i + chunk_size]", "def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]", "def split_into_batches(self, num_batches: int) -> Iterator[List]:\n if num_batches >= len(self):\n yield type(self)(self)\n else:\n batch_size, final_batch_size_extra = len(self) // num_batches, len(self) % num_batches\n\n if not final_batch_size_extra:\n for run in range(0, len(self), batch_size):\n yield self[run:run + batch_size]\n else:\n for run in range(0, (final_batch_position := batch_size*(num_batches - 1)), batch_size):\n yield self[run:run + batch_size]\n\n yield self[final_batch_position:]", "def iter_chunks(sequence, chunk_size) :\n res = []\n for item in sequence :\n res.append(item)\n if len(res) >= chunk_size :\n yield res\n res = []\n if res : yield res", "def 
generate_batch_doc2VecC_tail(doc_ids, word_ids, doc_len, batch_size, window_size, sample_size):\n data_index = 0\n assert batch_size % window_size == 0\n span = window_size + 1\n buffer = collections.deque(maxlen=span)\n buffer_doc = collections.deque(maxlen=span)\n batches = np.ndarray(shape=(batch_size, window_size + 1), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n batch_doc = np.ndarray(shape=(batch_size, sample_size), dtype=np.int32)\n mask = [1] * span\n mask[-1] = 0\n i = 0\n\n while data_index < len(word_ids):\n if len(set(buffer_doc)) == 1 and len(buffer_doc) == span:\n doc_id = buffer_doc[-1]\n batches[i, :] = list(compress(buffer, mask)) + [doc_id]\n labels[i, 0] = buffer[-1]\n batch_doc[i, :] = random.sample(word_ids[doc_len[doc_id]:doc_len[doc_id + 1]],\n sample_size)\n i += 1\n buffer.append(word_ids[data_index])\n buffer_doc.append(doc_ids[data_index])\n data_index = (data_index + 1) % len(word_ids)\n if i == batch_size:\n yield batches, labels, batch_doc", "def chunkize_serial(iterable, chunksize, as_numpy=False):\n\n it = iter(iterable)\n while True:\n if as_numpy:\n # convert each document to a 2d numpy array (~6x faster when transmitting\n # chunk data over the wire, in Pyro)\n wrapped_chunk = [[np.array(doc) for doc in itertools.islice(it, int(chunksize))]]\n else:\n wrapped_chunk = [list(itertools.islice(it, int(chunksize)))]\n if not wrapped_chunk[0]:\n break\n # memory opt: wrap the chunk and then pop(), to avoid leaving behind a dangling reference\n yield wrapped_chunk.pop()", "def get_chunks(sequence, window_size, step=1):\n k = len(sequence)\n for i in range(0, k - window_size + 1, step):\n end = i + window_size\n chunk = sequence[i:i + window_size]\n assert len(chunk) == window_size\n yield chunk, end", "def chunkize_serial(iterable, chunksize, as_numpy=False, dtype=np.float32):\n it = iter(iterable)\n while True:\n if as_numpy:\n # convert each document to a 2d numpy array (~6x faster when transmitting\n # chunk data over the wire, in Pyro)\n wrapped_chunk = [[np.array(doc, dtype=dtype) for doc in itertools.islice(it, int(chunksize))]]\n else:\n wrapped_chunk = [list(itertools.islice(it, int(chunksize)))]\n if not wrapped_chunk[0]:\n break\n # memory opt: wrap the chunk and then pop(), to avoid leaving behind a dangling reference\n yield wrapped_chunk.pop()", "def _split_in_chunks(lst: Sequence[Any], chunksize: int) -> Iterator[Sequence[Any]]:\n for i in range(0, len(lst), chunksize):\n yield lst[i:i + chunksize]", "def __divide_into_batches(self):\n print('Creating batches for parallel execution')\n num_suites = len(self.execution_file_json['suites'])\n full_batches = num_suites // self.max_suites\n print('- Full batches=%s' % full_batches)\n if num_suites % self.max_suites > 0:\n has_partial = True\n else:\n has_partial = False\n print('- Partial batch at end: %s' % has_partial)\n if has_partial:\n total_batches = full_batches + 1\n else:\n total_batches = full_batches\n print('- %s suites will be divided into %s container batches using max suites %s' % (\n num_suites, total_batches, self.max_suites))\n self.suite_batches = []\n # split full batches\n for batch_counter in range(0, full_batches):\n start_index = batch_counter * self.max_suites\n batch = []\n for counter in range(start_index, start_index + self.max_suites):\n batch.append(self.execution_file_json['suites'][counter])\n self.suite_batches.append(batch)\n print('- full batches created', self.suite_batches)\n # add partial batch\n if has_partial:\n start_index = 
full_batches * self.max_suites\n batch = []\n for counter in range(start_index, num_suites):\n batch.append(self.execution_file_json['suites'][counter])\n self.suite_batches.append(batch)\n print('- partial batch created', self.suite_batches)", "def divide_list_in_chunks(self, elements, chunk_size):\n if len(elements) == 0:\n yield []\n for i in range(0, len(elements), chunk_size):\n yield elements[i:i + chunk_size]", "def batches(iterable, size):\n sourceiter = iter(iterable)\n while True:\n batchiter = islice(sourceiter, size)\n yield chain([next(batchiter)], batchiter)", "def batches(self, batch_size, count):\n entries = self.entries()\n for _ in range(count):\n yield [next(entries) for _ in range(batch_size)]", "def split_iterable_to_batches(iterable):\n iterable_length = len(iterable)\n batch_size = int(ceil(iterable_length/NUM_THREADS))\n for i in range(0, iterable_length, batch_size):\n yield iterable[i:i + batch_size]", "def batch_by_size(iterable, max_buffer=20000):\n all_batches = []\n current_batch = []\n current_size = 0\n\n for next_item in iterable:\n # An approximated way to determine size\n next_size = len(str(next_item))\n expected_total_size = current_size + next_size\n\n if next_size > max_buffer:\n raise BufferExceedError('Buffer exceeded')\n\n elif expected_total_size > max_buffer:\n # If expected to exceed max size, then current batch is finalized\n all_batches.append(current_batch)\n current_batch = [next_item]\n current_size = next_size\n\n else:\n # Else add current set of instructions to current batch\n current_batch.append(next_item)\n current_size = expected_total_size\n\n # Group remaining instructions as a single batch\n if len(current_batch) > 0:\n all_batches.append(current_batch)\n\n return all_batches", "def get_chunks(sequence, window_size, step=1):\n # get the sequence length\n k = len(sequence)\n # get the index for each end and chunk\n for i in range(0, k - window_size + 1, step):\n # generate the end of the window\n end = i + window_size\n # get the slice of the sequence\n chunk = sequence[i:i + window_size]\n # assure the the chunk is the expected size\n assert len(chunk) == window_size\n yield chunk, end", "def chunkify(iterable, chunk_size):\n _it = iter(iterable)\n while True:\n batch = islice(_it, chunk_size)\n yield chain([batch.__next__()], batch)", "def chunk(items, chunk_size):\n start_index = 0\n for start_index in xrange(0, len(items), chunk_size):\n end_index = min(start_index+chunk_size, len(items))\n yield items[start_index:end_index]", "def batchify(l, n):\n n = min(len(l), n)\n n = max(1, n)\n chunksize = int(math.ceil(len(l) / n))\n\n for i in range(0, len(l), chunksize):\n # Create an index range for l of chunksize items:\n yield l[i:i + chunksize]", "def get_chunks(self,file_size):\n chunk_start = 0\n chunk_size = 0xA00000 # 10485760 bytes, default max ssl buffer size\n while chunk_start + chunk_size <= file_size:\n yield(chunk_start, chunk_size)\n chunk_start += chunk_size\n final_chunk_size = file_size - chunk_start\n yield(chunk_start, final_chunk_size)", "def embed_documents(\n self, texts: List[str], chunk_size: Optional[int] = 0\n ) -> List[List[float]]:\n # handle batches of large input text\n if self.embedding_ctx_length > 0:\n return self._get_len_safe_embeddings(texts, engine=self.document_model_name)\n else:\n results = []\n _chunk_size = chunk_size or self.chunk_size\n for i in range(0, len(texts), _chunk_size):\n response = embed_with_retry(\n self,\n input=texts[i : i + _chunk_size],\n 
engine=self.document_model_name,\n )\n results += [r[\"embedding\"] for r in response[\"data\"]]\n return results", "def chunks(sequence: Iterable[T], chunk_size: int = 2) -> Iterable[List[T]]:\n lsequence = list(sequence)\n while lsequence:\n size = min(len(lsequence), chunk_size)\n yield lsequence[:size]\n lsequence = lsequence[size:]", "def batch_split(self) -> np.array:\n pass", "def split_chunks(item_list, num_items_in_list):\n for item in range(0, len(item_list), num_items_in_list):\n # Create an index range for item_list of num_items_in_list items:\n yield item_list[item:item + num_items_in_list]", "def split_train_into_chunks(chunk_size):\n for syscall_type in SYSCALLS:\n syscalls_split_file = open(f\"{TEMP_DIR}/{syscall_type}-split.train\", \"w\")\n snd_train_path = f\"{FILE_PATH}/{syscall_type}/{syscall_type}.train\"\n with open(snd_train_path) as train_file:\n for syscall in train_file:\n # Generate all n-grams of the current syscall\n n_grams = extract_n_grams(syscall.strip(),chunk_size,unique=True)\n if len(n_grams)==0:\n continue\n # Write n-grams to syscall chunks file\n syscalls_split_file.writelines(n_grams)\n syscalls_split_file.close()", "def prepare_batches(self, data):\n batches = []\n start, end = 0, 100\n if len(data) > 100:\n while True:\n data_batch = data[start:end]\n if not data_batch:\n break\n temp = end + 100\n start, end = end, temp\n if data_batch:\n batches.append(data_batch)\n else:\n batches.append(data)\n return batches", "def process_sentence_chunks(\n ds: MMapRetrievalIndexedDataset,\n tokenizer,\n chunk_size: int,\n stage: int,\n workers: int,\n shard_id: int,\n total_shards: int,\n):\n total_chunks = ds.chunks\n start = 0\n threshold = 0\n\n if stage == 1:\n start, total_chunks = calculate_start_end(\n total_chunks=total_chunks, total_shards=total_shards, shard_id=shard_id\n )\n logging.info(f'shard_id {shard_id}, create index from chunk {start} to {total_chunks}')\n\n with Pool(workers) as p:\n while start < total_chunks:\n if start / total_chunks > threshold:\n logging.info(f\"sentence processing {start / total_chunks} is done\")\n threshold += 0.1\n slice_id = (start, min(start + chunk_size, total_chunks))\n beg = time.time()\n id_slices = ds.get_chunk(slice(*slice_id), force_no_cont_ids=True)\n end = time.time()\n logging.info(f\"load {chunk_size} chunks takes {end-beg}\")\n start = min(start + chunk_size, total_chunks)\n sentences = p.map(tokenizer.ids_to_text, id_slices)\n end2 = time.time()\n logging.info(f\"tokenize {chunk_size} chunks takes {end2-end}\")\n queue.put((sentences, slice_id))\n queue.put((None, None))", "def split_lod_by_item(lod, max_items=10000):\n max_items = min(max_items, SF_BULK_MAX_ITEM)\n files = []\n for i in range(0, len(lod), max_items):\n files.append(lod[i:i + max_items])\n return files", "def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]", "def corpus2chunks(corpus_fname: Union[str, Path], n: int):\n with open(corpus_fname) as f:\n out = f.read()\n print(\"Starting spacy processing of document\")\n doc = nlp(out)\n print(\"Finished spacy processing.\")\n sentences = [s.text for s in doc.sents]\n\n chunked_sents = [\n remove_newlines_and_spaces(\" \".join(b)) for b in batch_list(sentences, n)\n ]\n return chunked_sents", "def chunks(lst, chunk_size):\n for i in range(0, len(lst), chunk_size):\n yield lst[i:i + chunk_size]", "def batchify(self, i, iterator):\n print(f'Starting Batch {i}')\n iterator 
= [item.strip() for item in iterator]\n max_length = self.max_seq_length - 2 # for special tokens\n\n batches = []\n n = len(iterator)\n sentence_count = 0\n index_start = 0\n index_stop = 0\n\n while index_stop < n:\n if (len(self.tokenizer.encode(' '.join(iterator[index_start:index_stop+1])).tokens) < max_length):\n index_start += 1\n index_stop += 1\n while (len(self.tokenizer.encode(' '.join(iterator[index_start:index_stop+1])).tokens) < max_length) and (index_stop<n):\n index_stop += 1\n batches.append(iterator[index_start:index_stop])\n index_start = index_stop\n print(f'Batch {i} Done')\n return batches", "def yield_batches(self, texts):\n batch = []\n for text in self._iter_texts(texts):\n batch.append(text)\n if len(batch) == self.batch_size:\n yield batch\n batch = []\n\n if batch:\n yield batch", "def get_chunks(sequence, chunk_size):\n seq_length = len(sequence)\n seq_list = []\n treshold = int(seq_length) // int(chunk_size)\n if treshold <4:\n raise ValueError(\"Change chunk size\")\n for i in range(treshold):\n seq = sequence[i*chunk_size:(i+1)*chunk_size]\n seq_list.append(seq)\n return seq_list", "def batch_size(self) -> int:\n ...", "def chunk(lst, chunk_len):\n\n for index in range(0, len(lst), chunk_len):\n yield lst[index:index + chunk_len]", "def get_chunks(sequence, ck_size):\n \n list_chunk = []\n i=1\n l = len(sequence)\n if l < 4*ck_size:\n raise ValueError(\"Chunk size should be of 4 at least \")\n for i in range(1, l):\n if i*ck_size < l:\n list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #while(i*ck_size < l):\n #list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #i += 1\n return list_chunk", "def create_batches(self, batch_size: int, repeat: bool, drop_last: bool, device: Device) -> None:\n self.repeat = repeat\n\n # Work out how cleanly we can divide the dataset into batch-sized parts\n num_batched_steps = self.indexed_corpus.shape[0] // batch_size\n\n # Trim off any extra elements that wouldn't cleanly fit (remainders)\n self.indexed_corpus = self.indexed_corpus.narrow(0, 0, num_batched_steps * batch_size)\n\n # Evenly divide the data across the bsz batches.\n raw_batches = self.indexed_corpus.view(batch_size, -1).t().contiguous().to(device)\n\n # If the last batch would be too short and drop_last is true, remove it\n if num_batched_steps % self.seq_len > 0 and drop_last:\n num_batched_steps -= num_batched_steps % self.seq_len\n\n self.num_batches = math.ceil(num_batched_steps / self.seq_len)\n\n self.batches = [raw_batches[n * self.seq_len: (n + 1) * self.seq_len + 1, :] for n in range(self.num_batches)]", "def in_memory_rechunk(\n inputs: List[Tuple[core.ChunkKey, xarray.Dataset]],\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n key, dataset = consolidate_chunks(inputs)\n yield from split_chunks(key, dataset, target_chunks)", "def batch(iterable, max_size, action, prepare_element=None):\n batch_range = range(max_size)\n iterable = iter(iterable)\n ret = []\n last = False\n while not last:\n batch = []\n for i in batch_range:\n try:\n elm = iterable.next()\n except StopIteration:\n last = True\n break\n if prepare_element is not None:\n elm = prepare_element(elm)\n batch.append(elm)\n if batch:\n elm2 = action(batch)\n ret.append(elm2)\n return ret", "def chunk(list, chunksize):\n for i in range(0, len(list), chunksize):\n yield list[i:i + chunksize]", "def chunked(size, source):\n for i in range(0, len(source), size):\n yield source[i : i + size]", "def chunked_insert(model, items, 
chunk_size=150):\n # https://www.sqlite.org/limits.html#max_compound_select\n with db.atomic():\n for idx in range(0, len(items), chunk_size):\n model.insert_many(items[idx:idx+chunk_size]).execute()", "def chunks(iterator, size):\n for index in range(0, len(iterator), size):\n yield iterator[index:index + size]", "def batch_iterator(iterator, batch_size):\n entry = True # Make sure we loop once\n while entry:\n batch = []\n while len(batch) < batch_size:\n try:\n entry = next(iterator) ##had to change this for python3\n except StopIteration:\n entry = None\n if entry is None:\n # End of file\n break\n batch.append(entry)\n if batch:\n yield batch", "def _get_chunk_data(self, inputs: Iterable, chunk_size: int):\n inputs_iter = iter(inputs)\n while True:\n try:\n chunk_data = []\n for _ in range(chunk_size):\n processed_data = next(inputs_iter)\n chunk_data.append(processed_data)\n yield chunk_data\n except StopIteration:\n if chunk_data:\n yield chunk_data\n break", "def _chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def yield_chunks(arr, chunk_size):\r\n larr = len(arr)\r\n if larr < chunk_size:\r\n raise ValueError(\"The array length (%d) must be larger than the chunk size (%d)\" % (len(arr), chunk_size))\r\n\r\n cursor = 0\r\n while cursor < larr:\r\n next_cursor = min(cursor + chunk_size, larr)\r\n yield arr[cursor:next_cursor]\r\n cursor = next_cursor", "def multiple_document_processing(self) -> List:\n batch_list = []\n for doc, idx in self.__documents:\n entities_idx = {'idx': idx}\n entities_result = self.create_entity(document=doc)\n word_cleaned = self.clean_words(doc)\n entities_idx[self.key_spacy_text] = str(word_cleaned)\n entities_idx.update(entities_result)\n batch_list.append(entities_idx)\n return batch_list", "def batch_generator(id_sent_tsv: Path,\n batch_size: int = 128 * 128) -> GEN_BATCH:\n ids = list()\n sents = list()\n with open(p.abspath(id_sent_tsv)) as tsv:\n for line in tsv:\n sent_id, sent_text = str(line).replace('\\n', '').split('\\t')\n ids.append(int(sent_id)), sents.append(str(sent_text))\n\n while len(sents):\n yield (list(ids[:batch_size]), list(sents[:batch_size]))\n ids, sents = list(ids[batch_size:]), list(sents[batch_size:])\n gc.collect()", "def chunks(seq: Sequence[T], n: int = 1000) -> Sequence[Sequence[T]]:\n # we batch 1000 sql commands instead of 10. 
10 was extremely slow\n for i in range(0, len(seq), n):\n yield seq[i:i + n]", "def get_batches(int_text, batch_size, n_steps):\n # todo 需要编程:\n # 计算有多少个批量\n n_batches = len(int_text) // (batch_size * n_steps)\n\n origin_x = np.array(int_text[:n_batches * batch_size * n_steps])\n origin_y = np.array(int_text[1: n_batches * batch_size * n_steps + 1])\n\n # 对origin重塑\n x_reshaped = np.reshape(origin_x, newshape=[batch_size, -1])\n y_reshaped = np.reshape(origin_y, newshape=[batch_size, -1])\n\n batch_reshape_x = np.split(x_reshaped, n_batches, axis=1)\n batch_reshape_y = np.split(y_reshaped, n_batches, axis=1)\n\n batches = np.array(list(zip(batch_reshape_x, batch_reshape_y)))\n return batches", "def get_chunks(self, chunk_size, max_chunks=None):\n from .helpers import get_chunks as chunker\n return chunker(self, chunk_size, max_chunks=max_chunks)", "def split_to_batches(iterable, n=1):\n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)]", "def make_chunks(l, chunk_length):\n for i in range(0, len(l), chunk_length):\n yield l[i:i + chunk_length]", "def batch_gen():\n i = 0\n while len(all_sentences) - i >= batch_size:\n # TODO this is a mess...\n yield np.stack([\n np.pad(\n np.stack(\n [embeddings[id]\n for id in sentence[:max_sentence_length]]), [[\n 0, max_sentence_length -\n min(len(sentence), max_sentence_length)\n ], [0, 0]],\n 'constant',\n constant_values=0)\n for sentence in all_sentences[i:i + batch_size]\n ])\n\n i += batch_size", "def get_chunks(list_object, chunk_size):\n size = len(list_object)\n if size <= chunk_size:\n yield list_object\n else:\n chunks_nb = math.ceil(size / chunk_size)\n iter_ints = range(0, chunks_nb)\n for i in iter_ints:\n j = i * chunk_size\n if i + 1 < chunks_nb:\n k = j + chunk_size\n yield list_object[max(j - 1, 0):k]\n else:\n yield list_object[max(j - 1, 0):]", "def gen_batches(data, batch_size):\n data = np.array(data)\n\n for i in range(0, data.shape[0], batch_size):\n yield data[i:i+batch_size]", "def next_batch(self, batch_size):\r\n raise NotImplementedError", "def _get_chunks(l, n = 10):\n \n for i in range(0, len(l), n): yield l[i: i + n]", "def chunks(self, big_list, n):\n for i in range(0, len(big_list), n):\n yield big_list[i:i + n]", "def split_to_batches(self, train_data, batch_size):\n num_of_training_examples = len(train_data)\n for i in range(0, num_of_training_examples, batch_size):\n x, y = zip(*train_data[i: i+batch_size])\n yield np.vstack(x), np.vstack(y)", "def split_chunk(chunk: List) -> List[List]:\n encoded = urlencode({'queries': json.dumps(chunk)})\n if len(encoded) > URL_MAX_LENGTH:\n # Split chunk in half to avoid HTTP 414 error.\n mid = len(chunk) // 2\n left, right = chunk[:mid], chunk[mid:]\n # Recurse in case either half is still too long.\n return flatten([split_chunk(left), split_chunk(right)])\n else:\n return [chunk]", "def __chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def get_chunks(vals, size):\n for i in range(0, len(vals), size):\n yield vals[i:i + size]", "def split_large_groups(ctx):\n asyncio.run(split_large_groups_impl(ctx.obj[\"config\"]))", "def get_batches(int_text, batch_size, seq_length):\n n_batches = len(int_text) // (batch_size * seq_length)\n len_int_text = n_batches * (batch_size*seq_length)\n \n x = np.array(int_text[: len_int_text])\n y = np.hstack((np.array(int_text[1: len_int_text]) , np.array(int_text[0]))) #np.hstack()水平合并\n \n x_batches = np.split(x.reshape(batch_size, -1), n_batches, -1)\n y_batches = 
np.split(y.reshape(batch_size, -1), n_batches, -1)\n \n all_batches= np.array(list(zip(x_batches, y_batches)))\n return all_batches", "def iter_chunks(iterable, size):\n it = iter(iterable)\n while True:\n chunk = tuple(itertools.islice(it, size))\n if len(chunk) == 0:\n break\n yield chunk", "def get_batches(summaries, texts, batch_size):\r\n for batch_i in range(0, len(texts)//batch_size):\r\n start_i = batch_i * batch_size\r\n summaries_batch = summaries[start_i:start_i + batch_size]\r\n texts_batch = texts[start_i:start_i + batch_size]\r\n pad_summaries_batch = np.array(pad_sentence_batch(summaries_batch))\r\n pad_texts_batch = np.array(pad_sentence_batch(texts_batch))\r\n \r\n # Need the lengths for the _lengths parameters\r\n pad_summaries_lengths = []\r\n for summary in pad_summaries_batch:\r\n pad_summaries_lengths.append(len(summary))\r\n \r\n pad_texts_lengths = []\r\n for text in pad_texts_batch:\r\n pad_texts_lengths.append(len(text))\r\n \r\n yield pad_summaries_batch, pad_texts_batch, pad_summaries_lengths, pad_texts_lengths", "def _chunks(l, n):\n\tfor i in range(0, len(l), n):\n\t\tyield l[i:i + n]", "def test_create_chunks():\n items = list(range(0, 100))\n size = 3\n\n chunks = create_chunks(items, size)\n\n current = next(chunks)\n assert len(current) == size\n assert current == [0, 1, 2]\n\n current = next(chunks)\n assert current == [3, 4, 5]", "def _batching_scheme(self,\n batch_size,\n max_length,\n min_length_bucket,\n length_bucket_step,\n drop_long_sequences=False,\n shard_multiplier=1,\n length_multiplier=1):\n max_length = max_length or batch_size\n boundaries = self._bucket_boundaries(max_length, min_length_bucket, length_bucket_step)\n boundaries = [boundary * length_multiplier for boundary in boundaries]\n max_length *= length_multiplier\n batch_sizes = [max(1, batch_size // length) for length in boundaries + [max_length]]\n max_batch_size = max(batch_sizes)\n # Since the Datasets API only allows a single constant for window_size,\n # and it needs divide all bucket_batch_sizes, we pick a highly-compoisite\n # window size and then round down all batch sizes to divisors of that window\n # size, so that a window can always be divided evenly into batches.\n # TODO: remove this when Dataset API improves.\n highly_composite_numbers = [\n 1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680, 2520, 5040, 7560,\n 10080, 15120, 20160, 25200, 27720, 45360, 50400, 55440, 83160, 110880, 166320, 221760,\n 277200, 332640, 498960, 554400, 665280, 720720, 1081080, 1441440, 2162160, 2882880, 3603600,\n 4324320, 6486480, 7207200, 8648640, 10810800, 14414400, 17297280, 21621600, 32432400,\n 36756720, 43243200, 61261200, 73513440, 110270160\n ]\n window_size = max([i for i in highly_composite_numbers if i <= 3 * max_batch_size])\n divisors = [i for i in xrange(1, window_size + 1) if window_size % i == 0]\n batch_sizes = [max([d for d in divisors if d <= bs]) for bs in batch_sizes]\n window_size *= shard_multiplier\n batch_sizes = [bs * shard_multiplier for bs in batch_sizes]\n max_batches_per_window = window_size // min(batch_sizes)\n shuffle_queue_size = max_batches_per_window * 3\n ret = {\n \"boundaries\": boundaries,\n \"batch_sizes\": batch_sizes,\n \"max_length\": (max_length if drop_long_sequences else 10**9),\n \"shuffle_queue_size\": shuffle_queue_size,\n \"window_size\": window_size,\n }\n return ret", "def batch(self, lo=None, hi=None, max_recs=None, max_bytes=None,\n preserve=True, packer=None, txn=None, max_phys=None,\n grouper=None):\n assert 
max_bytes or max_recs, 'max_bytes and/or max_recs is required.'\n txn = txn or self.engine\n packer = packer or self.packer\n it = self._iter(txn, None, lo, hi, False, None, True, max_phys)\n groupval = None\n items = []\n\n for batch, key, data in it:\n if preserve and batch:\n self._write_batch(txn, items, packer)\n else:\n txn.delete(encode_keys(self.prefix, key))\n items.append((key, data))\n if max_bytes:\n _, encoded = self._prepare_batch(items, packer)\n if len(encoded) > max_bytes:\n items.pop()\n self._write_batch(txn, items, packer)\n items.append((key, data))\n done = max_recs and len(items) == max_recs\n if (not done) and grouper:\n val = grouper(self.encoder.unpack(data))\n done = val != groupval\n groupval = val\n if done:\n self._write_batch(txn, items, packer)\n self._write_batch(txn, items, packer)", "def split_into_batches(elements, period_size, sample_size):\r\n elements.reverse()\r\n batches = []\r\n current_batch = []\r\n target_counter = 0\r\n is_period_target = True\r\n while elements:\r\n target = period_size if is_period_target else sample_size\r\n e = elements.pop()\r\n current_batch.append(e)\r\n target_counter += 1\r\n if target_counter == target:\r\n target_counter = 0\r\n is_period_target = not is_period_target\r\n batches.append(current_batch)\r\n current_batch = []\r\n if current_batch:\r\n batches.append(current_batch)\r\n return batches", "def batch_iterator(iterator, batch_size):\n entry = True # Make sure we loop once\n while entry:\n batch = []\n while len(batch) < batch_size:\n try:\n entry = next(iterator)\n except StopIteration:\n entry = None\n if entry is None:\n # End of file\n break\n batch.append(entry)\n if batch:\n yield batch", "async def split_large_groups_impl(config):\n async with create_sessionmaker(config)() as dbsession:\n progress = ClickIndeterminate(\"Splitting large groups\")\n progress.start()\n splitting = True\n stmt = select(Group).options(selectinload(Group.items), selectinload(Group.children))\n while splitting:\n splitting = False\n result = await dbsession.execute(stmt)\n for group in result.scalars():\n if len(group.children) == 0:\n if len(group.items) > 120 and len(group.items) < 300: # noqa: PLR2004\n if split_by_year(config, dbsession, group):\n splitting = True\n else:\n split_by_similarity(dbsession, group)\n splitting = True\n elif len(group.items) >= 300: # noqa: PLR2004\n if split_by_attribute(dbsession, group, \"concepts\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"subjects\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"materials\"):\n splitting = True\n elif split_by_attribute(dbsession, group, \"techniques\"):\n splitting = True\n elif split_by_year(config, dbsession, group):\n splitting = True\n else:\n split_by_similarity(dbsession, group)\n splitting = True\n await dbsession.commit()\n progress.stop()", "def chunk(size, seq):\n if not isinstance(size, int) or size <= 0: # pragma: no cover\n raise ValueError(\"size must be an integer greater than zero\")\n\n group = []\n\n for item in seq:\n if len(group) >= size:\n yield group\n group = []\n group.append(item)\n\n if group:\n yield group", "def iter_chunks(chunksize, *iterables):\n iterables = iter(zip(*iterables))\n\n while 1:\n chunk = tuple(islice(iterables, chunksize))\n\n if not chunk:\n return\n\n yield chunk", "def batch(iterable, size):\n sourceiter = iter(iterable)\n while True:\n batchiter = islice(sourceiter, size)\n yield list(chain([batchiter.next()], batchiter))", "def get_batches(summaries, texts, 
batch_size):\n for batch_i in range(0, len(texts)//batch_size):\n start_i = batch_i * batch_size\n summaries_batch = summaries[start_i:start_i + batch_size]\n texts_batch = texts[start_i:start_i + batch_size]\n pad_summaries_batch = np.array(pad_sentence_batch(summaries_batch))\n pad_texts_batch = np.array(pad_sentence_batch(texts_batch))\n \n # Need the lengths for the _lengths parameters\n pad_summaries_lengths = []\n for summary in pad_summaries_batch:\n pad_summaries_lengths.append(len(summary))\n \n pad_texts_lengths = []\n for text in pad_texts_batch:\n pad_texts_lengths.append(len(text))\n \n yield pad_summaries_batch, pad_texts_batch, pad_summaries_lengths, pad_texts_lengths", "def preprocess_split(self, input_dataset, last_id, num_sents, max_sent_len, prefix_id = \"\"):\n dataset = []\n for sent in input_dataset[last_id:]:\n last_id += 1\n if type(sent) == tuple or len(sent) > max_sent_len or len(sent) <= 1:\n continue\n dataset.append(self.preprocess_sent(sent, prefix_id + str(len(dataset))))\n if len(dataset) == num_sents:\n break\n\n return dataset, last_id", "def _string_to_chunks(text, **kwargs):\n text_limit = kwargs.get('text_limit', 1024)\n lines = \"\"\n for line in text:\n if len(lines) + len(line) < text_limit:\n lines += line\n else:\n yield lines\n lines = line[0:text_limit]\n else:\n yield lines", "def Docu_Size_Normalization(batch, batch_len, pad_token, batch_size):\n padded_batch = []\n max_doc_len = max(batch_len)\n need_more_list = []\n\n for doc in batch:\n pos_doc = doc[0]\n neg_doc = doc[1]\n\n need_more = max_doc_len-len(pos_doc)\n if need_more==0:\n continue\n else:\n padding_array = ['<pad>']\n need_more_list.append(need_more)\n for i in range(need_more):\n pos_doc.append(padding_array)\n neg_doc.append(padding_array)\n\n return batch", "def chunk(max_elems = 8192, dtype = numpy.float64):\n\n @filters\n def _dagpype_internal_fn_act(target):\n assert max_elems > 0\n dtype_ = dtype\n\n l = []\n try:\n while True:\n while len(l) < max_elems:\n l.append((yield))\n target.send(numpy.array(l, dtype = dtype_))\n l = []\n except GeneratorExit:\n if len(l) > 0:\n target.send(numpy.array(l, dtype = dtype_)) \n \n return _dagpype_internal_fn_act", "def batch_iterator(iterator, batch_size):\n entry = True # Make sure we loop once\n while entry:\n batch = []\n while len(batch) < batch_size:\n try:\n entry = iterator.__next__()\n except StopIteration:\n entry = None\n if entry is None:\n # End of file\n break\n batch.append(entry)\n if batch:\n yield batch", "def chunks(data: List[Any], num: int) -> Generator[List[Any], None, None]:\n for i in range(0, len(data), num):\n yield data[i : i + num]", "def batch_chunks(exp_chunks):\n import numpy as np\n batch_idx = np.array([chunk[0]['batch_id'] for chunk in exp_chunks])\n unique_batch_idx = np.unique(batch_idx)\n ids_per_array = [np.where(batch_idx == array_bidx)[0] for array_bidx in unique_batch_idx]\n exp_arrays = [[exp_chunks[idx] for idx in chunk_ids] for chunk_ids in ids_per_array]\n return exp_arrays", "def get_chunks(size):\n chunk_start = 0\n chunk_size = 0x20000\n\n while chunk_start + chunk_size < size:\n yield (chunk_start, chunk_size)\n chunk_start += chunk_size\n if chunk_size < 0x100000:\n chunk_size += 0x20000\n\n if chunk_start < size:\n yield (chunk_start, size - chunk_start)" ]
[ "0.67054933", "0.6599035", "0.6590152", "0.6497465", "0.6494132", "0.6458296", "0.6434048", "0.63691735", "0.6354726", "0.6328011", "0.6231897", "0.62225235", "0.61968756", "0.6193412", "0.61856794", "0.61689913", "0.6143832", "0.61328125", "0.61289346", "0.6128609", "0.6125625", "0.611865", "0.61160487", "0.6108773", "0.6099811", "0.60996324", "0.60630894", "0.60593766", "0.60564476", "0.60467994", "0.60250247", "0.60187745", "0.60112923", "0.60101646", "0.6004977", "0.5999154", "0.5997342", "0.5991571", "0.5986733", "0.5985229", "0.59828776", "0.5976041", "0.5972554", "0.5961092", "0.59533095", "0.59476405", "0.5941512", "0.59370184", "0.5915685", "0.59128195", "0.58779746", "0.58763945", "0.5874279", "0.58729404", "0.5871238", "0.58699286", "0.5863662", "0.58560157", "0.58556724", "0.5854706", "0.58523965", "0.5848359", "0.58476424", "0.5847488", "0.58446294", "0.58416486", "0.5836187", "0.583381", "0.58323485", "0.58268535", "0.58195084", "0.58122593", "0.58111006", "0.5808281", "0.5800185", "0.579435", "0.57853705", "0.57852954", "0.578481", "0.5780907", "0.5778481", "0.57710814", "0.5767917", "0.5765363", "0.5764287", "0.5763578", "0.57629067", "0.5762628", "0.57606363", "0.575894", "0.57549995", "0.5754863", "0.57522964", "0.57494694", "0.5747607", "0.574697", "0.57439226", "0.5742548", "0.5742515", "0.5740862", "0.573862" ]
0.0
-1
Chooses a BoTorch `Model` using the given data.
def choose_model_class(
    Xs: List[Tensor],
    Ys: List[Tensor],
    Yvars: List[Tensor],
    task_features: List[int],
    fidelity_features: List[int],
) -> Type[Model]:
    if len(task_features) > 0:
        raise NotImplementedError("Currently do not support `task_features`!")
    if len(fidelity_features) > 1:
        raise NotImplementedError("Currently support only a single fidelity parameter!")
    # NOTE: We currently do not support `task_features`. This code block will only
    # be relevant once we support `task_features`.
    # if len(task_features) > 1:
    #     raise NotImplementedError(
    #         f"This model only supports 1 task feature (got {task_features})"
    #     )
    # elif len(task_features) == 1:
    #     task_feature = task_features[0]
    # else:
    #     task_feature = None
    task_feature = None
    # NOTE: In the current setup, `task_feature = None` always.
    if task_feature is None:
        Yvars_cat = torch.cat(Yvars).clamp_min_(MIN_OBSERVED_NOISE_LEVEL)
        is_nan = torch.isnan(Yvars_cat)
        any_nan_Yvar = torch.any(is_nan)
        all_nan_Yvar = torch.all(is_nan)
        if any_nan_Yvar and not all_nan_Yvar:
            raise ValueError(
                "Mix of known and unknown variances indicates valuation function "
                "errors. Variances should all be specified, or none should be."
            )
        if len(fidelity_features or []) > 0:
            return SingleTaskMultiFidelityGP
        elif all_nan_Yvar:
            return SingleTaskGP
        return FixedNoiseGP
    # TODO: Replace ValueError with `ModelListGP`.
    # raise ValueError("Unexpected training data format. Cannot choose `Model`.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_model(model_name: str):\r\n global predictor, currently_selected_model\r\n predictor = FeatureExtractor(model_name)\r\n currently_selected_model = model_name", "def get_model():\n SUPPORTED_DATASETS = ('imagenet', 'cifar10', 'mnist')\n\n # ensure the dataset is supported\n dataset = args.dataset.lower()\n if dataset not in SUPPORTED_DATASETS:\n raise ValueError('Dataset {} is not supported'.format(dataset))\n net = None\n cadene = None\n\n if args.dataset == 'cifar10':\n if args.model == \"mobilenet\":\n from models.mobilenet import MobileNet\n net = MobileNet(n_class=10)\n\n elif args.model == \"alexnet\":\n from models.alexnet import AlexNet\n net = AlexNet(n_class=10)\n # else:\n # net = _create_cifar10_model(arch, pretrained)\n\n elif args.dataset == 'imagenet':\n if args.model ==\"mobilenet\":\n from models.mobilenet import MobileNet\n net = MobileNet(n_class=1000)\n # else:\n # net, cadene = _create_imagenet_model(arch, pretrained)\n\n # elif args.dataset == 'mnist':\n # net = _create_mnist_model(arch, pretrained)\n\n if net is None:\n raise NotImplementedError\n \n return net.cuda() if use_cuda else net", "def get_model(name, dataset):\n field_dims = dataset.field_dims\n if name == 'lr':\n return LogisticRegressionModel(field_dims)\n elif name == 'fm':\n return FactorizationMachineModel(field_dims, embed_dim=16)\n elif name == 'hofm':\n return HighOrderFactorizationMachineModel(\n field_dims, order=3, embed_dim=16)\n elif name == 'ffm':\n return FieldAwareFactorizationMachineModel(field_dims, embed_dim=4)\n elif name == 'fnn':\n return FactorizationSupportedNeuralNetworkModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'wd':\n return WideAndDeepModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'ipnn':\n return ProductNeuralNetworkModel(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, ),\n method='inner',\n dropout=0.2)\n elif name == 'opnn':\n return ProductNeuralNetworkModel(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, ),\n method='outer',\n dropout=0.2)\n elif name == 'dcn':\n return DeepCrossNetworkModel(\n field_dims,\n embed_dim=16,\n num_layers=3,\n mlp_dims=(16, 16),\n dropout=0.2)\n elif name == 'nfm':\n return NeuralFactorizationMachineModel(\n field_dims, embed_dim=64, mlp_dims=(64, ), dropouts=(0.2, 0.2))\n elif name == 'ncf':\n # only supports MovieLens dataset because for other datasets user/item colums are indistinguishable\n assert isinstance(dataset, MovieLens20MDataset) or isinstance(\n dataset, MovieLens1MDataset)\n return NeuralCollaborativeFiltering(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, 16),\n dropout=0.2,\n user_field_idx=dataset.user_field_idx,\n item_field_idx=dataset.item_field_idx)\n elif name == 'fnfm':\n return FieldAwareNeuralFactorizationMachineModel(\n field_dims, embed_dim=4, mlp_dims=(64, ), dropouts=(0.2, 0.2))\n elif name == 'dfm':\n return DeepFactorizationMachineModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'xdfm':\n return ExtremeDeepFactorizationMachineModel(\n field_dims,\n embed_dim=16,\n cross_layer_sizes=(16, 16),\n split_half=False,\n mlp_dims=(16, 16),\n dropout=0.2)\n elif name == 'afm':\n return AttentionalFactorizationMachineModel(\n field_dims, embed_dim=16, attn_size=16, dropouts=(0.2, 0.2))\n elif name == 'afi':\n return AutomaticFeatureInteractionModel(\n field_dims,\n embed_dim=16,\n atten_embed_dim=64,\n num_heads=2,\n num_layers=3,\n mlp_dims=(400, 400),\n dropouts=(0, 0, 0))\n elif name == 'afn':\n 
print('Model:AFN')\n return AdaptiveFactorizationNetwork(\n field_dims,\n embed_dim=16,\n LNN_dim=1500,\n mlp_dims=(400, 400, 400),\n dropouts=(0, 0, 0))\n else:\n raise ValueError('unknown model name: ' + name)", "def set_model(self, model):\n '''returns a model'''\n if self.model==\"Lasso\":\n modelo = Lasso()\n elif self.model==\"Ridge\":\n modelo = Ridge()\n elif self.model == \"RandomForest\":\n modelo = RandomForestRegressor(random_state = 42)\n else:\n if self.model == \"XGBoost\":\n modelo = xgb.XGBRegressor()\n #modelo = xgb.XGBRegressor(booster = 'gbtree', objective ='reg:squarederror',\n # colsample_bytree = 0.3, learning_rate = 0.35,\n # max_depth = 10, alpha = 0.1, n_estimators = 500)\n\n\n return modelo", "def pick_model(self):\n return ConvModel(self.model_pmeter)", "def prepare_model_(model, *data, device='cpu'):\n _auto_name('', model)\n set_default_parent(model)\n def _prep_data(d):\n if isinstance(d, (np.ndarray, torch.Tensor)):\n return torch.as_tensor(d).to(device)\n elif isinstance(d, (list, tuple)):\n if all(isinstance(x, int) for x in d):\n return torch.randn(*d, device=device)\n return [_prep_data(x) for x in d]\n elif isinstance(d, dict):\n return {k:_prep_data(v) for k, v in d.items()}\n with torch.no_grad():\n is_training = model.training\n data = [_prep_data(d) for d in data]\n model.eval()\n model.to(device)\n model(*data)\n model.train(is_training)\n return model", "def get_model(*args):\n return Model()", "def pick_model(model_name, alpha):\n if model_name == \"purename\":\n return PureNameLNN(alpha, -1, False)\n elif model_name == \"context\":\n return ContextLNN(alpha, -1, False)\n elif model_name == \"type\":\n return TypeLNN(alpha, -1, False)\n elif model_name == \"complex_pure_ctx\":\n print(\"===ComplexRuleWithoutTypeLNN===\")\n return ComplexRuleWithoutTypeLNN(alpha, -1, False)\n elif model_name == \"complex_pure_ctx_type\":\n return ComplexRuleWithTypeLNN(alpha, -1, False)\n elif model_name == \"lr\":\n return LogitsRegression()\n else:\n print(\"WRONG name input\")\n return None", "def load_model(model, trained_models_dir, image_name):\n# if model == \"keras\":\n if model == 1:\n return load_keras_model(trained_models_dir, image_name)\n# elif model == \"lgb\":\n elif model == 3:\n return load_lgb_model(trained_models_dir, image_name)\n# elif model = \"sklearn\":\n else:\n return load_joblib_model(trained_models_dir, image_name)", "def get_model(model):\n all_models = cmd.get_object_list()\n\n if len(all_models) == 0:\n logging.parser_error('No models are opened.')\n return\n\n model = model.lower()\n\n if model and (model in all_models):\n return model\n\n if len(all_models) > 1:\n logging.parser_error(\"Please specify which model you want to use. 
{}\".format(all_models))\n return\n\n return all_models[0]", "def _get_model(self, user_id, model_name, data_reviews):\n\t\treturn MAPPING_MODEL[self.model](\n\t\t\tuser_id=user_id,\n\t\t\tmodel_name=model_name,\n\t\t\tdata_reviews=data_reviews\n\t\t\t)", "def initialize_model(self, initial_data):\n # EDIT THIS METHOD TO RETURN A MINIMAX MODEL ###\n return None", "def get_model(model_name: str, map_location=torch.device('cpu')):\n # model urls on Zenodo\n model_urls = {'ParallelNets': 'https://zenodo.org/record/7245516/files/ParallelNets.pth?download=1',\n 'UNetPath': 'https://zenodo.org/record/7245516/files/UNetPath.pth?download=1'}\n\n # check if model_name is supported\n if model_name not in ['ParallelNets', 'UNetPath']:\n raise ValueError(\"Model name needs to be 'ParallelNets' or 'UNetPath'.\")\n\n model_path = pkg_resources.resource_filename('crackpy', f'crack_detection/models/{model_name}.pth')\n\n # check if model folder exists\n origin, _ = os.path.split(model_path)\n if not os.path.exists(origin):\n os.makedirs(origin)\n\n if not os.path.exists(model_path):\n print(f\"Downloading {model_name}...\")\n torch.hub.download_url_to_file(model_urls[model_name], model_path)\n\n if model_name == 'ParallelNets':\n model = ParallelNets(in_ch=2, out_ch=1, init_features=64)\n model.load_state_dict(torch.load(model_path, map_location=map_location))\n else: # model_name == 'UNetPath'\n model = UNet(in_ch=2, out_ch=1, init_features=64)\n model.load_state_dict(torch.load(model_path, map_location=map_location))\n\n return model", "def get_model(model_name: str = \"\", cfg={}) -> torch.nn.Module:\n if model_name == \"default\":\n model = AudioNTT2020(n_mels=cfg.n_mels, d=cfg.feature_d)\n\n elif model_name == \"resnetish34\":\n model = resnetish34()\n\n elif model_name == \"clstm\":\n model = CLSTM()\n\n elif model_name == \"cvt\":\n s1_depth, s2_depth, s3_depth = cfg.depths\n s1_emb_dim, s2_emb_dim, s3_emb_dim = cfg.embed_dims\n s1_mlp_mult, s2_mlp_mult, s3_mlp_mult = cfg.mlp_mults\n\n model = CvT(\n s1_emb_dim=s1_emb_dim,\n s1_depth=s1_depth,\n s1_mlp_mult=s1_mlp_mult,\n s2_emb_dim=s2_emb_dim,\n s2_depth=s2_depth,\n s2_mlp_mult=s2_mlp_mult,\n s3_emb_dim=s3_emb_dim,\n s3_depth=s3_depth,\n s3_mlp_mult=s3_mlp_mult,\n pool=cfg.cvt_pool,\n )\n else:\n raise ValueError(\"Model not found.\")\n return model", "def init_model(model_type):\n if model_type == 'magnitude':\n model = Magnitude('../model/crawl-300d-2M.magnitude')\n elif model_type == 'gensim':\n model = KeyedVectors.load('../model/pre_trained_word2vec_embeddings.bin')\n else:\n print(\"Invalid model type.\")\n sys.exit(1)\n return model, model_type", "def load_model(opt, device):\n device_ids = list(range(opt.num_gpus))\n model = BiDateNet(13, 2).to(device)\n model = nn.DataParallel(model, device_ids=device_ids)\n\n return model", "def select_classifier(model, X, A, n_splits=5, loss_type='01', seed=None):\n if isinstance(model, (GridSearchCV, RandomizedSearchCV)):\n selected_model = _select_classifier_from_sk_search(model, X, A)\n elif isinstance(model, list):\n selected_model = _select_classifier_from_list(candidates=model, X=X, A=A, n_splits=n_splits, seed=seed,\n loss_type=loss_type)\n elif isinstance(model, dict):\n selected_model = _select_classifier_from_grid(X=X, A=A, n_splits=n_splits, seed=seed, **model,\n loss_type=loss_type)\n else: # A regular classifier was passed\n selected_model = model\n return selected_model", "def load_model(which_model):\n if which_model == 'mazhari':\n return myokit.load_model(os.path.join(MODEL, 
'mazhari-ikr-markov.mmt'))\n elif which_model == 'mazhari-reduced':\n return myokit.load_model(os.path.join(MODEL, 'mazhari-reduced-MBAM-ikr-markov.mmt'))\n elif which_model == 'wang':\n return myokit.load_model(os.path.join(MODEL, 'wang-ikr-markov.mmt'))\n elif which_model == 'wang-r1':\n return myokit.load_model(os.path.join(MODEL, 'wang-r1-ikr-markov.mmt'))\n elif which_model == 'wang-r2':\n return myokit.load_model(os.path.join(MODEL, 'wang-r2-ikr-markov.mmt'))\n elif which_model == 'wang-r3':\n return myokit.load_model(os.path.join(MODEL, 'wang-r3-ikr-markov.mmt'))\n elif which_model == 'wang-r4':\n return myokit.load_model(os.path.join(MODEL, 'wang-r4-ikr-markov.mmt'))\n elif which_model == 'wang-r5':\n return myokit.load_model(os.path.join(MODEL, 'wang-r5-ikr-markov.mmt'))\n elif which_model == 'wang-r6':\n return myokit.load_model(os.path.join(MODEL, 'wang-r6-ikr-markov.mmt'))\n elif which_model == 'wang-r7':\n return myokit.load_model(os.path.join(MODEL, 'wang-r7-ikr-markov.mmt'))\n else:\n pass", "def get_model(name, dataset):\r\n field_dims = dataset.field_dims\r\n\r\n if name == 'ncf':\r\n # only supports MovieLens dataset because for other datasets user/item colums are indistinguishable\r\n assert isinstance(dataset, MovieLens1MDataset)\r\n return NeuralCollaborativeFiltering(field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2,\r\n user_field_idx=dataset.user_field_idx,\r\n item_field_idx=dataset.item_field_idx)\r\n else:\r\n raise ValueError('unknown model name: ' + name)", "def get_model(parameters):\n if MODEL == 6:\n return get_model_6(parameters)\n elif MODEL == 5:\n return get_model_5(parameters)\n elif MODEL == 4:\n return get_model_4(parameters)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return get_cv_model_3(parameters)\n else:\n return get_model_3(parameters)\n elif MODEL == 2:\n return get_model_2(parameters)\n else:\n return get_model_1(parameters)", "def load_model(name, input_node):\n # Find the model class from its name\n all_models = models.get_models()\n net_class = [model for model in all_models if model.__name__ == name][0]\n\n # Construct and return the model\n return net_class({'data': input_node})", "def get_model(model_name, problem_type):\n # if user isn't \"sallamander\", it's on a dedicated instance - use all the cores\n num_usable_cores = multiprocessing.cpu_count() \\\n if os.environ['USER'] != 'sallamander' else 1\n rand_state=609\n\n if model_name == 'linear':\n model = ElasticNet(random_state=rand_state)\n elif model_name == 'logistic': \n model = LogisticRegression(random_state=rand_state)\n elif model_name == 'random_forest':\n if problem_type == 'regression':\n model = RandomForestRegressor(n_jobs = num_usable_cores, \n random_state=rand_state)\n elif problem_type == 'classification': \n model = RandomForestClassifier(n_jobs = num_usable_cores, \n random_state=rand_state)\n else: \n raise RuntimeError('Unsupported `model_name` inputted!')\n\n return model", "def load_model(name):\n\tmodel = joblib.load(\"data/{}/{}.model\".format(name, name))\n\t# Setting n_jobs to 1 in case it was set to a higher number while training the model seems to makes predictions of single samples much faster.\n\tmodel.n_jobs = 1\n\treturn model", "def _get_model_by_name(self):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData']\n col_headers = ['model_name']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # 
Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)", "def train(self, data, option, param_map):\n if option == \"lr\":\n md = self.logistic_regression(elastic_param=param_map[\"elastic_param\"],\n reg_param=param_map[\"reg_param\"],\n family=param_map[\"family\"])\n elif option == \"rf\":\n md = self.random_forest(max_depth=param_map[\"max_depth\"],\n max_num_tree=param_map[\"max_num_tree\"])\n elif option == \"gbdt\":\n md = self.gbdt(max_depth=param_map[\"max_depth\"],\n max_bins=param_map[\"max_bins\"])\n else:\n raise ValueError(\"ERROR | model %s does not support yet\" % option)\n\n self.model = md.fit(data)\n return self.model", "def load_default_model(name):\n return DEFAULT_MODEL_DICT[name]", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def _get_model():\n with open('models/catapp_gp_model.pickle', 'rb') as modelfile:\n model = pickle.load(modelfile)\n return model", "def get_model(model, source=\"kipoi\", with_dataloader=True):\n # TODO - model can be a yaml file or a directory\n source_name = source\n\n source = kipoi.config.get_source(source)\n\n # pull the model & get the model directory\n yaml_path = source.pull_model(model)\n source_dir = os.path.dirname(yaml_path)\n\n # Setup model description\n with cd(source_dir):\n md = ModelDescription.load(os.path.basename(yaml_path))\n # TODO - is there a way to prevent code duplication here?\n # TODO - possible to inherit from both classes and call the corresponding inits?\n # --------------------------------------------\n # TODO - load it into memory?\n\n # TODO - validate md.default_dataloader <-> model\n\n # attach the default dataloader already to the model\n if \":\" in md.default_dataloader:\n dl_source, dl_path = md.default_dataloader.split(\":\")\n else:\n dl_source = source_name\n dl_path = md.default_dataloader\n\n if with_dataloader:\n # allow to use relative and absolute paths for referring to the dataloader\n default_dataloader_path = os.path.join(\"/\" + model, dl_path)[1:]\n default_dataloader = kipoi.get_dataloader_factory(default_dataloader_path,\n dl_source)\n else:\n default_dataloader = None\n\n # Read the Model - append methods, attributes to self\n with cd(source_dir): # move to the model directory temporarily\n if md.type == 'custom':\n Mod = load_model_custom(**md.args)\n assert issubclass(Mod, BaseModel) # it should inherit from Model\n mod = Mod()\n elif md.type in AVAILABLE_MODELS:\n # TODO - this doesn't seem to work\n mod = AVAILABLE_MODELS[md.type](**md.args)\n else:\n raise ValueError(\"Unsupported model type: {0}. 
\" +\n \"Model type needs to be one of: {1}\".\n format(md.type,\n ['custom'] + list(AVAILABLE_MODELS.keys())))\n\n # populate the returned class\n mod.type = md.type\n mod.args = md.args\n mod.info = md.info\n mod.schema = md.schema\n mod.dependencies = md.dependencies\n mod.default_dataloader = default_dataloader\n mod.name = model\n mod.source = source\n mod.source_name = source_name\n mod.source_dir = source_dir\n # parse the postprocessing module\n mod.postprocessing = md.postprocessing\n if with_dataloader:\n mod.pipeline = Pipeline(model=mod, dataloader_cls=default_dataloader)\n else:\n mod.pipeline = None\n return mod", "def load_model(path_model, model_type, device):\n if model_type == 'torch':\n model = torch.load(path_model).to(device)\n if hasattr(model, 'linblocks'):\n for linblock in model.linblocks:\n linblock.to(device)\n model.eval()\n return model\n elif model_type == 'sklearn':\n raise NotImplementedError\n else:\n raise Exception('Model type not known.')", "def get_model(name, **model_args):\n module = importlib.import_module('.' + name, 'models')\n return module.build_model(**model_args)", "def get_model(name, **kwargs):\n models = {'standard_lstm_lm_200' : standard_lstm_lm_200,\n 'standard_lstm_lm_650' : standard_lstm_lm_650,\n 'standard_lstm_lm_1500': standard_lstm_lm_1500,\n 'awd_lstm_lm_1150': awd_lstm_lm_1150,\n 'awd_lstm_lm_600': awd_lstm_lm_600,\n 'big_rnn_lm_2048_512': big_rnn_lm_2048_512,\n 'elmo_2x1024_128_2048cnn_1xhighway': elmo_2x1024_128_2048cnn_1xhighway,\n 'elmo_2x2048_256_2048cnn_1xhighway': elmo_2x2048_256_2048cnn_1xhighway,\n 'elmo_2x4096_512_2048cnn_2xhighway': elmo_2x4096_512_2048cnn_2xhighway,\n 'transformer_en_de_512': transformer_en_de_512,\n 'bert_12_768_12' : bert_12_768_12,\n 'bert_24_1024_16' : bert_24_1024_16,\n 'distilbert_6_768_12' : distilbert_6_768_12,\n 'roberta_12_768_12' : roberta_12_768_12,\n 'roberta_24_1024_16' : roberta_24_1024_16,\n 'ernie_12_768_12' : ernie_12_768_12}\n name = name.lower()\n if name not in models:\n raise ValueError(\n 'Model %s is not supported. Available options are\\n\\t%s'%(\n name, '\\n\\t'.join(sorted(models.keys()))))\n return models[name](**kwargs)", "def get_model(config):\n if not isinstance(config, ModelConfig):\n raise ValueError(\"Get model must be a config file. 
\")\n\n identifier = str(config.class_id).lower()\n if identifier in ['vgg', 'vgg16', 'vgg19']:\n return vgg.get_model(config)\n elif identifier in ['resnet', 'resnet50',]:\n return resnet.get_model(config)", "def _choose_model(self, model_str):\n if model_str == 'lg':\n return(LogisticRegression())\n elif model_str == 'rf':\n return(RandomForestClassifier())\n elif model_str == 'svm':\n # return SVC(C=1, kernel='linear') # linear boundary\n return SVC(C=1, kernel='poly', degree=2) # non-linear boundary\n # return SVC(C=1, kernel='rbf')\n # return SVC(C=1, kernel='sigmoid') # binary classification", "def get_model(model_name):\n model = CNN().get_model(model_name=model_name)\n\n return model", "def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model", "def get_model_class(model_name, task_name):\n if task_name == 'rocstories':\n return OpenAIGPTDoubleHeadsModel if model_name == 'openai-gpt' else GPT2DoubleHeadsModel\n else:\n return OpenAIGPTLMHeadModel if model_name == 'openai-gpt' else GPT2LMHeadModel", "def load_custom_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model = models.alexnet()\n num_ftrs = model.classifier[6].in_features\n model.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading ResNet18 Model\")\n model = models.resnet18() #Load the pytorch. torchvision model\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #Set it to match the ImageNet-100 Classes.\n elif model_name==\"ResNet50\":\n print(\"Loading ResNet50 Model\")\n model = models.resnet50()\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #ImageNet-100 has 100 classes.\n elif model_name==\"DenseNet\":\n print(\"Loading DenseNet161 Model\")\n model = models.densenet161()\n num_ftrs = model.classifier.in_features\n model.classifier = nn.Linear(num_ftrs, 100)\n elif model_name==\"MyNet\":\n print(\"Loading Pyramid Model\")\n model = pyramid_net.create_model() # Load the model I implemented.\n\n if cfg.load_model_true: # Load the model that was stopped during training.\n model.load_state_dict(torch.load(cfg.load_model_path))\n\n return model", "def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n bic_models = map(lambda x: self.run_bic_model(x),\n range(self.min_n_components, self.max_n_components + 1))\n valid_models = [x for x in bic_models if x is not None]\n\n if len(valid_models) > 0:\n best_model = sorted(valid_models, key=lambda x: x[1])[0]\n return best_model[0]\n else:\n return None", "def retrieve_model(self, model_name):\n\t\tmodel_detail = dbop.get_model(self, model_name)\n\t\t#since the 'owner' field of model_detail is only owner's username,\n\t\t#we have to change it to a User object\n\t\t#In this case, the owner of this model is the user itself\n\t\tmodel_detail['owner'] = self\n\t\tif model_detail['model_type'] == 'SPSS Predictive Model':\n\t\t\treturn model.SPSSModel(**model_detail)\n\t\telif model_detail['model_type'] == 'DashDB In-database Model':\n\t\t\treturn model.DashdbModel(**model_detail)", "def load_model(model_name):\n if hasattr(torchvision.models, model_name):\n with torch.no_grad():\n if model_name.startswith(\"inception\"):\n height = width = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n else:\n height = width = 224\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_shape = [1, 3, height, width]\n input_data = torch.randn(input_shape).float()\n for channel in range(3):\n 
input_data[:, channel] -= mean[channel]\n input_data[:, channel] /= std[channel]\n model = getattr(torchvision.models, model_name)(pretrained=True)\n model = model.float().eval()\n return model, [input_data]\n try:\n import pretrainedmodels\n if hasattr(pretrainedmodels, model_name):\n return load_pretrainedmodels(model_name)\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"Please install pretrainedmodels.pytorch\")\n raise RuntimeError(\"Model not supported\")", "def get_model(model_name: str, *args, **kwargs):\n try:\n if '.' in model_name:\n module_name, class_name = model_name.rsplit('.', 1)\n else:\n module_name = model_name\n class_name = model_name.capitalize().replace(\"_\",\"\")\n\n model_module = import_module('.' + module_name, package='models')\n\n model_class = getattr(model_module, class_name)\n\n instance = model_class(*args, **kwargs)\n\n except (AttributeError, ModuleNotFoundError):\n raise ImportError('{} is not part of our model/architecture collection.'.format(model_name))\n else:\n if not issubclass(model_class, Model):\n raise ImportError(\"{} is not a valid model/architecture.\".format(model_class))\n\n return instance", "def load_model(model_root, arch, dataset, dataset_name,\n dataset_type, device='cuda'):\n \n if model_root is None and dataset_type == 'language':\n model_root = lm.LANGUAGE_MODEL_DICT[dataset_name]\n \n pooled_output = None\n if dataset_type == 'vision':\n model, _ = make_and_restore_model(arch=arch, \n dataset=dataset,\n resume_path=model_root,\n pytorch_pretrained=(model_root is None)\n )\n else:\n config = AutoConfig.from_pretrained(model_root)\n if config.model_type == 'bert':\n if model_root == 'barissayil/bert-sentiment-analysis-sst': \n model = lm.BertForSentimentClassification.from_pretrained(model_root)\n pooled_output = False\n else: \n model = lm.BertForSequenceClassification.from_pretrained(model_root)\n pooled_output = True\n elif config.model_type == 'roberta': \n model = lm.RobertaForSequenceClassification.from_pretrained(model_root)\n pooled_output = False\n else:\n raise ValueError('This transformer model is not supported yet.')\n \n model.eval()\n model = ch.nn.DataParallel(model.to(device))\n return model, pooled_output", "def get_model():\n model = ecole.scip.Model.from_file(str(DATA_DIR / \"bppc8-02.mps\"))\n model.disable_cuts()\n model.disable_presolve()\n model.set_param(\"randomization/permuteconss\", True)\n model.set_param(\"randomization/permutevars\", True)\n model.set_param(\"randomization/permutationseed\", 784)\n model.set_param(\"randomization/randomseedshift\", 784)\n model.set_param(\"randomization/lpseed\", 784)\n return model", "def get_sklearn_model(x):\n if is_sklearn_model(x):\n return x # already a valid model\n elif type(x) is dict:\n if hasattr(x, 'model'):\n return get_sklearn_model(x['model'])\n else:\n return None\n elif type(x) is str:\n # noinspection PyBroadException\n try:\n return get_sklearn_model(eval(x))\n except:\n pass\n return None", "def get_or_create_model(self) -> Model:\n assert self.model_name\n\n print(\"Check if Model exists.\")\n if self.model_name in self.models:\n print(\"Model does exists.\")\n # if get_model(self.model_name).tags['train_py_hash'] == self.get_file_md5(\n # self.source_directory + \"/\" + self.script):\n model = Model(self, name=self.model_name)\n if not os.path.isdir(\"outputs\"):\n model.download(\"outputs\", exist_ok=True)\n return model\n print(\"Model does not exists.\")\n model = self.train_model()\n\n assert model\n if self.show_output:\n 
print(model.name, model.version, model.url, sep=\"\\n\")\n return model", "def get_model(cls):\n if cls.model == None:\n with open(os.path.join(model_path, 'vdok3_rf.pkl'), 'rb') as inp:\n cls.model = pickle.load(inp)\n return cls.model", "def _get_model(data_class: Type[Table[Any]]) -> TableModel[ModelledTable]:\n\n if data_class not in _MODELS:\n # Prevent recursion problems with self-referential classes.\n _MODELS[data_class] = ... # type: ignore\n _MODELS[data_class] = _make_model(data_class)\n\n return _MODELS[data_class]", "def build_model():", "def from_pretrained(model_name: str, aliases: Dict = None, device: str = None):\n if device == None:\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n model = None\n\n models = AutoModel.list_models(return_dict=True)\n model_config = models.get(model_name)\n\n # Try to find by alias\n if model_config == None and aliases:\n name_from_alias = aliases.get(model_name)\n\n if name_from_alias:\n model_config = models.get(name_from_alias)\n if model_config:\n model_name = name_from_alias\n\n # Try to load from local saved model\n if model_config == None:\n try:\n model = load(model_name)\n model.to(device)\n except ValueError:\n raise ValueError(f\"Model '{model_name}' not found\")\n \n if model == None:\n model_class = model_config[\"class\"]\n init_kwargs = model_config[\"init_kwargs\"]\n\n model = model_config[\"class\"](**init_kwargs,\n description=model_config[\"description\"],\n tasks=model_config[\"tasks\"],\n name=model_name,\n details=model_config.get(\"details\"),\n device=device)\n\n return model", "def bestModel(self, channel_type):", "def select(self):\n\t\tbest_num_components = self.n_constant\n\t\treturn self.base_model(best_num_components)", "def choose_model(\n name: str,\n log_dir: str = \"logs\",\n n_estimators: int = 100,\n max_depth: int = 6,\n xgb_lr: float = 0.3,\n gamma_xgb: float = 0.0,\n min_child_weight: float = 1.0,\n subsample: float = 1.0,\n colsample_bytree: float = 1.0,\n reg_lambda: float = 1.0,\n C: float = 1.0,\n nn_wt: float = 1.0,\n epochs: int = 50,\n batch_size: int = 64,\n nn_lr: float = 1e-3,\n lr_step: int = 10000,\n lr_decay: float = 0.75,\n weight_decay: float = 1e-3,\n balance_weights: bool = True,\n **kwargs,\n) -> BaseClassifier:\n xgb_model = XGBClassifier(\n n_estimators=n_estimators,\n max_depth=max_depth,\n learning_rate=xgb_lr,\n gamma=gamma_xgb,\n min_child_weight=min_child_weight,\n subsample=subsample,\n colsample_bytree=colsample_bytree,\n reg_lambda=reg_lambda,\n random_state=0,\n )\n svm_model = SVC(C=C, class_weight=\"balanced\", random_state=0)\n random_forest_classifier = RandomForestClassifier()\n\n nn_model = NN(\n epochs=epochs,\n batch_size=batch_size,\n log_dir=log_dir,\n learning_rate=nn_lr,\n lr_step=lr_step,\n lr_decay=lr_decay,\n weight_decay=weight_decay,\n balance_weights=balance_weights,\n random_state=0,\n )\n\n if name == \"xgb\":\n return xgb_model\n elif name == \"svm\":\n return svm_model\n elif name == \"ensemble\":\n model_wt = np.array([1.0, nn_wt])\n model_wt /= sum(model_wt)\n return VotingClassifier(\n [(\"xgb\", xgb_model), (\"nn\", nn_model)], voting=\"soft\", weights=model_wt\n )\n elif name == \"forest\":\n return random_forest_classifier\n elif name == \"nn\":\n return nn_model\n else:\n raise ValueError(f\"Invalid model name: {name}\")", "def predict(data, model: str = None, **kwargs):\n\n model_instance = get_model(model)\n log.debug(\"Predict with \" + str(model_instance))\n return model_instance.predict(data, **kwargs)", "def 
train_model(x_data, y_data, model_type):\n # def lr model object\n clr = None\n try:\n clr = model_list[model_type]()\n except Exception as e:\n print(e)\n # fit model\n clr.fit(x_data, y_data)\n # save model in pkl file\n try:\n joblib.dump(clr, \"model/\" + model_type + \".pkl\")\n except Exception as e:\n print(e)\n return clr", "def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n # DONE implement model selection based on BIC scores\n\n # Bayesian information criteria: BIC = −2 * log L + p * log N,\n # where\n # • L is the likelihood of the fitted model,\n # • p is the number of parameters, and\n # • N is the number of data points.\n # The term −2 log L decreases with increasing model complexity\n # (more parameters), whereas the penalties 2p or p log N increase with\n # increasing complexity. The BIC applies a larger penalty\n # when N > e 2 = 7.4.\n # Model selection: The lower the BIC value the better the model\n\n select_bic = float(\"inf\")\n select_model = None\n for n_components in range(self.min_n_components, self.max_n_components + 1):\n try:\n model = self.base_model(n_components)\n logL = model.score(self.X, self.lengths)\n # https://discussions.udacity.com/t/verifing-bic-calculation/246165/5\n # https://discussions.udacity.com/t/number-of-parameters-bic-calculation/233235/17\n p = n_components**2 + 2*n_components * model.n_features - 1\n logN = math.log(sum(self.lengths))\n bic = - 2 * logL + p * logN\n if bic < select_bic:\n select_bic = bic\n select_model = model\n except:\n continue\n return select_model", "def loadModel(name, path=None):\n\n # if a path is given, try to load from that path first\n if path:\n try:\n model = TFT5ForConditionalGeneration.from_pretrained(path)\n tokenizer = T5Tokenizer.from_pretrained(path)\n \n return model, tokenizer\n except:\n print(f\"WARNING: Could not load the model from the path ({path}) specified with --from-pretrained flag. Trying to load '{name}' from cloud instead.\")\n\n # if no path was specified, or the load from path failed, try to load from cloud using the given model name\n model = TFT5ForConditionalGeneration.from_pretrained(name)\n tokenizer = T5Tokenizer.from_pretrained(name)\n \n return model, tokenizer", "def load_model():\n global model_tok, model_mlm, model, model_cls\n if model is None:\n model_name_or_path = os.getenv('TRANSFORMER_MODEL', default='distilbert-base-multilingual-cased')\n # 'bert-base-multilingual-cased'\n model_tok = AutoTokenizer.from_pretrained(model_name_or_path)\n model_mlm = AutoModelForMaskedLM.from_pretrained(model_name_or_path)\n model_mlm.eval()\n model = model_mlm.base_model\n\n if isinstance(model_mlm, BertPreTrainedModel):\n model_cls = model_mlm.cls\n elif isinstance(model_mlm, DistilBertPreTrainedModel):\n model_cls = nn.Sequential(\n model_mlm.vocab_transform,\n nn.GELU(),\n model_mlm.vocab_layer_norm,\n model_mlm.vocab_projector\n )\n else:\n raise ValueError(f'{model_name_or_path} is not supported yet. 
try one of '\n f'{\", \".join(list(AvailableModels.__members__.keys()))}')\n model.to(device)\n model_mlm.to(device)\n # model_tok.to(device)\n model_cls.to(device)", "def get_model(name):\n\n try:\n from .model_defs import get_model_from_def\n model = get_model_from_def(name)\n logger.info(\"Model {n} loaded from model_defs module\".format(n=name))\n except NameError:\n try:\n model = get_model_from_yaml(name)\n logger.info(\"Model {n} loaded from yaml\".format(n=name))\n except KeyError:\n try:\n from .model_defs import parse_model_name\n model = parse_model_name(name)\n logger.info(\"Model {n} parsed from name\".format(n=name))\n except NameError:\n sys.exit(\"Unknown model {n}\".format(n=name))\n\n if not hasattr(model, 'name'):\n model.name = name\n\n return model", "def get_model(self, **kwargs):\n # use the following line to insert a local model\n # qos_model = mos_client_local()\n\n # read the model from a file\n if 'filename' not in kwargs:\n filename = 'qoe_client.p'\n else:\n filename = kwargs['filename']\n\n predictor = pickle.load(open(args.predictor, 'rb'))\n n_components, param, fit, mae, mse, rmse, amp, mapNys, y_test, y_pred = predictor\n qos_model = mos_client(fit, mapNys)\n\n return qos_model", "def test_get_model(self) -> None:\n get_model()", "def load_model(data):\n K.clear_session()\n\n # creating the Deep Neural Net Model\n model = Sequential()\n\n # layer 1\n model.add(Dense(units=128,\n activation='relu',\n input_shape=(data.shape[1], )))\n model.add(BatchNormalization())\n model.add(Dropout(0.7))\n\n # layer 2\n model.add(Dense(units=64,\n activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.7))\n\n # output layer\n model.add(Dense(units=n_classes,\n activation='softmax'))\n\n # compile model\n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=0.005),\n metrics=['acc'])\n\n return model", "def buildModel(model_name):\n if model_name == \"resnet50\":\n model = kapp.resnet50.ResNet50(weights=\"imagenet\", include_top=False)\n return model, kapp.resnet50.preprocess_input\n elif model_name == \"vgg16\":\n model = kapp.vgg16.VGG16(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg16.preprocess_input\n elif model_name == 'xception':\n model = kapp.xception.Xception(weights=\"imagenet\", include_top=False)\n return model, kapp.xception.preprocess_input\n elif model_name == 'vgg19':\n model = kapp.vgg19.VGG19(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg19.preprocess_input\n elif model_name == 'inceptionv3':\n model = kapp.inception_v3.InceptionV3(weights=\"imagenet\", include_top=False)\n return model, kapp.inception_v3.preprocess_input\n elif model_name == 'mobilenet':\n model = kapp.mobilenet.MobileNet(weights=\"imagenet\", include_top=False)\n return model, kapp.mobilenet.preprocess_input\n else:\n raise Exception(\"Unsupported model error\")", "def get_model(**kwargs: bool) -> nn.Module:\n return heejung(**kwargs)", "def set_model(self):\n self.model = self.get_model()", "def pick_model(self):\n self.x = self.train[self.use_columns]\n try:\n self.x = pd.get_dummies(self.x)\n except:\n pass # if no categorical features\n self.final_columns = self.x.columns\n print(self.x.columns)\n self.scaler = StandardScaler()\n self.x = self.scaler.fit_transform(self.x)\n self.y = self.train['y']\n\n if len(np.unique(self.y))<50:\n print('Consider using classification, probably not continuos target variable!')\n\n # for picking the best model\n lr = Ridge(max_iter=1500)\n rf = 
RandomForestRegressor(n_estimators=500, max_depth=20, min_samples_leaf=3,\n max_features='auto', n_jobs=-1)\n svr = SVR(max_iter=-1)\n\n self.models = {'lr': lr, 'rf': rf, 'svr': svr}\n self.scores = {'lr': [], 'rf': [], 'svr': []}\n print('selecting model')\n for i, (train_index, test_index) in enumerate(self.kf.split(self.x, self.y)):\n x_tr, x_val = self.x[train_index], self.x[test_index]\n y_tr, y_val = self.y[train_index], self.y[test_index]\n if len(x_tr)>10000:\n print('reduced train size')\n y_tr.index, y_val.index = range(len(y_tr)), range(len(y_val))\n mask_train = np.random.choice(range(len(x_tr)),size=10000)\n x_tr, y_tr = x_tr[mask_train], y_tr[mask_train]\n for k, model in self.models.items():\n print('fold: ', i+1)\n print('model: ', k)\n model = clone(self.models[k])\n model.fit(x_tr, y_tr)\n p = model.predict(x_val)\n # score = mean_squared_error(y_val, p)\n score = mean_absolute_error(y_val, p)\n self.scores[k] = self.scores[k] + [score]\n\n self.best_score = 9e10\n self.old_score = 9e10\n self.best_model = ''\n self.old_model = ''\n for k, l in self.scores.items():\n mean = np.mean(l)\n if mean < self.best_score:\n self.old_score = self.best_score\n self.old_model = self.best_model\n self.best_score = mean\n self.best_model = k\n print(self.best_model, self.best_score)", "def load_model(model, model_index, device=\"cpu\"):\n with open(\"trained_local_model\"+str(model_index), \"rb\") as f_:\n model.load_state_dict(torch.load(f_))\n model.to(device)\n return model", "def make_predictions(model_choice, model_name, loader):\n\n torch.multiprocessing.set_sharing_strategy('file_system')\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # I made a mistake in the saving script\n model_path = os.path.join('../trained_models', model_name, model_name + '.pth')\n\n if model_choice == 'baby':\n from models.BabyC3D import BabyC3D\n\n # from models.BabyC3D import Crazy\n\n model = BabyC3D()\n # model = Crazy()\n elif model_choice == 'small':\n from models.SmallC3D import SmallC3D\n\n model = SmallC3D()\n elif model_choice == 'se3cnn':\n from models.Se3cnn import Se3cnn\n\n model = Se3cnn()\n elif model_choice == 'c3d':\n from models.C3D import C3D\n\n model = C3D()\n elif model_choice == 'small_siamese':\n from models.Siamese import SmallSiamese\n\n model = SmallSiamese()\n elif model_choice == 'baby_siamese':\n from models.Siamese import BabySiamese\n\n model = BabySiamese()\n elif model_choice == 'babyse3cnn':\n from models.BabySe3cnn import BabySe3cnn\n\n model = BabySe3cnn()\n else:\n # Not possible because of argparse\n raise ValueError('Not a possible model')\n model.to(device)\n model = torch.nn.DataParallel(model)\n\n # import torch.optim as optim\n # optimizer = optim.Adam(None)\n # print(model, model_path)\n\n dict_results = run_model(loader, model, model_path)\n pickle.dump(dict_results, open(f'../data/post_processing/predictions/{model_name}.p', 'wb'))\n return dict_results", "def load_model(cls) -> Classifier:\n if cls.model is None:\n cls.model = Classifier.load(model_path)\n return cls.model", "def get_model(\n model: PipelineModel,\n use_auth_token: Union[Text, None] = None,\n) -> Model:\n\n if isinstance(model, Model):\n pass\n\n elif isinstance(model, Text):\n model = Model.from_pretrained(\n model, use_auth_token=use_auth_token, strict=False\n )\n\n elif isinstance(model, Mapping):\n model.setdefault(\"use_auth_token\", use_auth_token)\n model = Model.from_pretrained(**model)\n\n else:\n raise TypeError(\n f\"Unsupported type 
({type(model)}) for loading model: \"\n f\"expected `str` or `dict`.\"\n )\n\n model.eval()\n return model", "def _get_model(self, fl_ctx: FLContext):\n if isinstance(self.model, str):\n # treat it as model component ID\n model_component_id = self.model\n engine = fl_ctx.get_engine()\n self.model = engine.get_component(model_component_id)\n if not self.model:\n self.log_error(fl_ctx, f\"cannot find model component '{model_component_id}'\")\n return\n if self.model and isinstance(self.model, dict):\n # try building the model\n try:\n engine = fl_ctx.get_engine()\n # use provided or default optimizer arguments and add the model parameters\n if \"args\" not in self.model:\n self.model[\"args\"] = {}\n self.model = engine.build_component(self.model)\n except Exception as e:\n self.system_panic(\n f\"Exception while parsing `model`: \" f\"{self.model} with Exception {e}\",\n fl_ctx,\n )\n return\n if self.model and not isinstance(self.model, torch.nn.Module):\n self.system_panic(fl_ctx, f\"expect model to be torch.nn.Module but got {type(self.model)}: {self.model}\")\n return\n if self.model is None:\n self.system_panic(fl_ctx, f\"Model wasn't built correctly! It is {self.model}\")\n return\n self.log_info(fl_ctx, f\"Running model {self.model}\")", "def get_model(model_name):\n module_name = 'strain.models.strain_' + model_name.lower()\n model_module = importlib.import_module(module_name)\n obj = getattr(model_module, model_name)\n return obj", "def load_model(self):\n pass", "def get_model(name, disable_logging=False):\n return PluginLoader._import(\"train.model\", name, disable_logging)", "def load_model(model: nn.Module, model_args: dict, model_weights: str, device: torch.device):\n model = model(**model_args)\n state_dict = torch.load(model_weights, map_location=device)\n model.load_state_dict(state_dict[\"model\"])\n return model", "def load_model(self) -> None:\n\n try:\n model_class = MODEL_TYPES[self.model_type]\n except KeyError:\n raise KeyError(f\"model type: {self.model_type} not supported\")\n\n if (\n os.path.exists(self.resources_path)\n and len(os.listdir(self.resources_path)) > 0\n ):\n model_name_or_path = self.resources_path\n else:\n model_name_or_path = self.model_name\n\n if self.model_type == \"stable_diffusion\":\n self.model = model_class.from_pretrained(\n model_name_or_path,\n use_auth_token=self.auth_token,\n )\n else:\n self.model = model_class.from_pretrained(model_name_or_path)\n\n self.model.to(self.device)", "def select_model(self, verbose = 0):\n\n #first get list of the available models\n tickers = [x.split('\\\\')[1] for x,_,_ in os.walk(ModelLoader.root_path()) if len(x.split('\\\\')) > 1 ]\n \n #now let find the best model\n best_model = None\n lowest_test_error = 2.0\n \n #prepare our sequence data\n for idx, ticker in enumerate(tickers,1):\n try: \n loaded_model = ModelLoader(ticker)\n seq_obj = MultiSequence(self.symbol,loaded_model.window_size,1)\n testing_error =loaded_model.model.evaluate(seq_obj.X,seq_obj.y, verbose=0)\n \n if verbose == 1:\n print(\">{0:>3}) Now checking model: {1:<5} Test error result: {2:.4f}\".format(idx,ticker, testing_error))\n \n if lowest_test_error > testing_error:\n best_model = loaded_model\n lowest_test_error = testing_error\n except :\n pass\n \n #save best model\n self.__best_model = best_model\n self.__test_error = lowest_test_error\n if verbose in[1,2]:\n print(\"==> Best model ticker {0:} with error of {1:.4f}\".format(self.__best_model.ticker,self.__test_error))", "def load_model(model_name):\r\n model = 
joblib.load(model_name)\r\n return model", "def load_model(self) -> Any:", "def _pick_model(self):\r\n\r\n if self.network_type == 'AutoEncoder_3l':\r\n return AutoEncoder_3l.AutoEncoder_3l(self.network_type, self.loss_type, self.accuracy_type, self.learning_rate,\r\n training=self.is_training, num_filters=self.num_filters, nonlin=self.nonlin,\r\n num_classes=self.num_classes, optimizer=self.optimizer)\r\n\r\n else:\r\n raise ValueError('Architecture does not exist')", "def _construct_model(\n self,\n dataset: SupervisedDataset,\n **kwargs: Any,\n ) -> None:\n if self.botorch_model_class is None:\n raise ValueError(\n \"botorch_model_class must be set to construct single model Surrogate.\"\n )\n botorch_model_class = self.botorch_model_class\n\n input_constructor_kwargs = {**self.model_options, **(kwargs or {})}\n botorch_model_class_args = inspect.getfullargspec(botorch_model_class).args\n\n # Temporary workaround to allow models to consume data from\n # `FixedNoiseDataset`s even if they don't accept variance observations\n if \"train_Yvar\" not in botorch_model_class_args and isinstance(\n dataset, FixedNoiseDataset\n ):\n warnings.warn(\n f\"Provided model class {botorch_model_class} does not accept \"\n \"`train_Yvar` argument, but received `FixedNoiseDataset`. Ignoring \"\n \"variance observations and converting to `SupervisedDataset`.\",\n AxWarning,\n )\n dataset = SupervisedDataset(X=dataset.X(), Y=dataset.Y())\n\n self._training_data = [dataset]\n\n formatted_model_inputs = botorch_model_class.construct_inputs(\n training_data=dataset, **input_constructor_kwargs\n )\n self._set_formatted_inputs(\n formatted_model_inputs=formatted_model_inputs,\n inputs=[\n [\n \"covar_module\",\n self.covar_module_class,\n self.covar_module_options,\n None,\n ],\n [\"likelihood\", self.likelihood_class, self.likelihood_options, None],\n [\"outcome_transform\", None, None, self.outcome_transform],\n [\"input_transform\", None, None, self.input_transform],\n ],\n dataset=dataset,\n botorch_model_class_args=botorch_model_class_args,\n robust_digest=kwargs.get(\"robust_digest\", None),\n )\n # pyre-ignore [45]\n self._model = botorch_model_class(**formatted_model_inputs)", "def get_model(self):\n # just return the first model, since all replicas are the same\n return self.call_async(0, '_async_get_model').gen()", "def train_specific_model(data_file: str, model_name: ModelName) -> (bool, ArrayLike):\n if not validate_csv(data_file):\n return False, [None, None, None]\n trained_model = train_model.train_model(model_name=model_name, file=data_file)\n model_filename = save_model.save_model_uuid(trained_model)\n save_model.update_specific_ensemble_model(model_name=model_name, model_filename=model_filename)\n prediction.load_ensemble_models()\n return True, [model_filename, *trained_model[1:]]", "def build_model(train_inputs,train_labels,model_params,model_mode='classification',\n model_type='naive_bayes'):\n if model_mode == \"classification\":\n if model_type == \"naive_bayes\":\n model = GaussianNB()\n if model_type == \"knn\":\n model = KNeighborsClassifier(n_neighbors=50)\n if model_type == \"svm\":\n model = SVC(kernel='poly', degree =27, coef0 =1, C=5)\n if model_type == \"decision_tree\":\n model = DecisionTreeClassifier(min_samples_split=45,min_samples_leaf=45,criterion=\"gini\")\n #model = RandomForestClassifier(n_estimators=500, n_jobs=-1)\n\n if model_mode == \"regression\":\n if model_type == \"knn\":\n model = KNeighborsRegressor()\n if model_type == \"svm\":\n model = SVR()\n if model_type == 
\"decision_tree\":\n model = DecisionTreeRegressor()\n\n\n model.fit(train_inputs, train_labels)\n # for name, score in zip(train_inputs.columns,model.feature_importances_):\n # print(name, score)\n\n return model", "def get_classifier(name, model, param, rand_iter=-1):\r\n assert isinstance(name, str)\r\n if param: # Do grid search only if parameter list is not empty\r\n N_p = np.prod([len(l) for l in param.values()])\r\n if (N_p <= rand_iter) or rand_iter<=0:\r\n logging.info(\"Using grid search for %s\" % name)\r\n model = GridSearchCV(model, param, cv=5, scoring=\"accuracy\",\r\n n_jobs=PROCESSORS)\r\n else:\r\n logging.info(\"Using random search for %s\" % name)\r\n model = RandomizedSearchCV(model, param, cv=5, scoring=\"accuracy\",\r\n n_jobs=PROCESSORS, n_iter=rand_iter)\r\n else:\r\n logging.info(\"Not using grid search for %s\" % name)\r\n return model", "def get_model(model_name, type):\n\n MODEL_MAP = {\"torchvision\": ([\"*\"], load_torchvision),\n \"torchtransformers\": ([\"bert\", \"transformer_xl\"], load_torchtransformers),\n \"github\": ([\"deepspeech\"], load_deepspeech),\n \"custom\": ([\"simple_transformer\"], load_simple_transformer),\n \"op\": ([\"matmul1\", \"matmul2\", \"convolution1\", \"convolution2\"], load_single_operators)}\n\n if type not in MODEL_MAP:\n raise ValueError(f'{type} is not supported. Unknown type name.')\n\n model_map_item = MODEL_MAP[type]\n supported_model_names = model_map_item[0]\n\n if model_name not in supported_model_names and \\\n (len(supported_model_names) and supported_model_names[0] != \"*\"):\n raise ValueError(f'{model_name} is not supported. Unknown model name.')\n\n baseline_model, baseline_input = model_map_item[1](model_name)\n\n # Extract model to PyTorch graph\n if torch.cuda.is_available():\n if isinstance(baseline_model, torch.nn.Module):\n baseline_model = baseline_model.cuda()\n baseline_input = [inp.cuda() for inp in baseline_input]\n\n trace = torch.jit.trace(baseline_model, baseline_input)\n if isinstance(baseline_model, torch.nn.Module):\n trace = trace.float().eval()\n\n if torch.cuda.is_available():\n trace = trace.cuda()\n else:\n trace = trace.cpu()\n\n input_names = [\"input{}\".format(idx) for idx, inp in enumerate(baseline_input)]\n input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))\n return trace, input_shapes", "def get_model(name, **kwargs):\n models = {'resnet18_v1': resnet18_v1,\n 'resnet34_v1': resnet34_v1,\n 'resnet50_v1': resnet50_v1,\n 'resnet101_v1': resnet101_v1,\n 'resnet152_v1': resnet152_v1,\n 'resnet18_v1b': resnet18_v1b,\n 'resnet34_v1b': resnet34_v1b,\n 'resnet50_v1b': resnet50_v1b,\n 'resnet101_v1b': resnet101_v1b,\n 'resnet152_v1b': resnet152_v1b,\n 'resnet18_v2': resnet18_v2,\n 'resnet34_v2': resnet34_v2,\n 'resnet50_v2': resnet50_v2,\n 'resnet101_v2': resnet101_v2,\n 'resnet152_v2': resnet152_v2,\n 'resnext50_32x4d': resnext50_32x4d,\n 'resnext101_32x4d': resnext101_32x4d,\n 'resnext101_64x4d': resnext101_64x4d,\n 'se_resnext50_32x4d': se_resnext50_32x4d,\n 'se_resnext101_32x4d': se_resnext101_32x4d,\n 'se_resnext101_64x4d': se_resnext101_64x4d,\n }\n name = name.lower()\n if name not in models:\n raise ValueError(\n 'Model %s is not supported. 
Available options are\\n\\t%s' % (\n name, '\\n\\t'.join(sorted(models.keys()))))\n return models[name](**kwargs)", "def get_model(self) -> BaseLanguageModel:\n model = available_models[self.model_name.value]\n kwargs = model._lc_kwargs\n secrets = {secret: getattr(model, secret) for secret in model.lc_secrets.keys()}\n kwargs.update(secrets)\n\n model_kwargs = kwargs.get(\"model_kwargs\", {})\n for attr, value in self.dict().items():\n if attr == \"model_name\":\n # Skip model_name\n continue\n if hasattr(model, attr):\n # If the model has the attribute, add it to kwargs\n kwargs[attr] = value\n else:\n # Otherwise, add it to model_kwargs (necessary for chat models)\n model_kwargs[attr] = value\n kwargs[\"model_kwargs\"] = model_kwargs\n\n # Initialize a copy of the model using the config\n model = model.__class__(**kwargs)\n return model", "def load_trained_model(filename = 'pricing_model.p'):\n # with ZipFile(\"model.zip\",\"r\") as w:\n # w.extractall()\n \n with open(filename, 'rb') as model:\n pricingmodel = pickle.load(model)\n \n # pricingmodel.Model_made = tf.keras.models.load_model(\"Model_made.h5\")\n # pricingmodel.Model_claim = tf.keras.models.load_model(\"Model_claim.h5\")\n \n \n return pricingmodel", "def load_model():\n with open(MODEL_FILENAME, \"rb\") as file:\n model = pickle.load(file)\n return model", "def _prepare_model(model):\n\n # Ensure there is at least 1 load combination to solve if the user didn't define any\n if model.LoadCombos == {}:\n # Create and add a default load combination to the dictionary of load combinations\n model.LoadCombos['Combo 1'] = LoadCombo('Combo 1', factors={'Case 1':1.0})\n \n # Generate all meshes\n for mesh in model.Meshes.values():\n if mesh.is_generated == False:\n mesh.generate()\n\n # Activate all springs and members for all load combinations\n for spring in model.Springs.values():\n for combo_name in model.LoadCombos.keys():\n spring.active[combo_name] = True\n \n # Activate all physical members for all load combinations\n for phys_member in model.Members.values():\n for combo_name in model.LoadCombos.keys():\n phys_member.active[combo_name] = True\n \n # Assign an internal ID to all nodes and elements in the model. 
This number is different from the name used by the user to identify nodes and elements.\n _renumber(model)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def select(self):\n best_num_components = self.n_constant\n return self.base_model(best_num_components)", "def get_model(\n model_type: str,\n model_name: str = None,\n num_classes: t.Optional[int] = 1000,\n input_shape: t.Optional[t.Tuple] = (3, 224, 224),\n model: t.Optional[Module] = None,\n model_path: t.Optional[str] = None,\n classifier_params: t.Optional[t.Dict] = None,\n show: bool = False,\n) -> torch.nn.Module:\n if model_type == 'classifier':\n if isinstance(model_path, str) and model_path.lower() == 'imagenet':\n pretrained = True\n else:\n pretrained = False\n m_facade = models_facade.ModelFacade(task='classification')\n parameters = dict(requires_grad=True, pretrained=pretrained)\n model = m_facade.get_model_class(model_definition=model_name)(**parameters)\n\n # Patch last linear layer if needed\n if num_classes is not None and num_classes != 1000:\n _patch_last_linear(model=model, num_classes=num_classes)\n\n elif model_type == 'opti-classifier':\n m_facade = models_facade.ModelFacade(task='opti-classification')\n if model_path.lower() == 'imagenet':\n pretrained = model_path.lower()\n else:\n pretrained = None\n\n if classifier_params is not None:\n model_params = classifier_params\n else:\n model_params = dict(\n backbone=model_name,\n depth=5,\n num_classes=num_classes,\n num_input_channels=input_shape[0],\n num_last_filters=128,\n dropout=0.2,\n pretrained=pretrained,\n unfreeze_encoder=True,\n custom_enc_start=False,\n use_complex_final=False,\n conv_type='default',\n bn_type='default',\n activation_type='relu',\n depthwise=False,\n )\n logging.info(f\"\\tArgument classifier_params is empty, use default:\\n\\t{model_params}\")\n model = m_facade.get_model_class(model_definition='basic_classifier')(**model_params)\n elif model_type == 'custom':\n if model is None:\n raise NotImplementedError('Parameter model_mode is set to \"custom\", but model not specified.')\n # TODO: Add segmentation, detection, OCR tasks\n else:\n raise NotImplementedError(\n f\"Model type {model_type} not implemented.\" f\"Use one of ['classifier', 'opti-classifier', 'custom']\"\n )\n\n if isinstance(model_path, str) and model_path != 'ImageNet':\n if os.path.exists(model_path):\n try:\n model.load_state_dict(torch.load(model_path))\n except RuntimeError:\n model.load_state_dict(torch.load(model_path)['model_state_dict'])\n except Exception:\n raise RuntimeError(\n 'Please provide model weights either as the whole file, '\n 'or as a \\'model_state_dict\\' part of the file'\n )\n else:\n raise FileNotFoundError(f\"No such file or directory: 
{model_path}\")\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = model.to(device)\n if show:\n summary(model, input_size=input_shape)\n return model" ]
[ "0.66947436", "0.6254575", "0.6168464", "0.6149604", "0.61225873", "0.6105856", "0.6098028", "0.6089826", "0.6040291", "0.6034758", "0.60276425", "0.599368", "0.5964817", "0.58874965", "0.5885631", "0.58818895", "0.58405644", "0.58335435", "0.580409", "0.58008975", "0.5786825", "0.57797956", "0.5753536", "0.5749821", "0.57475245", "0.57456917", "0.57435966", "0.5738406", "0.5734517", "0.5733888", "0.5732601", "0.57317024", "0.5699894", "0.5695999", "0.5695441", "0.5680142", "0.56797415", "0.566548", "0.5665122", "0.566462", "0.56484735", "0.5617687", "0.56113034", "0.56097084", "0.5605945", "0.560582", "0.55885434", "0.5585719", "0.5585309", "0.55820763", "0.5580764", "0.55799097", "0.5573115", "0.5572468", "0.5570524", "0.5569361", "0.5569143", "0.55688506", "0.5566369", "0.5563039", "0.55582565", "0.55564123", "0.55545557", "0.55495393", "0.5543988", "0.5539857", "0.5535896", "0.55251086", "0.55236626", "0.551656", "0.55157954", "0.5511958", "0.5509081", "0.5505066", "0.54982424", "0.54938453", "0.54862076", "0.54818004", "0.54814", "0.547735", "0.54755217", "0.54735076", "0.547288", "0.5460123", "0.54501826", "0.54369795", "0.54335403", "0.54303354", "0.54271334", "0.54102856", "0.54077405", "0.54076076", "0.54076076", "0.54076076", "0.54076076", "0.54076076", "0.54076076", "0.54076076", "0.54076076", "0.54076076", "0.540581" ]
0.0
-1
r"""Chooses a BoTorch `MarginalLogLikelihood` class using the given `Model` class.
def choose_mll_class(
    model_class: Type[Model],
    state_dict: Optional[Dict[str, Tensor]] = None,
    refit: bool = True,
) -> Type[MarginalLogLikelihood]:
    # NOTE: We currently do not support `ModelListGP`. This code block will only
    # be relevant once we support `ModelListGP`.
    if (state_dict is None or refit) and issubclass(model_class, ModelListGP):
        return SumMarginalLogLikelihood

    return ExactMarginalLogLikelihood
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, likelihood, model):\n if not isinstance(likelihood, GaussianLikelihood):\n raise RuntimeError(\"Likelihood must be Gaussian for exact inference\")\n super(ExactMarginalLogLikelihood, self).__init__(likelihood, model)", "def from_botorch(\n cls,\n model: Model,\n mll_class: Type[MarginalLogLikelihood] = ExactMarginalLogLikelihood,\n ) -> Surrogate:\n surrogate = cls(botorch_model_class=model.__class__, mll_class=mll_class)\n surrogate._model = model\n # Temporarily disallowing `update` for surrogates instantiated from\n # pre-made BoTorch `Model` instances to avoid reconstructing models\n # that were likely pre-constructed for a reason (e.g. if this setup\n # doesn't fully allow to constuct them).\n surrogate._constructed_manually = True\n return surrogate", "def set_model(self, likelihood_model_instance):\n pass", "def set_model(self, likelihood_model_instance):\n pass", "def define_model_log_prob(model, model_loss, x, y, params_flattened_list, params_shape_list, tau_list, tau_out, normalizing_const=1., predict=False, prior_scale = 1.0, device = 'cpu'):\n\n fmodel = util.make_functional(model)\n dist_list = []\n for tau in tau_list:\n dist_list.append(torch.distributions.Normal(torch.zeros_like(tau), tau**-0.5))\n\n def log_prob_func(params):\n # model.zero_grad()\n # params is flat\n # Below we update the network weights to be params\n params_unflattened = util.unflatten(model, params)\n\n i_prev = 0\n l_prior = torch.zeros_like( params[0], requires_grad=True) # Set l2_reg to be on the same device as params\n for weights, index, shape, dist in zip(model.parameters(), params_flattened_list, params_shape_list, dist_list):\n # weights.data = params[i_prev:index+i_prev].reshape(shape)\n w = params[i_prev:index+i_prev]\n l_prior = dist.log_prob(w).sum() + l_prior\n i_prev += index\n\n # Sample prior if no data\n if x is None:\n # print('hi')\n return l_prior/prior_scale\n\n x_device = x.to(device)\n y_device = y.to(device)\n\n\n output = fmodel(x_device, params=params_unflattened)\n\n if model_loss == 'binary_class_linear_output':\n crit = nn.BCEWithLogitsLoss(reduction='sum')\n ll = - tau_out *(crit(output, y_device))\n elif model_loss == 'multi_class_linear_output':\n # crit = nn.MSELoss(reduction='mean')\n crit = nn.CrossEntropyLoss(reduction='sum')\n # crit = nn.BCEWithLogitsLoss(reduction='sum')\n ll = - tau_out *(crit(output, y_device.long().view(-1)))\n # ll = - tau_out *(torch.nn.functional.nll_loss(output, y.long().view(-1)))\n elif model_loss == 'multi_class_log_softmax_output':\n ll = - tau_out *(torch.nn.functional.nll_loss(output, y_device.long().view(-1)))\n\n elif model_loss == 'regression':\n # crit = nn.MSELoss(reduction='sum')\n ll = - 0.5 * tau_out * ((output - y_device) ** 2).sum(0)#sum(0)\n\n elif callable(model_loss):\n # Assume defined custom log-likelihood.\n ll = - model_loss(output, y_device).sum(0)\n else:\n raise NotImplementedError()\n\n if torch.cuda.is_available():\n del x_device, y_device\n torch.cuda.empty_cache()\n\n if predict:\n return (ll + l_prior/prior_scale), output\n else:\n return (ll + l_prior/prior_scale)\n\n return log_prob_func", "def GSM_log_likelihood(X, model):\n D, M = X.shape\n k = model.mix.shape[0]\n log_likelihood = 0\n for i in range(M):\n logpdf_X = 0\n for j in range(k):\n mvn = multivariate_normal(cov=model.cov[j, :])\n logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j]\n log_likelihood += logpdf_X\n return log_likelihood", "def make_mlp_likelihood(model=None, model_config=None, wiener_params=None, **kwargs):\n\n def 
random(\n self,\n keep_negative_responses=True,\n add_model=False,\n add_model_parameters=False,\n add_outliers=False,\n keep_subj_idx=False,\n ):\n \"\"\"\n Generate random samples from a given model (the dataset matches the size of the respective observated dataset supplied as an attribute of self).\n \"\"\"\n\n # This can be simplified so that we pass parameters directly to the simulator ...\n theta = np.array(model_config[\"params_default\"], dtype=np.float32)\n keys_tmp = self.parents.value.keys()\n cnt = 0\n\n for param in model_config[\"params\"]:\n if param in keys_tmp:\n theta[cnt] = np.array(self.parents.value[param]).astype(np.float32)\n cnt += 1\n\n sim_out = simulator(theta=theta, model=model, n_samples=self.shape[0], max_t=20)\n\n # Add outliers:\n if add_outliers:\n if self.parents.value[\"p_outlier\"] > 0.0:\n sim_out = hddm_dataset_generators._add_outliers(\n sim_out=sim_out,\n p_outlier=self.parents.value[\"p_outlier\"],\n max_rt_outlier=1 / wiener_params[\"w_outlier\"],\n )\n\n sim_out_proc = hddm_preprocess(\n sim_out,\n keep_negative_responses=keep_negative_responses,\n keep_subj_idx=keep_subj_idx,\n add_model_parameters=add_model_parameters,\n )\n\n if add_model:\n sim_out_proc[\"model\"] = model\n\n return sim_out_proc\n\n def pdf(self, x):\n # Check if model supplied has only two choice options\n # If yes --> check if two-dimensional input (rt, response) or one-dimensional input (rt) --> processing depends on it\n # If not --> input x has to be two dimensional (rt, response) becasuse we can't deduce response from rt\n x = np.array(x, dtype=np.float32)\n\n if len(x.shape) == 1 or x.shape[1] == 1:\n rt = x\n response = rt / np.abs(rt)\n rt = np.abs(rt)\n elif x.shape[1] == 2:\n rt = x[:, 0]\n response = x[:, 1]\n\n params = np.array(\n [self.parents[param] for param in model_config[\"params\"]]\n ).astype(np.float32)\n\n return hddm.wfpt.wiener_like_nn_mlp_pdf(\n rt,\n response,\n params,\n p_outlier=self.parents.value[\"p_outlier\"],\n w_outlier=wiener_params[\"w_outlier\"],\n network=kwargs[\"network\"],\n )\n\n def cdf(self, x):\n # TODO: Implement the CDF method for neural networks\n return \"Not yet implemented\"\n\n def make_likelihood():\n likelihood_str = make_likelihood_str_mlp(\n config=model_config, wiener_params=wiener_params\n )\n exec(likelihood_str)\n my_fun = locals()[\"custom_likelihood\"]\n return my_fun\n\n # TODO: Allow for rt's of -999 in LAN likelihoods\n def make_likelihood_missing_data():\n return\n\n likelihood_ = make_likelihood()\n\n wfpt_nn = stochastic_from_dist(\"Wienernn_\" + model, partial(likelihood_, **kwargs))\n\n wfpt_nn.pdf = pdf\n wfpt_nn.cdf_vec = None # AF TODO: Implement this for neural nets (not a big deal actually but not yet sure where this is ever used finally)\n wfpt_nn.cdf = cdf\n wfpt_nn.random = random\n return wfpt_nn", "def __init__(self, model):\n TreeLikelihoodBase.__init__(self, model)", "def set_model(self, model):\n '''returns a model'''\n if self.model==\"Lasso\":\n modelo = Lasso()\n elif self.model==\"Ridge\":\n modelo = Ridge()\n elif self.model == \"RandomForest\":\n modelo = RandomForestRegressor(random_state = 42)\n else:\n if self.model == \"XGBoost\":\n modelo = xgb.XGBRegressor()\n #modelo = xgb.XGBRegressor(booster = 'gbtree', objective ='reg:squarederror',\n # colsample_bytree = 0.3, learning_rate = 0.35,\n # max_depth = 10, alpha = 0.1, n_estimators = 500)\n\n\n return modelo", "def get_log_marginal_likelihood(self, mode='BIC'):\n if mode == 'BIC':\n if not self.isOptimized:\n print('Parameters have 
not been optimized; training now')\n self.train()\n if self.BICscore is None:\n BIC = 0\n for i, model in enumerate(self.models):\n n = model.n \n k = model.m.num_params\n L = model.m.log_likelihood()\n BIC += L - k/2*np.log(n)\n self.BICscore = BIC\n return self.BICscore\n elif mode in ['laplace', 'Laplace']:\n raise NotImplementedError('Laplace approximation is not yet implemented')\n elif mode == 'AIS':\n raise NotImplementedError('Annealed importance sampling is not yet implemented')\n else:\n raise NotImplementedError('Unrecognized marginal likelihood approximation {:s}'.format(mode))", "def predict_log_likelihood_ratio(self, X):\n class_probs = np.maximum(np.squeeze(self.model.predict(X)[0]), self.class_min)\n return np.log(class_probs / (1 - class_probs))", "def compute_log_marginal_likelihood(\n K_i: torch.Tensor,\n logDetK: torch.Tensor,\n y: torch.Tensor,\n normalize: bool = True,\n log_prior_dist=None,\n):\n lml = (\n -0.5 * y.t() @ K_i @ y\n + 0.5 * logDetK\n - y.shape[0]\n / 2.0\n * torch.log(\n 2\n * torch.tensor(\n np.pi,\n )\n )\n )\n if log_prior_dist is not None:\n lml -= log_prior_dist\n return lml / y.shape[0] if normalize else lml", "def get_log_marginal_likelihood(self, mode='BIC'):\n if mode == 'BIC':\n if not self.isOptimized:\n print('Parameters have not been optimized; training now')\n self.train()\n \n if self.BICscore is None:\n k = self.m.num_params\n L = self.m.log_likelihood()\n BIC = L - k/2*np.log(self.n)\n self.BICscore = BIC\n return self.BICscore\n elif mode in ['laplace', 'Laplace']:\n raise NotImplementedError('Laplace approximation is not yet implemented')\n elif mode == 'AIS':\n raise NotImplementedError('Annealed importance sampling is not yet implemented')\n else:\n raise NotImplementedError('Unrecognized marginal likelihood approximation {:s}'.format(mode))", "def get_log_likelihood(response_probability, response):\n pass", "def ICA_log_likelihood(X, model):\n\n # TODO: YOUR CODE HERE", "def _build(self,\n model_type: str,\n **kwargs) -> Predictor:\n if model_type == 'classifier':\n modelcls = sklearn.gaussian_process.GaussianProcessClassifier\n elif model_type == 'regressor':\n modelcls = sklearn.gaussian_process.GaussianProcessRegressor\n else:\n raise ValueError(\n '`model_type` should be \"classifier\" or \"regressor\"')\n model = modelcls(**kwargs)\n return model", "def _choose_model(self, model_str):\n if model_str == 'lg':\n return(LogisticRegression())\n elif model_str == 'rf':\n return(RandomForestClassifier())\n elif model_str == 'svm':\n # return SVC(C=1, kernel='linear') # linear boundary\n return SVC(C=1, kernel='poly', degree=2) # non-linear boundary\n # return SVC(C=1, kernel='rbf')\n # return SVC(C=1, kernel='sigmoid') # binary classification", "def GPy_log_marginal_likelihood(X, Y, keep_model=True, plot=False, variance=1., lengthscale=3., input_dim=1, length=10., view_ratio=1.1):\r\n kernel= GPy.kern.RBF(input_dim=input_dim, variance=variance, lengthscale=lengthscale)\r\n gp = GPy.models.GPRegression(X, Y, kernel)\r\n # print(gp)\r\n # print(gp.rbf.lengthscale.values)\r\n if plot:\r\n gp.plot(plot_limits=np.array([0., view_ratio*length]))\r\n if keep_model:\r\n return gp.log_likelihood(), gp\r\n elif keep_model==False:\r\n return gp.log_likelihood()\r\n else:\r\n return print(\"keep_model must be True or False.\")", "def predict_log_likelihood_ratio(self, X):\n Xs = self.scaler.transform(X)\n class_probs = np.maximum(self.model.predict_proba(Xs)[:, 1], self.class_min)\n return np.log(class_probs / (1 - class_probs))", "def 
initialize_model(model_type, **kwargs):\n try:\n model_class = MODEL_DICT[model_type]\n except KeyError:\n raise RuntimeError(f\"Cannot find model class for {model_type}. Pick one of {list(MODEL_DICT.keys())}\")\n\n return model_class(**kwargs)", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def model_class(self):\n model_name = self.model_name()\n\n if not model_name:\n return None\n\n try:\n (app, mdl) = model_name.strip().split('.')\n except ValueError:\n logger.error(f\"Invalid 'model' parameter for setting {self.key} : '{model_name}'\")\n return None\n\n app_models = apps.all_models.get(app, None)\n\n if app_models is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no app named '{app}'\")\n return None\n\n model = app_models.get(mdl, None)\n\n if model is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no model named '{mdl}'\")\n return None\n\n # Looks like we have found a model!\n return model", "def MVN_log_likelihood(X, model):\n D, M = X.shape\n X_normalized = normalize_log_likelihoods(X.copy())\n mvn = multivariate_normal(mean=model.mean, cov=model.cov)\n return mvn.logpdf(X_normalized.T).sum()\n # log_2pi = D * np.log(2 * np.pi)\n # log_det = np.log(np.linalg.det(model.cov))\n # residuals = calc_residuals(X_normalized, model.mean, \"minus\")\n # mahalanobis_distance = np.dot(np.dot(residuals.T, np.linalg.inv(model.cov)), residuals)\n # return -0.5 * (log_2pi + log_det + mahalanobis_distance).sum()", "def log_marginal(self):\n #\n # Predictive covariance of x is sum of covariance of phi a and covariance of x|a\n x_Sigma = self.phi @ self.phi.T + np.diag(self.sigma_n**2 * np.ones(self.M))\n #\n # Predictive mean is 0 by symmetry\n # so given that x is distributed as a MVN, the exact marginal is\n lp_exact = st.multivariate_normal.logpdf(self.x, cov=x_Sigma)\n #\n return lp_exact", "def MH_step(log_like, log_prior, model_func, prop_params, curr_params,\\\n curr_like, curr_prior, max_like, maxL_params):\n # proposed model:\n prop_model = model_func(prop_params)\n prop_like = log_like(prop_model)\n prop_prior = log_prior(prop_params)\n\n # posterior:\n post_old = curr_like + curr_prior\n post_new = prop_like + prop_prior\n \n # acceptance testing:\n a = np.exp(post_new - post_old)\n draw = np.random.uniform(0, 1)\n \n if (a > draw) and (a < np.inf):\n accept = True\n curr_params = prop_params\n #print(curr_like, max_like)\n if prop_like > max_like:\n max_like = prop_like\n maxL_params = curr_params\n else:\n accept = False\n curr_params = curr_params\n \n return(accept, curr_params, maxL_params, max_like)", "def __init__(self, reg_penalty='l2', reg_inv=1.0, k_fold=5, random_state=0):\n print(\"Initialize model Logistic Regression\")\n self.reg_penalty = reg_penalty\n self.reg_inv = reg_inv\n self.k_fold = k_fold\n self.random_state = random_state\n self.model = sklearn.linear_model.LogisticRegression(penalty=self.reg_penalty,\n C=self.reg_inv,\n max_iter=1000, \n random_state=self.random_state)", "def log_likelihood(self, data, reward_model, bias_params):", "def get_model_class(model_name, task_name):\n if task_name == 'rocstories':\n return OpenAIGPTDoubleHeadsModel if model_name == 'openai-gpt' else GPT2DoubleHeadsModel\n else:\n return OpenAIGPTLMHeadModel if model_name == 'openai-gpt' else GPT2LMHeadModel", "def __init__(self, model: MT):\n self.model: Final[MT] = model", "def multinomial_class(\n distribution_or_probs: Union[tfd.Distribution, 
jnp.DeviceArray]\n) -> jnp.DeviceArray:\n if isinstance(distribution_or_probs, tfd.Distribution):\n return jnp.argmax(distribution_or_probs.logits_parameter(), axis=1)\n return jnp.argmax(distribution_or_probs, axis=1)", "def mol_distribution(y, log_scale_min=None):\n if log_scale_min is None:\n log_scale_min = torch.log(torch.tensor(1e-14)).item() #float(np.log(1e-14))\n assert y.size(1) % 3 == 0\n nr_mix = y.size(1) // 3\n\n # B x T x C\n y = y.transpose(1, 2)\n logit_probs = y[:, :, :nr_mix]\n\n # sample mixture indicator from softmax\n temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5)\n temp = logit_probs.data - torch.log(- torch.log(temp))\n _, argmax = temp.max(dim=-1)\n\n # (B, T) -> (B, T, nr_mix)\n one_hot = F.one_hot(argmax, nr_mix).float()\n # select logistic parameters\n means = torch.sum(y[:, :, nr_mix:2 * nr_mix] * one_hot, dim=-1)\n log_scales = torch.clamp(torch.sum(\n y[:, :, 2 * nr_mix:3 * nr_mix] * one_hot, dim=-1), min=log_scale_min)\n # sample from logistic & clip to interval\n # we don't actually round to the nearest 8bit value when sampling\n u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5)\n x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u))\n\n x = torch.clamp(torch.clamp(x, min=-1.), max=1.)\n\n return x", "def figure_of_merit_from(self, parameter_list):\r\n return self.log_likelihood_from(parameter_list=parameter_list)", "def multi_class5_classification_model_logits() -> tf.keras.Model:\n\n # Build model\n model = tf.keras.Sequential(tf.keras.layers.Dense(5, activation=None))\n model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True))\n\n return model", "def maximize_loglik(model_params: Union[CupidParams, CupidParamsCSHeteroxy, CupidParamsFcmnl],\n x_init: np.ndarray,\n lower: Optional[np.ndarray] = None,\n upper: Optional[np.ndarray] = None,\n checkgrad: Optional[bool] = False,\n verbose: Optional[bool] = False,\n fixed_vars: Optional[List[int]] = None,\n fixed_vals: Optional[List[float]] = None,\n options: Optional[Dict] = {'iprint': 1}) -> Tuple[float, np.ndarray, int]:\n n_params = x_init.size\n try:\n kc = KN_new()\n except:\n bs_error_abort(\"Failed to find a valid Knitro license.\")\n\n KN_add_vars(kc, n_params)\n\n # bounds, if any\n if lower is None:\n # not necessary since infinite\n KN_set_var_lobnds(kc, xLoBnds=np.full(n_params, -KN_INFINITY))\n else:\n KN_set_var_lobnds(kc, xLoBnds=lower)\n if upper is None:\n KN_set_var_upbnds(kc, xUpBnds=np.full(n_params, KN_INFINITY))\n else:\n KN_set_var_upbnds(kc, xUpBnds=upper)\n\n # Define an initial point. 
If not set, Knitro will generate one.\n KN_set_var_primal_init_values(kc, xInitVals=x_init)\n\n if fixed_vars is not None:\n assert fixed_vals is not None\n KN_set_var_fxbnds(kc, fixed_vars, fixed_vals)\n\n cb = KN_add_eval_callback(kc, evalObj=True, funcCallback=log_likelihood)\n\n KN_set_cb_user_params(kc, cb, model_params)\n\n KN_set_cb_grad(kc, cb, objGradIndexVars=KN_DENSE,\n gradCallback=grad_log_likelihood)\n\n KN_set_int_param(kc, KN_PARAM_OUTLEV, KN_OUTLEV_ALL)\n\n if checkgrad:\n # Perform a derivative check.\n KN_set_int_param(kc, KN_PARAM_DERIVCHECK, KN_DERIVCHECK_ALL)\n\n # Solve the problem.\n nStatus = KN_solve(kc)\n\n loglik_val, estimates = print_optimization_results(kc)\n\n print_stars()\n print(f\" Value of log-likelihood: {loglik_val: > 8.3f}\\n\")\n print()\n\n return loglik_val, np.array(estimates), nStatus", "def build_clf_model(arch=arch_lmd, scale_weights=1.0):\n clf = RhythmLikelihood(graph=Network(arch))\n vs = clf.graph.param_values()\n\n for n in vs:\n for k in vs[n]:\n if k.count('W'):\n vs[n][k] *= scale_weights\n\n clf.graph.set_param_values(vs)\n\n return clf", "def _create_log_likelihood(self, individual):\n # Get individuals data\n times = []\n observations = []\n mask = self._data[self._id_key] == individual\n data = self._data[mask][\n [self._time_key, self._obs_key, self._value_key]]\n for output in self._mechanistic_model.outputs():\n # Mask data for observable\n observable = self._output_observable_dict[output]\n mask = data[self._obs_key] == observable\n temp_df = data[mask]\n\n # Filter times and observations for non-NaN entries\n mask = temp_df[self._value_key].notnull()\n temp_df = temp_df[[self._time_key, self._value_key]][mask]\n mask = temp_df[self._time_key].notnull()\n temp_df = temp_df[mask]\n\n # Collect data for output\n times.append(temp_df[self._time_key].to_numpy())\n observations.append(temp_df[self._value_key].to_numpy())\n\n # # Count outputs that were measured\n # # TODO: copy mechanistic model and update model outputs.\n # # (Useful for e.g. 
control group and dose group training)\n # n_measured_outputs = 0\n # for output_measurements in observations:\n # if len(output_measurements) > 0:\n # n_measured_outputs += 1\n\n # Create log-likelihood and set ID to individual\n log_likelihood = chi.LogLikelihood(\n self._mechanistic_model, self._error_models, observations, times)\n log_likelihood.set_id(individual)\n\n return log_likelihood", "def _object_func_marginals_c_log(log_params, *args, **kwargs):\n\t return _object_func_marginals_c(numpy.exp(log_params), *args, **kwargs)", "def log_marginal_likelihood(self) -> tf.Tensor:\n X, Y = self.data\n Y = Y[..., :-1]\n K = self.kernel(X)\n ks = self._add_noise_cov(K)\n L = tf.linalg.cholesky(ks)\n m = self.mean_function(X)\n\n # [R,] log-likelihoods for each independent dimension of Y\n log_prob = gpflow.logdensities.multivariate_normal(Y, m, L)\n return tf.reduce_sum(log_prob)", "def log_marg_likelihood(self):\n self.A = np.linalg.inv(self.Sn)\n term1 = self.t - [email protected]\n self.Evidence_mN = (self.beta/2)*np.linalg.norm(term1)+ (self.alpha/2)*[email protected]\n A_abs = np.linalg.eigvals(self.A)\n A_abs = np.prod(A_abs)\n\n self.marg_lik = ((self.p)/2)*np.log(self.alpha) + (self.n/2)*np.log(self.beta) - self.Evidence_mN - (1/2)*np.log(A_abs) - (self.n/2)*np.log(2*np.pi)\n\n return self.marg_lik", "def get_classifier(name, model, param, rand_iter=-1):\r\n assert isinstance(name, str)\r\n if param: # Do grid search only if parameter list is not empty\r\n N_p = np.prod([len(l) for l in param.values()])\r\n if (N_p <= rand_iter) or rand_iter<=0:\r\n logging.info(\"Using grid search for %s\" % name)\r\n model = GridSearchCV(model, param, cv=5, scoring=\"accuracy\",\r\n n_jobs=PROCESSORS)\r\n else:\r\n logging.info(\"Using random search for %s\" % name)\r\n model = RandomizedSearchCV(model, param, cv=5, scoring=\"accuracy\",\r\n n_jobs=PROCESSORS, n_iter=rand_iter)\r\n else:\r\n logging.info(\"Not using grid search for %s\" % name)\r\n return model", "def get_log_likelihood(response_probability, observed_response):\n \n return np.log(response_probability[observed_response])", "def BIC(model, logL, nPeople):\n\t\tph, pvh = model\n\t\tnClusters, nQuestions, nAnswers = pvh.shape\n\n\t\tnParams = (nClusters - 1) + nClusters * nQuestions *(nAnswers - 1)\n\t\tbic = -2. 
* logL + nParams * np.log(nPeople)\n\t\treturn bic", "def _object_func_marginals_log(log_params, *args, **kwargs):\n return _object_func_marginals(numpy.exp(log_params), *args, **kwargs)", "def FIM_MonteCarlo1(representation, loader, model,\n variant='classif_logsoftmax'):\n\n if variant == 'classif_logsoftmax':\n\n def loss(input, target):\n log_softmax = F.log_softmax(model(input), dim=1)\n probabilities = torch.exp(log_softmax)\n sampled_targets = torch.multinomial(probabilities, 1)\n return torch.gather(log_softmax, 1, sampled_targets)\n\n generator = M2Gradients(model=model,\n dataloader=loader,\n loss_function=loss)\n return representation(generator)\n else:\n raise NotImplementedError", "def predict_log_likelihood_ratio(self, X):\n raise NotImplementedError", "def marginal_log_likelihood(self, theta):\n\n # Theta is on a log scale\n alpha = np.exp(theta[0])\n beta = 1 / np.exp(theta[1])\n\n D = self.X_transformed.shape[1]\n N = self.X_transformed.shape[0]\n\n A = beta * np.dot(self.X_transformed.T, self.X_transformed)\n A += np.eye(self.X_transformed.shape[1]) * alpha\n try:\n A_inv = np.linalg.inv(A)\n except np.linalg.linalg.LinAlgError:\n A_inv = np.linalg.inv(A + np.random.rand(A.shape[0], A.shape[1]) * 1e-8)\n \n\n m = beta * np.dot(A_inv, self.X_transformed.T)\n m = np.dot(m, self.y)\n\n mll = D / 2 * np.log(alpha)\n mll += N / 2 * np.log(beta)\n mll -= N / 2 * np.log(2 * np.pi)\n mll -= beta / 2. * np.linalg.norm(self.y - np.dot(self.X_transformed, m), 2)\n mll -= alpha / 2. * np.dot(m.T, m)\n mll -= 0.5 * np.log(np.linalg.det(A))\n\n if self.prior is not None:\n mll += self.prior.lnprob(theta)\n\n return mll", "def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")", "def __findBestLogProbability(self):\n best_model = None\n highest_log_probability = -sys.maxsize# (np.finfo(float).eps)\n\n # Find the highest model\n for item in self.data_array:\n if item[1] > highest_log_probability:\n best_model = item\n highest_log_probability = item[1]\n\n return best_model", "def set_marginals(self, bw_method=None):\n \n # Log density\n def kde(sample):\n k = gaussian_kde(np.transpose(sample), bw_method=bw_method)\n return lambda X: k.logpdf(np.array(X))[0]\n \n for para in [\"theta\", \"q\"]:\n for typ in [\"prior\", \"post\"]:\n sample = getattr(self, typ)[para][\"sample\"]\n \n if sample is None:\n getattr(self, typ)[para][\"marginal\"] = [\n None\n for I in util.marg_1_2]\n continue\n \n getattr(self, typ)[para][\"marginal\"] = [\n kde(sample[:, I])\n for I in util.marg_1_2]\n \n if self.hyperpara[0] == 3:\n if self.hyperpara[1] == \"i\":\n qu_diff_dist = [\n TruncatedDistribution(\n Normal(self.para[i, 0], self.para[i, 1]),\n 0.0,\n TruncatedDistribution.LOWER)\n for i in range(3)]\n qu_dist = [\n qu_diff_dist[0],\n qu_diff_dist[0] + qu_diff_dist[1],\n qu_diff_dist[0] + qu_diff_dist[1] + qu_diff_dist[2]]\n \n self.prior[\"q\"][\"marginal\"][:3] = [\n qu_dist[i].computeLogPDF\n for i in range(3)]\n elif self.hyperpara[1] == \"me\":\n self.prior[\"q\"][\"marginal\"][:3] = [\n TruncatedDistribution(\n Normal(self.para[i, 0], self.para[i, 1]),\n 0.0,\n TruncatedDistribution.LOWER).computeLogPDF\n for i in range(3)]", "def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to 
categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = -np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood", "def get_class(klass, kind):\n return getattr(sys.modules['model'], kind, None)", "def log_likelihood(param, logP, choiceidx, piL, yield_probs=False):\n beta, bg, bc, bh = param\n b = (0., bg, bc, bh)\n exponent = beta * (logP + b)\n l1 = np.exp(exponent)[np.arange(len(choiceidx)),choiceidx] / np.exp(exponent).sum(-1)\n l0 = 1./4.\n p = piL * l0 + (1-piL) * l1\n if yield_probs:\n if logP.ndim == 1:\n l1 = np.exp(exponent) / np.exp(exponent).sum(-1)\n elif logP.ndim == 2:\n l1 = np.exp(exponent) / np.exp(exponent).sum(-1)[:,None]\n probs = piL * l0 + (1-piL) * l1\n return np.log(p).sum(), probs\n else:\n return np.log(p).sum()", "def get_model(params):\r\n module_name, class_name = params.model.name.rsplit('.', 1)\r\n i = importlib.import_module(module_name)\r\n return getattr(i, class_name)", "def choose_class(self, *args, **kwargs):", "def model_likelihood(\n self, obs: Tensor, actions: Tensor, next_obs: Tensor\n ) -> Tensor:\n return self.model.log_prob(obs, actions, next_obs)", "def get_model():\n global model_class\n if model_class is None:\n from fluent_comments.models import FluentComment\n\n # Our proxy model that performs select_related('user') for the comments\n model_class = FluentComment\n\n return model_class", "def loglikelihood(model, data, q):\n\tph, pvh = model\n\tnPeople, nQuestions = data.shape\n\tlogL = 0\n\tfor i in range(nPeople):\n\t\tanswers = data[i,:]\n\t\tfor k in range(nQuestions):\n\t\t\tlogL += np.log(sum(pvh[:, k, int(answers[k] - 1)] * q[i,:].T))\n\treturn logL", "def sampled_softmax_logprobs(self, logit_target_in_model, logit_noise_in_model, logit_noise_in_noise, logit_target_in_noise):\n logits = torch.cat([logit_target_in_model.unsqueeze(2), logit_noise_in_model], dim=2)\n q_logits = torch.cat([logit_target_in_noise.unsqueeze(2), logit_noise_in_noise], dim=2)\n # subtract Q for correction of biased sampling\n logits = logits - q_logits\n logproba = torch.nn.LogSoftmax(dim=-1)(logits)\n \n labels = torch.zeros_like(logits.narrow(2, 0, 1)).squeeze(2).long()\n \n logprobs = -torch.nn.NLLLoss(reduction='none')(logproba.view(-1, logproba.size(-1)), labels.view(-1)).view_as(labels)\n logprobs = torch.sum(logprobs, dim=1)\n \n return logprobs", "def get_log_prob(self, X, target=None):\n\n # We don't support the target argument for now.\n assert target is None\n\n batch_size, seq_len, dim = X.size()\n X = X.contiguous().view(-1, dim)\n\n head_y = self.head(X)\n # log_probs = head_y.new_zeros(X.size(0), self.vocab_size)\n\n head_size = self.cutoff[0] + len(self.tail)\n head_log_probs = self.lsm(head_y)\n log_probs_list = [head_log_probs[:, :self.cutoff[0]]]\n\n if len(self.tail) > 0:\n tail_priors = head_log_probs[:, self.cutoff[0]:head_size]\n\n for i in range(len(self.tail)):\n tail_i = self.lsm(self.tail[i](X))\n tail_i = tail_i + 
tail_priors[:, i, None]\n log_probs_list.append(tail_i)\n\n log_probs = torch.cat(log_probs_list, dim=1)\n log_probs = log_probs.view(batch_size, seq_len, self.vocab_size)\n return log_probs", "def log_likelihood_function(self, instance):\r\n\r\n xvalues = np.arange(self.data.shape[0])\r\n model_data = instance.profile_from_xvalues(xvalues=xvalues)\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n log_likelihood = -0.5 * sum(chi_squared_map)\r\n\r\n return log_likelihood", "def nce_logprobs(self, logit_target_in_model, logit_noise_in_model, logit_noise_in_noise, logit_target_in_noise):\n\n # NOTE: prob <= 1 is not guaranteed\n logit_model = torch.cat([logit_target_in_model.unsqueeze(2), logit_noise_in_model], dim=2)\n logit_noise = torch.cat([logit_target_in_noise.unsqueeze(2), logit_noise_in_noise], dim=2)\n\n # predicted probability of the word comes from true data distribution\n # The posterior can be computed as following\n # p_true = logit_model.exp() / (logit_model.exp() + self.noise_ratio * logit_noise.exp())\n # For numeric stability we compute the logits of true label and\n # directly use bce_with_logits.\n # Ref https://pytorch.org/docs/stable/nn.html?highlight=bce#torch.nn.BCEWithLogitsLoss\n #logit_true = logit_model - logit_noise - math.log(self.noise_ratio)\n p_true = logit_model.exp() / (logit_model.exp() + self.noise_ratio * logit_noise.exp())\n \n label = torch.zeros_like(logit_model)\n label[:, :, 0] = 1\n\n #logprobs = -self.bce_with_logits(logit_true, label)\n logprobs = -nn.BCELoss(reduction='none')(p_true, label)\n logprobs = torch.sum(logprobs.view(logprobs.size(0), -1), dim=1)\n \n return logprobs", "def brute_force_marginal_likelihood(self, X, n_samples=10000, gen_seed=0):\n check_data_type_column_data(X)\n\n if type(n_samples) is not int:\n raise TypeError(\"n_samples should be an int\")\n if n_samples <= 0:\n raise ValueError(\"n_samples should be greater than 0\")\n if type(gen_seed) is not int:\n raise TypeError(\"gen_seed should be an int\")\n\n rng = random.Random(gen_seed)\n log_likelihoods = [0]*n_samples\n for i in range(n_samples):\n params = self.sample_parameters_given_hyper(gen_seed=next_seed(rng))\n log_likelihoods[i] = self.log_likelihood(X, params)\n\n log_marginal_likelihood = logmeanexp(log_likelihoods)\n\n return log_marginal_likelihood", "def choose_model(\n name: str,\n log_dir: str = \"logs\",\n n_estimators: int = 100,\n max_depth: int = 6,\n xgb_lr: float = 0.3,\n gamma_xgb: float = 0.0,\n min_child_weight: float = 1.0,\n subsample: float = 1.0,\n colsample_bytree: float = 1.0,\n reg_lambda: float = 1.0,\n C: float = 1.0,\n nn_wt: float = 1.0,\n epochs: int = 50,\n batch_size: int = 64,\n nn_lr: float = 1e-3,\n lr_step: int = 10000,\n lr_decay: float = 0.75,\n weight_decay: float = 1e-3,\n balance_weights: bool = True,\n **kwargs,\n) -> BaseClassifier:\n xgb_model = XGBClassifier(\n n_estimators=n_estimators,\n max_depth=max_depth,\n learning_rate=xgb_lr,\n gamma=gamma_xgb,\n min_child_weight=min_child_weight,\n subsample=subsample,\n colsample_bytree=colsample_bytree,\n reg_lambda=reg_lambda,\n random_state=0,\n )\n svm_model = SVC(C=C, class_weight=\"balanced\", random_state=0)\n random_forest_classifier = RandomForestClassifier()\n\n nn_model = NN(\n epochs=epochs,\n batch_size=batch_size,\n log_dir=log_dir,\n learning_rate=nn_lr,\n lr_step=lr_step,\n lr_decay=lr_decay,\n weight_decay=weight_decay,\n balance_weights=balance_weights,\n random_state=0,\n )\n\n if name == \"xgb\":\n 
return xgb_model\n elif name == \"svm\":\n return svm_model\n elif name == \"ensemble\":\n model_wt = np.array([1.0, nn_wt])\n model_wt /= sum(model_wt)\n return VotingClassifier(\n [(\"xgb\", xgb_model), (\"nn\", nn_model)], voting=\"soft\", weights=model_wt\n )\n elif name == \"forest\":\n return random_forest_classifier\n elif name == \"nn\":\n return nn_model\n else:\n raise ValueError(f\"Invalid model name: {name}\")", "def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])", "def make_mlp_likelihood_reg(\n model=None, model_config=None, wiener_params=None, **kwargs\n):\n\n # Need to rewrite these random parts !\n def random(\n self,\n keep_negative_responses=True,\n add_model=False,\n add_model_parameters=False,\n add_outliers=False,\n keep_subj_idx=False,\n ):\n \"\"\"\n Function to sample from a regressor based likelihood. Conditions on the covariates.\n \"\"\"\n param_dict = deepcopy(self.parents.value)\n del param_dict[\"reg_outcomes\"]\n\n param_data = np.zeros(\n (self.value.shape[0], len(model_config[\"params\"])), dtype=np.float32\n )\n\n cnt = 0\n for tmp_str in model_config[\"params\"]:\n if tmp_str in self.parents[\"reg_outcomes\"]:\n # param_data[:, cnt] = param_dict[tmp_str].values\n param_data[:, cnt] = param_dict[tmp_str].loc[self.value.index].values\n\n for linked_indirect_regressor in param_links[tmp_str]:\n # param_data[:, cnt] = (\n # param_data[:, cnt]\n # + param_dict[linked_indirect_regressor].values\n # )\n\n param_data[:, cnt] = (\n param_data[:, cnt]\n + param_dict[linked_indirect_regressor]\n .loc[self.value.index]\n .values\n )\n\n for linked_indirect_beta in param_links_betas[tmp_str]:\n param_data[:, cnt] = (\n param_data[:, cnt]\n + param_dict[linked_indirect_beta[0]]\n * self.value[linked_indirect_beta[1]]\n )\n else:\n param_data[:, cnt] = param_dict[tmp_str]\n cnt += 1\n\n sim_out = simulator(\n theta=param_data, model=model, n_samples=1, max_t=20 # n_trials = size,\n )\n\n # Add outliers:\n if add_outliers:\n if self.parents.value[\"p_outlier\"] > 0.0:\n sim_out = hddm_dataset_generators._add_outliers(\n sim_out=sim_out,\n p_outlier=self.parents.value[\"p_outlier\"],\n max_rt_outlier=1 / wiener_params[\"w_outlier\"],\n )\n\n sim_out_proc = hddm_preprocess(\n sim_out,\n keep_negative_responses=keep_negative_responses,\n add_model_parameters=add_model_parameters,\n keep_subj_idx=keep_subj_idx,\n )\n\n if add_model:\n sim_out_proc[\"model\"] = model\n\n return sim_out_proc\n\n def pdf(self, x):\n return \"Not yet implemented\"\n\n def cdf(self, x):\n # TODO: Implement the CDF method for neural networks\n return \"Not yet implemented\"\n\n def make_likelihood():\n if indirect_betas_present or indirect_regressors_present:\n likelihood_str = make_reg_likelihood_str_mlp(\n config=model_config,\n wiener_params=wiener_params,\n param_links=param_links,\n param_links_betas=param_links_betas,\n )\n else:\n likelihood_str = make_reg_likelihood_str_mlp_basic(\n config=model_config,\n wiener_params=wiener_params,\n )\n\n exec(likelihood_str)\n my_fun = locals()[\"custom_likelihood_reg\"]\n return my_fun\n\n # TODO: Allow for missing data in LAN likelihoods\n def make_likelihood_missing_data():\n return\n\n param_links, indirect_regressors_present = __prepare_indirect_regressors(\n model_config=model_config\n )\n param_links_betas, indirect_betas_present = __prepare_indirect_betas(\n model_config=model_config\n )\n\n likelihood_ = make_likelihood()\n stoch = stochastic_from_dist(\"wfpt_reg\", partial(likelihood_, **kwargs))\n 
stoch.pdf = pdf\n stoch.cdf = cdf\n stoch.random = random\n return stoch", "def classify(im, model):\n\n classe = model.predict(im)\n classe = classe.argmax(axis=-1) # taking index of the maximum %\n return classe[0]", "def log_likelihood(X, Z, variable_types):\n\tk = Z['pi_unconstrained'].shape[1]+1 # the number of mixture components\n\t## We gather the log probabilities of each indiv in batch for each mixture component into\n\t## a matrix of size (B x k), where B is the batch size.\n\tlogps = torch.zeros([len(X), k])\n\t## First insert the mixture weight contribution to the array\n\tlogps += logsoftmax(Z['pi_unconstrained'], dim=-1)\n\t## Next loop over the features and sum the contributions to logps\n\tfor i, (key, z) in enumerate(Z.items()):\n\t\tif key not in ['pi_unconstrained']:\n\t\t\tdata = torch.Tensor(X[key].values).unsqueeze(-1)\n\t\t\tdist = variable_types[key]\n\t\t\tif dist == 'Categorical':\n\t\t\t\talpha = softmax(z, dim=-1, additional=-50.)\n\t\t\t\tlogps += Categorical(probs = alpha).log_prob(data)\n\t\t\telif dist == 'Bernoulli':\n\t\t\t\ttheta = z\n\t\t\t\tlogps += Bernoulli(logits = theta).log_prob(data)\n\t\t\telif dist == 'Beta':\n\t\t\t\talpha, beta = torch.exp(z).transpose(0,1)\n\t\t\t\tlogps += Beta(alpha, beta).log_prob(data)\n\t## Compute logsumexp over the mixture components and return the sum over data elements.\n\tlogp = torch.logsumexp(logps, dim=-1)\n\treturn logp.sum()", "def log_likelihood(self):\r\n return (-0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) -\r\n 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z)", "def __init__(self, classific_method=\"LogisticRegression\"):\n\t\tself.classific_method = classific_method", "def best_other_class(logits, exclude):\n y_onehot = torch.zeros_like(logits)\n y_onehot.scatter_(1, exclude, 1)\n # make logits that we want to exclude a large negative number\n other_logits = logits - y_onehot * 1e9\n return other_logits.max(1)[0]", "def likelihood(mean, cov, z, log_hyperparam=True, profile_hyperparam='var'):\n\n # Set likelihood method depending on the type of profile.\n if profile_hyperparam == 'none':\n return FullLikelihood(mean, cov, z, log_hyperparam)\n elif profile_hyperparam == 'var':\n return ProfileLikelihood(mean, cov, z, log_hyperparam)\n elif profile_hyperparam == 'var_approx':\n return ProfileLikelihoodApprox(mean, cov, z)\n elif profile_hyperparam == 'var_noise':\n return DoubleProfileLikelihood(mean, cov, z, log_hyperparam)\n else:\n raise ValueError('\"profile_hyperparam\" can be one of \"none\", ' +\n '\"var\", or \"var_noise\".')", "def max_log_likelihood_tracer(self) -> Tracer:\r\n return self.analysis.tracer_via_instance_from(instance=self.instance)", "def get_model_class(class_name, kwargs={}):\n # , Perceptron, PassiveAggressiveRegressor\n # , NuSVR, LinearSVR\n\n if class_name == 'LinearRegression':\n from sklearn.linear_model import LinearRegression\n return LinearRegression(**kwargs)\n\n if class_name == 'SGDRegressor':\n from sklearn.linear_model import SGDRegressor\n return SGDRegressor(**kwargs)\n\n if class_name == 'SVR':\n from sklearn.svm import SVR\n return SVR(**kwargs)\n\n if class_name == 'DecisionTreeRegressor':\n from sklearn.tree import DecisionTreeRegressor\n return DecisionTreeRegressor(**kwargs)\n\n if class_name == 'ExtraTreesRegressor':\n from sklearn.ensemble import ExtraTreesRegressor\n return ExtraTreesRegressor(**kwargs)\n\n if class_name == 'KNeighborsRegressor':\n from sklearn.neighbors import KNeighborsRegressor\n return 
KNeighborsRegressor(**kwargs)\n\n if class_name == 'MLPRegressor':\n from sklearn.neural_network import MLPRegressor\n return MLPRegressor(**kwargs)\n\n raise Exception(\"Unknown Model class\")", "def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n # DONE implement model selection based on BIC scores\n\n # Bayesian information criteria: BIC = −2 * log L + p * log N,\n # where\n # • L is the likelihood of the fitted model,\n # • p is the number of parameters, and\n # • N is the number of data points.\n # The term −2 log L decreases with increasing model complexity\n # (more parameters), whereas the penalties 2p or p log N increase with\n # increasing complexity. The BIC applies a larger penalty\n # when N > e 2 = 7.4.\n # Model selection: The lower the BIC value the better the model\n\n select_bic = float(\"inf\")\n select_model = None\n for n_components in range(self.min_n_components, self.max_n_components + 1):\n try:\n model = self.base_model(n_components)\n logL = model.score(self.X, self.lengths)\n # https://discussions.udacity.com/t/verifing-bic-calculation/246165/5\n # https://discussions.udacity.com/t/number-of-parameters-bic-calculation/233235/17\n p = n_components**2 + 2*n_components * model.n_features - 1\n logN = math.log(sum(self.lengths))\n bic = - 2 * logL + p * logN\n if bic < select_bic:\n select_bic = bic\n select_model = model\n except:\n continue\n return select_model", "def log_likelihood(self):\n return -0.5*self.D*self.K_logdet + self._model_fit_term() + self.likelihood.Z", "def pick_model(self):\n return ConvModel(self.model_pmeter)", "def get_marginal(\n target=None\n ):\n pass", "def get_model(config):\n if not isinstance(config, ModelConfig):\n raise ValueError(\"Get model must be a config file. 
\")\n\n identifier = str(config.class_id).lower()\n if identifier in ['vgg', 'vgg16', 'vgg19']:\n return vgg.get_model(config)\n elif identifier in ['resnet', 'resnet50',]:\n return resnet.get_model(config)", "def loglikelihood(self, y):\n raise NotImplementedError", "def new(arg):\n if arg == 0:\n return Bernouli()\n elif arg == 1:\n return Multinomial()\n else:\n raise AssertionError(\"Cannot create classifier with given arg: {}\".format(arg))", "def select_classifier(model, X, A, n_splits=5, loss_type='01', seed=None):\n if isinstance(model, (GridSearchCV, RandomizedSearchCV)):\n selected_model = _select_classifier_from_sk_search(model, X, A)\n elif isinstance(model, list):\n selected_model = _select_classifier_from_list(candidates=model, X=X, A=A, n_splits=n_splits, seed=seed,\n loss_type=loss_type)\n elif isinstance(model, dict):\n selected_model = _select_classifier_from_grid(X=X, A=A, n_splits=n_splits, seed=seed, **model,\n loss_type=loss_type)\n else: # A regular classifier was passed\n selected_model = model\n return selected_model", "def find_model_class(model_module):\n\n model = None\n\n for obj_key, obj_value in model_module.__dict__.items():\n\n if obj_key in BASE_MODEL_CLASSES:\n continue\n elif hasattr(model_module.__dict__[obj_key], '__bases__'):\n if model_module.__dict__[obj_key].__bases__[0] in [MantraModel]:\n model = model_module.__dict__[obj_key]\n\n return model", "def logProbFn(mcVec, logLikeFn, logPriorFn, fitFn, params, freqs, data, sigmas):\n #Pad the mcVec with the non-varying parameter values in the right locations\n paramsVec = mcVec2paramsVec(mcVec, params)\n\n #Update the log-liklihood using the fitFn and the new paramsVec\n logLike = logLikeFn(fitFn, paramsVec, freqs, data, sigmas)\n\n #Update the prior using the parameter bounds and the new paramsVec\n logPrior = logPriorFn(paramsVec, params)\n\n #Update the log-Probability\n logProb = logLike + logPrior\n return logProb", "def log_likelihood(self):\n raise NotImplementedError(\"the log_likelihood property should \"\n \"be defined in the Estimator sub-class\")", "def get_classifier(clf_name, params):\n if clf_name == 'KNN':\n clf = KNeighborsClassifier(n_neighbors=params[\"K\"])\n\n elif clf_name == 'SVM':\n clf = SVC(C=params[\"C\"])\n\n elif clf_name == 'Random Forest':\n clf = RandomForestClassifier(n_estimators=params[\"n_estimators\"],\n max_depth = params[\"max_depth\"], random_state=1234)\n\n else:\n clf = LogisticRegression()\n\n return clf", "def marginal_ln_likelihood(samples, prior, data):\n n_samples = len(samples)\n n_linear = len(prior._linear_equiv_units)\n mu = np.zeros(n_linear)\n\n marg_ll = np.zeros(n_samples)\n for n, M, Lambda, ivar, *_ in get_M_Lambda_ivar(samples, prior, data):\n try:\n marg_ll[n], *_ = likelihood_worker(data.rv.value, ivar, M,\n mu, np.diag(Lambda),\n make_aA=False)\n except np.linalg.LinAlgError as e:\n raise e\n\n return marg_ll", "def log_likelihood(source_sentence: List[int],\n target_sentence: List[int],\n model: Seq2SeqAttentionModel) -> torch.Tensor:\n encoder_hiddens = encode_all(source_sentence, model)\n # input of shape seq_len x embedding_size\n target_sentence = [SOS_token] + target_sentence\n # stack x hid_dim\n prev_hidden = encoder_hiddens[-1]\n prev_context = torch.zeros(model.hidden_dim)\n target_log_probs = []\n\n for pos in range(len(target_sentence) - 1):\n log_probs, prev_hidden, prev_context,_ = decode(prev_hidden, encoder_hiddens, prev_context, target_sentence[pos], model)\n target_log_probs.append(torch.log(log_probs[target_sentence[pos + 
1]]))\n\n return torch.sum(torch.stack(target_log_probs))", "def modelClass(self):\n raise NotImplementedError", "def log_marginal_likelihood(self) -> tf.Tensor:\n L = tf.linalg.cholesky(self.likelihood.add_to(self.KXX))\n return tf.reduce_sum(multivariate_normal(self._Y, self._mean, L))", "def marginal_ln_likelihood_worker(task):\n slice_or_idx, task_id, prior_samples_file, joker_helper = task\n\n # Read the batch of prior samples\n batch = read_batch(prior_samples_file, joker_helper.packed_order,\n slice_or_idx, units=joker_helper.internal_units)\n\n if batch.dtype != np.float64:\n batch = batch.astype(np.float64)\n\n # memoryview is returned\n ll = joker_helper.batch_marginal_ln_likelihood(batch)\n\n return np.array(ll)", "def __call__(self, class_logits, box_regression):\n\n class_logits = cat(class_logits, dim=0)\n box_regression = cat(box_regression, dim=0)\n device = class_logits.device\n\n if not hasattr(self, \"_proposals\"):\n return None, None\n # raise RuntimeError(\"subsample needs to be called before\")\n\n proposals = self._proposals\n\n labels = cat([proposal.get_field(\"labels\") for proposal in proposals], dim=0)\n regression_targets = cat(\n [proposal.get_field(\"regression_targets\") for proposal in proposals], dim=0\n )\n\n if self.class_balance_loss:\n class_balance_weight = self.class_balance_loss.get_weights(labels)\n else:\n class_balance_weight = None\n\n if self.focal_loss:\n classification_loss = self.focal_loss(class_logits,\n labels.int(),\n weight=class_balance_weight,\n reduction='mean')\n else:\n classification_loss = cross_entropy(class_logits,\n labels,\n weight=class_balance_weight,\n reduction='mean')\n\n # get indices that correspond to the regression targets for\n # the corresponding ground truth labels, to be used with\n # advanced indexing\n sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)\n if self.cls_agnostic_bbox_reg:\n map_inds = torch.tensor([4, 5, 6, 7], device=device)\n else:\n labels_pos = labels[sampled_pos_inds_subset]\n map_inds = 4 * labels_pos[:, None] + torch.tensor(\n [0, 1, 2, 3], device=device)\n\n if self.balance_l1_loss:\n box_loss = self.balance_l1_loss(\n box_regression[sampled_pos_inds_subset[:, None], map_inds],\n regression_targets[sampled_pos_inds_subset],\n reduction='sum',\n )\n elif self.adjust_smooth_l1_loss:\n box_loss = self.adjust_smooth_l1_loss(\n box_regression[sampled_pos_inds_subset[:, None], map_inds],\n regression_targets[sampled_pos_inds_subset],\n reduction='sum',\n ) / 4\n elif self.wing_loss:\n box_loss = self.wing_loss(\n box_regression[sampled_pos_inds_subset[:, None], map_inds],\n regression_targets[sampled_pos_inds_subset],\n reduction='sum',\n )\n else:\n box_loss = self.smooth_l1_loss(\n box_regression[sampled_pos_inds_subset[:, None], map_inds],\n regression_targets[sampled_pos_inds_subset],\n reduction='sum',\n )\n\n if len(labels) == 0:\n box_loss = box_loss * 0\n else:\n box_loss = box_loss / labels.numel()\n\n return classification_loss, box_loss", "def log_likelihood(self):\n\n if self._log_likelihood is None:\n self._log_likelihood = logpdf(x=self.y, cov=self.S)\n return self._log_likelihood", "def __init__(self, **kwargs):\n super(LogisticRegression, self).__init__()\n self.C = kwargs.pop(\"C\", 100)\n self.clf = _LogisticRegression(C=self.C, **kwargs)", "def from_dict(input_dict):\n\n import copy\n input_dict = copy.deepcopy(input_dict)\n likelihood_class = input_dict.pop('class')\n input_dict[\"name\"] = str(input_dict[\"name\"])\n name = input_dict.pop('name')\n import GPy\n 
likelihood_class = eval(likelihood_class)\n return likelihood_class._build_from_input_dict(likelihood_class, input_dict)", "def __init__(self, classifier_path, model_class, data_format, model=None):\n self.path = classifier_path\n self.model = model_class(classifier_path, output_logits=True,\n input_data_format=data_format, data_format=data_format).model \\\n if model is None else model.model\n self.softmax = Sequential()\n self.softmax.add(Lambda(lambda X: softmax(X, axis=1), input_shape=(10,)))", "def log_density(model, model_args, model_kwargs, params, skip_dist_transforms=False):\n # We skip transforms in\n # + autoguide's model\n # + hmc's model\n # We apply transforms in\n # + autoguide's guide\n # + svi's model + guide\n if skip_dist_transforms:\n model = substitute(model, base_param_map=params)\n else:\n model = substitute(model, param_map=params)\n model_trace = trace(model).get_trace(*model_args, **model_kwargs)\n log_joint = 0.\n for site in model_trace.values():\n if site['type'] == 'sample':\n value = site['value']\n intermediates = site['intermediates']\n if intermediates:\n if skip_dist_transforms:\n log_prob = site['fn'].base_dist.log_prob(intermediates[0][0])\n else:\n log_prob = site['fn'].log_prob(value, intermediates)\n else:\n log_prob = site['fn'].log_prob(value)\n log_prob = np.sum(log_prob)\n if 'scale' in site:\n log_prob = site['scale'] * log_prob\n log_joint = log_joint + log_prob\n return log_joint, model_trace", "def __init__(self, target_model: Classifier):\n\n super().__init__(target_model, None, None, None, None)", "def objective_llh(self, params):\n\n try:\n obj = self.log_likelihood(params[0], params[1], params[2:])\n except (LinAlgError, ZeroDivisionError, ValueError):\n obj = -np.inf\n return obj", "def get_model_instance(model,baseclass=None,nvarparams=1,**kwargs):\n if isinstance(model,ParametricModel if baseclass is None else baseclass):\n for k,v in kwargs.iteritems():\n setattr(model,k,v)\n return model\n else:\n cls = get_model_class(model,baseclass)\n args = (nvarparams,) if cls.isVarnumModel() else tuple()\n return cls(*args,**kwargs)", "def log_likelihood(self, params):\n # extract the parameters\n m1 = params['m1']\n m2 = params['m2']\n DL = params['DL']\n Tc = params['Tc']\n iota = params['iota']\n phic = params['phic']\n psi = params['psi']\n thetaS = params['thetaS']\n phiS = params['phiS']\n\n # calculate the model\n model = self._model(time, m1, m2, DL, Tc, iota, phic, psi, thetaS, phiS)\n\n# # normalisation\n# norm = -0.5*self._ndata*LN2PI - self._ndata*self._logsigma\n\n# # chi-squared\n# chisq = np.sum(((self._data - model)/(self._sigma))**2)\n\n return -np.vdot(self._data - model,self._data - model)" ]
[ "0.6100431", "0.5624646", "0.55522686", "0.55522686", "0.536249", "0.53345406", "0.5269471", "0.52394426", "0.5220503", "0.51793855", "0.51639843", "0.508774", "0.50566566", "0.505593", "0.50103873", "0.50024384", "0.49973372", "0.4989311", "0.49551207", "0.49261236", "0.49254608", "0.49082416", "0.4896225", "0.48831117", "0.4874971", "0.48513383", "0.48297438", "0.4827886", "0.48256612", "0.48106003", "0.4805276", "0.48026305", "0.4794837", "0.4792583", "0.47802576", "0.4763272", "0.47582594", "0.47506052", "0.47481057", "0.47370008", "0.47354442", "0.47295603", "0.47066432", "0.4702227", "0.46945167", "0.46856022", "0.46839333", "0.46830148", "0.46789715", "0.46747473", "0.4667279", "0.4633547", "0.4630595", "0.46300632", "0.46275294", "0.46258777", "0.46247151", "0.46220216", "0.46043614", "0.46013784", "0.46002114", "0.45906675", "0.45826858", "0.4576549", "0.45763528", "0.45651534", "0.45651266", "0.45632905", "0.45608848", "0.4557129", "0.45481467", "0.45475748", "0.45420858", "0.45415917", "0.45348048", "0.45337367", "0.4532734", "0.453247", "0.45266742", "0.4520883", "0.4516731", "0.45154163", "0.45037243", "0.45007905", "0.45007476", "0.44998387", "0.4499611", "0.4493222", "0.44923735", "0.44900662", "0.44854087", "0.44834283", "0.44793633", "0.44770208", "0.44730946", "0.4464081", "0.44629467", "0.4459146", "0.44546592", "0.44502074" ]
0.74493885
0
r"""Chooses a BoTorch `AcquisitionFunction` class.
def choose_botorch_acqf_class() -> Type[AcquisitionFunction]:
    # NOTE: In the future, this dispatch function could leverage any
    # of the attributes of `BoTorchModel` or kwargs passed to
    # `BoTorchModel.gen` to intelligently select acquisition function.
    return qNoisyExpectedImprovement
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, acquisition_functions):\n self.acquisition_functions = acquisition_functions", "def _optimise_acquisition(acq_fn, acq_optimiser, anc_data):\n return acq_optimiser(acq_fn, anc_data.max_evals)", "def _optimise_acquisition(acq_fn, acq_optimiser, anc_data):\n if anc_data.acq_opt_method == 'direct':\n acquisition = lambda x: acq_fn(x.reshape((1, -1)))\n else:\n acquisition = acq_fn\n _, opt_pt = acq_optimiser(acquisition, anc_data.max_evals)\n return opt_pt", "def _function_class(self):\n return FriCASExpectFunction", "def run_acquisition_function(\n acquisition_function,\n configurations,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n iteration_number,\n data_array,\n model_type,\n classification_model=None,\n number_of_cpus=0,\n):\n tmp_objective_limits = None\n configurations = concatenate_list_of_dictionaries(configurations)\n configurations = data_dictionary_to_tuple(\n configurations, param_space.get_input_parameters()\n )\n if acquisition_function == \"TS\":\n scalarized_values, tmp_objective_limits = thompson_sampling(\n configurations,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n model_type,\n classification_model,\n number_of_cpus,\n )\n elif acquisition_function == \"UCB\":\n scalarized_values, tmp_objective_limits = ucb(\n configurations,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n iteration_number,\n model_type,\n classification_model,\n number_of_cpus,\n )\n elif acquisition_function == \"EI\":\n scalarized_values, tmp_objective_limits = EI(\n configurations,\n data_array,\n objective_weights,\n regression_models,\n param_space,\n scalarization_method,\n objective_limits,\n iteration_number,\n model_type,\n classification_model,\n number_of_cpus,\n )\n else:\n print(\"Unrecognized acquisition function:\", acquisition_function)\n raise SystemExit\n\n scalarized_values = list(scalarized_values)\n\n # we want the local search to consider all points feasible, we already account for feasibility it in the scalarized value\n feasibility_indicators = [1] * len(scalarized_values)\n\n return scalarized_values, feasibility_indicators", "def choose_class(self, *args, **kwargs):", "def __init__(\n self,\n async_strategy=\"impute\",\n impute_strategy=\"cl_min\",\n acq_fun=None,\n acq_fun_kwargs=None,\n acq_optimizer=\"lbfgs\",\n acq_optimizer_kwargs=None,\n **kwargs\n ):\n super().__init__(**kwargs)\n\n # validations\n\n # allowed combinations of async strategies and acquisition functions\n allowed_combinations = {\n \"impute\": {\n \"EI\": GaussianProcess_EI,\n \"LCB\": GaussianProcess_LCB,\n \"PI\": GaussianProcess_PI,\n },\n \"asy_ts\": {\"AsyTS\": AsyTS},\n }\n if async_strategy not in allowed_combinations.keys():\n raise ValueError(\n \"Expected async_strategy to be in {} with GP as surrogate, got {}\".format(\n list(allowed_combinations.keys()), async_strategy\n )\n )\n\n if async_strategy == \"impute\" and self.pruner:\n if not self.interim_results:\n raise ValueError(\n \"Optimizer GP with async strategy `impute` only supports Pruner with interim_results==True, got {}\".format(\n self.interim_results\n )\n )\n\n if acq_fun not in allowed_combinations[async_strategy] and acq_fun is not None:\n raise ValueError(\n \"Expected acq_fun to be in {} with GP as surrogate and {} as async_strategy, got {}\".format(\n list(allowed_combinations[async_strategy].keys()),\n async_strategy,\n acq_fun,\n )\n 
)\n\n # async_strategy\n self.async_strategy = async_strategy\n\n # configure acquisition function\n if acq_fun is None:\n # default acq_fun is the first in the dict\n acq_fun = list(allowed_combinations[async_strategy].keys())[0]\n self.acq_fun = allowed_combinations[self.async_strategy][acq_fun]()\n self.acq_func_kwargs = acq_fun_kwargs\n\n # configure acquisiton function optimizer\n allowed_acq_opt = [\"sampling\", \"lbfgs\"]\n if acq_optimizer not in allowed_acq_opt:\n raise ValueError(\n \"expected acq_optimizer to be in {}, got {}\".format(\n allowed_acq_opt, acq_optimizer\n )\n )\n self.acq_optimizer = acq_optimizer\n if acq_optimizer_kwargs is None:\n acq_optimizer_kwargs = dict()\n\n if self.async_strategy == \"asy_ts\":\n # default value is 100 and max value is 1000 for asy ts\n self.n_points = np.clip(acq_optimizer_kwargs.get(\"n_points\", 100), 10, 1000)\n else:\n self.n_points = acq_optimizer_kwargs.get(\"n_points\", 10000)\n self.n_restarts_optimizer = acq_optimizer_kwargs.get(\"n_restarts_optimizer\", 5)\n self.acq_optimizer_kwargs = acq_optimizer_kwargs\n\n # configure impute strategy\n if self.async_strategy == \"impute\":\n allowed_impute_strategies = [\"cl_min\", \"cl_max\", \"cl_mean\", \"kb\"]\n if impute_strategy not in allowed_impute_strategies:\n raise ValueError(\n \"expected impute_strategy to be in {}, got {}\".format(\n allowed_impute_strategies, impute_strategy\n )\n )\n self.impute_strategy = impute_strategy\n\n # estimator that has not been fit on any data.\n self.base_model = None\n\n if self.async_strategy == \"impute\":\n self._log(\"Impute Strategy: {}\".format(self.impute_strategy))", "def _function_element_class(self):\n return FriCASFunctionElement", "def next_point(self):\n if self.verbose:\n print(\"Computing acquisition function...\")\n if self.acquisition_function == 'cb':\n acq, pred = acqfunc.confidence_bound(\n self.surrogate_model, self.X_full,\n alpha=self.alpha, beta=self.beta)\n elif self.acquisition_function == 'ei':\n acq, pred = acqfunc.expected_improvement(\n self.surrogate_model, self.X_full,\n self.X_sparse, xi=self.xi)\n elif self.acquisition_function == 'poi':\n acq, pred = acqfunc.probability_of_improvement(\n self.surrogate_model, self.X_full,\n self.X_sparse, xi=self.xi)\n elif isinstance(self.acquisition_function, types.FunctionType):\n acq, pred = self.acquisition_function(\n self.surrogate_model, self.X_full, self.X_sparse)\n else:\n raise NotImplementedError(\n \"Choose between 'cb', 'ei', and 'poi' acquisition functions or define your own\")\n self.gp_predictions.append(pred)\n if self.mask is None:\n indices_list = np.unravel_index(np.argsort(acq.ravel()), acq.shape)\n vals_list = acq[indices_list][::-1][:self.batch_size].tolist()\n indices_list = np.dstack(indices_list)[0][::-1][:self.batch_size].tolist()\n else:\n acq = self.mask*acq\n indices_list = np.unravel_index(np.argsort(acq.ravel()), acq.shape)\n vals_list = acq[indices_list]\n vals_list = vals_list[~np.isnan(vals_list)][::-1]\n indices_list = np.dstack(indices_list)[0]\n indices_list = indices_list[:len(vals_list)][::-1]\n vals_list = vals_list[:self.batch_size].tolist()\n indices_list = indices_list[:self.batch_size].tolist()\n if not self.batch_update:\n return vals_list, indices_list\n if self.batch_dscale is None:\n batch_dscale_ = self.surrogate_model.model.kernel.lengthscale.mean().item()\n else:\n batch_dscale_ = self.batch_dscale\n vals_list, indices_list = self.update_points(\n vals_list, indices_list, batch_dscale_)\n return vals_list, indices_list", 
"def pick_action(self):\n if self.exploration_mode == 'time':\n self.acq_func.exploration_rate = self.exploration_rate(self.duration + 1)\n elif self.exploration_mode == 'samples':\n self.acq_func.exploration_rate = self.exploration_rate(len(self.rounds) + 1)\n\n fid, x = optim.pick_acquisition_mf(acq_func=self.acq_func,\n optimizer=self.aux_optimizer,\n gammas=self.gammas,\n x_init=self.aux_x_init)\n rmean, rsd = self.acq_func.predict_mf(fid=fid, x=x)\n\n # Undo negation of objective function so as to not confuse user\n if self.mode == 'min':\n rmean = -rmean\n\n rospy.loginfo('Next sample (%d, %s) with beta %f and predicted reward %f +- %f',\n fid,\n str(x), self.acq_func.exploration_rate,\n rmean,\n rsd)\n return fid, x", "def choice(func):\n # __choice_fn func_name used to identify function in Alternation.execute\n def __choice_fn(*args, **kwargs):\n return Choice(func, *args, **kwargs)\n return __choice_fn", "def __init__(self, function):\n self.function = function", "def __init__(self, function=None):\n self._function = function", "def pick_next(self, STATUS, N=100, nysamples=100):\n untested = [i for i in range(self.n) if STATUS[i] == 0]\n if self.acquisition_function == 'Thompson':\n alpha = self.samples()\n \n elif self.acquisition_function == 'Greedy_N':\n y_samples = self.samples(nysamples)\n alpha = np.zeros(self.n)\n for j in range(nysamples):\n # count number of times each point is in the top N for a sample \n alpha[np.argpartition(y_samples[:, j], -N)[-N:]] += 1\n \n elif self.acquisition_function == 'Greedy_tau':\n if np.mod(self.estimate_tau_counter, self.tau_update) == 0:\n self.estimate_tau()\n self.estimate_tau_counter += 1\n else:\n self.estimate_tau_counter += 1\n mu_X_pos, var_X_pos = self.predict()\n alpha = 1-norm.cdf(np.divide(self.tau-mu_X_pos,var_X_pos**0.5))\n \n elif self.acquisition_function == 'EI':\n mu_X_pos, var_X_pos = self.predict()\n sig_X_pos = var_X_pos**0.5\n alpha = (mu_X_pos-self.y_max)*norm.cdf(np.divide(mu_X_pos-self.y_max,sig_X_pos))+sig_X_pos*norm.pdf(np.divide(mu_X_pos-self.y_max,sig_X_pos))\n \n else:\n # if no valid acquisition_function entered then pick at random \n alpha = np.random.rand(self.n)\n print('enter a valid acquisition function - picking randomly')\n ipick = untested[np.argmax(alpha[untested])]\n return ipick", "def start_acquisition(self):\n self.lib.StartAcquisition()", "def getFunctionClass(functionID):\n d = { 1: Linear,\n 2: LinearDrag,\n 11: Gaussian,\n 12: GaussianDrag,\n 21: Lorentzian,\n 22: LorentzianDrag }\n return d[functionID]", "def __get_function(self):\n return random.choice(self.FUNCTIONS)", "def __init__(self, function='cogscore/'):\n self.function = function", "def _set_up_acq_opt_rand(self):\n def _random_max_wrap(*args):\n \"\"\" A wrapper so as to only return optimal point.\"\"\"\n _, opt_pt = random_maximise(*args)\n return opt_pt\n # Set this up in acq_optimise\n self.acq_optimise = lambda obj, max_evals: _random_max_wrap(obj, self.domain_bounds,\n max_evals)\n if self.get_acq_opt_max_evals is None:\n lead_const = 10 * min(5, self.domain_dim)**2\n self.get_acq_opt_max_evals = lambda t: np.clip(\n lead_const * np.sqrt(min(t, 1000)), 2000, 3e4)\n # Acquisition function should be evaluated via multiple evaluations\n self.acq_query_type = 'multiple'", "def AcquisitionSource(self, default={}):\n tmp = self.data.get('metadata', {}).get('acquisition_source', default)\n return HEP.AcquisitionSourceObject(tmp)", "def __init__(self,\n function: Callable):\n\n self._function = function", "def getFunction(self) -> 
ghidra.program.model.listing.Function:\n ...", "def acquisition_function_random(gp_reward_model: BasicGPRewardModel) -> int:\n return np.random.randint(0, len(gp_reward_model.candidate_queries))", "def get_acquisition_func(i: int):\n switcher = {\n 0: \"category\",\n 1: \"mean\",\n 2: \"std\",\n 3: \"random\",\n }\n return switcher.get(i, \"category\")", "def get_q_func(self, is_training=False, reuse=False, scope='q_func'):\n return functools.partial(self.q_func,\n scope=scope,\n reuse=reuse,\n is_training=is_training)", "def __init__(self, function, **kwargs):\n self.function = function\n self.kwargs = kwargs", "def __init__(self, fitness_function, *args, **kwargs):\n Function.__init__(self, fitness_function)\n self.fitness_function = fitness_function # never used\n self.args = args\n self.kwargs = kwargs", "def func(self):\n return self.__class__", "def auto() -> AutoDistribute:\n return _auto", "def finite_acquisition(self, *args, **kwargs):\n return _uhd_swig.usrp_source_finite_acquisition(self, *args, **kwargs)", "def __init__(__self__, *,\n function_name: Optional[pulumi.Input[str]] = None,\n input: Optional[pulumi.Input[str]] = None,\n qualifier: Optional[pulumi.Input[str]] = None,\n result: Optional[pulumi.Input[str]] = None,\n triggers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if function_name is not None:\n pulumi.set(__self__, \"function_name\", function_name)\n if input is not None:\n pulumi.set(__self__, \"input\", input)\n if qualifier is not None:\n pulumi.set(__self__, \"qualifier\", qualifier)\n if result is not None:\n pulumi.set(__self__, \"result\", result)\n if triggers is not None:\n pulumi.set(__self__, \"triggers\", triggers)", "def __init__(self, func):\r\n self.getter = func\r\n self.name = func.__name__", "def __init__(self, func, type):\n self.func = func\n self.type = type", "def acquire(ABC) -> bool:", "def instantiate(cls, device, **kwargs):\n\n def keygen(cls, device, **kwargs):\n \"\"\"Generate the cache key from device and attributes.\"\"\"\n key = '%s/%s' % (cls.__name__, device)\n for v in kwargs.values():\n key += '/' + str(v)\n return key\n\n def creator(cls, cache_key, device, **kwargs):\n \"\"\"Create and then cache a function.\"\"\"\n function = cls(cache_key, device, **kwargs)\n _GLOBAL_CACHED_FUNCTIONS[cache_key] = function\n return function\n\n cache_key = keygen(cls, device, **kwargs)\n try:\n return _GLOBAL_CACHED_FUNCTIONS[cache_key]\n except KeyError:\n return creator(cls, cache_key, device, **kwargs)", "def pick(func):\n setattr(world, func.__name__, func)\n return func", "def __init__(self, function='sourcepfam/'):\n self.function = function", "def __init__(self, _class=None, *, policy=compat32):\n self._class = _class\n self.policy = policy", "def __init__(self, policy, q_function, n, gamma, alpha):\n self.policy = policy\n self.q = q_function\n self.n = n\n self.gamma = gamma\n self.alpha = alpha\n\n self._record = Records(self.n)", "def prepare_acquisition(self):\n self.lib.PrepareAcquisition()", "def __call__(self, f_or_klass):\n\n #+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*****\n # 0.3.0\n # -- implement \"kill switch\", NO_DECO\n # -- handle decorating both functions and classes\n #+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*****\n\n # 0.3.0b16: if it isn't callable, scram'\n if not callable(f_or_klass):\n return f_or_klass\n\n # Special-case handling for ``NO_DECO``: remove from settings of ``self``\n if self._effective_settings.get('NO_DECO'):\n return f_or_klass\n 
# else, delete that item wherever it might be\n if 'NO_DECO' in self._effective_settings:\n del self._effective_settings['NO_DECO']\n if 'NO_DECO' in self._changed_settings:\n del self._changed_settings['NO_DECO']\n\n f = f_or_klass if inspect.isfunction(f_or_klass) else None\n klass = f_or_klass if inspect.isclass(f_or_klass) else None\n\n self.f = f\n self.cls = klass\n\n if klass:\n #+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*\n # 0.3.0 -- case \"f_or_klass is a class\" -- namely, klass\n #+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*\n\n self._class__call__(klass) # modifies klass (methods & inner classes) (if not builtin)\n self._add_class_attrs(klass) # v0.3.0v20 traps TypeError for builtins\n return klass\n\n elif not f:\n #+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*\n # 0.3.0 -- case \"f_or_klass is a callable but not a function\"\n #+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*\n # functools.partial objects are callable, have no __name__ much less __qualname__,\n # and trying to deco __call__ gets messy.\n # Callable builtins e.g. len are not functions in the isfunction sense,\n # can't deco anyway. Just give up (quietly):\n return f_or_klass\n\n else: # not a class, f nonempty is a function of f_or_klass callable\n #+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*\n # 0.3.0 -- case \"f_or_klass is a function\" -- namely, f\n #+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*\n\n #----------------------------------------------------------------\n # Don't double-decorate -- don't wanna, & it doesn't work anyway!\n #----------------------------------------------------------------\n # Note: As with methods of classes,\n # . if f is deco'd, its existing EXPLICITLY GIVEN settings take precedence.\n\n # # From _class__call__, props & methods cases, w/a few name changes\n deco_obj = getattr(f, self._sentinels['DECO_OF'], None) # type: _deco_base\n\n # get a fresh copy for each attr\n new_settings = self._changed_settings.copy() # updated below\n\n # __init__ fixup, a nicety:\n # By default, don't log retval for __init__.\n # If user insists on it with 'log_retval=True' in __init__ deco,\n # that will override this.\n if f.__name__ == '__init__':\n self.fixup_for_init(new_settings)\n\n if deco_obj: # f is deco'd by this decorator\n # Yes. 
Figure out settings for f,\n ### 0.3.0b18 -- Use self._override\n self._update_settings(new=new_settings,\n old=deco_obj._changed_settings,\n override_existing=self._override)\n # update func's settings (_force_mutable=True to handle `max_history` properly)\n deco_obj._settings_mapping.update(new_settings, _force_mutable=True)\n return f\n\n #----------------------------------------------------------------\n # f is a function & is NOT already deco'd\n #----------------------------------------------------------------\n\n # 0.3.0.x -- f may not have a .__qualname__\n try:\n self._classname_of_f = '.'.join( f.__qualname__.split('.')[:-1] )\n except AttributeError as e:\n self._classname_of_f = ''\n\n # Special-case '__repr__' handling, if deco subclass doesn't allow it.\n if f.__name__ == '__repr__' and self._classname_of_f and not self.allow_repr():\n # v0.3.0b23 -- Instead of refusing to deco, use recursive_repr\n # return f\n return recursive_repr(fillvalue=\"...\")(f)\n\n # 0.3.0\n # Use __qualname__ ALL the time, unless user provides `name=display_name_str`\n # where `display_name_str` is either the name to be used for the fn in logged output,\n # or is an oldstyle format str into which f.__name__ will be substituted\n # to obtain the display name.\n # We require Py3.3+, so __qualname__ is available.\n\n # setup f_display_name\n if self._name_param:\n try:\n self.f_display_name = (self._name_param % f.__name__)\n except TypeError:\n self.f_display_name = self._name_param\n else:\n self.f_display_name = f.__qualname__\n\n # TODO TRY THIS -- anything break?\n # 0.3.1 Inspired by fractions.Fraction.__sub__ et al:\n # __name__ may be very different from __qualname__;\n # if so, show both\n if f.__name__ not in f.__qualname__:\n self.f_display_name += \" (\" + f.__name__ + \")\"\n\n #================================================================\n # 0.3.0 -- Init things (migrated from __init__)\n #----------------------------------------------------------------\n # set up pseudo-dict (DecoSettingsMapping),\n # using settings given by self._effective_settings.\n #\n # *** DecoSettingsMapping \"API\" --\n # (2) construct DecoSettingsMapping object\n # that will provide mapping & attribute access to settings, & more\n #----------------------------------------------------------------\n self._settings_mapping = DecoSettingsMapping(\n deco_class=self.__class__,\n # DecoSettingsMapping calls the rest ** values_dict\n ** self._effective_settings # 0.3.0 set by __init__\n )\n\n #----------------------------------------------------------------\n # Init more stuff\n #----------------------------------------------------------------\n self._stats = ClassInstanceAttrProxy(\n class_instance=self,\n data_descriptor_names=self.__class__._data_descriptor_names,\n method_descriptor_names=self.__class__._method_descriptor_names)\n # Accessed by descriptors on the stats obj\n self._num_calls_total = 0\n self._num_calls_logged = 0\n # max_history > 0 --> size of self._call_history; <= 0 --> unbounded\n # Set before calling _make_call_history\n\n # 0.3.0 self._other_values_dict set by __init__\n self.max_history = self._other_values_dict.get('max_history', 0) # <-- Nota bene\n self._call_history = self._make_call_history()\n\n # Accumulate this (for logged calls only)\n # even when record_history is false:\n self._elapsed_secs_logged = 0.0\n self._process_secs_logged = 0.0\n\n # 0.2.2.post1\n # stack(s), pushed & popped wrapper of deco'd function\n # by _logging_state_push, _logging_state_pop\n # 0.3.0 convert to 
pushing/popping single namedtuples\n self.logging_state_stack = [] # 0.3.0 stack of LoggingState namedtuples\n self._enabled_stack = [] # 0.3.0 - um, stack, of 'enabled's\n\n #----------------------------------------------------------------\n # end of Init passage\n #================================================================\n\n # Save signature and parameters of f\n self.f_signature = inspect.signature(f) # Py >= 3.3\n self.f_params = self.f_signature.parameters\n\n # 0.3.0 We assume Py3.3 so we use perf_counter, process_time all the time\n wall_time_fn = time.perf_counter\n process_time_fn = time.process_time\n\n #############################\n # The wrapper of a callable\n #############################\n\n @wraps(f)\n def _deco_base_f_wrapper_(*args, **kwargs):\n \"\"\"Wrapper around the wrapped function f.\n When this runs, f has been called, so we can now resolve\n any indirect values for the settings/keyword-params\n of log_calls, using info in kwargs and self.f_params.\"\"\"\n # *** Part of the DecoSettingsMapping \"API\" --\n # (4) using self._settings_mapping.get_final_value in wrapper\n # [[[ This/these is/are 4th chronologically ]]]\n\n # inner/local fn -- save a few cycles and characters -\n # we call this a lot (<= 9x).\n def _get_final_value(setting_name):\n \"Use outer scope's kwargs and self.f_params\"\n return self._settings_mapping.get_final_value(\n setting_name, kwargs, fparams=self.f_params)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # if nothing to do, hurry up & don't do it.\n # NOTE: call_chain_to_next_log_calls_fn looks in stack frames\n # to find (0.2.4) STACKFRAME_HACK_DICT_NAME (really!)\n # It and its values (the following _XXX variables)\n # must be set before calling f.\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n _enabled = _get_final_value('enabled')\n # 0.3.0 in case f calls log_message (no output if f disabled)\n self._enabled_state_push(_enabled)\n\n # 0.2.4.post5 \"true bypass\": if 'enabled' < 0 then scram\n if _enabled < 0:\n ret = f(*args, **kwargs)\n self._enabled_state_pop()\n return ret\n\n # Bump call counters, before calling fn.\n # Note: elapsed_secs, process_secs not reflected yet of course\n self._add_call(logged=_enabled)\n\n _log_call_numbers = _get_final_value('log_call_numbers')\n # counters just got bumped\n _active_call_number = (self._stats.num_calls_logged\n if _log_call_numbers else\n 0)\n # Get list of callers up to & including first log_call's-deco'd fn\n # (or just caller, if no such fn)\n call_list, prev_indent_level = self.call_chain_to_next_log_calls_fn()\n\n # Bump _extra_indent_level if last fn on call_list is deco'd AND enabled,\n # o/w it's the _extra_indent_level which that fn 'inherited'.\n # _extra_indent_level: prev_indent_level, or prev_indent_level + 1\n do_indent = _get_final_value('indent')\n _extra_indent_level = (prev_indent_level +\n int(not not do_indent and not not _enabled))\n # 0.3.0\n ########## prefixed_fname = _get_final_value('prefix') + f.__name__\n prefixed_fname = _get_final_value('prefix') + self.f_display_name\n\n # Stackframe hack:\n assert '_deco_base__active_call_items__' == STACKFRAME_HACK_DICT_NAME\n _deco_base__active_call_items__ = {\n '_enabled': _enabled,\n '_log_call_numbers': _log_call_numbers,\n '_prefixed_fname': prefixed_fname, # Hack alert (Pt 1)\n '_active_call_number': _active_call_number,\n '_extra_indent_level': _extra_indent_level,\n # 0.3.0 for _get_own_deco_wrapper\n '_wrapper_deco': self\n }\n\n # Get logging function IF 
ANY.\n # For the benefit of callees further down the call chain,\n # if this f is not enabled (_enabled <= 0).\n # Subclass can return None to suppress printed/logged output.\n logging_fn = self.get_logging_fn(_get_final_value)\n\n # Only do global indentation for print, not for loggers\n global_indent_len = max(_extra_indent_level, 0) * self.INDENT\n\n # 0.2.2.post1 - save output_fname for log_message use\n call_number_str = ((' [%d]' % _active_call_number)\n if _log_call_numbers else '')\n output_fname = prefixed_fname + call_number_str\n\n # 0.3.0\n # Note: DON'T combine with global_mute(),\n # cuz this value will be pushed,\n # and when popped any realtime changes to global mute\n # made during call to f would be ignored.\n mute = _get_final_value('mute')\n\n # 0.2.2 -- self._log_message() will use\n # the logging_fn, indent_len and output_fname at top of these stacks;\n # thus, verbose functions should use log_calls.print (~ log_message)\n # to write their blather.\n # There's a stack of logging-state ,\n # used by self._log_message(), maintained in this wrapper.\n self._logging_state_push(logging_fn, global_indent_len, output_fname, mute)\n\n # (_xxx variables set, ok to call f)\n if not _enabled:\n ret = f(*args, **kwargs)\n self._logging_state_pop(enabled_too=True)\n return ret\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Set up context, for pre-call handlers\n # (after calling f, add to it for post-call handlers)\n # THIS is the time sink - 23x slower than other 'blocks'\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Key/values of \"context\" whose values we know so far:\n context = {\n 'decorator': self,\n 'settings': self._settings_mapping,\n 'stats': self._stats,\n 'prefixed_fname': prefixed_fname,\n 'fparams': self.f_params,\n 'call_list': call_list,\n 'args': args,\n 'kwargs': kwargs,\n 'indent': \" \" * self.INDENT, # our unit of indentation\n 'output_fname': output_fname,\n }\n\n # Gather all the things we need (for log output, & for history)\n # Use inspect module's Signature.bind method.\n # bound_args.arguments -- contains only explicitly bound arguments\n # 0.2.4.post5 - using\n # inspect.signature(f).bind(*args, **kwargs)\n # took 45% of execution time of entire wrapper; this takes 23%:\n # 0.3.1 TODO BUG No args is a problem?!\n bound_args = self.f_signature.bind(*args, **kwargs)\n \"\"\"\n File \"/Users/brianoneill/Desktop/Programming/Python-package-staging/log_calls/log_calls/tests/_temp.py\", line 12, in <module>\n g(f())\n File \"/Users/brianoneill/Desktop/Programming/Python-package-staging/log_calls/log_calls/log_calls.py\", line 1935, in _deco_base_f_wrapper_\n bound_args = self.f_signature.bind(*args, **kwargs)\n File \"/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/inspect.py\", line 2646, in bind\n return args[0]._bind(args[1:], kwargs)\n File \"/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/inspect.py\", line 2571, in _bind\n raise TypeError('too many positional arguments') from None\n TypeError: too many positional arguments\n \"\"\"\n\n varargs_pos = get_args_pos(self.f_params) # -1 if no *args in signature\n argcount = varargs_pos if varargs_pos >= 0 else len(args)\n context['argcount'] = argcount\n # The first argcount-many things in bound_args\n context['argnames'] = list(bound_args.arguments)[:argcount]\n context['argvals'] = args[:argcount]\n\n context['varargs'] = args[argcount:]\n (context['varargs_name'],\n context['kwargs_name']) = 
get_args_kwargs_param_names(self.f_params)\n\n # These 3 statements = 31% of execution time of wrapper\n context['defaulted_kwargs'] = get_defaulted_kwargs_OD(self.f_params, bound_args)\n context['explicit_kwargs'] = get_explicit_kwargs_OD(self.f_params, bound_args, kwargs)\n # context['implicit_kwargs'] = {\n # k: kwargs[k] for k in kwargs if k not in context['explicit_kwargs']\n # }\n # At least 2x as fast:\n context['implicit_kwargs'] = \\\n difference_update(kwargs.copy(), context['explicit_kwargs'])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Call pre-call handlers, collect nonempty return values\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # only consult global mute in r/t\n if not (mute or self.global_mute()): # 0.3.0\n pre_msgs = []\n for setting_name in self._settings_mapping._pre_call_handlers: # keys\n if _get_final_value(setting_name):\n info = self._settings_mapping._get_DecoSetting(setting_name)\n msg = info.pre_call_handler(context)\n if msg:\n pre_msgs.append(msg)\n\n # Write pre-call messages\n if logging_fn:\n for msg in pre_msgs:\n self._log_message(msg, extra_indent_level=0)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Call f(*args, **kwargs) and get its retval; time it.\n # Add timestamp, elapsed time(s) and retval to context.\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # No dictionary overhead between timer(s) start & stop.\n t0 = time.time() # for timestamp\n t0_wall = wall_time_fn()\n t0_process = process_time_fn()\n retval = f(*args, **kwargs)\n t_end_wall = wall_time_fn()\n t_end_process = process_time_fn()\n context['elapsed_secs'] = (t_end_wall - t0_wall)\n context['process_secs'] = (t_end_process - t0_process)\n context['timestamp'] = t0\n context['retval'] = retval\n\n self._add_to_elapsed(context['elapsed_secs'], context['process_secs'])\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Call post-call handlers, collect nonempty return values\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # only consult global mute in r/t\n if not (mute or self.global_mute()): # 0.3.0\n post_msgs = []\n for setting_name in self._settings_mapping._post_call_handlers: # keys\n if _get_final_value(setting_name):\n info = self._settings_mapping._get_DecoSetting(setting_name)\n msg = info.post_call_handler(context)\n if msg:\n post_msgs.append(msg)\n\n # Write post-call messages\n if logging_fn:\n for msg in post_msgs:\n self._log_message(msg, extra_indent_level=0)\n # v0.3.0b22 -- if recording history, add record of call even if we're muted(!)\n elif _get_final_value('record_history'):\n info = self._settings_mapping._get_DecoSetting('record_history')\n _ = info.post_call_handler(context)\n\n self._logging_state_pop(enabled_too=True)\n\n return retval\n\n self._add_function_attrs(f, _deco_base_f_wrapper_)\n return _deco_base_f_wrapper_\n\n #-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n # end else (case \"f_or_klass is a function\",\n # subcase \"f is a function & is NOT already deco'd\")\n #+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*+*", "def __init__(self, satisfied: Callable[[Product], bool]) -> None:\n self.satisfied = satisfied", "def __init__(self, an_function: callable):\n print(f\"Instantiating a FalseCeleryApp for {an_function.__name__}.\")\n self.an_function = an_function", "def instantiate_from_string(class_name):\n class_name = convert_underscore_to_camel_case(class_name)\n return 
globals()[class_name]()", "def _set_up_acq_opt(self):\n # First set up function to get maximum evaluations.\n if isinstance(self.options.acq_opt_max_evals, int):\n if self.options.acq_opt_max_evals > 0:\n self.get_acq_opt_max_evals = lambda t: self.options.acq_opt_max_evals\n else:\n self.get_acq_opt_max_evals = None\n else: # In this case, the user likely passed a function here.\n self.get_acq_opt_max_evals = self.options.acq_opt_max_evals\n # Additional set up based on the specific optimisation procedure\n if self.options.acq_opt_criterion == 'direct':\n self._set_up_acq_opt_direct()\n elif self.options.acq_opt_criterion == 'rand':\n self._set_up_acq_opt_rand()\n else:\n raise NotImplementedError('Not implemented acquisition optimisation for %s yet.'%(\n self.options.acq_opt_criterion))", "def prep_acquisition(self, acq_mode, read_mode, exp_time, accum_time,\r\n n_accums, kin_time, n_kinetics, trigger, keep_clean_mode,\r\n backg_subtr):\r\n # Acquisition mode: currently only scan until abort works\r\n # because when the other modes terminate the GUI wouldn't know\r\n # that the acquisition is done.\r\n if acq_mode == \"Scan until abort\":\r\n acq_mode = \"scan_until_abort\"\r\n else:\r\n raise NotImplementedError(\"acq_mode %r\" % acq_mode)\r\n\r\n # Read mode: currently only full vertical binning works because\r\n # the spectra plotters assume a 1-dimensional array.\r\n if read_mode == \"Full vertical binning\":\r\n read_mode = \"fullbin\"\r\n else:\r\n raise NotImplementedError(\"read_mode %r\" % read_mode)\r\n\r\n # Convert from ms to seconds\r\n exp_time /= 1000\r\n accum_time /= 1000\r\n kin_time /= 1000\r\n\r\n trigger = str(trigger).lower()\r\n keep_clean_mode = str(keep_clean_mode).lower().replace(\" \", \"_\")\r\n\r\n # Don't even pass the accum/kinetic settings to prep_acquisition\r\n # because we're mostly externally triggering anyway. 
TODO if\r\n # different behavior is desired.\r\n settings = {\"acq_mode\": acq_mode, \"read_mode\": read_mode,\r\n \"exp_time\": exp_time, \"trigger\": trigger}\r\n if trigger == \"external\":\r\n settings[\"keep_clean_mode\"] = keep_clean_mode\r\n\r\n # Queue a call to cam.prep_acquisition\r\n self.acquisition_settings.emit(settings)\r\n\r\n # Set which of self's methods to call upon acquisition start\r\n if backg_subtr:\r\n self.start_whole_exposure = self.get_background\r\n else:\r\n self.background = np.zeros(self.cam.x, dtype=np.int32)\r\n self.start_whole_exposure = self.continue_with_exposure", "def __init__(self, function=None, name=None, description=None):\n self.name = name\n self.function = function\n self.description = description", "def creator(cls, cache_key, device, **kwargs):\n function = cls(cache_key, device, **kwargs)\n _GLOBAL_CACHED_FUNCTIONS[cache_key] = function\n return function", "def __init__(self, function, *args):\n self.function = function\n self.args = args", "def __class_getitem__(cls, product_type):\n return lambda *args: cls(product_type, *args)", "def __init__(self, supplier):\n self.supplier = supplier", "def __init__(self, observation_space, valid_actions: callable, q_source: QEstimator):\n super().__init__(observation_space, valid_actions)\n self.q = q_source", "def __init__(self, function, function_representation):\n self.function = function\n self.function_representation = function_representation", "def __init__(self, function, instance):\r\n self.instance = instance\r\n self.function = function", "def __init__(__self__, *,\n function_name: pulumi.Input[str],\n input: pulumi.Input[str],\n qualifier: Optional[pulumi.Input[str]] = None,\n triggers: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"function_name\", function_name)\n pulumi.set(__self__, \"input\", input)\n if qualifier is not None:\n pulumi.set(__self__, \"qualifier\", qualifier)\n if triggers is not None:\n pulumi.set(__self__, \"triggers\", triggers)", "def strategy(func):\n strategies.append(func)\n return func", "def select_function(self, name=None):\n if not name:\n if len(self.available_functions()) > 1:\n raise self.MultipleFunctionsFoundError(\n \"Multiple functions found in the config file, please select\"\n \" one\"\n )\n elif len(self.available_functions()) == 1:\n # Take the first, the only one element\n self.function_selected = next(iter(self.available_functions()))\n else:\n raise self.FunctionNotFoundError(\n \"No function present is not present in the config file\"\n )\n elif name in self.available_functions():\n self.function_selected = name\n else:\n raise self.FunctionNotFoundError(\"The function {0} is not present \"\n \"in the config file\".format(name))\n self.function_config = self.config[self.function_selected]\n self.runtime = AVAILABLE_RUNTIMES[self.function_config['Runtime']]", "def finite_acquisition(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_finite_acquisition(self, *args, **kwargs)", "def __init__(self, func): \n self.func = func", "def make_query_strategy(utility_measure: Callable, selector: Callable) -> Callable:\n def query_strategy(classifier: BaseEstimator, X: modALinput) -> Tuple:\n utility = utility_measure(classifier, X)\n query_idx = selector(utility)\n return query_idx, X[query_idx]\n\n return query_strategy", "def __init__(self, function_name, function_object):\n self.function_name = function_name\n function_name_parts = self.function_name.split('_', 1) # handle function that doesn't contains 
underscore\n self.method, self.name = function_name_parts[0].upper(), function_name_parts[-1]\n self.obj = function_object\n args, _, _, defaults = inspect.getargspec(function_object) # pylint: disable=deprecated-method\n self.args = args[1:]\n self.defaults = defaults if defaults else []", "def best_out_of_sample_point(\n self,\n search_space_digest: SearchSpaceDigest,\n torch_opt_config: TorchOptConfig,\n options: Optional[TConfig] = None,\n ) -> Tuple[Tensor, Tensor]:\n if torch_opt_config.fixed_features:\n # When have fixed features, need `FixedFeatureAcquisitionFunction`\n # which has peculiar instantiation (wraps another acquisition fn.),\n # so need to figure out how to handle.\n # TODO (ref: https://fburl.com/diff/uneqb3n9)\n raise NotImplementedError(\"Fixed features not yet supported.\")\n\n options = options or {}\n acqf_class, acqf_options = pick_best_out_of_sample_point_acqf_class(\n outcome_constraints=torch_opt_config.outcome_constraints,\n seed_inner=checked_cast_optional(int, options.get(Keys.SEED_INNER, None)),\n qmc=checked_cast(bool, options.get(Keys.QMC, True)),\n risk_measure=torch_opt_config.risk_measure,\n )\n\n # Avoiding circular import between `Surrogate` and `Acquisition`.\n from ax.models.torch.botorch_modular.acquisition import Acquisition\n\n acqf = Acquisition( # TODO: For multi-fidelity, might need diff. class.\n surrogates={\"self\": self},\n botorch_acqf_class=acqf_class,\n search_space_digest=search_space_digest,\n torch_opt_config=torch_opt_config,\n options=acqf_options,\n )\n candidates, acqf_values = acqf.optimize(\n n=1,\n search_space_digest=search_space_digest,\n inequality_constraints=_to_inequality_constraints(\n linear_constraints=torch_opt_config.linear_constraints\n ),\n fixed_features=torch_opt_config.fixed_features,\n )\n return candidates[0], acqf_values[0]", "def __init__(self, function, original_name, cache_name=None, default_value=None, use_default=True,\n key_replace_name=None):\n functools.update_wrapper(self, function)\n self.func = function\n self.name = function.__name__\n self.original_name = original_name\n self.cache_name = cache_name\n self.default_value = default_value\n self.use_default = use_default\n self.key_replace_name = key_replace_name", "def getFunction(self, name: unicode) -> ghidra.program.model.listing.Function:\n ...", "def __init__(self, getter=None, **kwargs):\n if getter is None:\n getter = lambda event: event.duration\n if not callable(getter):\n raise TypeError('expected callable getter, not %r' % getter)\n self.getter = getter\n\n default_acc = ReservoirAccumulator\n use_p2 = kwargs.pop('use_p2', False)\n if use_p2:\n default_acc = P2Accumulator\n\n self._acc_type = kwargs.pop('acc_type', default_acc)\n self.q_points = kwargs.pop('q_points', QP_PRAG)\n\n self.qas = {}\n\n if kwargs:\n raise TypeError('unexpected keyword arguments: %r' % list(kwargs.keys()))", "def __init__(self, callable_, time=1):\n Function.__init__(self) # callable_ could go here\n self.time = time\n self.callable = callable_", "def gpb_from_func_caller(func_caller, worker_manager, max_capital, mode=None, acq=None,\n options=None, reporter='default'):\n if options is None:\n reporter = get_reporter(reporter)\n options = load_options(all_gp_bandit_args, reporter=reporter)\n options.acq = acq\n options.mode = mode\n return (GPBandit(func_caller, worker_manager, options, reporter)).optimise(max_capital)", "def _get_class():\n return ASParameters", "def make_acquisition_function(handle_multioutput=\"mean\"):\n\n def decorator(fn):\n if 
handle_multioutput == \"mean\": # define fn where scores are avgd\n\n @functools.wraps(fn)\n def wrapped_fn(probabilities, shuffle_prop=0.1):\n if _is_multioutput(probabilities):\n scores = np.stack(\n tuple(fn(prob) for prob in probabilities), axis=0\n ).mean(axis=0)\n else:\n scores = fn(probabilities)\n return _get_indices(scores, shuffle_prop)\n\n else: # raise error if list is passed\n\n @functools.wraps(fn)\n def wrapped_fn(probabilities, shuffle_prop=0.1):\n if _is_multioutput(probabilities):\n raise ValueError(\n \"The input probabilities is a list of arrays, \"\n + \"indicating multi-label output. \"\n + \"The {} function \".format(fn.__name__)\n + \"is not defined for these outputs. Use \"\n + \"the acquisition functions margin or certainty \"\n + \"instead.\"\n )\n else:\n scores = fn(probabilities)\n return _get_indices(scores, shuffle_prop)\n\n return wrapped_fn\n\n return decorator", "def from_object(cls, obj):\n if any(p is obj for p in obj.params):\n raise ValueError(\n f\"Cannot create a Function from a parameter object. This parameter {obj._name!r} \"\n \"is like an argument to a function---not the body of the function itself.\"\n )\n\n named_args = {p._name: getattr(p, \"_proxytype\", type(p)) for p in obj.params}\n # ^ if any of the params are widgets (likely), use their base Proxytype in the Function type signature:\n # a Function[Checkbox, Slider, ...] would be 1) weird and 2) not serializeable.\n concrete_function_type = cls[named_args, type(obj)]\n\n graft = client.function_graft(obj, *(p.graft for p in obj.params))\n # TODO we should probably store `obj.params` somewhere---that's valuable metadata maybe\n # to show the function as widgets, etc?\n return concrete_function_type._from_graft(graft)", "def get_activation_function(actfn):\n if actfn is None or actfn == 'leakyrelu':\n def create_actfn(): return nn.LeakyReLU(0.1, inplace=True)\n elif actfn == 'gelu':\n def create_actfn(): return nn.GELU()\n elif actfn == 'relu':\n def create_actfn(): return nn.ReLU()\n elif actfn == 'swish' or actfn == 'silu':\n def create_actfn(): return nn.SiLU()\n else:\n raise Exception('Unknown activation function ' + str(actfn))\n return create_actfn", "def __init__(self, function: Optional[Callable] = None,\n kwargs: Optional[Dict] = None):\n self.function: Callable = function\n\n if kwargs is None:\n kwargs = dict()\n self.kwargs: Dict[str, Any] = kwargs", "def __init__(self, mq_choice=\"zmq\"):\n self.mq = mq_choice\n func = getattr(self, \"_init_{}\".format(self.mq))\n func()", "def __init__(self, func, *args, **kwargs):\n self._func = func\n self._args = args\n self._kwargs = kwargs\n self._fully_bound = None", "def __init__(self, fitness_function=None):\n Function.initialize(self, fitness_function)", "def __init__(self, afn, *args, **kwargs):\n super().__init__(afn, *args, **kwargs)", "def __getattr__(self, exported_function_name: str) -> ExportedFunction:\n pass", "def __init__(self, fn: callable):\n self.fn = fn", "def ready_required(f):\n def new_f(*args, **kwargs):\n if args[0]._is_ready(): # args[0] is always self\n f(*args, **kwargs)\n else:\n raise NotReadyError\n new_f.__name__ = f.__name__\n new_f.__doc__ = f.__doc__\n return new_f", "def get_function(self):\n raise NotImplementedError()", "def __def_function__():\n pass", "def __call__(self, function: FuncSpeechArg):\n self._add_attr(function)\n return function", "def switch_function_tab(self):\n fitting_func = self.ui.FittingFunc_comboBox.currentText()\n if fitting_func == \"Stretched Exponential\":\n 
self.ui.fitting_params_stackedWidget.setCurrentIndex(0)\n elif fitting_func == \"Double Exponential\":\n self.ui.fitting_params_stackedWidget.setCurrentIndex(1)\n elif fitting_func == \"Single Exponential\":\n self.ui.fitting_params_stackedWidget.setCurrentIndex(2)", "def __init__(self, apifunc, cachefunc=None, cachedir='cache/', mindelay=0.5, nthreads=4, expiration=0, expirepolicy='overwrite', serializer='pickle', defaultkwargs=None):\n from Queue import Queue\n # save params\n self.apifunc = apifunc\n if not cachefunc:\n cachefunc = defaultcachefunc\n self.cachefunc = cachefunc\n self.cachedir = cachedir % dict(fn=apifunc.__name__)\n self.mindelay, self.nthreads, self.serializer = mindelay, nthreads, serializer\n self.expiration, self.expirepolicy = expiration, expirepolicy\n self.defaultkwargs = defaultkwargs\n # instance vars\n self.lastcall = 0\n self.inq, self.outq = Queue(), Queue()\n # spawn threads\n self.threads = spawnWorkers(self.nthreads, self._qprocess)", "def __init__(self, func_name, spin):\n self.xc_func = None\n self._xc_func_init = False\n\n # Handle func_name\n if isinstance(func_name, str):\n func_id = util.xc_functional_get_number(func_name)\n if func_id == -1:\n raise KeyError(\"LibXCFunctional: name '%s' not found.\" % func_name)\n elif isinstance(func_name, (int, np.integer)):\n func_id = func_name\n if util.xc_functional_get_name(func_name) is None:\n raise KeyError(\"LibXCFunctional: ID '%d' not found.\" % func_name)\n else:\n raise TypeError(\"LibXCFunctional: func_name must either be a string or int. Got {}\".format(func_name))\n\n self._xc_func_name = util.xc_functional_get_name(func_id)\n\n # Handle spin\n if isinstance(spin, str):\n spin = spin.lower()\n if spin == \"polarized\":\n self._spin = 2\n elif spin == \"unpolarized\":\n self._spin = 1\n else:\n raise KeyError(\"LibXCFunctional: spin must either be 'polarized' or 'unpolarized' if represented by a string. Got {}\".format(spin))\n else:\n self._spin = spin\n\n if self._spin not in [1, 2]:\n raise KeyError(\"LibXCFunctional: spin must either be 1 or 2 if represented by a integer. Got {}\".format(self._spin))\n\n # Build the LibXC functional\n self.xc_func = core.xc_func_alloc()\n self.xc_func_size_names = [x for x in dir(self.xc_func.contents.dim) if not \"_\" in x]\n\n # Set all int attributes to zero (not all set to zero in libxc)\n for attr in self.xc_func_size_names:\n setattr(self.xc_func.contents, attr, 0)\n\n ret = core.xc_func_init(self.xc_func, func_id, self._spin)\n if ret != 0:\n raise ValueError(\"LibXC Functional construction did not complete. 
Error code %d\" % ret)\n self._xc_func_init = True\n\n # Pull out all sizes after init\n self.xc_func_sizes = {}\n for attr in self.xc_func_size_names:\n self.xc_func_sizes[attr] = getattr(self.xc_func.contents.dim, attr)\n\n # Unpack functional info\n self.xc_func_info = core.xc_func_get_info(self.xc_func)\n self._number = core.xc_func_info_get_number(self.xc_func_info)\n self._kind = core.xc_func_info_get_kind(self.xc_func_info)\n self._name = core.xc_func_info_get_name(self.xc_func_info).decode(\"UTF-8\")\n self._family = core.xc_func_info_get_family(self.xc_func_info)\n self._flags = core.xc_func_info_get_flags(self.xc_func_info)\n\n # Set needed flags\n self._needs_laplacian = self._flags & flags.XC_FLAGS_NEEDS_LAPLACIAN\n\n # Set derivatives\n self._have_exc = self._flags & flags.XC_FLAGS_HAVE_EXC\n self._have_vxc = self._flags & flags.XC_FLAGS_HAVE_VXC\n self._have_fxc = self._flags & flags.XC_FLAGS_HAVE_FXC\n self._have_kxc = self._flags & flags.XC_FLAGS_HAVE_KXC\n self._have_lxc = self._flags & flags.XC_FLAGS_HAVE_LXC\n\n # Set omega\n self._have_cam = self._flags & flags.XC_FLAGS_HYB_CAM\n self._have_cam |= self._flags & flags.XC_FLAGS_HYB_CAMY\n self._have_cam |= self._flags & flags.XC_FLAGS_HYB_LC\n self._have_cam |= self._flags & flags.XC_FLAGS_HYB_LCY\n self._cam_omega = self._cam_alpha = self._cam_beta = False\n if self._have_cam:\n self._cam_omega = self.xc_func.contents.cam_omega\n self._cam_alpha = self.xc_func.contents.cam_alpha\n self._cam_beta = self.xc_func.contents.cam_beta\n\n elif self._family in [flags.XC_FAMILY_HYB_LDA, flags.XC_FAMILY_HYB_GGA, flags.XC_FAMILY_HYB_MGGA]:\n self._cam_alpha = self.xc_func.contents.cam_alpha\n\n # VV10\n self._have_vv10 = self._flags & flags.XC_FLAGS_VV10\n self._nlc_b = self._nlc_C = False\n if self._have_vv10:\n self._nlc_b = self.xc_func.contents.nlc_b\n self._nlc_C = self.xc_func.contents.nlc_C\n\n # Stable\n self._stable = self._flags & flags.XC_FLAGS_STABLE\n self._dev = self._flags & flags.XC_FLAGS_DEVELOPMENT\n\n # Pull out references\n self._refs = []\n self._bibtexs = []\n self._dois = []\n\n for pos in range(flags.XC_MAX_REFERENCES):\n ref = core.xc_func_info_get_references(self.xc_func_info, pos)\n if not ref: break\n\n self._refs.append(ref.contents.ref.decode(\"UTF-8\"))\n self._bibtexs.append(ref.contents.bibtex.decode(\"UTF-8\"))\n self._dois.append(ref.contents.doi.decode(\"UTF-8\"))", "def getAc(self, state):\n\n # Pick Action\n flip = util.flipCoin(self.epsilon)\n\n if flip:\n\t\t\treturn random.choice(self.actions)\n\n return self.getPolicy(state)", "def __init__(self, stock, share, utility_function, strategy):\n self.stock = stock\n self.share = share\n self.utility_function = utility_function\n self.strategy = strategy\n # still need to consider the net worth", "def select_action(self, state):\n\n if state in self.Q:\n prob = self.get_probs(self.Q[state])\n else:\n prob = np.ones(self.nA) / self.nA\n return np.random.choice(np.arange(self.nA), p = prob)", "def _select_classifier_from_sk_search(estimator, X, A):\n estimator.fit(X, A)\n best_estimator = clone(estimator.best_estimator_)\n return best_estimator", "def qs_to_q_function(func: FunctionType, q_1: Qs) -> Q:\n\n scalar = func(q_1)\n\n if scalar.qs_type != \"scalar_q\":\n raise Exception(f\"Oops, does not evaluate to a scalar: {scalar}\")\n\n return scalar.qs[0]", "def get_func(self, class_name, arg, stored_objects):\n find_func = re.match('([a-z]+)', arg) # returns a matching object\n func_name = find_func.group()\n args = re.findall('\"([^\"]+)\",?', 
arg) # return a list of arguments\n\n if len(args) == 0:\n if func_name == \"all\":\n self.all(class_name, stored_objects)\n elif func_name == \"count\":\n self.count(class_name, stored_objects)\n else:\n print(\"** instance id missing **\")\n\n elif len(args) == 1:\n if self.check_instance(class_name, args[0], stored_objects):\n if func_name == \"show\":\n self.show(class_name, args[0], stored_objects)\n elif func_name == \"destroy\":\n self.destroy(class_name, args[0], stored_objects)\n elif func_name == \"update\":\n print(\"** attribute name missing **\")\n\n elif len(args) == 2 and func_name == \"update\":\n print(\"** value missing **\")\n\n elif len(args) == 3 and func_name == \"update\":\n if self.check_instance(class_name, args[0], stored_objects):\n self.update(class_name, args, stored_objects)", "def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None", "def autocomplete_curve_function(s):\n s = s.strip().upper()\n if not s:\n return CURVE_FUNCTIONS_ORDERED[0]\n for i in CURVE_FUNCTIONS_ORDERED:\n if i.startswith(s):\n return i\n if consts.VERBOSE:\n print('ERROR: Bad curve function %s'%s)\n return CURVE_FUNCTIONS_ORDERED[0]", "def __init__(self, rank_discount_fn):\n self._rank_discount_fn = rank_discount_fn", "def __init__(self, func, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._func = func", "def __init__(self, entity_func: Callable[[DurableEntityContext], None]):\n self.fn: Callable[[DurableEntityContext], None] = entity_func", "def _get_task_cls(fn):\n\n if hasattr(fn, \"task_cls\"):\n cls = fn.task_cls\n elif hasattr(fn, \"decorator\") and hasattr(fn.decorator, \"task_cls\"):\n cls = fn.decorator.task_cls\n else:\n cls = asynq.AsyncTask\n\n if cls is None: # @async_proxy()\n return asynq.FutureBase\n else:\n return cls", "def ask(self, **kwargs):\n qtype = kwargs.get('qtype', 'manual')\n\n clean_keys = ['qtype']\n clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)\n\n q_obj_map = pytan.utils.get_q_obj_map(qtype=qtype)\n\n method = getattr(self, q_obj_map['handler'])\n result = method(**clean_kwargs)\n return result", "def from_function(cls, py_func, py_file):\n raise NotImplementedError", "def from_function(cls, py_func, py_file):\n raise NotImplementedError" ]
[ "0.6058467", "0.5935972", "0.58589655", "0.5788014", "0.548214", "0.5331167", "0.53201264", "0.53140235", "0.53060716", "0.5304556", "0.5121495", "0.5052555", "0.5037035", "0.50274396", "0.500004", "0.49999496", "0.49726632", "0.49604744", "0.49250162", "0.48792323", "0.48333678", "0.47544217", "0.47469586", "0.47466987", "0.47157833", "0.4712404", "0.47073543", "0.4704667", "0.47041744", "0.46938", "0.46596897", "0.46309507", "0.46283948", "0.46137857", "0.45907575", "0.45786232", "0.45778373", "0.4568059", "0.4563779", "0.4562364", "0.45518127", "0.45466822", "0.45452744", "0.4529823", "0.45143566", "0.45074177", "0.45067137", "0.45064187", "0.44998932", "0.4496542", "0.44935808", "0.449116", "0.4486347", "0.44853023", "0.44801113", "0.44759002", "0.4471988", "0.44708243", "0.44687897", "0.44682044", "0.44679278", "0.44672203", "0.44621786", "0.44601744", "0.44575587", "0.44493577", "0.4448863", "0.4444465", "0.4437511", "0.44352433", "0.44213238", "0.44045806", "0.4402931", "0.4399093", "0.4396317", "0.43933457", "0.43907648", "0.43885306", "0.43689182", "0.4360582", "0.43602046", "0.43524575", "0.43505424", "0.43471345", "0.4341157", "0.43392408", "0.43343958", "0.43335193", "0.43295097", "0.43224958", "0.43220142", "0.43183428", "0.43160868", "0.43151897", "0.4307247", "0.43067116", "0.4305231", "0.43029705", "0.43026495", "0.43026495" ]
0.73012865
0
Construct a `TrainingData` object based on sizes of Xs, Ys, and Yvars, and the type of model, for which the training data is intended.
def construct_training_data( Xs: List[Tensor], Ys: List[Tensor], Yvars: List[Tensor], model_class: Type[Model] ) -> TrainingData: if not isclass(model_class): # pragma: no cover raise ValueError( f"Expected `Type[Model]`, got: {model_class} " f"(type: {type(model_class)})." ) if len(Xs) == len(Ys) == 1: # Just one outcome, can use single model. return TrainingData(X=Xs[0], Y=Ys[0], Yvar=Yvars[0]) elif issubclass(model_class, BatchedMultiOutputGPyTorchModel) and all( torch.equal(Xs[0], X) for X in Xs[1:] ): # All Xs are the same and model supports batched multioutput. return TrainingData( X=Xs[0], Y=torch.cat(Ys, dim=-1), Yvar=torch.cat(Yvars, dim=-1) ) elif model_class is ModelListGP: # pragma: no cover # TODO: This will be case for `ListSurrogate`. raise NotImplementedError("`ModelListGP` not yet supported.") raise ValueError(f"Unexpected training data format for {model_class}.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def _unpack_training_data(data, val=None):\n if isinstance(data, TrainingData):\n assert val is None\n return data\n\n if val is not None:\n x, y = data\n return TrainingData.from_x_y(x, y, val)\n\n train, val = data\n if not isinstance(train, Dataset):\n xx, yy = train\n train = RamDataset(xx, yy)\n if not isinstance(val, Dataset):\n xx, yy = val\n val = RamDataset(xx, yy)\n return TrainingData(train, val)", "def prepare_dataset(data_path, test_size=0.2, validation_size=0.2):\r\n\r\n # load dataset\r\n if data_path.endswith('json'):\r\n X, y = load_data_from_json(data_path)\r\n else:\r\n X, y = load_data_from_fold(data_path)\r\n # create train, validation, test split\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)\r\n X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=validation_size)\r\n\r\n # add an axis to nd array\r\n X_train = X_train[..., np.newaxis]\r\n X_test = X_test[..., np.newaxis]\r\n X_validation = X_validation[..., np.newaxis]\r\n\r\n return X_train, y_train, X_validation, y_validation, X_test, y_test", "def build_training_data_loader(self) -> DataLoader:\n pass", "def createtrainingarrays(dataSize, xVariables, yVariable, TrainIndices):\n\n # For the desired training indices, add the values to the training arrays\n xTrainValues = np.array([])\n yTrainValues = np.array([])\n indexCounter = 0\n for q in range(0, dataSize):\n if TrainIndices.__contains__(q):\n\n if indexCounter is 0:\n xTrainValues = xVariables[q]\n indexCounter = -1\n else:\n xTrainValues = np.vstack((xTrainValues, xVariables[q]))\n\n yTrainValues = np.append(yTrainValues, yVariable[0][q])\n\n # Reshape the data to proper dimensions so that a linear regression may be performed\n length = yTrainValues.size\n yTrainValues = yTrainValues.reshape(length, 1)\n\n return xTrainValues, yTrainValues", "def get_data_loader_from_data(cls, batch_size, X, Y, **kwargs):\n X_torch = torch.from_numpy(X).float()\n\n if (\n \"classification_problem\" in kwargs\n and kwargs[\"classification_problem\"] == False\n ):\n Y_torch = torch.from_numpy(Y).float()\n else:\n Y_torch = torch.from_numpy(Y).long()\n dataset = TensorDataset(X_torch, Y_torch)\n kwargs.pop(\"classification_problem\", None)\n return DataLoader(dataset, batch_size=batch_size, **kwargs)", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == 
\"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def build_dataset(self, X, y=None):\n X = np.array(X)\n self.input_dim = X.shape[1]\n X = torch.FloatTensor(X)\n if y is None:\n dataset = torch.utils.data.TensorDataset(X)\n else:\n self.classes_ = sorted(set(y))\n self.n_classes_ = len(self.classes_)\n class2index = dict(zip(self.classes_, range(self.n_classes_)))\n y = [class2index[label] for label in y]\n y = torch.tensor(y)\n dataset = torch.utils.data.TensorDataset(X, y)\n return dataset", "def trainData(self, X, y, NeuralNet, epochs):", "def prepare_dataset(self, xFold_step, xFold_type):\n\n eval_samples_per_xfold = int(round((self.__train_size + self.__eval_size)/xFold_type))\n\n start_index = int(xFold_step*eval_samples_per_xfold)\n end_index = int(start_index + eval_samples_per_xfold)\n\n if end_index < len(self.__read_in_labels[-self.__test_size:]):\n end_index = len(self.__read_in_labels[-self.__test_size:])\n\n dataset = {\n \"x_train\": np.concatenate((self.__read_in_images[:start_index], self.__read_in_images[end_index:]), axis=0),\n \"y_train\": np.concatenate((self.__read_in_labels[:start_index], self.__read_in_labels[end_index:]), axis=0),\n\n \"x_eval\": self.__read_in_images[start_index:end_index],\n \"y_eval\": self.__read_in_labels[start_index:end_index],\n\n \"x_test\": self.__read_in_images[-self.__test_size:],\n \"y_test\": self.__read_in_labels[-self.__test_size:],\n }\n\n return dataset", "def make_data_loader(examples, batch_size=100, shuffle=True):\n x, y = zip(*examples) # makes lists of windows and tags\n x, y = tr.from_numpy(np.array(x)), tr.from_numpy(np.array(y))\n x, y = x.type(tr.LongTensor), y.type(tr.LongTensor) # convert lists to tensors\n train = utdata.TensorDataset(x, y)\n return utdata.DataLoader(train, batch_size, shuffle)", "def make_training_xy(self, data):\n pass", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if 
dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def _create_model_get_train_X_y(self, X_train, y_train):\n if X_train is not None:\n data_X = X_train.copy()\n else:\n if self.X_train is None:\n data_X = None\n else:\n data_X = self.X_train\n data_y = self.y_train if y_train is None else y_train.copy()\n return data_X, data_y", "def _create_dataset(batch_size):\n ds = collections.OrderedDict([('x', [[-1.0, -1.0], [1.0, 1.0], [1.0, 1.0]]),\n ('y', [[1.0], [1.0], [1.0]])])\n # Note: batching is needed here as it creates the required batch dimension.\n # The batch size can be re-set (by `unbatch()` first) in personalization.\n return tf.data.Dataset.from_tensor_slices(ds).batch(batch_size)", "def create_data(dataset, val_size=0.1):\n if dataset == 1:\n (X, y), test = mnist.load_data()\n else:\n (X, y), test = fashion_mnist.load_data()\n\n len_val = int(X.shape[0] * val_size)\n \n return (X[len_val:], y[len_val:]), (X[:len_val], y[:len_val]), test", "def construct(data_dir, fname, X=None, normalize=False, _type='sparse'):\n if _type == 'sparse':\n return SparseFeatures(data_dir, fname, X, normalize)\n elif _type == 'dense':\n return DenseFeatures(data_dir, fname, X, normalize)\n elif _type == 'sequential':\n return SequentialFeatures(data_dir, fname, X)\n else:\n raise NotImplementedError(\"Unknown 
feature type\")", "def training_data(kind, depth = 5):\n\n if kind == 'unigram':\n return UnigramTrainingData.load(UNIGRAM_DIR + str(depth))\n\n if kind == 'rnn':\n return RNNTrainingData.load(RNN_DIR + str(depth))", "def train(self, model_type, params=None):\n Model = load_model_class(model_type)\n self.model_type = model_type\n X, y = self.task.make_dataset()\n self.final_data = X.copy()\n # Save preds\n preds = np.zeros_like(y.values).astype(np.float)\n with TMPFolder():\n N = len(X)\n n = N // self.cv\n # Assign a fold to each sample\n folds = np.random.permutation(np.repeat(np.arange(self.cv), n+1)[:N])\n if self.cv == 1:\n folds[:] = 1\n folds[np.random.permutation(np.arange(N))[:int(round(0.25 * N))]] = 0\n # Iterate over folds\n for k in range(self.cv):\n print(\"Fold\", k)\n # Create model\n model = Model()\n if params is not None:\n model.set_hp(params)\n # Create sub-dataset\n X_train = X[folds != k]\n y_train = y[folds != k]\n X_test = X[folds == k]\n y_test = y[folds == k]\n # Train the model\n model.train(X_train, y_train)\n # Make predictions on test samples\n y_pred = model.predict(X_test)\n # Save the predictions\n preds[folds == k] = y_pred\n self.model_save.append(model)\n # Save folds\n self.folds = folds\n self.is_trained = True\n self.preds = preds\n self.true_labels = y", "def get_dataloader(data_folder, model_name, data_name, size=\"default\"):\n training_set = None\n validation_set = None\n\n if model_name == \"Howe_Patterson\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder)\n validation_set = Dataset_full(partition['validation'], data_folder)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"Deep_Sleep\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 
'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n # TODO combined dataset https://discuss.pytorch.org/t/train-simultaneously-on-two-datasets/649/17\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"ConvNet_IID\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition_IID_windows.pkl')))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition_IID_windows.pkl'))\n if data_name == \"SHHS\":\n training_set = Dataset_IID_window_SHHS(partition['train'], data_folder)\n validation_set = Dataset_IID_window_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_IID_window(partition['train'], data_folder)\n validation_set = Dataset_IID_window(partition['validation'], data_folder)\n elif data_name == \"philips\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_IID_window(partition[0]['train'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['train'], data_folder[1]))\n validation_set = ConcatDataset(\n Dataset_IID_window(partition[0]['validation'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['validation'], data_folder[1]))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n\n else:\n print(\"{} wrong model for dataloader\".format(model_name))\n exit()\n\n return training_set, validation_set", "def _create_dataset(self, *data):\n # 
Make sure data is a tuple of dense tensors\n data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data]\n return TensorDataset(*data)", "def prepare_data(self, context_size, model_name):\n self.context_size = context_size\n data_x = []\n data_y = []\n oob = self.word2idx['OOB']\n\n for item in self.docs:\n data = [oob] * context_size + self.doc2token(item) + [oob] * context_size #padding\n for i in range(context_size, len(data) - context_size):\n data_x.append(data[i - context_size: i] + data[i + 1: i + context_size + 1])\n data_y.append(data[i])\n \n if model_name.lower() == 'skipgram':\n data_x, data_y = data_y, data_x\n self.data_x = Variable(torch.LongTensor(data_x))\n self.data_y = Variable(torch.LongTensor(data_y))\n logging.info(f'data preprocessed, data shape: {self.data_x.shape}, {self.data_y.shape}')", "def create_and_train_NN(nn_model, training_data, training_keywords):\n\n data_rows, data_columns = training_data.shape\n\n keywords_rows, keywords_columns = training_keywords.shape\n\n nn_model.create_model(data_columns, keywords_columns)\n\n nn_model.train_model(training_data, training_keywords, graphs=True)", "def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)", "def prepare_dataset(self, dataset_type: str) -> Dataset:\n\n logger.info(\"Creating features from dataset file at %s\", self.hparams.data_dir)\n\n if dataset_type == \"train\":\n dataset = self.processor.get_train_dataset(self.hparams.data_dir, self.hparams.train_file_name)\n elif dataset_type == \"dev\":\n dataset = self.processor.get_dev_dataset(self.hparams.data_dir, self.hparams.dev_file_name)\n elif dataset_type == \"test\":\n dataset = self.processor.get_test_dataset(self.hparams.data_dir, self.hparams.test_file_name)\n else:\n raise ValueError(f\"{dataset_type} do not support. 
[train|dev|test]\")\n logger.info(f\"Prepare {dataset_type} dataset (Count: {len(dataset)}) \")\n return dataset", "def initialize_dataloaders(\n self, X: Union[np.ndarray, pd.DataFrame], y: Union[np.ndarray, np.array]\n ):\n training_design_matrix, training_targets_array, validation_design_matrix, validation_targets_array = self.generate_training_validation_split(\n X, y\n )\n training_dataloader_kwargs = {\n \"design_matrix\": training_design_matrix,\n \"targets_array\": training_targets_array,\n \"data_type\": self.data_type,\n \"batch_size\": self.batch_size,\n \"shuffle\": self.shuffle_training_examples,\n }\n validation_dataloader_kwargs = {\n \"design_matrix\": validation_design_matrix,\n \"targets_array\": validation_targets_array,\n \"data_type\": self.data_type,\n \"batch_size\": self.batch_size,\n \"shuffle\": False,\n }\n self.training_dataloader = self.generate_dataloader(**training_dataloader_kwargs)\n self.validation_dataloader = self.generate_dataloader(**validation_dataloader_kwargs)", "def getTrainingData(self):\n raise NotImplementedError", "def _prepare_for_training(\n self,\n trackers: List[TrackerWithCachedStates],\n domain: Domain,\n precomputations: MessageContainerForCoreFeaturization,\n **kwargs: Any,\n ) -> Tuple[RasaModelData, np.ndarray]:\n training_trackers = self._get_trackers_for_training(trackers)\n # dealing with training data\n tracker_state_features, label_ids, entity_tags = self._featurize_for_training(\n training_trackers,\n domain,\n precomputations=precomputations,\n bilou_tagging=self.config[BILOU_FLAG],\n **kwargs,\n )\n\n if not tracker_state_features:\n return RasaModelData(), label_ids\n\n self._label_data, encoded_all_labels = self._create_label_data(\n domain, precomputations=precomputations\n )\n\n # extract actual training data to feed to model\n model_data = self._create_model_data(\n tracker_state_features, label_ids, entity_tags, encoded_all_labels\n )\n\n if self.config[ENTITY_RECOGNITION]:\n self._entity_tag_specs = (\n self.featurizer.state_featurizer.entity_tag_specs\n if self.featurizer.state_featurizer is not None\n else []\n )\n\n # keep one example for persisting and loading\n self.data_example = model_data.first_data_example()\n\n return model_data, label_ids", "def __init__(self, data_X, data_Y, dtype=dtypes.float32):\n dtype = dtypes.as_dtype(dtype).base_dtype\n if dtype not in (dtypes.uint8, dtypes.float32):\n raise TypeError(\"Invalid dtype %r, expected uint8 or float32\" % dtype)\n\n assert data_X.shape[0] == data_Y.shape[0], (\"data_X.shape: %s data_Y.shape: %s\" % (data_X.shape, data_Y.shape))\n self.num_examples = data_X.shape[0]\n\n if dtype == dtypes.float32:\n data_X = data_X.astype(np.float32)\n self.data_X = data_X\n self.data_Y = data_Y \n\n self.epochs_completed = 0\n self.index_in_epoch = 0", "def build_model_mobilenet(num_classes):", "def make_data(_is_train, data, label):\n save_path = os.path.join(os.getcwd(), \"SRCNN\", 'checkpoint')\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n if _is_train:\n save_path = os.path.join(save_path, 'train.h5')\n else:\n save_path = os.path.join(save_path, 'test.h5')\n\n # data 和 label 預設類型是 numpy array ,但若建立時內部陣列維度不相等,內部數據將被轉為 dtype=object\n # 導致 h5py 無法儲存: TypeError: Object dtype dtype('O') has no native HDF5 equivalent\n with h5py.File(save_path, 'w') as hf:\n hf.create_dataset('data', data=data)\n hf.create_dataset('label', data=label)", "def prep_data(self):\n\n self.fit_tokenizer(texts=self.texts)\n sequences = self.get_sequences(self.texts)\n 
self.text_data = pad_sequences(sequences, maxlen=self.MAX_SEQUENCE_LENGTH)\n\n self.labels = to_categorical(np.asarray(self.labels))\n print('Shape of data tensor:', self.text_data.shape)\n print('Shape of label tensor:', self.labels.shape)\n\n # split the data into a training set and a validation set\n indices = np.arange(self.text_data.shape[0])\n np.random.shuffle(indices)\n self.text_data = self.text_data[indices]\n self.labels = self.labels[indices]\n nb_validation_samples = int(self.VALIDATION_SPLIT * self.text_data.shape[0])\n\n x_train = self.text_data[:-nb_validation_samples]\n y_train = self.labels[:-nb_validation_samples]\n x_val = self.text_data[-nb_validation_samples:]\n y_val = self.labels[-nb_validation_samples:]\n\n return x_train,y_train, x_val, y_val", "def _prepare_ml_data(X, y, to_optimize=False):\n size_test = 1\n y_test = None\n if to_optimize:\n size_test = CONFIG.OPTIMIZE_PARAMS['size'] + 1\n y_test = y.iloc[-size_test:]\n X_train = X.iloc[:-size_test]\n y_train = y.iloc[:-size_test]\n X_test = X.iloc[-size_test:]\n return X_train, y_train, X_test, y_test", "def get_train_data(batch_size=8):\n transform_train = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomFlipLeftRight(),\n transforms.RandomColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n transforms.RandomLighting(0.1),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n ])\n\n img_folder, img_file = get_data_path()\n td = MultilabelDataset(data_folder=img_folder, data_file=img_file)\n train_data = DataLoader(td.transform_first(transform_train), batch_size=batch_size, shuffle=True)\n return train_data", "def define_training_data(self, train_sources, train_labels=None):\n logging.info(\"Defining training data for NNetModel...\")\n self.train_cols = []\n if train_labels is None:\n for source in train_sources:\n self.train_cols += self._read(source)\n else:\n for source, label in zip(train_sources, train_labels):\n self.train_cols += self._read(source, label)\n\n logging.info(\"NNetModel: Training data contains {} columns from {} sources\".format(len(self.train_cols), len(train_sources)))", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with 
open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def create_data_model():\r\n data = {}\r\n data['distance_matrix'] = distance_matrix.tolist()\r\n data['time_matrix'] = time_matrix.tolist()\r\n data['time_windows'] = time_windows.tolist()\r\n data['pickups_deliveries'] = pickup_deliveries.tolist()\r\n data['demands'] = demand\r\n data['num_vehicles'] = 20\r\n data['vehicle_capacities'] = [20 * i / i for i in range(1, num_vehicles+1)]\r\n data['depot'] = (2 * length) - 1\r\n return data", "def prepare_data(X, y, n = None):\n # notice that the training data contains time series data since the GTI\n # data is collected from videos and thus frames from the same video are\n # not independent. It would be better to split based on video but\n # unfortunately this information is not included in the dataset and thus\n # we are leaking data from test set into training set which makes the\n # testing performance a less reliable estimate for the classifiers\n # generalization ability.\n X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(\n X, y, test_size = 0.15)\n if n is not None:\n X_train = X_train[:n,...]\n y_train = y_train[:n,...]\n X_test = X_test[:n,...]\n y_test = y_test[:n,...]\n return (X_train, y_train), (X_test, y_test)", "def _fit(cls, project_id: str, name: str, data_type: DataType, training_type: TypeProblem,\n dataset: Union[Dataset, Tuple[Dataset, DatasetImages]], column_config: ColumnConfig,\n metric: metrics.Enum, holdout_dataset: Dataset = None,\n training_config: TrainingConfig = TrainingConfig(), **kwargs) -> 'Supervised':\n training_args = to_json(training_config)\n assert isinstance(training_args, Dict)\n training_args.update(to_json(column_config))\n\n if holdout_dataset:\n if isinstance(holdout_dataset, str):\n training_args['holdout_dataset_id'] = holdout_dataset\n else:\n training_args['holdout_dataset_id'] = holdout_dataset.id\n\n assert metric\n\n if isinstance(dataset, str):\n dataset_id = dataset\n elif isinstance(dataset, tuple):\n dataset_id = [d.id for d in dataset]\n else:\n dataset_id = dataset.id\n start_response = cls._start_usecase(project_id,\n name,\n dataset_id=dataset_id,\n data_type=data_type,\n training_type=training_type,\n metric=metric if isinstance(metric, str) else metric.value,\n **training_args)\n usecase = cls.from_id(start_response['_id'])\n print(usecase.training_type)\n events_url = '/{}/{}'.format(cls.resource, start_response['_id'])\n pio.client.event_manager.wait_for_event(usecase.resource_id,\n cls.resource,\n EventTuple('USECASE_VERSION_UPDATE', 'state', 'running',\n [('state', 'failed')]),\n specific_url=events_url)\n\n return usecase", "def 
train_model(x_data, y_data, model_type):\n # def lr model object\n clr = None\n try:\n clr = model_list[model_type]()\n except Exception as e:\n print(e)\n # fit model\n clr.fit(x_data, y_data)\n # save model in pkl file\n try:\n joblib.dump(clr, \"model/\" + model_type + \".pkl\")\n except Exception as e:\n print(e)\n return clr", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def create_train_valid_set(self):\n\n if not self.eq_train:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level, self.train_weights, self.y_train,\n train_size=0.7, test_size=0.3\n )\n else:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, w_train_eq, w_valid_eq, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level,\n self.train_weights, self.train_weights_eq, self.y_train,\n train_size=0.7, test_size=0.3\n )\n self.train_weights_eq = w_train_eq\n\n #NOTE: might need to re-equalise weights in each folds as sumW_sig != sumW_bkg anymroe!\n self.train_weights = train_w\n self.valid_weights = valid_w #validation weights should never be equalised weights!\n\n print 'creating validation dataset'\n self.X_train_high_level = X_train_high_level\n self.X_train_low_level = self.join_objects(X_train_low_level)\n\n self.X_valid_high_level = X_valid_high_level\n self.X_valid_low_level = self.join_objects(X_valid_low_level)\n print 'finished creating validation dataset'\n\n self.y_train = y_train\n self.y_valid = y_valid", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def make_loader(model, dataset, batch_size, workers, eval_proportion):\n dataset_extra_args = dict(\n num_frames=model['num_frames'],\n channels=model['input_type'],\n action_space=model['action_space'],\n steps_action=model['steps_action'],\n num_signals=model['num_signals'],\n num_skills=model['num_skills'])\n 
if model['mode'] == 'regression':\n dataset_class = RegressionDataset\n else:\n dataset_class = ImitationDataset\n\n im_dataset = dataset_class(**dataset, **dataset_extra_args)\n train_dataset, eval_dataset = split_dataset(im_dataset, eval_proportion)\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size,\n num_workers=workers,\n shuffle=True,\n drop_last=False)\n if eval_dataset is not None:\n eval_loader = torch.utils.data.DataLoader(\n eval_dataset,\n batch_size,\n num_workers=workers,\n shuffle=True,\n drop_last=False)\n else:\n eval_loader = None\n\n env_name = im_dataset._frames.infos['env_name']\n return (train_loader, eval_loader, env_name, im_dataset.get_statistics())", "def _create_dataset(options, is_training, input_pipeline_context=None):\n dataset = tf.data.Dataset.list_files(options.input_pattern[:],\n shuffle=is_training)\n\n batch_size = options.batch_size\n if input_pipeline_context:\n if input_pipeline_context.num_input_pipelines > 1:\n dataset = dataset.shard(input_pipeline_context.num_input_pipelines,\n input_pipeline_context.input_pipeline_id)\n batch_size = input_pipeline_context.get_per_replica_batch_size(\n options.batch_size)\n\n if is_training:\n if options.cache_dataset:\n dataset = dataset.cache()\n dataset = dataset.repeat()\n dataset = dataset.shuffle(options.shuffle_buffer_size)\n dataset = dataset.interleave(tf.data.TFRecordDataset,\n cycle_length=options.interleave_cycle_length)\n\n parse_fn = lambda x: _parse_single_example(x, options)\n dataset = dataset.map(map_func=parse_fn,\n num_parallel_calls=options.num_parallel_calls)\n\n padded_shapes = {\n InputFields.img_id: [],\n InputFields.annot_id: [],\n InputFields.answer_label: [],\n InputFields.num_objects: [],\n InputFields.object_bboxes: [None, 4],\n InputFields.object_labels: [None],\n InputFields.object_scores: [None],\n InputFields.object_features: [None, options.frcnn_feature_dims],\n InputFields.cls_bert: [NUM_CHOICES, options.bert_feature_dims],\n InputFields.question: [None],\n InputFields.question_tag: [None],\n InputFields.question_bert: [NUM_CHOICES, None, options.bert_feature_dims],\n InputFields.question_len: [],\n InputFields.answer_choices: [NUM_CHOICES, None],\n InputFields.answer_choices_tag: [NUM_CHOICES, None],\n InputFields.answer_choices_bert: [\n NUM_CHOICES, None, options.bert_feature_dims\n ],\n InputFields.answer_choices_len: [NUM_CHOICES],\n InputFields.answer_choices_with_question: [NUM_CHOICES, None],\n InputFields.answer_choices_with_question_tag: [NUM_CHOICES, None],\n InputFields.answer_choices_with_question_len: [NUM_CHOICES],\n }\n padding_values = {\n InputFields.img_id: '',\n InputFields.annot_id: '',\n InputFields.answer_label: -1,\n InputFields.num_objects: 0,\n InputFields.object_bboxes: 0.0,\n InputFields.object_labels: '',\n InputFields.object_scores: 0.0,\n InputFields.object_features: 0.0,\n InputFields.cls_bert: 0.0,\n InputFields.question: PAD,\n InputFields.question_tag: -1,\n InputFields.question_bert: 0.0,\n InputFields.question_len: 0,\n InputFields.answer_choices: PAD,\n InputFields.answer_choices_tag: -1,\n InputFields.answer_choices_bert: 0.0,\n InputFields.answer_choices_len: 0,\n InputFields.answer_choices_with_question: PAD,\n InputFields.answer_choices_with_question_tag: -1,\n InputFields.answer_choices_with_question_len: 0,\n }\n if options.decode_jpeg:\n padded_shapes.update({\n InputFields.img_data: [None, None, 3],\n InputFields.img_height: [],\n InputFields.img_width: [],\n })\n padding_values.update({\n 
InputFields.img_data: tf.constant(0, dtype=tf.uint8),\n InputFields.img_height: 0,\n InputFields.img_width: 0,\n })\n dataset = dataset.padded_batch(batch_size,\n padded_shapes=padded_shapes,\n padding_values=padding_values,\n drop_remainder=True)\n dataset = dataset.prefetch(options.prefetch_buffer_size)\n return dataset", "def from_data(cls, model_params, y_train, train_preds,\n y_test, test_preds, trace, target_col):\n train_preds, test_preds = utils.cap_train_and_test_predictions(\n train_preds, test_preds)\n start = len(train_preds)\n end = start + len(test_preds)\n results_dict = utils.calculate_evaluation_metrics(\n y_train, train_preds, y_test, test_preds,\n trace.get_spare_resource_in_window(target_col, start, end))\n harvest_stats_dict = {\n buffer_pct: HarvestStats.from_predictions(\n trace, test_preds, buffer_pct, target_col, start, end)\n for buffer_pct in specs.BUFFER_PCTS}\n return cls(model_params, results_dict, harvest_stats_dict)", "def make_dataset(self):\n # Read raw data\n data = self.read_raw_data()\n self.default_header = list(data.columns.values)\n # Fit the variables on the raw dataset\n self.fit(data.copy())\n return make_df(data, self.features), make_df(data, self.targets)", "def prepare_train_validation(self) -> Tuple:\n Xt, Xv, Yt, Yv = self.dataset.train_test_split_representations()\n\n Xt = self.dataset.prepare_input_samples(Xt)\n Yt = self.dataset.prepare_output_samples(Yt)\n traindataset = tf.data.Dataset.from_tensor_slices((Xt, Yt))\n traindataset = traindataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n Xv = self.dataset.prepare_input_samples(Xv)\n Yv = self.dataset.prepare_output_samples(Yv)\n validdataset = tf.data.Dataset.from_tensor_slices((Xv, Yv))\n validdataset = validdataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n return traindataset, validdataset", "def train_data_constructor(learning_files_list):\n\n if learning_files_list is None:\n train_data = None\n else:\n full_learning_data_frame = pd.concat(learning_files_list[i].raw_data for i in range(len(learning_files_list)))\n full_learning_data_frame = full_learning_data_frame.sample(frac=1)\n train_data = TrainData(selex_str_len=len(learning_files_list[0].raw_data['DNA_Id'].iloc[0]), selex_files_num=len(learning_files_list))\n train_data.set_one_hot_matrix(dna_data=full_learning_data_frame['DNA_Id'],\n primary_selex_sequence=learning_files_list[0].primary_selex_sequence)\n train_data.set_enrichment_matrix(enrichment_data=np.asarray(full_learning_data_frame['cycle_matrix']))\n return train_data", "def convert_train(ndata, ndim):\r\n print ('Converting training data ... 
')\r\n x = np.zeros([ndata, ndim])\r\n y = np.zeros([ndata])\r\n \r\n for i in range(0, len(flist) - 2):\r\n batchn = filepath + flist[i]\r\n temp = read(batchn)\r\n x[i * 10000:(i + 1) * 10000] = temp['data']\r\n y[i * 10000:(i + 1) * 10000] = temp['labels']\r\n \"\"\"\r\n i=0\r\n batchn = filepath + flist[i]\r\n\r\n temp = read(batchn)\r\n\r\n x[i * 10000:(i + 1) * 10000] = temp['data']\r\n\r\n y[i * 10000:(i + 1) * 10000] = temp['labels']\r\n \"\"\"\r\n return x, y", "def train_datas(self, batch_size):\r\n if not isinstance(batch_size, int):\r\n raise ValueError('In Dataset, batch_size should be int, get '\r\n '{}'.format(type(batch_size)))\r\n if batch_size <= 0:\r\n raise ValueError('In Dataset, batch_size should larger equal to '\r\n '1, get {}'.format(batch_size))\r\n \r\n indices = list(range(self.size))\r\n np.random.shuffle(indices)\r\n\r\n epoch_size = self.size // batch_size * batch_size\r\n self._train_datas = self._train_datas[indices][:epoch_size] # [epoch_size, ...]\r\n self._train_labels = self._train_labels[indices][:epoch_size] # [epoch_size, ...]\r\n \r\n datas = []\r\n for i in range(self.size // batch_size):\r\n # for label, we have box and landmark which is 0.\r\n datas.append([self._train_datas[i*batch_size:(i+1)*batch_size], \r\n self._train_labels[i*batch_size:(i+1)*batch_size]])\r\n return datas", "def train_model(model_dir, model_type, train_steps, train_file_name):\n model_dir = tempfile.mkdtemp() if not model_dir else model_dir\n\n m = build_estimator(model_dir, model_type)\n\n # set num_epochs to None to get infinite stream of data.\n m.train(input_fn=input_fn_train(train_file_name, num_epochs=None, shuffle=True),\n steps=train_steps)\n\n return m", "def prepare_data(dataset, train_ratio=0.8, input_dim=None, seed=10):\n # Retrieve main path of project\n dirname = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n # Download and store dataset at chosen location\n if dataset == 'Cora' or dataset == 'PubMed' or dataset == 'Citeseer':\n path = os.path.join(dirname, 'data')\n data = Planetoid(path, name=dataset, split='full')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n # data.train_mask, data.val_mask, data.test_mask = split_function(data.y.numpy())\n # data = Planetoid(path, name=dataset, split='public', transform=T.NormalizeFeatures(), num_train_per_class=20, num_val=500, num_test=1000)\n\n elif dataset == 'Amazon':\n path = os.path.join(dirname, 'data', 'Amazon')\n data = Amazon(path, 'photo')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n data.train_mask, data.val_mask, data.test_mask = split_function(\n data.y.numpy(), seed=seed)\n # Amazon: 4896 train, 1224 val, 1530 test\n \n elif dataset in ['syn1', 'syn2', 'syn4', 'syn5']: \n data = synthetic_data(\n dataset, dirname, train_ratio, input_dim)\n \n elif dataset == 'syn6':\n data = gc_data(dataset, dirname, train_ratio)\n\n elif dataset == 'Mutagenicity':\n data = gc_data(dataset, dirname, train_ratio)\n\n return data", "def __init__(self, num_classes, num_filters,\n data_format=None,\n dtype=DEFAULT_DTYPE):\n super(Model, self).__init__(\n num_classes, num_filters,\n data_format, dtype\n )", "def get_training_data_structure(x1, x2, y1, y2):\n return {input_id:{'group_1':x1, 'group_2':x2},\n output_id:{'group_1':y1, 'group_2':y2}}", "def from_dataset(cls, dataset, col_names, vocab_size, character_coverage, model_type, params):\n\n vocab = SentencePieceVocab()\n root = copy.deepcopy(dataset).build_sentencepiece_vocab(vocab, col_names, 
vocab_size, character_coverage,\n model_type, params)\n for d in root.create_dict_iterator(num_epochs=1):\n if d is None:\n raise ValueError(\"from_dataset should receive data other than None.\")\n return vocab", "def _make_example(X, y, n, target_type='int'):\n\n feature = {}\n feature['X'] = tf.train.Feature(\n float_list=tf.train.FloatList(value=X.flatten()))\n feature['n'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=n.flatten()))\n\n if target_type == 'int':\n feature['y'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=y.flatten()))\n elif target_type in ['float', 'signal']:\n y = y.astype(np.float32)\n feature['y'] = tf.train.Feature(\n float_list=tf.train.FloatList(value=y.flatten()))\n else:\n raise ValueError('Invalid target type.')\n\n # Construct the Example proto object\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n return example", "def get_training_data() -> GraphDataset:\n _load_data_if_needed()\n return training_data", "def init_training(self):\n\n if not os.path.exists(self._model_root_path):\n os.makedirs(self._model_root_path)\n\n # Only initialize once!\n if self._model is None:\n self._model = TrainableAimbotModel(self._config, self._fov,\n os.path.join(self._model_root_path, 'aimbot_model.tf'))\n\n if not os.path.isfile(self._train_data_tfrecord_path) and not os.path.isfile(self._test_data_tfrecord_path):\n # Only create if not existing\n images_labels = _get_annotations_and_images(self._image_path)\n images_labels_train, images_labels_test = train_test_split(images_labels, shuffle=True, test_size=0.20)\n\n self._model.create_tfrecords(self._train_data_tfrecord_path, images_labels_train)\n self._model.create_tfrecords(self._test_data_tfrecord_path, images_labels_test)\n\n self._train_data_set = self._model.create_dataset(self._train_data_tfrecord_path, augment=True, shuffle=True)\n self._test_data_set = self._model.create_dataset(self._train_data_tfrecord_path)", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def prepare_train(self) -> Tuple[ZLIMGS, ZLIMGS, ZLIMGS, ZLIMGS]:\n\n if self.setting == 'setting1':\n warnings.warn(\"Please note that Setting 1 should not use train eval dataset! 
\"\n \"Because its training set only contain normal samples!\")\n\n with open(self.json_path) as fp:\n ids_json = json.load(fp)\n ids_train_normal = ids_json['normal']['train']\n ids_train_defect = ids_json['defect']['train']\n\n # train\n zlimgs_train_normal = self._create_zl_imgs_given_ids(ids=ids_train_normal,\n subset=CONFIG[self.setting]['normal_train'],\n ann_type=CONFIG[self.setting]['ann_train'])\n zlimgs_train_defect = self._create_zl_imgs_given_ids(ids=ids_train_defect,\n subset=CONFIG[self.setting]['defect_train'],\n ann_type=CONFIG[self.setting]['ann_train'])\n\n # train eval\n zlimgs_train_eval_normal = self._create_zl_imgs_given_ids(ids=ids_train_normal,\n subset=CONFIG[self.setting]['normal_train'],\n ann_type=CONFIG[self.setting]['ann_eval'])\n zlimgs_train_eval_defect = self._create_zl_imgs_given_ids(ids=ids_train_defect,\n subset=CONFIG[self.setting]['defect_train'],\n ann_type=CONFIG[self.setting]['ann_eval'])\n\n return zlimgs_train_normal, zlimgs_train_defect, zlimgs_train_eval_normal, zlimgs_train_eval_defect", "def create_dataset(dataset_type, soruce, opts): \n\n p = PreProcessor(dataset_type, opts)\n\n # If we are NOT running \"implementation.py\", we read the data from file\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n path_to_data = soruce\n p.read_labelled_data(path_to_data) \n # Otherwise, we read the sentence that \"implementation.py\" gave us\n elif dataset_type == \"submit\":\n submission_sentence = soruce\n p.read_test_data(submission_sentence)\n\n # Encode all the data to a list of torchTensors\n encoded_tokens, encoded_pred, encoded_tokens_pos, encoded_labels = p.encode_all_data()\n # Create SRL dataset\n dataset = SRLDataset(x=encoded_tokens, pr=encoded_pred, p=encoded_tokens_pos, y=encoded_labels)\n print(\"{} dataset size is {}\".format(dataset_type, len(dataset)))\n\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n return dataset\n elif dataset_type == \"submit\":\n return dataset, p.list_l_original_predicates", "def create_training(logits):\r\n \r\n\r\n return train_op, loss, label_ph", "def create_dataset(series_dir: Path, downsampling: int,\n model_input_type: str) -> Tuple[tf.data.Dataset, int, int]:\n if model_input_type not in MODEL_INPUT_TYPES:\n raise ValueError(f\"Invalid model_input_type ({model_input_type})argument passed to the function.\")\n\n model_type, input_type = model_input_type.split(\"-\")\n cut_flag = \"cut\" in input_type\n batched_flag = \"batched\" in input_type\n\n def load_numpy_file(file_path: tf.Tensor) -> np.ndarray:\n data = np.load(file_path.numpy().decode())[::downsampling]\n if cut_flag:\n data = data[int(data.shape[0] * 0.75):]\n return data.astype(np.float32)\n\n example_file = next(series_dir.iterdir()).joinpath(\"StateMetric\", \"results_t200000.npy\")\n shape = tf.TensorShape(load_numpy_file(tf.convert_to_tensor(str(example_file.absolute()))).shape)\n time_steps, n_agents, n_options = shape.as_list()\n\n # The value of -1 flattens any remaining dimensions.\n model_input_shape = [-1] if (model_type == \"dnn\" and ('batched' not in input_type)) else [time_steps, -1]\n\n def data_preprocessing(exp_data: tf.Tensor) -> tf.Tensor:\n tensor = tf.reshape(exp_data, model_input_shape)\n if batched_flag:\n tensor = tf.transpose(tensor)\n return tensor, tensor\n\n def data_pipeline(file_path: str) -> tf.Tensor:\n [exp_data,] = tf.py_function(load_numpy_file, [file_path], [tf.float32,])\n exp_data.set_shape(shape)\n inputs, outputs = 
data_preprocessing(exp_data)\n return inputs, outputs\n\n file_pattern = str(series_dir) + \"/*/StateMetric/results_t200000.npy\"\n dataset = tf.data.Dataset.list_files(file_pattern=file_pattern, shuffle=False)\n dataset = dataset.map(data_pipeline, num_parallel_calls=tf.data.experimental.AUTOTUNE).cache()\n return dataset, n_agents, n_options", "def _create_model_data(\n self,\n tracker_state_features: List[List[Dict[Text, List[Features]]]],\n label_ids: Optional[np.ndarray] = None,\n entity_tags: Optional[List[List[Dict[Text, List[Features]]]]] = None,\n encoded_all_labels: Optional[List[Dict[Text, List[Features]]]] = None,\n ) -> RasaModelData:\n model_data = RasaModelData(label_key=LABEL_KEY, label_sub_key=LABEL_SUB_KEY)\n\n if label_ids is not None and encoded_all_labels is not None:\n label_ids = np.array(\n [np.expand_dims(seq_label_ids, -1) for seq_label_ids in label_ids]\n )\n model_data.add_features(\n LABEL_KEY,\n LABEL_SUB_KEY,\n [FeatureArray(label_ids, number_of_dimensions=3)],\n )\n\n attribute_data, self.fake_features = convert_to_data_format(\n tracker_state_features, featurizers=self.config[FEATURIZERS]\n )\n\n entity_tags_data = self._create_data_for_entities(entity_tags)\n if entity_tags_data is not None:\n model_data.add_data(entity_tags_data)\n else:\n # method is called during prediction\n attribute_data, _ = convert_to_data_format(\n tracker_state_features,\n self.fake_features,\n featurizers=self.config[FEATURIZERS],\n )\n\n model_data.add_data(attribute_data)\n model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)\n model_data.add_lengths(ACTION_TEXT, SEQUENCE_LENGTH, ACTION_TEXT, SEQUENCE)\n\n # add the dialogue lengths\n attribute_present = next(iter(list(attribute_data.keys())))\n dialogue_lengths = np.array(\n [\n np.size(np.squeeze(f, -1))\n for f in model_data.data[attribute_present][MASK][0]\n ]\n )\n model_data.data[DIALOGUE][LENGTH] = [\n FeatureArray(dialogue_lengths, number_of_dimensions=1)\n ]\n\n # make sure all keys are in the same order during training and prediction\n model_data.sort()\n\n return model_data", "def _training__(self):\n self.input_size, self.output_size = self.X_train.shape[1], self.y_train.shape[1]\n w1 = np.random.uniform(size=[self.input_size, self.hidden_size])\n b = np.random.uniform(size=[1, self.hidden_size])\n H = self._activation__(np.add(np.matmul(self.X_train, w1), b))\n w2 = np.dot(np.linalg.pinv(H), self.y_train)\n self.model = {\"w1\": w1, \"b\": b, \"w2\": w2}", "def init_benchmark_model(\n input_size, hidden_size, num_classes, rand_seed=None,\n **kwargs\n):\n rs = np.random.RandomState(seed=rand_seed)\n\n model = {}\n D, H, C = input_size, hidden_size, num_classes\n model['W1'] = rs.rand(D, H)\n model['b1'] = rs.rand(H)\n model['W2'] = rs.rand(H, C)\n model['b2'] = rs.rand(C)\n\n return model", "def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],\n self._opts['test_classes'],\n is_superclass=self._opts['superclass'],\n class_proportion=self._opts['class_proportion'],\n degrade_test=degrade_test,\n degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True\n degrade_val=self._opts['min_val'], # only relevant if degrade_test = True\n recurse_train=self._is_train_recursive(),\n recurse_test=self._is_inference_recursive(),\n 
num_batch_repeats=self._opts['num_repeats'],\n recurse_iterations=self._opts['recurse_iterations'],\n evaluate_step=self._opts['evaluate'],\n use_trainset_for_tests=use_trainset_for_tests,\n invert_images=self._opts['invert_images'],\n min_val=self._opts['min_val'])\n return train_dataset, test_dataset", "def create_model_and_data(dataset_name: str, use_synthetic_data: bool) ->...:\n # This `train_batch_size` is only used in training clients, not validation and\n # test clients, which are the ones we used to evaluation the personalization\n # performance. For validation and test clients, batching is applied after\n # splitting their local data into a personalization set and an eval set (i.e.,\n # inside `knn_per_avg_clients` above).\n unused_batch_size = 20\n if dataset_name == 'emnist':\n return emnist.create_model_and_data(\n num_local_epochs=1,\n train_batch_size=unused_batch_size,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'stackoverflow':\n return stackoverflow.create_model_and_data(\n num_local_epochs=1,\n train_batch_size=unused_batch_size,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'landmark':\n return landmark.create_model_and_data(\n num_local_epochs=1,\n train_batch_size=unused_batch_size,\n use_synthetic_data=use_synthetic_data)\n elif dataset_name == 'ted_multi':\n return ted_multi.create_model_and_data(\n num_local_epochs=1,\n train_batch_size=unused_batch_size,\n use_synthetic_data=use_synthetic_data)\n raise ValueError(f'Accepted dataset names: {constants.DATASET_NAMES}, but '\n f'found {dataset_name}. Please provide a valid name.')", "def train(self, training_data):\n pass", "def generate_dataset(self):\n sets = {\n \"train\": 10,\n \"test\": 5,\n }\n\n fields = {\n \"strings_list\": lambda x: str_to_ascii(self.generate_string_list(x)),\n \"data\": lambda x: np.random.randint(0, 10, (x, 10)),\n \"number\": lambda x: np.array(range(x)),\n \"field_with_a_long_name_for_printing\": lambda x: np.array(range(x)),\n }\n\n lists = {\n \"list_dummy_data\": np.array(range(10)),\n \"list_dummy_number\": np.array(range(10), dtype=np.uint8),\n }\n\n dataset = {}\n data_fields = {}\n for set_name in sets:\n dataset[set_name] = self.populate_set(sets[set_name], fields, lists)\n data_fields[set_name] = sorted(dataset[set_name].keys())\n\n return dataset, data_fields", "def __init__(self, height = None, width = None, ratio=None, type=None):\n \n self.dF = []\n self.feature = []\n self.Class = []\n self.featureNumpy = []\n self.ClassNumpy = []\n \n self.model = []\n \n self.fTrain = []\n self.fTest = []\n self.cTrain = []\n self.cTest = []", "def random_cls_dataset(request):\n set_seed()\n shape = request.param.get('shape', 10)\n size = request.param.get('size', 100)\n X, Y = make_classification(n_samples=2*size, n_features=shape, n_classes=10, n_informative=10, n_redundant=0)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.5)\n Y_train, Y_test = Y_train.astype(np.int64), Y_test.astype(np.int64)\n return (X_train, Y_train), (X_test, Y_test)", "def create_dataloader(data):\r\n input_ids = torch.LongTensor([sent['input_ids'] for sent in data])\r\n input_mask = torch.LongTensor([sent['input_mask'] for sent in data])\r\n segment_ids = torch.LongTensor([sent['segment_ids'] for sent in data])\r\n label_ids = torch.LongTensor([sent['label_ids'] for sent in data])\r\n\r\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids)\r\n\r\n train_sampler = RandomSampler(dataset)\r\n dataloader = DataLoader(dataset, 
sampler=train_sampler, batch_size=BATCH_SIZE)\r\n\r\n return dataloader", "def build_dataset(\n is_train, \n data_dir: str,\n image_size: int = 224,\n color_jitter: float = 0.4, \n aa: str = \"rand-m9-mstd0.5-inc1\",\n train_interpolation: str = \"bicubic\",\n reprob: float = 0.25, \n remode: str = \"pixel\", \n recount: int = 1\n):\n transforms = build_transform(\n is_train, \n image_size, \n color_jitter, \n aa, \n train_interpolation, \n reprob, \n remode, \n recount\n )\n\n root = os.path.join(data_dir, 'train' if is_train else 'val')\n dataset = datasets.ImageFolder(root, transform=transforms)\n nb_classes = 1000\n\n return dataset, nb_classes", "def __init__(self,\n batch_size,\n min_num_context,\n max_num_context,\n data,\n num_inst,\n testing=False):\n self._batch_size = batch_size\n self._min_num_context = min_num_context\n self._max_num_context = max_num_context\n self._data = data\n # Hardcoded for right now\n self._x_data = self._data[:,1:-1]\n self._y_data = self._data[:,-1:]\n self._testing = testing\n self._num_inst = num_inst\n self._num_pts_per_inst = tf.cast(self._data.get_shape().as_list()[0]/self._num_inst,tf.int32)\n self._x_uniq = self._x_data[:self._num_pts_per_inst]", "def _setup_trainer(tmpdir):\n SAMPLE_SIZE = 8\n TRAIN_BATCH_SIZE = 4\n VAL_BATCH_SIZE = 1\n NUM_CLASSES = 3\n SCALE = (8, 16, 32)\n ASPECT_RATIO = (.5, 1., 2.)\n NUM_ANCHORS = 9\n NUM_EPOCHS = 2\n LABEL_TEMPLATE = np.array([0., 0., 0.9, 0.9, 1, 1])\n LEARNING_RATE = .1\n\n model = YOLOV2\n grid_size = (model.GRID_H, model.GRID_W)\n num_anchors = len(SCALE) * len(ASPECT_RATIO)\n eval_epochs = NUM_CLASSES\n\n image_height = model.GRID_H * model.SCALE\n image_width = model.GRID_W * model.SCALE\n\n with tf.device('/cpu:0'):\n anchor_priors = generate_anchor_priors(grid_size, SCALE, ASPECT_RATIO)\n anchor_converter = AnchorConverter(anchor_priors)\n\n train_batch = {\n 'image':\n tf.convert_to_tensor(\n np.ones([SAMPLE_SIZE, image_height, image_width, 3]),\n dtype=tf.float32),\n 'label':\n tf.convert_to_tensor(\n np.tile(\n LABEL_TEMPLATE,\n [SAMPLE_SIZE, model.GRID_H, model.GRID_W, num_anchors, 1]),\n dtype=tf.float32),\n }\n val_batch = {\n 'image':\n tf.convert_to_tensor(\n np.ones([SAMPLE_SIZE, image_height, image_width, 3]),\n dtype=tf.float32),\n 'label':\n tf.convert_to_tensor(\n np.tile(\n LABEL_TEMPLATE,\n [SAMPLE_SIZE, model.GRID_H, model.GRID_W, num_anchors, 1]),\n dtype=tf.float32),\n }\n train_dataset = tf.data.Dataset.from_tensor_slices(train_batch)\n train_dataset = train_dataset.batch(TRAIN_BATCH_SIZE)\n train_iterator = train_dataset.make_initializable_iterator()\n val_dataset = tf.data.Dataset.from_tensor_slices(val_batch)\n val_dataset = val_dataset.batch(VAL_BATCH_SIZE)\n val_iterator = val_dataset.make_initializable_iterator()\n global_step = tf.Variable(0, name='global_step', trainable=False)\n\n with tf.device('/gpu:0'):\n model_ins = model(NUM_CLASSES, num_anchors)\n\n optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)\n\n dut = Trainer(\n model_ins,\n NUM_CLASSES,\n TRAIN_BATCH_SIZE,\n VAL_BATCH_SIZE,\n train_iterator,\n val_iterator,\n anchor_converter,\n yolo_detection_loss,\n optimizer,\n global_step,\n str(tmpdir),\n num_epochs=NUM_EPOCHS,\n evaluate_epochs=eval_epochs)\n\n return dut", "def batch_creator(batch_size, dataset_length, dataset_name):\n # batch_size = 128\n # dataset_length = 6000\n batch_mask = rng.choice(dataset_length, batch_size)\n\n batch_x = eval('x_' + dataset_name)[[batch_mask]].reshape(-1, input_num_units)\n batch_x = preproc(batch_x)\n\n if 
dataset_name == 'train':\n batch_y = eval('y_' + dataset_name)[[batch_mask]]\n batch_y = dense_to_one_hot(batch_y)\n\n return batch_x, batch_y", "def __init__(self, data_dir, input_shape=[10, 64, 64, 1], target_shape=[10, 64, 64, 1],\n as_binary=False, num_digits=2, step_length=0.1):\n dataset_size = sys.maxint\n super(MovingMNISTTrainDataset, self).__init__('train', data_dir, dataset_size,\n input_shape, target_shape,\n as_binary, num_digits, step_length)", "def get_data(self):\n if self.config['model'] == 'vggnet':\n if self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError('In dataset.py: default input not specified for this model!')", "def _create_example(self):\n source = np.random.randn(self.batch_size, self.max_decode_length,\n self.input_depth)\n source_len = np.random.randint(0, self.max_decode_length, [self.batch_size])\n target_len = np.random.randint(0, self.max_decode_length * 2,\n [self.batch_size])\n target = np.random.randn(self.batch_size,\n np.max(target_len), self.input_depth)\n labels = np.random.randint(0, self.vocab_size,\n [self.batch_size, np.max(target_len) - 1])\n\n example_ = namedtuple(\n \"Example\", [\"source\", \"source_len\", \"target\", \"target_len\", \"labels\"])\n return example_(source, source_len, target, target_len, labels)", "def prepare_data_for_training(args):\n # Form the train/test splits and write them to disk\n dataset = data.Dataset(args)\n # get image classes and image counts in each class\n label_map = dataset.get_class_info()\n class_count = len(list(label_map.values()))\n # split the data and store it in log dir\n df_train, df_test = dataset.split_dataset()\n\n # perform dataset augmentations\n image_data = augment.Augmentation(args)\n # get the data gens for training and test images\n train_data_gen, _ = image_data.map_fn_train(df_train)\n test_data_gen, _ = image_data.map_fn_test(df_test)\n\n return train_data_gen, test_data_gen, df_train, df_test, class_count", "def prepare_data(train_x, train_y, dev_x, dev_y, test_x, testy):\n train_x = torch.FloatTensor(train_x).cuda()\n train_y = torch.FloatTensor(train_y).cuda()\n dev_x = torch.FloatTensor(dev_x).cuda()\n dev_y = torch.FloatTensor(dev_y).cuda()\n test_x = torch.FloatTensor(test_x).cuda()\n test_y = torch.FloatTensor(testy).cuda()\n return train_x, train_y, dev_x, dev_y, test_x, test_y", "def create_train_model(self):\n st = LancasterStemmer()\n with open(self.data_path, encoding='utf8') as f_name:\n sentences = [[st.stem(w) for w, t in pos_tag(line.lower().split()) if 'N' in t] for line in f_name]\n sentences = [filter(lambda x: len(x) > 2, (word.strip(punctuation) for word in sentences)) for sent in sentences]\n model = Word2Vec(sentences,\n min_count=self.min_count,\n size=self.size,\n window=self.window,\n workers=4)\n model.save(self.model_path)", "def __init__(self, train_y, test_id, train_id, tags, data_dir='data/output/'):\n self.train_y = train_y\n self.test_id = test_id\n self.train_id = train_id\n self.TAGS = tags\n self.data_dir = data_dir", "def get_classification_training_data() -> Iterable[Tuple[str, Dict[str, Any]]]:\n return (_create_training_entry(*pair) for pair in TRAINING_DATA) # type: ignore", "def train(self, train_data, train_labels, batch_size=50, num_epochs=5):\n raise NotImplementedError", "def create_datasets(config, data_rng):\n # 
Compute batch size per device from global batch size.\n if config.batch_size % jax.device_count() != 0:\n raise ValueError(f'Batch size ({config.batch_size}) must be divisible by '\n f'the number of devices ({jax.device_count()}).')\n per_device_batch_size = config.batch_size // jax.device_count()\n\n dataset_builder = tfds.builder(config.dataset)\n\n def cast_int32(batch):\n img = tf.cast(batch['image'], tf.int32)\n out = batch.copy()\n out['image'] = img\n return out\n\n def drop_info(batch):\n \"\"\"Removes unwanted keys from batch.\"\"\"\n if 'id' in batch:\n batch.pop('id')\n if 'rng' in batch:\n batch.pop('rng')\n return batch\n\n if config.data_augmentation:\n should_augment = True\n should_randflip = True\n should_rotate = True\n else:\n should_augment = False\n should_randflip = False\n should_rotate = False\n\n def augment(batch):\n img = tf.cast(batch['image'], tf.float32)\n aug = None\n if should_augment:\n if should_randflip:\n img_flipped = tf.image.flip_left_right(img)\n aug = tf.random.uniform(shape=[]) > 0.5\n img = tf.where(aug, img_flipped, img)\n if should_rotate:\n u = tf.random.uniform(shape=[])\n k = tf.cast(tf.floor(4. * u), tf.int32)\n img = tf.image.rot90(img, k=k)\n aug = aug | (k > 0)\n if aug is None:\n aug = tf.convert_to_tensor(False, dtype=tf.bool)\n\n out = batch.copy()\n out['image'] = img\n return out\n\n def preprocess_train(batch):\n return cast_int32(augment(drop_info(batch)))\n\n def preprocess_eval(batch):\n return cast_int32(drop_info(batch))\n\n # Read instructions to shard the dataset!\n print('train', dataset_builder.info.splits['train'].num_examples)\n # TODO(emielh) use dataset_info instead of num_examples.\n train_split = deterministic_data.get_read_instruction_for_host(\n 'train', num_examples=dataset_builder.info.splits['train'].num_examples)\n train_ds = deterministic_data.create_dataset(\n dataset_builder,\n split=train_split,\n num_epochs=1,\n shuffle=True,\n batch_dims=[jax.local_device_count(), per_device_batch_size],\n preprocess_fn=preprocess_train,\n rng=data_rng,\n prefetch_size=tf.data.AUTOTUNE,\n drop_remainder=True\n )\n\n # TODO(emielh) check if this is necessary?\n\n # Test batches are _not_ sharded. In the worst case, this simply leads to some\n # duplicated information. 
In our case, since the elbo is stochastic we get\n # multiple passes over the test data.\n if config.test_batch_size % jax.local_device_count() != 0:\n raise ValueError(f'Batch size ({config.batch_size}) must be divisible by '\n f'the number of devices ({jax.local_device_count()}).')\n test_device_batch_size = config.test_batch_size // jax.local_device_count()\n\n eval_ds = deterministic_data.create_dataset(\n dataset_builder,\n split='test',\n # Repeated epochs for lower variance ELBO estimate.\n num_epochs=config.num_eval_passes,\n shuffle=False,\n batch_dims=[jax.local_device_count(), test_device_batch_size],\n preprocess_fn=preprocess_eval,\n # TODO(emielh) Fix this with batch padding instead of dropping.\n prefetch_size=tf.data.AUTOTUNE,\n drop_remainder=False)\n\n return dataset_builder.info, train_ds, eval_ds", "def GetDataset():\n x_train = []\n x_test = []\n y_train = []\n y_test = []\n\n classes1 = set()\n classes2 = set()\n for f in GetInputFiles():\n class1, class2, fold, fname = f.split('\\\\')[-4:]\n classes1.add(class1)\n classes2.add(class2)\n class1 = class1.split('_')[0]\n class2 = class2.split('_')[0]\n\n x = ReadAndTokenize(f)\n y = [int(class1 == 'positive'), int(class2 == 'truthful')]\n if fold == 'fold4':\n x_test.append(x)\n y_test.append(y)\n else:\n x_train.append(x)\n y_train.append(y)\n\n ### Make numpy arrays.\n x_test = MakeDesignMatrix(x_test)\n x_train = MakeDesignMatrix(x_train)\n y_test = numpy.array(y_test, dtype='float32')\n y_train = numpy.array(y_train, dtype='float32')\n\n dataset = (x_train, y_train, x_test, y_test)\n with open('dataset.pkl', 'wb') as fout:\n pickle.dump(dataset, fout)\n return dataset", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def _dtrain(self, X_train, y_train):\n if isinstance(X_train, np.ndarray):\n self.X_train = pd.DataFrame(\n X_train, columns=[f\"F_{i}\" for i in range(X_train.shape[1])]\n )\n elif isinstance(X_train, pd.DataFrame):\n self.X_train = X_train\n else:\n raise TypeError(\n \"The input X_train must be numpy array or pandas DataFrame.\"\n )\n\n if isinstance(y_train, np.ndarray) or isinstance(y_train, list):\n self.y_train = y_train\n else:\n raise TypeError(\"The input y_train must be numpy array or list.\")\n\n return self.X_train, self.y_train", "def init_benchmark_data(\n num_inputs, input_size, num_classes, rand_seed=None,\n **kwargs\n):\n N, D, C = num_inputs, input_size, num_classes\n\n rs = np.random.RandomState(seed=rand_seed)\n X = rs.rand(N, D)\n y = rs.choice(C, size=N)\n return X, y", "def build_and_train(hype_space, save_best_weights=True):\n\n K.set_learning_phase(1)\n model = build_model(hype_space)\n\n time_str = datetime.now().strftime(\"%Y_%m_%d-%H_%M\")\n model_weight_name = MODEL_NAME+\"-\" + time_str\n\n callbacks = []\n\n # Weight saving callback:\n if save_best_weights:\n weights_save_path = os.path.join(\n WEIGHTS_DIR, '{}.hdf5'.format(model_weight_name))\n print(\"Model's weights will be saved to: {}\".format(weights_save_path))\n if not os.path.exists(WEIGHTS_DIR):\n os.makedirs(WEIGHTS_DIR)\n\n callbacks.append(ModelCheckpoint(\n filepath=weights_save_path,\n monitor='val_accuracy',\n verbose = 1,\n save_best_only=True, mode='max'))\n\n callbacks.append(EarlyStopping(\n monitor='val_accuracy',\n patience=10, verbose=1, mode='max'))\n\n callbacks.append(ReduceLROnPlateau(\n monitor='val_accuracy', factor=0.5,\n patience=10, verbose=1, mode='max', 
cooldown = 2))\n\n # TensorBoard logging callback (see model 6):\n log_path = None\n\n emb_dim = int(hype_space['embed_dim'])\n window_size = int(hype_space['window_size'])\n nb_neg = int(hype_space['negative'])\n nb_iter = int(hype_space['iter'])\n n_gram = int(hype_space['n_gram'])\n #model = int(hype_space['model'])\n #tokens = int(hype_space['tokens'])\n mod = 0\n\n #standardize train and val profiles\n X_train, y_train, X_aug = get_netsurf_data('train_full')\n\n X_train, y_train, X_aug, X_val, y_val, X_aug_val = train_val_split_(X_train, y_train, X_aug)\n\n ## load data and get embedding form train data, embed train+val\n index2embed = get_embedding(emb_dim, window_size, nb_neg, nb_iter, n_gram, mod,\n seqs=X_train)\n\n X_train_embed = embed_data(X_train, index2embed, emb_dim, n_gram)\n X_val_embed = embed_data(X_val, index2embed, emb_dim, n_gram)\n\n X_train_aug = [X_train_embed, X_aug]\n X_val_aug = [X_val_embed, X_aug_val]\n\n print('We have '+str(len(callbacks))+' callbacks.')\n\n # Train net:\n history = model.fit(\n X_train_aug,\n y_train,\n batch_size=batch_size,\n epochs=epochs,\n shuffle=True,\n verbose=2,\n callbacks=callbacks,\n validation_data=(X_val_aug, y_val)\n ).history\n\n\n # Test net:\n score = evaluate_model(model, weights_save_path,\n emb_dim, n_gram, index2embed)\n K.set_learning_phase(0)\n print(\"\\n\\n\")\n min_loss = min(history['val_loss'])\n max_acc = max(history['val_accuracy'])\n number_of_epochs_it_ran = len(history['loss'])\n\n model_name = MODEL_NAME+\"_{}_{}\".format(str(max_acc), time_str)\n print(\"Model name: {}\".format(model_name))\n\n print('Score: ', score)\n result = {\n # We plug \"-val_accuracy\" as a minimizing metric named 'loss' by Hyperopt.\n 'loss': -max_acc,\n 'real_loss': min_loss,\n 'cb513': score['cb513_full'],\n 'casp12':score['casp12_full'],\n 'ts115':score['ts115_full'],\n 'nb_epochs': number_of_epochs_it_ran,\n # Misc:\n 'model_name': model_name,\n 'space': hype_space,\n 'status': STATUS_OK\n }\n\n print(\"RESULT:\")\n print_json(result)\n\n # save test results to logfile\n f = open(\"/nosave/lange/cu-ssp/model_neu/optimized/logs/test_results_mod3_w2v.txt\", \"a+\")\n res = \"\"\n for k, v in score.items():\n res += str(k)+\": \"+str(v)+\"\\t\"\n f.write(\"\\n\"+str(model_weight_name)+\"\\t\"+ res)\n f.close()\n\n return model, model_name, result, log_path", "def get_dataset(data_pars=None, **kw):\n\n\n print('Loading data...')\n maxlen = data_pars['data_info']['maxlen']\n\n loader = DataLoader(data_pars)\n loader.compute()\n dataset, internal_states = loader.get_data()\n\n # return dataset\n Xtrain, ytrain, Xtest, ytest = dataset\n Xtrain = sequence.pad_sequences(Xtrain, maxlen=maxlen)\n Xtest = sequence.pad_sequences(Xtest, maxlen=maxlen)\n return Xtrain, Xtest, ytrain, ytest", "def __init__(self, **kwargs):\n is_training = kwargs.get('is_training', True)\n rootfolder = kwargs['rootfolder']\n dtype = kwargs.get('dtype', np.float64)\n self._load_mnist(rootfolder, is_training, dtype)\n # normalize data.\n self._data /= 255.\n ndarraydata.NdarrayDataLayer.__init__(\n self, sources=[self._data, self._label], **kwargs)", "def make_data(config, data, label):\n if not os.path.isdir(os.path.join(os.getcwd(), config.checkpoint_dir)):\n os.makedirs(os.path.join(os.getcwd(), config.checkpoint_dir))\n\n if config.is_train:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir +'/train.h5')\n else:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir +'/test.h5')\n\n with h5py.File(savepath, 'w') as hf:\n 
hf.create_dataset('data', data=data)\n hf.create_dataset('label', data=label)", "def __init__(self, data_path):\r\n\t\tfile_names = ['data_batch_%d' % i for i in range(1,6)]\r\n\t\tfile_names.append('test_batch')\r\n\r\n\t\tX = []\r\n\t\ty = []\r\n\t\tfor file_name in file_names:\r\n\t\t\twith open(data_path + file_name) as fin:\r\n\t\t\t\tdata_dict = cPickle.load(fin)\r\n\t\t\tX.append(data_dict['data'].ravel())\r\n\t\t\ty = y + data_dict['labels']\r\n\r\n\t\tself.X = np.asarray(X).reshape(60000, 32*32*3)\r\n\t\tself.y = np.asarray(y)\r\n\r\n\t\tfin = open(data_path + 'batches.meta')\r\n\t\tself.LABEL_NAMES = cPickle.load(fin)['label_names']\r\n\t\tfin.close()", "def _make_data(self):\n pdf_datasets_all = make_pdf_datasets(self.pdf_list, self.xlims, self.ylims, self.tlims, self.dims, 9)\n self.pdf_dataset = np.concatenate(pdf_datasets_all, axis = 0)\n self.PDE_dataset = make_PDE_dataset(self.num_collocation, self.xlims, self.ylims, self.tlims, self.dims)\n self.BC_dataset = make_BC_dataset(self.num_BC, self.xlims, self.ylims, self.tlims, self.dims)", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters" ]
[ "0.62350506", "0.6225271", "0.6160196", "0.6157232", "0.6134622", "0.61098945", "0.6080254", "0.606203", "0.60147977", "0.60086304", "0.59555346", "0.5948243", "0.5919351", "0.59027153", "0.5900304", "0.58960056", "0.5879972", "0.58337307", "0.5829074", "0.5811363", "0.58097553", "0.5799441", "0.57921666", "0.57788956", "0.5762118", "0.5751669", "0.5749159", "0.5743394", "0.57385665", "0.57341266", "0.572453", "0.5718826", "0.5718074", "0.56916916", "0.56753975", "0.5659432", "0.56507385", "0.5643109", "0.5631704", "0.56295276", "0.5616759", "0.56162864", "0.56035227", "0.56004894", "0.5591612", "0.55906963", "0.5583688", "0.5579562", "0.5557759", "0.55397433", "0.5537106", "0.55329186", "0.55325365", "0.55278605", "0.5527312", "0.5526995", "0.5526651", "0.5521351", "0.55111676", "0.5510696", "0.5501731", "0.5500612", "0.5493343", "0.5488099", "0.54866856", "0.548325", "0.5474771", "0.5473871", "0.54664403", "0.5464288", "0.5464084", "0.5462611", "0.54591995", "0.5455887", "0.54525316", "0.54508966", "0.5442782", "0.54365546", "0.54325444", "0.54302263", "0.5428365", "0.54278564", "0.54245317", "0.54230976", "0.5419379", "0.54189485", "0.5416819", "0.54130095", "0.5408197", "0.5405828", "0.5405431", "0.540197", "0.5398561", "0.5392598", "0.5388772", "0.53863055", "0.53848004", "0.5374315", "0.53732663", "0.53684384" ]
0.8095568
0
Validates that Xs, Ys, Yvars, and metric names all have equal lengths.
def validate_data_format( Xs: List[Tensor], Ys: List[Tensor], Yvars: List[Tensor], metric_names: List[str] ) -> None: if len({len(Xs), len(Ys), len(Yvars), len(metric_names)}) > 1: raise ValueError( # pragma: no cover "Lengths of Xs, Ys, Yvars, and metric_names must match. Your " f"inputs have lengths {len(Xs)}, {len(Ys)}, {len(Yvars)}, and " f"{len(metric_names)}, respectively." )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_consistent_length(y_true: List[List[str]], y_pred: List[List[str]]):\n len_true = list(map(len, y_true))\n len_pred = list(map(len, y_pred))\n is_list = set(map(type, y_true)) | set(map(type, y_pred))\n\n if len(y_true) != len(y_pred) or len_true != len_pred:\n message = 'Found input variables with inconsistent numbers of samples:\\n{}\\n{}'.format(len_true, len_pred)\n raise ValueError(message)", "def check_consistent_length(arrays: Sequence[npt.ArrayLike]) -> None:\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of\" \" samples: %r\" % [int(length) for length in lengths]\n )", "def validate_X_y(X: List[str], y: List[Any]):\n if len(X) != len(y):\n raise ValueError(\n f\"X and y must have the same length; X has length {len(X)}, and y has length {len(y)}\"\n )", "def _validate_length_features_and_labels(\n model_endpoint: mlrun.common.schemas.ModelEndpoint,\n ):\n\n # Getting the length of label names, feature_names and feature_stats\n len_of_label_names = (\n 0\n if not model_endpoint.spec.label_names\n else len(model_endpoint.spec.label_names)\n )\n len_of_feature_names = len(model_endpoint.spec.feature_names)\n len_of_feature_stats = len(model_endpoint.status.feature_stats)\n\n if len_of_feature_stats != len_of_feature_names + len_of_label_names:\n raise mlrun.errors.MLRunInvalidArgumentError(\n f\"The length of model endpoint feature_stats is not equal to the \"\n f\"length of model endpoint feature names and labels \"\n f\"feature_stats({len_of_feature_stats}), \"\n f\"feature_names({len_of_feature_names}),\"\n f\"label_names({len_of_label_names}\"\n )", "def _check_inputlengths(self):\n # Check x and y have more than 1 item, and x and y are equal length\n if not len(self.x) > 1:\n raise ValueError(\"Route input 'x' must contain more than 1 item\")\n\n if not (len(self.y) > 1):\n raise ValueError(\"Route input 'y' must contain more than 1 item\")\n\n if not (len(self.x) == len(self.y)):\n raise ValueError(\"Route inputs 'x' and 'y' must be of equal length\")\n\n # Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (len(v) == len(self.x)):\n raise ValueError(\"Route input 'z' must be of equal length to 'x' and 'y'\")", "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). 
\"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )", "def validate_input(self):\n self._validate_limits_cols_prefixed()\n self._validate_fillna_cols_prefixed()\n self._validate_ratio_input()", "def _check_variables(datasets, necessary_short_names):\n dataset_name = datasets[0]['dataset']\n necessary_short_names = set(necessary_short_names)\n short_names = set(group_metadata(datasets, 'short_name').keys())\n if short_names != necessary_short_names:\n raise ValueError(\n f\"Expected variables {necessary_short_names} for dataset \"\n f\"'{dataset_name}', got {short_names}\")", "def _validate_XY(X, Y):\n try:\n for inp in [X, Y]:\n assert isinstance(inp, torch.Tensor)\n assert inp.dtype is torch.float or inp.dtype is torch.double\n assert len(inp.shape) == 2\n assert X.dtype is Y.dtype\n assert X.shape[0] == Y.shape[0]\n except AssertionError:\n raise AttributeError(\n \"invalid inputs: X and Y should be float/double tensors of shape \"\n \"(n, d) and (n, m) respectively, where n is the number of samples, \"\n \"d is the number of features, and m is the number of outputs\"\n )", "def validate_ndarray(ndarray, expected_dtypes, expected_dimentions, name):\n\tvalid_dtype_assertion(expected_dtypes, ndarray.dtype, name)\n\tvalid_ndim_assertion(expected_dimentions, ndarray.ndim, name)", "def check_param(self):\n check_tuple = (\"float16\", \"float32\", \"int32\")\n check_shape(self.shape_x, param_name=\"x\")\n check_shape(self.shape_indices, param_name=\"indices\")\n check_shape(self.shape_v, param_name=\"v\")\n check_dtype(self.dtype_x, check_tuple, param_name=\"x\")\n check_dtype(self.dtype_indices, (\"int32\",), param_name=\"indices\")\n check_dtype(self.dtype_v, check_tuple, param_name=\"v\")\n if len(self.shape_x) != len(self.shape_v):\n raise RuntimeError(\"The number of dimension x must\"\n \" be same as dimension v\")\n\n if self.shape_v[0] != self.shape_indices[0]:\n raise RuntimeError(\"The length of rank 0 of tensor v must\"\n \" be the same as length of indices\")\n\n if len(self.shape_indices) != 1:\n raise RuntimeError(\"The length of indices only support 1\")\n for i in range(1, len(self.shape_v)):\n if self.shape_x[i] != self.shape_v[i]:\n if not self.check_special():\n raise RuntimeError(\"The length of each rank of tensor x\"\n \" must be the same as length of\"\n \" each or next rank of tensor v\")", "def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])", "def check_consistent_shape(X_train, y_train, X_test, y_test, y_train_pred,\n y_test_pred):\n\n # check input data shapes are consistent\n X_train, y_train = check_X_y(X_train, y_train)\n X_test, y_test = check_X_y(X_test, y_test)\n\n y_test_pred = column_or_1d(y_test_pred)\n y_train_pred = column_or_1d(y_train_pred)\n\n check_consistent_length(y_train, y_train_pred)\n check_consistent_length(y_test, y_test_pred)\n\n if X_train.shape[1] != X_test.shape[1]:\n raise ValueError(\"X_train {0} and X_test {1} have different number \"\n \"of features.\".format(X_train.shape, X_test.shape))\n\n return X_train, y_train, X_test, y_test, y_train_pred, y_test_pred", "def _autocheck_dimensions(self):\n # W dimensions check list\n assert len(self.W.shape) == 2, f\"W shape should be (N, N) but is {self.W.shape}.\"\n assert self.W.shape[0] == self.W.shape[1], f\"W shape should be (N, N) but is {self.W.shape}.\"\n\n # Win dimensions check list\n assert len(self.Win.shape) == 2, f\"Win shape should be (N, input) but is {self.Win.shape}.\"\n err = f\"Win shape should be 
({self.W.shape[1]}, input) but is {self.Win.shape}.\"\n assert self.Win.shape[0] == self.W.shape[0], err\n\n # Wout dimensions check list\n assert len(self.Wout.shape) == 2, f\"Wout shape should be (output, nb_states) but is {self.Wout.shape}.\"\n nb_states = self.Win.shape[1] + self.W.shape[0] + 1 if self.use_raw_inp else self.W.shape[0] + 1\n err = f\"Wout shape should be (output, {nb_states}) but is {self.Wout.shape}.\"\n assert self.Wout.shape[1] == nb_states, err\n\n # Wfb dimensions check list\n if self.Wfb is not None:\n assert len(self.Wfb.shape) == 2, f\"Wfb shape should be (input, output) but is {self.Wfb.shape}.\"\n err = f\"Wfb shape should be ({self.Win.shape[0]}, {self.Wout.shape[0]}) but is {self.Wfb.shape}.\"\n assert (self.Win.shape[0],self.Wout.shape[0]) == self.Wfb.shape, err", "def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")", "def check_x_and_y_axis_len(self, x_axis, y_axis):\n if x_axis ==0: \n raise ValueError(\"Error! SOM X-Axis is 0!\")\n if y_axis==0:\n raise ValueError(\"Error! SOM Y-Axis is 0!\")", "def validate_common(ndarray, name):\n\tvalidate_ndarray(ndarray,(np.float, np.int), (2,) , name)", "def check_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert X.shape[list(X.dims).index(x_lat_dim)] == len(X.coords[x_lat_dim].values), \"XCast requires a dataset's x_lat_dim coordinate to be the same length as its x_lat_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_lon_dim)] == len(X.coords[x_lon_dim].values), \"XCast requires a dataset's x_lon_dim coordinate to be the same length as its x_lon_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_sample_dim)] == len(X.coords[x_sample_dim].values), \"XCast requires a dataset's x_sample_dim coordinate to be the same length as its x_sample_dim dimension\"\n\tassert X.shape[list(X.dims).index(x_feature_dim)] == len(X.coords[x_feature_dim].values), \"XCast requires a dataset's x_feature_dim coordinate to be the same length as its x_feature_dim dimension\"", "def _validate_columns(self, names):\n if not is_list_like(names):\n raise ValueError(\"Columns should be list-like\")\n\n if len(set(names)) != len(names):\n raise ValueError(\"Duplicate column names\")\n\n if self._data and len(names) != len(self._data[0]):\n raise ValueError(\"Invalid columns length\")", "def validate_xy(x_train, y_train):\n try:\n x_train = x_train.astype('float64')\n except ValueError:\n raise ValueError('x_train should only contain numerical data.')\n\n if len(x_train.shape) < 2:\n raise ValueError('x_train should at least has 2 dimensions.')\n\n if x_train.shape[0] != y_train.shape[0]:\n raise ValueError('x_train and y_train should have the same number of instances.')", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def _validate_dimensions(config):\n logging.info(\"Checking provided dimensions are valid\")\n for feature in config.get(\"test-suites\").values():\n for test_name, test in feature.items():\n for dimensions_config in test[\"dimensions\"]:\n _validate_schedulers(config, dimensions_config.get(\"schedulers\", []))\n if [] in dimensions_config.values():\n logging.error(\"Values assigned to dimensions in test %s cannot be empty\", test_name)\n raise AssertionError", "def check_param(self):\n if scipy.ndim(self.param['initial_heading'].shape) > 
1:\n raise(ValueError, 'initial_heading must have ndim=1')\n\n equal_shape_list = ['x_start_position','y_start_position','flight_speed','release_time']\n for item in equal_shape_list:\n if self.param[item].shape != self.param['initial_heading'].shape:\n raise(ValueError, '{0}.shape must equal initial_heading.shape'.format(item))", "def _verify_data(inputs, targets):\n check_value_type('inputs', inputs, Tensor)\n if len(inputs.shape) != 4:\n raise ValueError(f'Argument inputs must be 4D Tensor, but got {len(inputs.shape)}D Tensor.')\n check_value_type('targets', targets, (Tensor, int, tuple, list))\n if isinstance(targets, Tensor):\n if len(targets.shape) > 2:\n raise ValueError('Dimension invalid. If `targets` is a Tensor, it should be 0D, 1D or 2D. '\n 'But got {}D.'.format(len(targets.shape)))\n if targets.shape and len(targets) != len(inputs):\n raise ValueError(\n 'If `targets` is a 2D, 1D Tensor, it should have the same length as inputs {}. But got {}.'.format(\n len(inputs), len(targets)))", "def test_spaces(self):\n self.assertTrue(validate_measure_input('1 ', self.measures))\n self.assertFalse(validate_measure_input('1 1', self.measures))", "def _checkSize(X1,X2):\n \n if len(X1) != len(X2):\n raise ValueError, 'Lists are differnt lengths'", "def _check_values_len(self, data_batch: Dict[str, List[str]]):\n values_len = [len(v) for _, v in data_batch.items()]\n unique_len = len(set(values_len))\n assert unique_len == 1, \"Length of values are not consistent across\"", "def validate(self):\n variables = ['waterThickness', 'waterPressure']\n compare_variables(test_case=self, variables=variables,\n filename1='full_run/output.nc',\n filename2='restart_run/output.nc')", "def _check_data(self, labels, fluxes, flux_uncertainties, wavelengths=None):\n\n fluxes = np.atleast_2d(fluxes)\n flux_uncertainties = np.atleast_2d(flux_uncertainties)\n\n if len(labels) != fluxes.shape[0]:\n raise ValueError(\"the fluxes should have shape (n_stars, n_pixels) \"\n \"where n_stars is the number of rows in the labels array\")\n\n if fluxes.shape != flux_uncertainties.shape:\n raise ValueError(\"the flux and flux uncertainties array should have\"\n \" the same shape\")\n\n if len(labels) == 0:\n raise ValueError(\"no stars (labels) given\")\n\n if wavelengths is not None:\n wavelengths = np.atleast_1d(wavelengths)\n if wavelengths.size != fluxes.shape[1]:\n raise ValueError(\"mis-match between number of wavelength values\"\n \" ({0}) and flux values ({1})\".format(\n wavelengths.size, fluxes.shape[1]))\n\n return None", "def _validate_elem_length(max_num_levels, elems_flat, axis):\n assertions = []\n\n elem_length = ps.shape(elems_flat[0])[axis]\n\n # The default size limit will overflow a 32-bit int, so make sure we're\n # using 64-bit.\n size_limit = 2**(ps.cast(max_num_levels, np.int64) + 1)\n enough_levels = ps.less(ps.cast(elem_length, np.int64), size_limit)\n enough_levels_ = tf.get_static_value(enough_levels)\n if enough_levels_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n enough_levels, True,\n message='Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis=={}`.'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit)))\n elif not enough_levels_:\n raise ValueError(\n 'Input `Tensor`s must have dimension less than'\n ' `2**(max_num_levels + 1)` along `axis == {}`'\n ' (saw: {} which is not less than 2**{} == {})'.format(\n axis,\n elem_length,\n max_num_levels,\n size_limit))\n\n 
is_consistent = ps.reduce_all([ps.equal(ps.shape(elem)[axis], elem_length)\n for elem in elems_flat[1:]])\n\n is_consistent_ = tf.get_static_value(is_consistent)\n if is_consistent_ is None:\n assertions.append(\n tf.debugging.assert_equal(\n is_consistent, True,\n message='Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat])))\n elif not is_consistent_:\n raise ValueError(\n 'Inputs must have the same size along the given axis.'\n ' (saw: {})'.format([elem.shape for elem in elems_flat]))\n return elem_length, assertions", "def _shape_check(self, X, y):\n if not len(y.shape) > 1:\n raise RuntimeError(\"The shape of y is incorrect.\")\n if y.shape != X.shape[:-1]:\n raise RuntimeError(\"X and y must have the same number of \" +\n \"samples and microstructure shape.\")\n if X.shape[-1] != 3:\n raise RuntimeError(\"X must have 3 continuous local states \" +\n \"(euler angles)\")", "def validate_dimensions(self, dimensions):\n\n #safety checking\n if len(dimensions) != self.dimensionality:\n raise ValueError(f\"The number of dimensions provided {len(dimensions)}\"\n f\"do not match that of this coordinate system \"\n f\"{self.dimensionality}.\")\n\n if not all(isinstance(elem, int) for elem in dimensions):\n raise ValueError(f\"Not all dimensions are ints {dimensions}\")\n\n if not all(elem > 0 for elem in dimensions):\n raise ValueError(f\"Dimensions must be greater than 1 {dimensions}\")\n\n if not checkallequal(dimensions):\n raise ValueError(f\"Not all dimensions are equal {dimensions}. They \"\n f\"must be equal. This will be changed in a future version\")", "def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)", "def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional'\n\tassert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X'\n\tassert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on X'\n\tassert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X'\n\tassert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a dimension on X'", "def validate_data(self, y, x=None, verbose=True):\n # Check dimensions\n if not self.ODE_order:\n if not y.ndim == 2:\n raise ValueError(\"y-array is not 2 dimensional, if ODE and you didn't provide y then x is one dim\")\n\n if verbose and y.shape[0] < y.shape[1]:\n print(\"Warning: y-array has more series (columns) than samples (rows). 
Check if this is correct\")\n\n # Checks for x\n if self.ODE_order and x is None:\n assert False\n if not x is None:\n\n # Check dimensions\n if not x.ndim == 2:\n raise ValueError(\"x-array is not 2 dimensional\")\n\n # Check shape equality\n if x.shape[0] != y.shape[0]:\n raise ValueError(\"y-array and x-array have different number of samples (rows)\")", "def validate_dataset(self):\n pass", "def _CheckLengthOrExpand(param_per_dataset, expected_len, param_name):\n if param_per_dataset is None:\n return None\n if isinstance(param_per_dataset, list):\n if len(param_per_dataset) != expected_len:\n raise ValueError(f'{param_name} doesn\\'t match the size of '\n f'eval_dataset_names: {len(param_per_dataset)} vs '\n f'{expected_len}.')\n else:\n param_per_dataset = [param_per_dataset] * expected_len\n return param_per_dataset", "def check_dims(self, data):\n if np.ndim(data) != 2:\n raise ValueError('Input data must be a two dimensional numpy array. '\n 'Data received has shape (%g, %g).' % data.shape)", "def _verify_integrity(self):\n if len(self.data.shape) != 1:\n raise ValueError(\n \"Data array must be one dimensional \"\n \"(is {})\".format(len(self.data.shape))\n )\n\n if len(self.shape.shape) != 2:\n raise ValueError(\n \"Shape array must be two dimensional \"\n \"(is {})\".format(len(self.shape.shape))\n )\n\n shape_size, data_size = self._cumsum[-1], self.data.size\n\n if not shape_size == data_size:\n raise ValueError(\n \"Size of data ({data_size}) does not match that \"\n \"of the given shapes ({shape_size}).\".format(\n data_size=data_size, shape_size=shape_size\n )\n )", "def _check_input_data(self):\n\n n0, n1, corr, pval = np.nan, np.nan, np.nan, np.nan\n\n error_code_test = 0\n error_text_test = 'No error occurred'\n try:\n error_code_test, error_msg = self._check_notnull()\n if error_code_test == 0:\n error_code_test, error_msg, n0, n1 = self._check_group_obs(self.test_min_data)\n if error_code_test == 0:\n error_code_test, error_msg, corr, pval = \\\n self._check_spearman_corr(self.min_corr, self.max_p)\n if error_code_test != 0:\n error_text_test = str(error_msg)\n except:\n error_code_test = 9\n error_text_test = 'Unknown Error'\n\n self.checkstats = {'n0': n0, 'n1': n1, 'frame_spearmanR': corr, 'frame_corrPval': pval}\n\n self.error_code_test = error_code_test\n self.error_text_test = error_text_test\n\n return self.error_code_test, self.error_text_test", "def _validate_metrics(self, metrics):\n if metrics is None:\n raise ValueError(\"Expected metrics to be a list. 
Was None.\")\n if any([self.namespace != m.metric.namespace for m in metrics]):\n raise ValueError(\n f\"Metrics ({metrics}) and metrics provider namespace \"\n f\"{self.namespace} do not match.\"\n )", "def _validate_parameters(self):\n self.target_metric = get_formatted_target_metric(\n self.target_metric, G.Env.metrics, default_dataset=\"oof\"\n )", "def _validate_cols(cols):\n\n\tif cols is not None and len(cols) < 2:\n\t\traise ValueError('too few features')", "def _check_dimensions(self, a, b):\n units_a = self._get_units(a)\n units_b = self._get_units(b)\n dim_a = units_a.dimensions\n dim_b = units_b.dimensions\n if dim_a != dim_b:\n raise UnitConversionError(units_a, dim_a, units_b, dim_b)", "def __verify_arguments(self):\n if len(self.__pointer_data) == 0:\n raise ValueError(\n \"Input data is empty (size: '%d').\" % len(self.__pointer_data)\n )\n\n if self.__number_clusters <= 0:\n raise ValueError(\n \"Amount of cluster (current value: '%d') for allocation should be greater than 0.\"\n % self.__number_clusters\n )\n\n if self.__numlocal < 0:\n raise ValueError(\n \"Local minima (current value: '%d') should be greater or equal to 0.\"\n % self.__numlocal\n )\n\n if self.__maxneighbor < 0:\n raise ValueError(\n \"Maximum number of neighbors (current value: '%d') should be greater or \"\n \"equal to 0.\" % self.__maxneighbor\n )", "def test_inconsistent_sizes(multiple_linear_regression_data):\n # TODO: X = 5 x 2, y = 7 x 1\n # TODO: X = 5 x 4, y = 5 x 1: n=5, k=5: should work\n # TODO: X = 5 x 5, y = 5 x 1: n=5, k=5+1 (with intercept): should fail\n X, y = multiple_linear_regression_data\n pass", "def check_input_dimension(self, data):\n if len(data[0]) != self.input_dimension:\n raise ValueError(\"Received {} features, expected {}.\".format(self.input_dimension, len(data[0])))", "def check_variables(self, model):\n for rhs_var in model.rhs.keys():\n if rhs_var.name in model.variables.keys():\n var = model.variables[rhs_var.name]\n\n different_shapes = not np.array_equal(\n model.rhs[rhs_var].shape, var.shape\n )\n\n not_concatenation = not isinstance(var, pybamm.Concatenation)\n\n not_mult_by_one_vec = not (\n isinstance(\n var, (pybamm.Multiplication, pybamm.MatrixMultiplication)\n )\n and (\n pybamm.is_matrix_one(var.left)\n or pybamm.is_matrix_one(var.right)\n )\n )\n\n if different_shapes and not_concatenation and not_mult_by_one_vec:\n raise pybamm.ModelError(\n \"variable and its eqn must have the same shape after \"\n \"discretisation but variable.shape = \"\n \"{} and rhs.shape = {} for variable '{}'. 
\".format(\n var.shape, model.rhs[rhs_var].shape, var\n )\n )", "def _check_input(self, X):\n symbols = np.concatenate(X)\n if len(symbols) == 1: # not enough data\n raise ValueError(\"expected at least 1 observation \"\n \"but none found.\")\n elif (symbols < 0).any(): # contains negative integers\n raise ValueError(\"expected non-negative features \"\n \"for each observation.\")\n elif X.shape[1] > 1: # contains to many features\n raise ValueError(\"expected only 1 feature but got {0} \"\n \"for each observation.\".format(X.shape[1]))\n else:\n return True", "def _validate_add_dims(self, other):\n # For adding we only require that operators have the same total\n # dimensions rather than each subsystem dimension matching.\n if self.dim != other.dim:\n raise QiskitError(\n \"Cannot add operators with different shapes\"\n \" ({} != {}).\".format(self.dim, other.dim))", "def _check_consistency(self) -> None:\n lbl_vals_from_metadata = set(self.infos.keys())\n lbl_vals_from_data = set(np.unique(self.data))\n # TODO: check if numerical datatype shenanigans ruin the day\n # i.e. something along the lines of 1.0 != 1\n symm_diff = lbl_vals_from_data ^ lbl_vals_from_metadata\n\n if len(symm_diff) != 0:\n msg = (f'Label mismatch between data and metadata! Expected vanishing '\n f'symmetric difference but got: {symm_diff}')\n raise ValueError(msg)", "def __verify_arguments(self):\r\n if len(self.__pointer_data) == 0:\r\n raise ValueError(\"Input data is empty (size: '%d').\" % len(self.__pointer_data))\r\n\r\n if self.__number_clusters <= 0:\r\n raise ValueError(\"Amount of cluster (current value: '%d') for allocation should be greater than 0.\" %\r\n self.__number_clusters)\r\n\r\n if self.__numlocal < 0:\r\n raise ValueError(\"Local minima (current value: '%d') should be greater or equal to 0.\" % self.__numlocal)\r\n\r\n if self.__maxneighbor < 0:\r\n raise ValueError(\"Maximum number of neighbors (current value: '%d') should be greater or \"\r\n \"equal to 0.\" % self.__maxneighbor)", "def _data_params_validation(self) -> None:\n extra_regressor_names = set(self.params._reqd_regressor_names)\n # univariate case\n if self.data.is_univariate():\n if len(extra_regressor_names) != 0:\n msg = (\n f\"Missing data for extra regressors: {self.params._reqd_regressor_names}! 
\"\n \"Please include the missing regressors in `data`.\"\n )\n raise ValueError(msg)\n # multivariate case\n else:\n value_cols = set(self.data.value.columns)\n if \"y\" not in value_cols:\n msg = \"`data` should contain a column called `y` representing the responsive value.\"\n raise ValueError(msg)\n if not extra_regressor_names.issubset(value_cols):\n msg = f\"`data` should contain all columns listed in {extra_regressor_names}.\"\n raise ValueError(msg)\n # validate cap\n if (self.params.cap is True) and (\"cap\" not in self.data.value.columns):\n msg = \"`data` should contain a column called `cap` representing the cap when `cap = True`.\"\n _error_msg(msg)\n # validate floor\n if (self.params.floor is True) and (\"floor\" not in self.data.value.columns):\n msg = \"`data` should contain a column called `floor` representing the floor when `floor = True`.\"\n _error_msg(msg)", "def test_metrics_names_collision(tmpdir):\n metrics = [\n ClassifierLoss(name=\"test_loss\"),\n ClassifierLoss(name=\"test_loss\"),\n ]\n\n with pytest.raises(ValueError):\n # Since names collision is checked by the primitive Trainer we can test it easily\n # with just one FakeTraining and be assured that it works.\n FakeAdversarialTraining(tmpdir, metrics=metrics)", "def do_grid_check(self,):\n self.ydim, self.xdim = self.data_fcst.shape \n if self.data_obs.shape != (self.ydim,self.xdim):\n raise FormatError(\"Obs and forecast data not same size.\")\n return", "def validate_parameters(self):\n #################### metrics_params/metrics ####################\n if (self.metrics is not None) and (\"metrics\" in self.metrics_params.keys()):\n raise ValueError(\n \"`metrics` may be provided as a kwarg, or as a `metrics_params` key, but NOT BOTH. Received: \"\n + f\"\\n `metrics`={self.metrics}\\n `metrics_params`={self.metrics_params}\"\n )\n else:\n _metrics_alias = \"metrics\"\n if self.metrics is None:\n try:\n self.metrics = self.metrics_params[\"metrics\"]\n except KeyError:\n self.metrics = self.metrics_params[\"metrics_map\"]\n _metrics_alias = \"metrics_map\"\n self.metrics = format_metrics(self.metrics)\n self.metrics_params = {**{_metrics_alias: self.metrics}, **self.metrics_params}", "def _validate_raster_attributes(self, x):\r\n if self.extent != x.extent:\r\n raise ValueError(\"Extents do not match.\")\r\n if self.resolution != x.resolution:\r\n raise ValueError(\"Resolutions do not match.\")\r\n if not np.array_equal(self.x, x.x):\r\n raise ValueError(\"x attributes do not match.\")\r\n if not np.array_equal(self.y, x.y):\r\n raise ValueError(\"y attributes do not match.\")\r\n if len(self.layers) != len(x.layers):\r\n raise ValueError(\"layers lengths do not match.\")\r\n if self.crs != x.crs:\r\n raise ValueError(\"crs attributes do not match.\")", "def _check_inputs(self):\n\n # Check if attributes exists\n if self.attributes is None:\n print(\"attributes is missing; call set_attributes(new_attributes) to fix this! new_attributes should be a\",\n \"populated dataset of independent variables.\")\n return False\n\n # Check if labels exists\n if self.labels is None:\n print(\"labels is missing; call set_labels(new_labels) to fix this! new_labels should be a populated dataset\",\n \"of dependent variables.\")\n return False\n\n # Check if attributes and labels have same number of rows (samples)\n if self.attributes.shape[0] != self.labels.shape[0]:\n print(\"attributes and labels don't have the same number of rows. 
Make sure the number of samples in each\",\n \"dataset matches!\")\n return False\n\n # Type-checking for fit_intercept, normalize, and copy_X isn't needed; these can accept truthy/falsy values\n\n # Check if n_jobs is an integer or None\n if self.n_jobs is not None and not isinstance(self.n_jobs, int):\n print(\"n_jobs must be None or an integer; call set_n_jobs(new_n_jobs) to fix this!\")\n return False\n\n # Check if test_size is a float or None\n if self.test_size is not None and not isinstance(self.test_size, (int, float)):\n print(\"test_size must be None or a number; call set_test_size(new_test_size) to fix this!\")\n return False\n\n return True", "def test_sanity_check (self):\n X, Y = self.dm.get_data(std=True, lag_indicator=True)\n\n # Ensure number of rows between what we expect.\n row_bound = (800, 1000)\n actual_rows = X.shape[0]\n msg = 'Number of rows not within expected bounds.'\n self.assertTrue(row_bound[0] < actual_rows < row_bound[1], msg)\n\n msg = 'X and Y have different number of rows.'\n self.assertEqual(X.shape[0], Y.shape[0], msg)\n\n # Ensure X columns match.\n expected_x_cols = ['SP500', 'ltc_px_std', 'xrp_px_std', 'xlm_px_std',\n 'eth_px_std', 'btc_px_std', 'ltc_volume_std',\n 'xrp_volume_std', 'xlm_volume_std', 'eth_volume_std',\n 'btc_volume_std', 'lagged_others']\n actual_x_cols = X.columns.tolist()\n msg = 'Number of X columns different than expected.'\n self.assertEqual(len(actual_x_cols), len(expected_x_cols), msg)\n\n for col in expected_x_cols:\n msg = 'Expected column not found: {}'.format(col)\n self.assertTrue(col in actual_x_cols, msg)", "def _validate_input(self, x, y):\n\n x, y = check_X_y(x, y, accept_sparse=[\"csr\", \"csc\", \"coo\"],\n multi_output=True, y_numeric=True)\n return x, y.ravel()", "def check_all_user_inputs_valid(self):\n self.check_RNN_layers_valid()\n self.check_activations_valid()\n self.check_embedding_dimensions_valid()\n self.check_initialiser_valid()\n self.check_y_range_values_valid()\n self.check_return_final_seq_only_valid()", "def dimension_check():\n print(\"### DIMENSION CHECK ###\")\n print(X.shape,\n y.shape,\n X_train.shape,\n y_train.shape,\n X_test.shape,\n y_test.shape,\n weights.shape)\n print(\"### END ###\")", "def validate_data(self):\n if self.type == 'grid':\n for layout in self.data:\n grid = layout.get('grid')\n if not grid:\n raise ChartError(\n \"Layout grid setting must be set \"\n \"if layout type is 'grid'\")\n\n if not grid.get('location'):\n raise ChartError(\n \"Layout grid location must be set \"\n \"if layout type is 'grid'\")\n\n if len(grid['location']) != 2:\n raise ChartError(\"Layout grid location length must be 2\")", "def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\t#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)", "def _check_dimensions(self, workspace_to_check):\n for i in range(self._raw_ws.getNumDims()):\n if self._raw_ws.getDimension(i).getNBins() != workspace_to_check._raw_ws.getDimension(i).getNBins():\n return False\n return True", "def test_unequal_pair_lengths(self):\n np.random.seed(987654321)\n input_1 = st.norm.rvs(size=100), st.norm.rvs(size=96)\n self.assertRaises(UnequalVectorLengthError, lambda: GroupLinearRegression(input_1[0], input_1[1]))", 
"def _check_data_shape_to_num_outputs(preds: Tensor, target: Tensor, num_outputs: int) ->None:\n if preds.ndim > 2 or target.ndim > 2:\n raise ValueError(f'Expected both predictions and target to be either 1- or 2-dimensional tensors, but got {target.ndim} and {preds.ndim}.')\n if num_outputs == 1 and preds.ndim != 1 or num_outputs > 1 and num_outputs != preds.shape[1]:\n raise ValueError(f'Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs} and {preds.shape[1]}.')", "def _check_sizes(self, space):\n my_dimension = self.get_total_dimension()\n other_dimension = space.get_total_dimension()\n if my_dimension != other_dimension:\n if isinstance(space, Conv2DSpace):\n if my_dimension * space.shape[0] !=\\\n other_dimension:\n raise ValueError(str(self)+\" with total dimension \" +\n str(my_dimension) +\n \" can't format a batch into \" +\n str(space) + \"because its total dimension\\\n is \" +\n str(other_dimension))", "def validate(self):\n self._validate_time_index()\n self._validate_num_profiles()\n self._validate_merge_col_exists()\n self._validate_unique_merge_col()\n self._validate_merge_col_overlaps()", "def _validate_ratio_input(self):\n if self._ratio_bounds is None:\n return\n\n self._validate_ratio_bounds()\n self._validate_ratio_type()\n self._validate_ratio_format()\n self._validate_ratio_cols_prefixed()\n self._validate_ratio_cols_exist()", "def _check_dims(cls, values):\n ndim = values['ndim']\n\n # Check the range tuple has same number of elements as ndim\n if len(values['range']) < ndim:\n values['range'] = ((0, 2, 1),) * (\n ndim - len(values['range'])\n ) + values['range']\n elif len(values['range']) > ndim:\n values['range'] = values['range'][-ndim:]\n\n # Check the current step tuple has same number of elements as ndim\n if len(values['current_step']) < ndim:\n values['current_step'] = (0,) * (\n ndim - len(values['current_step'])\n ) + values['current_step']\n elif len(values['current_step']) > ndim:\n values['current_step'] = values['current_step'][-ndim:]\n\n # Check the order tuple has same number of elements as ndim\n if len(values['order']) < ndim:\n values['order'] = tuple(\n range(ndim - len(values['order']))\n ) + tuple(o + ndim - len(values['order']) for o in values['order'])\n elif len(values['order']) > ndim:\n values['order'] = reorder_after_dim_reduction(\n values['order'][-ndim:]\n )\n\n # Check the order is a permutation of 0, ..., ndim - 1\n if not set(values['order']) == set(range(ndim)):\n raise ValueError(\n trans._(\n \"Invalid ordering {order} for {ndim} dimensions\",\n deferred=True,\n order=values['order'],\n ndim=ndim,\n )\n )\n\n # Check the axis labels tuple has same number of elements as ndim\n if len(values['axis_labels']) < ndim:\n # Append new \"default\" labels to existing ones\n if values['axis_labels'] == tuple(\n map(str, range(len(values['axis_labels'])))\n ):\n values['axis_labels'] = tuple(map(str, range(ndim)))\n else:\n values['axis_labels'] = (\n tuple(map(str, range(ndim - len(values['axis_labels']))))\n + values['axis_labels']\n )\n elif len(values['axis_labels']) > ndim:\n values['axis_labels'] = values['axis_labels'][-ndim:]\n\n return values", "def _validate_values(self, values):\n prev_len = -1\n i = j = -1\n if values is None or len(values) == 0:\n self.shape = 0, 0\n return\n for i, row in enumerate(values):\n if prev_len == -1:\n prev_len = len(row)\n if prev_len != len(row):\n raise ValueError(f\"Row {i} differs in length: {prev_len} != {len(row)}\")\n for j, val in 
enumerate(row):\n if type(val) not in (int, float, complex):\n raise ValueError(f\"[{i}, {j}]: {val} is of bad type ({type(val)})\")\n if val == 0:\n self.empty_loc = (i, j)\n if i == -1:\n self.shape = 0, 0\n else:\n self.shape = i + 1, j + 1", "def validate(cls, data, errors):", "def test_has_correct_length(self) -> None:\n assert len(list(self._dataset)) == 7168", "def check_matching_unit_dimension(\n ureg: UnitRegistry, base_units: str, units_to_check: List[str]\n) -> None:\n\n base_unit = getattr(ureg, base_units)\n\n for unit_string in units_to_check:\n unit = getattr(ureg, unit_string)\n if unit.dimensionality != base_unit.dimensionality:\n raise DimensionalityError(base_unit, unit)", "def check_sanity(self):\n # ensure numeric labels\n try:\n list(map(int, flatten(self.labels[:1])))\n except ValueError as ve:\n error(\"Non-numeric label encountered: {}\".format(ve))\n except TypeError as ve:\n warning(\"Non-collection labelitem encountered: {}\".format(ve))", "def validate(self, spss_limits=False, verbose=True):\n def validate_text_obj(text_obj):\n edits = ['x edits', 'y edits']\n if not isinstance(text_obj, dict):\n return False\n else:\n for tk, text in list(text_obj.items()):\n if ((tk in edits and not validate_text_obj(text_obj[tk]))\n or text in [None, '', ' ']):\n return False\n return True\n\n def validate_value_obj(value_obj):\n if not value_obj:\n return False\n else:\n for val in value_obj:\n if not 'value' in val or not validate_text_obj(val.get('text')):\n return False\n return True\n\n def validate_limits(text_obj, limit):\n if isinstance(text_obj, dict):\n for text in list(text_obj.values()):\n if isinstance(text, str):\n if len(text) > limit:\n return False\n elif not validate_limits(list(text.values()), limit):\n return False\n return True\n\n def collect_and_validate_tks(all_text_obj):\n edits = ['x edits', 'y edits']\n tks = []\n for obj in all_text_obj:\n if not isinstance(obj, dict): continue\n for tk in list(obj.keys()):\n if tk in ['x edits', 'y edits']: continue\n if not tk in tks: tks.append(tk)\n if not self.text_key in tks: return False\n for obj in all_text_obj:\n if not isinstance(obj, dict): continue\n if not all(tk in obj for tk in tks): return False\n return True\n\n msg = 'Please check the following variables, metadata is inconsistent.'\n err_columns = ['name', 'q_label', 'values', 'text keys', 'source', 'codes',\n 'spss limit name', 'spss limit q_label', 'spss limit values']\n if not spss_limits: err_columns = err_columns[:6]\n err_df = pd.DataFrame(columns=err_columns)\n\n skip = [v for v in self.masks() + self.columns() if v.startswith('qualityControl_')]\n skip += ['@1', 'id_L1.1', 'id_L1']\n\n for v in self.columns() + self.masks():\n if v in skip: continue\n collection = 'masks' if self.is_array(v) else 'columns'\n var = self._meta[collection][v]\n err_var = ['' for x in range(9)]\n # check name\n if not var.get('name') == v: err_var[0] = 'x'\n if len(var.get('name', '')) > 64: err_var[6] = 'x'\n # check q_label\n if not validate_text_obj(var.get('text')):\n err_var[1] = 'x'\n elif not validate_limits(var.get('text', {}), 256):\n err_var[7] = 'x'\n # check values\n if self._has_categorical_data(v):\n values = self._get_value_loc(v)\n if not validate_value_obj(values):\n err_var[2] = 'x'\n values = []\n elif not all(validate_limits(c.get('text', {}), 120) for c in values):\n err_var[8] = 'x'\n else:\n values = []\n # check sources\n if self._is_array_item(v):\n source = self._maskname_from_item(v)\n s = self._meta['masks'][source]\n s_tks = 
[s.get('text')]\n if not self.var_exists(source): err_var[4] = 'x'\n elif self.is_array(v):\n source = self.sources(v)\n s_tks = []\n if not all(self.var_exists(i) for i in source): err_var[4] = 'x'\n else:\n s_tks = []\n # check text_keys\n all_text_obj = [var.get('text', {})] + [val.get('text', {}) for val in values] + s_tks\n if not collect_and_validate_tks(all_text_obj): err_var[3] = 'x'\n # check codes\n if not self.is_array(v) and self._has_categorical_data(v):\n data_c = self.codes_in_data(v)\n meta_c = self.codes(v)\n if [c for c in data_c if not c in meta_c]: err_var[5] = 'x'\n if not spss_limits:\n err_var = err_var[:6]\n err_columns = err_columns[:6]\n if any(x=='x' for x in err_var):\n new_err = pd.DataFrame([err_var], index=[v], columns=err_columns)\n err_df = err_df.append(new_err)\n\n for c in [c for c in self._data.columns if not c in self._meta['columns']\n and not c in skip]:\n err_var = ['' for x in range(9)]\n err_var[5] = 'x'\n if not spss_limits:\n err_var = err_var[:6]\n err_columns = err_columns[:6]\n new_err = pd.DataFrame([err_var], index=[c], columns=err_columns)\n err_df = err_df.append(new_err)\n\n if not all(self.var_exists(v.split('@')[-1])\n for v in self._meta['sets']['data file']['items']) and verbose:\n print(\"'dataset._meta['sets']['data file']['items']' is not consistent!\")\n if not len(err_df) == 0:\n if verbose:\n print(msg)\n print(self.validate.__doc__)\n return err_df.sort_index()\n else:\n if verbose: print('No issues found in the dataset!')\n return None", "def _check_input_len(self, data):\n data_len = len(data[0])\n if self._input_len != data_len:\n msg = 'Received %d features, expected %d.' % (data_len,\n self._input_len)\n raise ValueError(msg)", "def checkParamsError(self):\n # check if parameter combinations match with the simulation filename.\n for i, f in enumerate(self.yadeDataFiles):\n # get the file name fore the suffix\n f = f.split('.' + f.split('.')[-1])[0]\n # get parameters from the remaining string\n paramsString = f.split('_')[-self.numParams:]\n # element wise comparison of the parameter vector\n if not (np.abs((np.float64(paramsString) - self.getSmcSamples()[-1][i])\n / self.getSmcSamples()[-1][i] < 1e-10).all()):\n raise RuntimeError(\n \"Parameters \" + \", \".join(\n [\"%s\" % v for v in self.getSmcSamples()[-1][i]]) + \" do not match with the data file name \" + f)", "def __validate_inputs(self):\n if self.train_df is None:\n raise ValueError(\"Dataframe cannot be null\")\n\n if (\n self.test_df is not None\n and self.train_df.shape[1] != self.test_df.shape[1]\n ):\n raise KeyError(\n \"Target variable in still present in one of the datasets or\"\n \" the number of columns in both test and train are not equal.\"\n )\n\n # target_label should not be in list of columns\n if self.target_label is None:\n warnings.warn(\n \"Parameter 'target_label' is empty. If not provided and is present in dataframe, it may get encoded. \"\n \"To mitigate, provide the target_label from dataframe or provide explicit list of columns for encoding \"\n \"via the 'cat_cols' parameter\",\n UserWarning,\n )\n if (\n self.target_label is not None\n and self.cat_cols is not None\n and (self.target_label in self.cat_cols)\n ):\n raise ValueError(\n f\"Target column: {self.target_label} will be encoded. 
Remove it from cat_cols if in there.\"\n )\n\n if self.ord_dict is not None:\n for key, mapping in self.ord_dict.items():\n if mapping is None or mapping == {}:\n raise ValueError(\n f\"Expected a weight mapping for ordinal column {key}.\"\n f\" Received {self.ord_dict[key]}\"\n )", "def test_with_valid_input(self):\n for dataset_type in ['regular', 'raw', 'REGULAR', 'RAW']:\n try:\n check_dataset_type(dataset_type)\n except ValueError:\n self.fail(\"Dataset {0} should be valid\".format(dataset_type))", "def validVarConstructLength(self,varlen):\r\n if len(varlen)!=2:\r\n print 'variable must specify name and type'\r\n return False\r\n else:\r\n return True", "def check_inputs(x_unlabeled, x_labeled, y_labeled, y_true):\n if x_unlabeled is None:\n if x_labeled is None:\n raise Exception(\"No data, labeled or unlabeled, passed to check_inputs!\")\n x_unlabeled = x_labeled[0:0]\n if x_labeled is not None and y_labeled is not None:\n pass\n elif x_labeled is None and y_labeled is None:\n x_labeled = x_unlabeled[0:0]\n y_shape = y_true.get_shape()[1 : K.ndim(y_true)].as_list()\n y_labeled = np.empty([0] + y_shape)\n else:\n raise Exception(\"x_labeled and y_labeled must both be None or have a value\")\n return x_unlabeled, x_labeled, y_labeled", "def _check_shape(self, y_pred, y):\n if self._type == 'classification':\n if y_pred.ndim != y.ndim + 1:\n raise ValueError('Classification case, dims of y_pred equal dims of y add 1, '\n 'but got y_pred: {} dims and y: {} dims'.format(y_pred.ndim, y.ndim))\n if y.shape != (y_pred.shape[0],) + y_pred.shape[2:]:\n raise ValueError('Classification case, y_pred shape and y shape can not match. '\n 'got y_pred shape is {} and y shape is {}'.format(y_pred.shape, y.shape))\n else:\n if y_pred.ndim != y.ndim:\n raise ValueError('{} case, dims of y_pred need equal with dims of y, but got y_pred: {} '\n 'dims and y: {} dims.'.format(self._type, y_pred.ndim, y.ndim))\n if y_pred.shape != y.shape:\n raise ValueError('{} case, y_pred shape need equal with y shape, but got y_pred: {} and y: {}'.\n format(self._type, y_pred.shape, y.shape))", "def check_test_case_validity(test_case_dataset):\n for i, test_case in enumerate(test_case_dataset):\n assert \"NAME\" in test_case, f\"Test case #{i} Invalid NAME\"\n\n assert (\n \"N_STATES\" in test_case\n and isinstance(test_case[\"N_STATES\"], int)\n and 0 < test_case[\"N_STATES\"] <= 64\n ), f\"Test case #{i} Invalid N_STATES\"\n\n assert (\n \"N_SYMBOLS\" in test_case\n and isinstance(test_case[\"N_SYMBOLS\"], int)\n and 0 < test_case[\"N_SYMBOLS\"] <= 64\n ), f\"Test case #{i} Invalid N_SYMBOLS\"\n\n assert (\n \"PLAYER_INPUT_SIZES\" in test_case\n and isinstance(test_case[\"PLAYER_INPUT_SIZES\"], list)\n and len(test_case[\"PLAYER_INPUT_SIZES\"]) > 1\n and all(\n (isinstance(x, int) and x > 0) for x in test_case[\"PLAYER_INPUT_SIZES\"]\n )\n ), f\"Test case #{i} Invalid PLAYER_INPUT_SIZES\"\n\n assert \"REPETITIONS\" not in test_case or (\n isinstance(test_case[\"REPETITIONS\"], int) and 0 < test_case[\"REPETITIONS\"]\n ), f\"Test case #{i} Invalid REPETITIONS\"\n\n assert \"DEBUG\" not in test_case or isinstance(\n test_case[\"DEBUG\"], bool\n ), f\"Test case #{i} Invalid DEBUG\"\n\n assert \"VIRTUAL_MACHINE\" not in test_case or (\n isinstance(test_case[\"VIRTUAL_MACHINE\"], str)\n and test_case[\"VIRTUAL_MACHINE\"] in [\"./spdz2k-party.x\", \"./semi2k-party.x\"]\n ), f\"Test case #{i} Invalid VIRTUAL_MACHINE\"\n\n if \"PLAYER_DATA\" in test_case:\n assert isinstance(\n test_case[\"PLAYER_DATA\"], list\n ), f\"Test 
case #{i} Invalid PLAYER_DATA - Not a list\"\n for j, size in enumerate(test_case[\"PLAYER_INPUT_SIZES\"]):\n player_data = test_case[\"PLAYER_DATA\"][j]\n max_value = test_case[\"N_SYMBOLS\"]\n assert (\n isinstance(player_data, list)\n and len(player_data) == size\n and all(\n (isinstance(x, int) and 0 <= x <= max_value)\n for x in player_data\n )\n ), f\"Test case #{i} Invalid PLAYER_DATA - User {j} inputs are invalid\"", "def check_validity(self):\n if len(self.constraints) < 2: # pragma: nocover\n raise ValueError(\n \"Invalid input value for type '{}': number of \"\n \"subexpression must be at least 2.\".format(type(self).__name__)\n )\n for constraint in self.constraints:\n constraint.check_validity()", "def check_validity(self):\n if len(self.constraints) < 2: # pragma: nocover\n raise ValueError(\n \"Invalid input value for type '{}': number of \"\n \"subexpression must be at least 2.\".format(type(self).__name__)\n )\n for constraint in self.constraints:\n constraint.check_validity()", "def check_shape_equal(pred, labels):\n if pred.shape != labels.shape:\n raise ValueError('Prediction and labels shapes must be equal:'\n f'{pred.shape} vs {labels.shape}.')", "def valid_ndim_assertion(expected_dimentions, actual_dimention, name):\n\tassert (actual_dimention in expected_dimentions), \"Invalid ndim of {} should be {}\".format(name, str(expected_dimentions))", "def _check_xy(x: DataFrame, y: DataFrame) -> None:\n if x.shape[1] != y.shape[1]:\n raise ValueError(\n \"not compatible:\\n\"\n f\"- different number of columns: {x.shape[1]} vs {y.shape[1]}\"\n )\n\n in_y_not_x = setdiff(\n y.columns, x.columns, __calling_env=CallingEnvs.REGULAR\n )\n in_x_not_y = setdiff(\n x.columns, y.columns, __calling_env=CallingEnvs.REGULAR\n )\n if in_y_not_x or in_x_not_y:\n msg = [\"not compatible:\"]\n if in_y_not_x:\n msg.append(f\"- Cols in `y` but not `x`: {in_y_not_x}.\")\n if in_x_not_y:\n msg.append(f\"- Cols in `x` but not `y`: {in_x_not_y}.\")\n raise ValueError(\"\\n\".join(msg))", "def _check_inputvalues(self):\n # Check x, y and z are int or float dtypes\n # ie do not contain any unusable values like strings\n if not (self.x.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n if not (self.y.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n # Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (v.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")", "def test_basic1(self):\r\n self.validate((2, 2, 3, 3), (2, 2, 2, 2), 'valid', verify_grad=False)", "def _validate_param_shape(param_name, matrix_shape, actual_shape, time_steps=None):\n if time_steps is None:\n if actual_shape != matrix_shape:\n raise ValueError(\n f\"Shape of parameter `{param_name}` is: {actual_shape}, \"\n f\"but should be: {matrix_shape}.\"\n )\n else:\n matrices_shape = (time_steps, *matrix_shape)\n if not (actual_shape == matrix_shape or actual_shape == matrices_shape):\n raise ValueError(\n f\"Shape of parameter `{param_name}` is: {actual_shape}, but should be: \"\n f\"{matrix_shape} or {matrices_shape}.\"\n )", "def _check_same_shape(preds: Tensor, target: Tensor) ->None:\n if preds.shape != target.shape:\n raise RuntimeError(f'Predictions and targets are expected to have the same shape, but got {preds.shape} and {target.shape}.')", "def _verify_space(self) -> None:\n\n for dimension in self.space.values():\n\n 
if dimension.type != \"fidelity\" and dimension.prior_name not in [\n \"uniform\",\n \"reciprocal\",\n \"int_uniform\",\n \"int_reciprocal\",\n \"choices\",\n ]:\n raise ValueError(\n \"TPE now only supports uniform, loguniform, uniform discrete \"\n f\"and choices as prior: {dimension.prior_name}\"\n )\n\n shape = dimension.shape\n if shape and len(shape) != 1:\n raise ValueError(\"TPE now only supports 1D shape.\")", "def test_dim_empty_list(a, b, metrics):\n if metrics in correlation_metrics:\n metric, _metric = metrics\n with pytest.raises(ValueError) as excinfo:\n metric(a, b, dim=[])\n assert \"requires `dim` not being empty, found dim\" in str(excinfo.value)\n elif metrics in distance_metrics:\n metric, _metric = metrics\n res = metric(a, b, dim=[])\n assert len(res.dims) == len(a.dims), print(res.dims)", "def checkTrainData(cls, data):\n\n if data == None or len(data) == 0:\n raise Exception(\"No data\")\n\n if type(data[0]) != tuple:\n raise Exception(\"Not a list of tuples\")\n\n if len(data[0]) != 2 and type(data[0][0]) != str and type(data[0][1]) != list:\n raise Exception(\"Not a tuple of (String, [data])\")\n\n length = len(data[0][1])\n\n for tup in data:\n if len(tup) != 2 and type(tup[0]) != str and type(tup[1]) != list:\n raise Exception(\"Not a tuple of (String, [data])\")\n\n if len(tup[1]) != length:\n raise Exception(\"Not all elements have the same amount of data\")", "def validate(self) -> None:\n names: set[str] = set()\n for name in (\n *(i.name for i in self.typed_dicts),\n *(i.name for i in self.literals),\n *(i.name for i in self.waiters),\n *(i.name for i in self.paginators),\n *(self.service_resource.get_all_names() if self.service_resource else []),\n ):\n if is_reserved(name):\n raise ValueError(f\"{name} is a reserved keyword\")\n if name in names:\n for typed_dict in self.typed_dicts:\n if typed_dict.name == name:\n self.logger.warning(\n f\"{typed_dict}: {[c.render() for c in typed_dict.children]}\"\n )\n raise ValueError(f\"Duplicate name {name}\")\n names.add(name)", "def validate_dataset(self):\n if np.all(self.L_bpe == self.bpe_l):\n pass\n\n super(StandardDataset, self).validate_dataset()", "def test_validate_ndim():\n with pytest.raises(ValueError):\n validate_ndim(0)\n with pytest.raises(ValueError):\n validate_ndim(-1)\n with pytest.raises(ValueError):\n validate_ndim(0.5)\n\n assert validate_ndim(1) == 1\n assert validate_ndim(2) == 2" ]
[ "0.6732428", "0.65989214", "0.65425104", "0.64392585", "0.6399168", "0.63487905", "0.6343038", "0.62423515", "0.61932313", "0.6176423", "0.6091993", "0.6081119", "0.60686547", "0.6042439", "0.603004", "0.60111535", "0.601055", "0.60092235", "0.59968686", "0.5972382", "0.59626067", "0.59533864", "0.59476745", "0.5907038", "0.59007293", "0.5898824", "0.58938205", "0.5889132", "0.5886622", "0.58786273", "0.5862555", "0.58534926", "0.58461595", "0.5838252", "0.5836866", "0.5831239", "0.58268", "0.58211774", "0.58211696", "0.5814291", "0.5811069", "0.5802619", "0.5800561", "0.5793517", "0.57892406", "0.57846916", "0.5780046", "0.5776287", "0.5766281", "0.575742", "0.5740331", "0.57172436", "0.5717233", "0.57168233", "0.57044876", "0.56994987", "0.56822586", "0.5669567", "0.56485456", "0.5641295", "0.5632796", "0.562869", "0.56013006", "0.55882233", "0.5579164", "0.5571609", "0.5566282", "0.5554738", "0.55505836", "0.552904", "0.5526734", "0.5525382", "0.5500558", "0.5492629", "0.54867345", "0.54864854", "0.5482046", "0.54803884", "0.5478381", "0.5472484", "0.5471148", "0.54641414", "0.5463241", "0.5457224", "0.5454836", "0.54444474", "0.54444474", "0.5442384", "0.54340327", "0.5431589", "0.54258454", "0.5423371", "0.54193664", "0.5414799", "0.54113716", "0.5404436", "0.5401492", "0.53931135", "0.5391924", "0.5385499" ]
0.8259363
0
Extract acquisition and optimizer options from `model_gen_options`.
def construct_acquisition_and_optimizer_options( acqf_options: TConfig, model_gen_options: Optional[TConfig] = None ) -> Tuple[TConfig, TConfig]: acq_options = acqf_options.copy() opt_options = {} if model_gen_options: acq_options.update( checked_cast(dict, model_gen_options.get(Keys.ACQF_KWARGS, {})) ) # TODO: Add this if all acq. functions accept the `subset_model` # kwarg or opt for kwarg filtering. # acq_options[SUBSET_MODEL] = model_gen_options.get(SUBSET_MODEL) opt_options = checked_cast( dict, model_gen_options.get(Keys.OPTIMIZER_KWARGS, {}) ).copy() return acq_options, opt_options
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_build_options(cls, opt: Opt):\n query_model = 'bert'\n document_model = 'bert'\n query_path = opt['model_file']\n document_path = opt['model_file']\n try:\n # determine if loading a RAG model\n loaded_opt = Opt.load(f\"{query_path}.opt\")\n document_path = loaded_opt.get('dpr_model_file', document_path)\n if loaded_opt['model'] in ['rag', 'fid'] and loaded_opt['query_model'] in [\n 'bert',\n 'bert_from_parlai_rag',\n ]:\n query_model = 'bert_from_parlai_rag'\n if loaded_opt['model'] == 'fid':\n # document model is always frozen\n # but may be loading a FiD-RAG Model\n doc_loaded_opt = Opt.load(\n f\"{modelzoo_path(opt['datapath'], document_path)}.opt\"\n )\n document_path = doc_loaded_opt.get('dpr_model_file', document_path)\n\n except FileNotFoundError:\n pass\n\n return query_model, query_path, document_model, document_path", "def optimizer_config(self):\r\n return {\r\n \"lr\": self.args.lr[0],\r\n \"momentum\": self.args.momentum,\r\n \"weight_decay\": self.args.weight_decay,\r\n }", "def get_model_kwargs(parsed_args):\n parsed_args.model_name = parsed_args.model_name.lower()\n if parsed_args.model_name not in SUPPORTED_MODELS:\n raise ValueError(\"Model name must be in the set: {}\".format(SUPPORTED_MODELS))\n res = {'learning_rate': parsed_args.learning_rate}\n restore_ckpt_dir = parsed_args.restore_efficient_net_weights_from\n res[\"restore_ckpt_dir\"] = restore_ckpt_dir\n if parsed_args.lsd:\n res[\"rsd\"] = parsed_args.lsd\n res[\"feature_extractor_name\"] = parsed_args.feature_extractor_name\n res[\"l2\"] = parsed_args.l2\n res[\"final_layer_dropout_rate\"] = parsed_args.final_layer_dropout_rate\n res[\"label_smoothing\"] = parsed_args.label_smoothing\n if \"dice\" not in parsed_args.loss_name:\n res[\"dice\"] = False\n if parsed_args.sgd:\n res['optimizer'] = tf.train.GradientDescentOptimizer\n else:\n res['optimizer'] = partial(tf.train.AdamOptimizer, beta1=0)\n res['loss_name'] = parsed_args.loss_name\n res[\"n_rows\"] = parsed_args.image_size\n res[\"n_cols\"] = parsed_args.image_size\n return res", "def parse_options(parser):\n TensorflowModel.parse_options(parser)\n parser.add_argument('--input-dim', type=int, default=160)\n parser.add_argument('--input-len', type=int, default=7501)\n parser.add_argument('--output-len', type=int, default=7501)\n parser.add_argument('--conv-layer-num', type=int, default=2)\n parser.add_argument('--conv-kernel-num', type=int, default=1)\n parser.add_argument('--conv-kernel-len', type=int, default=512)", "def iterate_optimizer_configs(options):\n for batch_size in options[consts.BATCH_SIZE]:\n for optimizer in options[consts.OPTIMIZER]:\n config = options.copy()\n config[consts.BATCH_SIZE] = batch_size\n config[consts.OPTIMIZER] = optimizer\n yield config", "def params(config):\n from transformer_tools.model import params as mparams\n mparams(config)\n\n group = OptionGroup(config,\"transformer_tools.Tagger\",\n \"Settings for tagger models\")\n\n group.add_option(\"--model_type\",\n dest=\"model_type\",\n default='bert-base-uncased',\n type=str,\n help=\"The type of tagger to use [default='bert-base-cased']\")\n\n group.add_option(\"--existing_model\",\n dest=\"existing_model\",\n default='',\n type=str,\n help=\"The path of an existing model to load [default='']\")\n\n group.add_option(\"--model_name\",\n dest=\"model_name\",\n default='bert',\n type=str,\n help=\"The name of the model [default='bert']\")\n\n group.add_option(\"--tagger_model\",\n dest=\"tagger_model\",\n default='arrow_tagger',\n type=str,\n help=\"The name of 
the model [default='arrow_tagger']\")\n\n group.add_option(\"--label_list\",\n dest=\"label_list\",\n default=\"B-up;B-down;B-=\",\n type=str,\n help=\"The types of labels to use [default='B-up;B-down;B-=']\")\n\n group.add_option(\"--save_model_every_epoch\",\n dest=\"save_model_every_epoch\",\n action='store_true',\n default=False,\n help=\"Backup up every model after epoch [default=False]\")\n\n group.add_option(\"--save_optimizer_and_scheduler\",\n dest=\"save_optimizer_and_scheduler\",\n action='store_true',\n default=False,\n help=\"Save the optimizer and schuler [default=False]\")\n\n group.add_option(\"--save_steps\",\n dest=\"save_steps\",\n default=-1,\n type=int,\n help=\"Save model at this frequency [default=-1]\")\n\n\n config.add_option_group(group)", "def parse(self):\n opt = self.gather_options()\n opt.isTrain = self.isTrain # train or test\n\n # process opt.suffix\n if opt.suffix:\n suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''\n opt.name = opt.name + suffix\n\n opt.f_map = [opt.crop_size, opt.crop_size * 2, opt.crop_size * 4, opt.crop_size * 8]\n self.print_options(opt)\n\n # set gpu ids\n str_ids = opt.gpu_ids.split(',')\n opt.gpu_ids = []\n for str_id in str_ids:\n id = int(str_id)\n if id >= 0:\n opt.gpu_ids.append(id)\n if len(opt.gpu_ids) > 0:\n torch.cuda.set_device(opt.gpu_ids[0])\n\n self.opt = opt\n return self.opt", "def get_optimizers(args):\r\n\t# Create a generator which can map a latent vector size 8 to 72\r\n\tG = Generator(\r\n\t\tinput_size=args.g_input_size,\r\n\t\thidden_size=args.g_hidden_size,\r\n\t\toutput_size=args.g_output_size,\r\n\t\tp=args.p\r\n\t)\r\n\t# Create a discriminator which can turn 72-dimensional particle to Binary\r\n\t# prediction\r\n\tD = Discriminator(\r\n\t\tinput_size=args.d_input_size,\r\n\t\thidden_size=args.d_hidden_size,\r\n\t\toutput_size=args.d_output_size,\r\n\t\tp=args.p,\r\n\t\tdropout=args.dropout\r\n\t)\r\n\r\n\t# Choose an optimizer\r\n\tif args.optim == 'Adam':\r\n\t\td_optimizer = optim.Adam(D.parameters(), lr=args.d_learning_rate)\r\n\t\tg_optimizer = optim.Adam(G.parameters(), lr=args.g_learning_rate)\r\n\telse:\r\n\t\td_optimizer = optim.SGD(D.parameters(), lr=args.d_learning_rate)\r\n\t\tg_optimizer = optim.SGD(G.parameters(), lr=args.g_learning_rate, momentum=args.sgd_momentum)\r\n\treturn G, D, d_optimizer, g_optimizer", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def _get_optimizer(self):\n raise NotImplementedError", "def get_simulation_options(self):\n return self.opts", "def get_optimization_parameters(self):\n pass", "def next_tune_cfg(self):\n # generate tuning space according to user chosen tuning strategy\n\n while True:\n op_cfgs = {}\n op_cfgs['calib_iteration'] = int(np.random.choice(self.calib_iter))\n op_cfgs['op'] = {}\n for op, configs in self.opwise_quant_cfgs.items():\n cfgs_len = len(configs)\n if cfgs_len > 0:\n op_cfgs['op'][op] = configs[np.random.choice(cfgs_len)]\n else:\n op_cfgs['op'][op] = self.opwise_tune_cfgs[op][np.random.choice(\n len(self.opwise_tune_cfgs[op]))]\n\n yield op_cfgs", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(\n dqpsk_demod.__init__, ('self',), options)", "def get_model_config(model_name, args):\n if model_name == 'Tacotron2':\n model_config = dict(\n # optimization\n mask_padding=args.mask_padding,\n # audio\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=args.n_symbols,\n symbols_embedding_dim=args.symbols_embedding_dim,\n # 
encoder\n encoder_kernel_size=args.encoder_kernel_size,\n encoder_n_convolutions=args.encoder_n_convolutions,\n encoder_embedding_dim=args.encoder_embedding_dim,\n # attention\n attention_rnn_dim=args.attention_rnn_dim,\n attention_dim=args.attention_dim,\n # attention location\n attention_location_n_filters=args.attention_location_n_filters,\n attention_location_kernel_size=args.attention_location_kernel_size,\n # decoder\n n_frames_per_step=args.n_frames_per_step,\n decoder_rnn_dim=args.decoder_rnn_dim,\n prenet_dim=args.prenet_dim,\n max_decoder_steps=args.max_decoder_steps,\n gate_threshold=args.gate_threshold,\n p_attention_dropout=args.p_attention_dropout,\n p_decoder_dropout=args.p_decoder_dropout,\n # postnet\n postnet_embedding_dim=args.postnet_embedding_dim,\n postnet_kernel_size=args.postnet_kernel_size,\n postnet_n_convolutions=args.postnet_n_convolutions,\n decoder_no_early_stopping=args.decoder_no_early_stopping\n )\n return model_config\n elif model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n else:\n raise NotImplementedError(model_name)", "def _options(self):\r\n xmi_file = self.tb_xmi_file_name.GetValue()\r\n topic = self.tb_pragma.GetValue()\r\n package = self.tb_package.GetValue()\r\n header = self.tb_file_header.GetValue()\r\n target_folder = self.tb_target_folder.GetValue()\r\n encoding = self.tb_encoding.GetValue()\r\n \r\n return {\"topic\" : topic, \r\n \"package\" : package, \r\n \"header\" : header, \r\n \"target_folder\" : target_folder,\r\n \"encoding\" : encoding,\r\n \"xmi_file\" : xmi_file}", "def get_optimizer(model, lr, transfer_optim):\n\n # different otpimizer lr for transfer to reuse low level features\n if transfer_optim:\n if isinstance(model, UNetRegressionModel):\n optimizer = torch.optim.Adam([{'params':\n list(model.msd.inc.parameters()) +\n list(model.msd.down1.parameters()) +\n list(model.msd.down2.parameters()), 'lr': 1e-6},\n {'params': \n list(model.msd.down3.parameters()) +\n list(model.msd.down4.parameters()) +\n list(model.msd.up1.parameters()), 'lr': 1e-5},\n {'params': \n list(model.msd.up2.parameters()) + list(model.msd.up3.parameters()) +\n list(model.msd.up4.parameters()) + list(model.msd.outc.parameters()), 'lr': 1e-4},\n ])\n\n else:\n params = list(model.msd.parameters())\n # case: MSD_d30\n if len(params) < 40:\n optimizer = torch.optim.Adam([{'params': params[1:10], 'lr':1e-6},\n {'params': params[:0]+ params[10:20], 'lr':1e-5},\n {'params': params[20:], 'lr':1e-4},\n ])\n # case: MSD_d80\n else:\n optimizer = torch.optim.Adam([{'params': params[1:20], 'lr':1e-6},\n {'params': params[:0]+ params[20:40], 'lr':1e-5},\n {'params': params[40:], 'lr':1e-4},\n ])\n else:\n optimizer = torch.optim.Adam(model.msd.parameters(), lr)\n\n return optimizer", "def write_optimization_options(self):\n\n # set common options\n g = self.f.require_group('optimizationOptions')\n g.attrs['optimizer'] = 0 # IpOpt\n g.attrs['retryOptimization'] = 1\n g.attrs['hierarchicalOptimization'] = 1\n g.attrs['numStarts'] = 1\n\n # set IpOpt options\n g = self.f.require_group('optimizationOptions/ipopt')\n g.attrs['max_iter'] = 100\n g.attrs['hessian_approximation'] = np.string_(\"limited-memory\")\n g.attrs[\"limited_memory_update_type\"] = np.string_(\"bfgs\")\n 
g.attrs[\"tol\"] = 1e-9\n g.attrs[\"acceptable_iter\"] = 1\n # set ridiculously high, so only the acceptable_* options below matter\n g.attrs[\"acceptable_tol\"] = 1e20\n g.attrs[\"acceptable_obj_change_tol\"] = 1e-12\n g.attrs[\"watchdog_shortened_iter_trigger\"] = 0\n\n # set fmincon options\n g = self.f.require_group('optimizationOptions/fmincon')\n g.attrs['MaxIter'] = 100\n g.attrs[\"TolX\"] = 1e-8\n g.attrs[\"TolFun\"] = 0\n g.attrs[\"MaxFunEvals\"] = 1e7\n g.attrs[\"algorithm\"] = np.string_(\"interior-point\")\n g.attrs[\"GradObj\"] = np.string_(\"on\")\n g.attrs[\"display\"] = np.string_(\"iter\")\n\n # set CERES options\n g = self.f.require_group('optimizationOptions/ceres')\n g.attrs['max_num_iterations'] = 100\n\n # set toms611/SUMSL options\n g = self.f.require_group('optimizationOptions/toms611')\n g.attrs['mxfcal'] = 1e8\n\n self.write_bounds()\n self.write_starting_points()", "def get_mo_options_from_cfg(\n deploy_cfg: mmengine.Config) -> ModelOptimizerOptions:\n backend_config = get_backend_config(deploy_cfg)\n mo_options = backend_config.get('mo_options', None)\n mo_options = ModelOptimizerOptions(mo_options)\n return mo_options", "def get_model_config(model_name, args):\n if model_name == 'WaveGlow':\n model_config = dict(\n n_mel_channels=args.n_mel_channels,\n n_flows=args.flows,\n n_group=args.groups,\n n_early_every=args.early_every,\n n_early_size=args.early_size,\n WN_config=dict(\n n_layers=args.wn_layers,\n kernel_size=args.wn_kernel_size,\n n_channels=args.wn_channels\n )\n )\n return model_config\n elif model_name == 'FastPitch':\n model_config = dict(\n # io\n n_mel_channels=args.n_mel_channels,\n # symbols\n n_symbols=len(get_symbols(args.symbol_set)),\n padding_idx=get_pad_idx(args.symbol_set),\n symbols_embedding_dim=args.symbols_embedding_dim,\n # input FFT\n in_fft_n_layers=args.in_fft_n_layers,\n in_fft_n_heads=args.in_fft_n_heads,\n in_fft_d_head=args.in_fft_d_head,\n in_fft_conv1d_kernel_size=args.in_fft_conv1d_kernel_size,\n in_fft_conv1d_filter_size=args.in_fft_conv1d_filter_size,\n in_fft_output_size=args.in_fft_output_size,\n p_in_fft_dropout=args.p_in_fft_dropout,\n p_in_fft_dropatt=args.p_in_fft_dropatt,\n p_in_fft_dropemb=args.p_in_fft_dropemb,\n # output FFT\n out_fft_n_layers=args.out_fft_n_layers,\n out_fft_n_heads=args.out_fft_n_heads,\n out_fft_d_head=args.out_fft_d_head,\n out_fft_conv1d_kernel_size=args.out_fft_conv1d_kernel_size,\n out_fft_conv1d_filter_size=args.out_fft_conv1d_filter_size,\n out_fft_output_size=args.out_fft_output_size,\n p_out_fft_dropout=args.p_out_fft_dropout,\n p_out_fft_dropatt=args.p_out_fft_dropatt,\n p_out_fft_dropemb=args.p_out_fft_dropemb,\n # duration predictor\n dur_predictor_kernel_size=args.dur_predictor_kernel_size,\n dur_predictor_filter_size=args.dur_predictor_filter_size,\n p_dur_predictor_dropout=args.p_dur_predictor_dropout,\n dur_predictor_n_layers=args.dur_predictor_n_layers,\n # pitch predictor\n pitch_predictor_kernel_size=args.pitch_predictor_kernel_size,\n pitch_predictor_filter_size=args.pitch_predictor_filter_size,\n p_pitch_predictor_dropout=args.p_pitch_predictor_dropout,\n pitch_predictor_n_layers=args.pitch_predictor_n_layers,\n # pitch conditioning\n pitch_embedding_kernel_size=args.pitch_embedding_kernel_size,\n # speakers parameters\n n_speakers=args.n_speakers,\n speaker_emb_weight=args.speaker_emb_weight,\n # energy predictor\n energy_predictor_kernel_size=args.energy_predictor_kernel_size,\n energy_predictor_filter_size=args.energy_predictor_filter_size,\n 
p_energy_predictor_dropout=args.p_energy_predictor_dropout,\n energy_predictor_n_layers=args.energy_predictor_n_layers,\n # energy conditioning\n energy_conditioning=args.energy_conditioning,\n energy_embedding_kernel_size=args.energy_embedding_kernel_size,\n )\n return model_config\n\n else:\n raise NotImplementedError(model_name)", "def _options(self):\n return", "def default_optimization_hparams() -> Dict[str, Any]:\n return {\n \"optimizer\": {\n \"type\": \"Adam\",\n \"kwargs\": {\n \"lr\": 0.001\n }\n },\n \"learning_rate_decay\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_clip\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_noise_scale\": None,\n # TODO(zhiting): allow module-level control of gradient_multipliers\n \"name\": None\n }", "def configure_optimizers(self):\n optimizer = _get_optimizer(model_parameters=self.parameters(\n ), project_parameters=self.project_parameters)\n if self.project_parameters.step_size > 0:\n lr_scheduler = _get_lr_scheduler(\n project_parameters=self.project_parameters, optimizer=optimizer)\n return [optimizer], [lr_scheduler]\n else:\n return optimizer", "def get_model_args(args):\r\n global MODEL_ARCHITECTURE, MODEL_OPTIMIZER, ADVANCED_OPTIONS, \\\r\n DATA_OPTIONS, BERT_CONFIG\r\n\r\n required_args = MODEL_ARCHITECTURE | MODEL_OPTIMIZER | ADVANCED_OPTIONS \\\r\n | DATA_OPTIONS | BERT_CONFIG\r\n\r\n arg_values = {k: v for k, v in vars(args).items() if k in required_args}\r\n return argparse.Namespace(**arg_values)", "def gyp_generator_flags():\n return dict(arg.split('=', 1)\n for arg in shlex.split(os.environ.get('GYP_GENERATOR_FLAGS', '')))", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(gfsk_mod.__init__,\n ('self',), options)\n extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)", "def parse_opts():\n MODELS = core.list_models()\n flags = [arg for arg in sys.argv[1:]\n if arg.startswith('-')]\n values = [arg for arg in sys.argv[1:]\n if not arg.startswith('-') and '=' in arg]\n args = [arg for arg in sys.argv[1:]\n if not arg.startswith('-') and '=' not in arg]\n models = \"\\n \".join(\"%-15s\"%v for v in MODELS)\n if len(args) == 0:\n print(USAGE)\n print(\"\\nAvailable models:\")\n print(columnize(MODELS, indent=\" \"))\n sys.exit(1)\n if len(args) > 3:\n print(\"expected parameters: model N1 N2\")\n\n name = args[0]\n try:\n model_info = core.load_model_info(name)\n except ImportError as exc:\n print(str(exc))\n print(\"Could not find model; use one of:\\n \" + models)\n sys.exit(1)\n\n invalid = [o[1:] for o in flags\n if o[1:] not in NAME_OPTIONS\n and not any(o.startswith('-%s='%t) for t in VALUE_OPTIONS)]\n if invalid:\n print(\"Invalid options: %s\"%(\", \".join(invalid)))\n sys.exit(1)\n\n\n # pylint: disable=bad-whitespace\n # Interpret the flags\n opts = {\n 'plot' : True,\n 'view' : 'log',\n 'is2d' : False,\n 'qmax' : 0.05,\n 'nq' : 128,\n 'res' : 0.0,\n 'accuracy' : 'Low',\n 'cutoff' : 0.0,\n 'seed' : -1, # default to preset\n 'mono' : False,\n 'show_pars' : False,\n 'show_hist' : False,\n 'rel_err' : True,\n 'explore' : False,\n 'use_demo' : True,\n 'zero' : False,\n }\n engines = []\n for arg in flags:\n if arg == '-noplot': opts['plot'] = False\n elif arg == '-plot': opts['plot'] = True\n elif arg == '-linear': opts['view'] = 'linear'\n elif arg == '-log': opts['view'] = 'log'\n elif arg == '-q4': opts['view'] = 'q4'\n elif arg == '-1d': opts['is2d'] = False\n elif arg == '-2d': opts['is2d'] = True\n elif arg == '-exq': opts['qmax'] = 
10.0\n elif arg == '-highq': opts['qmax'] = 1.0\n elif arg == '-midq': opts['qmax'] = 0.2\n elif arg == '-lowq': opts['qmax'] = 0.05\n elif arg == '-zero': opts['zero'] = True\n elif arg.startswith('-nq='): opts['nq'] = int(arg[4:])\n elif arg.startswith('-res='): opts['res'] = float(arg[5:])\n elif arg.startswith('-accuracy='): opts['accuracy'] = arg[10:]\n elif arg.startswith('-cutoff='): opts['cutoff'] = float(arg[8:])\n elif arg.startswith('-random='): opts['seed'] = int(arg[8:])\n elif arg == '-random': opts['seed'] = np.random.randint(1e6)\n elif arg == '-preset': opts['seed'] = -1\n elif arg == '-mono': opts['mono'] = True\n elif arg == '-poly': opts['mono'] = False\n elif arg == '-pars': opts['show_pars'] = True\n elif arg == '-nopars': opts['show_pars'] = False\n elif arg == '-hist': opts['show_hist'] = True\n elif arg == '-nohist': opts['show_hist'] = False\n elif arg == '-rel': opts['rel_err'] = True\n elif arg == '-abs': opts['rel_err'] = False\n elif arg == '-half': engines.append(arg[1:])\n elif arg == '-fast': engines.append(arg[1:])\n elif arg == '-single': engines.append(arg[1:])\n elif arg == '-double': engines.append(arg[1:])\n elif arg == '-single!': engines.append(arg[1:])\n elif arg == '-double!': engines.append(arg[1:])\n elif arg == '-quad!': engines.append(arg[1:])\n elif arg == '-sasview': engines.append(arg[1:])\n elif arg == '-edit': opts['explore'] = True\n elif arg == '-demo': opts['use_demo'] = True\n elif arg == '-default': opts['use_demo'] = False\n # pylint: enable=bad-whitespace\n\n if len(engines) == 0:\n engines.extend(['single', 'sasview'])\n elif len(engines) == 1:\n if engines[0][0] != 'sasview':\n engines.append('sasview')\n else:\n engines.append('single')\n elif len(engines) > 2:\n del engines[2:]\n\n n1 = int(args[1]) if len(args) > 1 else 1\n n2 = int(args[2]) if len(args) > 2 else 1\n use_sasview = any(engine=='sasview' and count>0\n for engine, count in zip(engines, [n1, n2]))\n\n # Get demo parameters from model definition, or use default parameters\n # if model does not define demo parameters\n pars = get_pars(model_info, opts['use_demo'])\n\n\n # Fill in parameters given on the command line\n presets = {}\n for arg in values:\n k, v = arg.split('=', 1)\n if k not in pars:\n # extract base name without polydispersity info\n s = set(p.split('_pd')[0] for p in pars)\n print(\"%r invalid; parameters are: %s\"%(k, \", \".join(sorted(s))))\n sys.exit(1)\n presets[k] = float(v) if not k.endswith('type') else v\n\n # randomize parameters\n #pars.update(set_pars) # set value before random to control range\n if opts['seed'] > -1:\n pars = randomize_pars(pars, seed=opts['seed'])\n print(\"Randomize using -random=%i\"%opts['seed'])\n if opts['mono']:\n pars = suppress_pd(pars)\n pars.update(presets) # set value after random to control value\n #import pprint; pprint.pprint(model_info)\n constrain_pars(model_info, pars)\n if use_sasview:\n constrain_new_to_old(model_info, pars)\n if opts['show_pars']:\n print(str(parlist(model_info, pars, opts['is2d'])))\n\n # Create the computational engines\n data, _ = make_data(opts)\n if n1:\n base = make_engine(model_info, data, engines[0], opts['cutoff'])\n else:\n base = None\n if n2:\n comp = make_engine(model_info, data, engines[1], opts['cutoff'])\n else:\n comp = None\n\n # pylint: disable=bad-whitespace\n # Remember it all\n opts.update({\n 'name' : name,\n 'def' : model_info,\n 'n1' : n1,\n 'n2' : n2,\n 'presets' : presets,\n 'pars' : pars,\n 'data' : data,\n 'engines' : [base, comp],\n })\n # pylint: 
enable=bad-whitespace\n\n return opts", "def modify_model_commandline_options(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:\n # module\n parser.add_argument('--discriminator_module_name', type=str, required=True, choices=discriminator_modules.keys())\n parser.add_argument('--generator_module_name', type=str, required=True, choices=generator_modules.keys())\n opt, _ = parser.parse_known_args()\n discriminator_module_modify_commandline_options = discriminator_module_options[opt.discriminator_module_name]\n generator_module_modify_commandline_options = generator_module_options[opt.generator_module_name]\n parser = discriminator_module_modify_commandline_options(parser)\n parser = generator_module_modify_commandline_options(parser)\n\n # optimizer\n parser.add_argument('--discriminator_optimizer_name', type=str, required=True, choices=optimizers.keys())\n parser.add_argument('--generator_optimizer_name', type=str, required=True, choices=optimizers.keys())\n opt, _ = parser.parse_known_args()\n discriminator_optimizer_modify_commandline_options = optimizer_options[opt.discriminator_optimizer_name]\n generator_optimizer_modify_commandline_options = optimizer_options[opt.generator_optimizer_name]\n parser = discriminator_optimizer_modify_commandline_options(parser)\n parser = generator_optimizer_modify_commandline_options(parser)\n\n # scheduler\n parser.add_argument('--discriminator_scheduler_name', type=str, required=True, choices=schedulers.keys())\n parser.add_argument('--generator_scheduler_name', type=str, required=True, choices=schedulers.keys())\n opt, _ = parser.parse_known_args()\n discriminator_scheduler_modify_commandline_options = scheduler_options[opt.discriminator_scheduler_name]\n generator_scheduler_modify_commandline_options = scheduler_options[opt.generator_scheduler_name]\n parser = discriminator_scheduler_modify_commandline_options(parser)\n parser = generator_scheduler_modify_commandline_options(parser)\n\n # init weight\n parser.add_argument('--init_weight_name', type=str, required=True, choices=init_weights.keys())\n opt, _ = parser.parse_known_args()\n init_weight_modify_commandline_options = init_weight_options[opt.init_weight_name]\n parser = init_weight_modify_commandline_options(parser)\n\n return parser", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(dqpsk_mod.__init__,\n ('self',), options)", "def get_adv_optimizer(self, mode: str) -> torch.optim.Optimizer:\n pass", "def make_optimizer(self):\r\n # parameters = [self.encoder.parameters(), self.decoder.parameters(), self.spec_enc.parameters()]\r\n if self.flags.optim == 'Adam':\r\n op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\r\n elif self.flags.optim == 'RMSprop':\r\n op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\r\n elif self.flags.optim == 'SGD':\r\n op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\r\n else:\r\n raise Exception(\"Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben\")\r\n return op", "async def get_options(self):", "def MakeOpts():\n opt_parser = OptionParser()\n opt_parser.add_option(\"-s\", \"--thermodynamics_source\",\n dest=\"thermodynamics_source\",\n type=\"choice\",\n choices=['observed_only',\n 'hatzi_only',\n 'milo_only',\n 'milo_merged'],\n default=\"milo_merged\",\n help=\"The thermodynamic data to use\")\n 
opt_parser.add_option(\"-k\", \"--kegg_database_location\", \n dest=\"kegg_db_filename\",\n default=\"../data/public_data.sqlite\",\n help=\"The KEGG database location\")\n opt_parser.add_option(\"-d\", \"--database_location\", \n dest=\"db_filename\",\n default=\"../res/gibbs.sqlite\",\n help=\"The Thermodynamic database location\")\n opt_parser.add_option(\"-t\", \"--thermodynamics_filename\",\n dest=\"thermodynamics_filename\",\n default='../data/thermodynamics/dG0.csv',\n help=\"The name of the thermodynamics file to load.\")\n opt_parser.add_option(\"-i\", \"--input_filename\",\n dest=\"input_filename\",\n default=\"../data/thermodynamics/pathways.txt\",\n help=\"The file to read for pathways to analyze.\")\n opt_parser.add_option(\"-o\", \"--output_filename\",\n dest=\"output_filename\",\n default='../res/thermo_comparison/report.html',\n help=\"Where to write output to.\")\n return opt_parser", "def get_options(self):\n option_list = []\n if self.can_analyze():\n option_list.append((EpOp.TASK_ANALYZE, None))\n\n option_tup = self.predict_option()\n if option_tup:\n option_list.append(option_tup)\n\n option_tup = self.check_option()\n if option_tup:\n option_list.append(option_tup)\n\n return option_list", "def extra_options():\n extra_vars = {\n 'auto_detect_cpu_features': [True, \"Auto-detect available CPU features, and configure accordingly\", CUSTOM],\n 'with_shared': [True, \"Enable building of shared ELPA libraries\", CUSTOM],\n 'with_single': [True, \"Enable building of single precision ELPA functions\", CUSTOM],\n 'with_generic_kernel': [True, \"Enable building of ELPA generic kernels\", CUSTOM],\n }\n\n for flag in ELPA_CPU_FEATURE_FLAGS:\n if flag == 'sse4_2':\n conf_opt = ['sse', 'sse-assembly']\n elif flag == 'avx512f':\n conf_opt = ['avx512']\n else:\n conf_opt = [flag]\n\n for opt in conf_opt:\n help_msg = \"Configure with --enable-%s (if None, auto-detect support for %s)\" % (opt, flag.upper())\n extra_vars['use_%s' % flag] = [None, help_msg, CUSTOM]\n\n return ConfigureMake.extra_options(extra_vars)", "def model_arg_parse(cls, parser):\r\n # for mslite config\r\n parser.add_argument('--thread_affinity_mode',\r\n type=int,\r\n default=2,\r\n help='thread affinity number for mslite inference')\r\n\r\n parser.add_argument('--thread_num',\r\n type=int,\r\n default=1,\r\n help='thread number for mslite inference')\r\n\r\n parser.add_argument('--mslite_model_type',\r\n type=int,\r\n default=0,\r\n choices=[0, 4],\r\n help='input model type for mslite inference, '\r\n '0 for MINDIR, 4 for MINDIR_LITE')\r\n\r\n parser.add_argument('--ascend_provider',\r\n type=str,\r\n default='',\r\n choices=['', 'ge'],\r\n help=\"Ascend infer method: '' for acl, 'ge' for GE\")\r\n\r\n # for tensorrt infer\r\n parser.add_argument('--tensorrt_optim_input_shape',\r\n type=str,\r\n default=None,\r\n help='optim input shape for tensorrt'\r\n 'with key tensor name (str) '\r\n 'and value shape info(List[int])')\r\n\r\n parser.add_argument('--tensorrt_min_input_shape',\r\n type=str,\r\n default=None,\r\n help='optim input shape for tensorrt'\r\n 'with key tensor name (str) '\r\n 'and value shape info(List[int])')\r\n\r\n parser.add_argument('--tensorrt_max_input_shape',\r\n type=str,\r\n default=None,\r\n help='optim input shape for tensorrt'\r\n 'with key tensor name (str) '\r\n 'and value shape info(List[int])')\r\n\r\n parser.add_argument('--gpu_memory_size',\r\n type=int,\r\n default=100,\r\n help='gpu init memory size(M)')\r\n\r\n parser.add_argument('--is_enable_tensorrt',\r\n type=bool,\r\n 
default=False,\r\n help=\"flag indicate whether use tensorrt engine\")\r\n\r\n parser.add_argument('--is_fp16',\r\n type=bool,\r\n default=False,\r\n help=\"flag indicate whether apply fp16 infer\")\r\n\r\n parser.add_argument('--is_int8',\r\n type=bool,\r\n default=False,\r\n help=\"flag indicate whether apply int8 infer\")", "def training_opts(self):\n return self._training_opts", "def options(self, parser, env):\n pass", "def select_optimizer(opt, model):\n\n if opt == 'msgd':\n return optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n elif opt == 'adam':\n return optim.Adam(model.parameters(), lr=0.001)\n elif opt == 'rmsprop':\n return optim.RMSprop(model.parameters(), lr=0.001)\n elif opt == 'adagrad':\n return optim.Adagrad(model.parameters(), lr=0.001)\n else:\n return optim.SGD(model.parameters(), lr=0.001)", "def parse_options(self, extra):\n options = super().parse_options(extra)\n self.target_image = options.pop(\"target\")\n\n return options", "def _get_options(self):\n return self.options", "def model_args(self) -> Optional[Dict]:\n return self.config.get('model_args')", "def get_optimizer_config(idx):\n names = [x.strip() for x in _get_opt_name_content().split(\"\\n\") if x.strip()]\n name_to_use = names[idx]\n config, _ = _get_config_map()[name_to_use]\n logging.info(\"Using config:: %s\", str(config))\n return config", "def _parser_options():\n #We have two options: get some of the details from the config file,\n import argparse\n import sys\n from matdb import base\n pdescr = \"MATDB Context Finder\"\n parser = argparse.ArgumentParser(parents=[base.bparser], description=pdescr)\n for arg, options in _script_options.items():\n parser.add_argument(arg, **options)\n \n args = base.exhandler(examples, parser)\n if args is None:\n return\n\n return args", "def get_optimizer(self, cfg_pipeline):\n return", "def modify_options(parser, is_train):\n return parser", "def experiment_params():\n exp = {\n 'lr': [1e-3],\n 'loss_function': ['cce'],\n 'optimizer': ['nadam'],\n 'dataset': [\n # 'curv_contour_length_9',\n 'curv_contour_length_14',\n # 'curv_baseline',\n ]\n }\n exp['data_augmentations'] = [\n [\n 'grayscale',\n 'left_right',\n 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'resize',\n # 'per_image_standardization',\n 'zero_one'\n ]]\n exp['val_augmentations'] = exp['data_augmentations']\n exp['batch_size'] = 32 # Train/val batch size.\n exp['epochs'] = 16\n exp['exp_name'] = 'hgru_bn_pathfinder_14'\n exp['model_name'] = 'hgru'\n # exp['clip_gradients'] = 7.\n exp['save_weights'] = True\n exp['validation_iters'] = 1000\n exp['num_validation_evals'] = 50\n exp['shuffle_val'] = True # Shuffle val data.\n exp['shuffle_train'] = True\n return exp", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if 
self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)", "def _get_makeopts(self):\n if \"MAKEOPTS\" in os.environ:\n self.makeopts = os.environ[\"MAKEOPTS\"].split()", "def options_parse():\n parser = argparse.ArgumentParser()\n\n # Options for model parameters setup (only change if model training was changed)\n parser.add_argument('--num_filters', type=int, default=64,\n help='Filter dimensions for DenseNet (all layers same). Default=64')\n parser.add_argument('--num_classes_ax_cor', type=int, default=79,\n help='Number of classes to predict in axial and coronal net, including background. Default=79')\n parser.add_argument('--num_classes_sag', type=int, default=51,\n help='Number of classes to predict in sagittal net, including background. Default=51')\n parser.add_argument('--num_channels', type=int, default=7,\n help='Number of input channels. Default=7 (thick slices)')\n parser.add_argument('--kernel_height', type=int, default=5, help='Height of Kernel (Default 5)')\n parser.add_argument('--kernel_width', type=int, default=5, help='Width of Kernel (Default 5)')\n parser.add_argument('--stride', type=int, default=1, help=\"Stride during convolution (Default 1)\")\n parser.add_argument('--stride_pool', type=int, default=2, help=\"Stride during pooling (Default 2)\")\n parser.add_argument('--pool', type=int, default=2, help='Size of pooling filter (Default 2)')\n\n sel_option = parser.parse_args()\n\n return sel_option", "def iterate_model_architecture_configs(options):\n for model_architecture in options[consts.MODEL_ARCHITECTURE]:\n config = options.copy()\n config[consts.MODEL_ARCHITECTURE] = model_architecture\n yield config", "def _get_transformer_quantization_config(subset_size: int) -> Dict[str, Any]:\n return {\n \"algorithm\": \"quantization\",\n \"preset\": \"mixed\",\n \"initializer\": {\n \"range\": {\"num_init_samples\": subset_size, \"type\": DEFAULT_RANGE_TYPE},\n \"batchnorm_adaptation\": {\"num_bn_adaptation_samples\": 0},\n },\n \"scope_overrides\": {\"activations\": {\"{re}.*matmul_0\": {\"mode\": \"symmetric\"}}},\n \"ignored_scopes\": [\n \"{re}.*Embeddings.*\",\n \"{re}.*__add___[0-1]\",\n \"{re}.*layer_norm_0\",\n \"{re}.*matmul_1\",\n \"{re}.*__truediv__*\",\n ],\n \"overflow_fix\": \"first_layer_only\",\n }", "def add_parse_options(cls, parser):\n # Decoder params\n parser.add_argument(\"-beam_size\", default=1, type=int, help=\"Beam size\")\n parser.add_argument(\"-lm_weight\", default=0.0, type=float, help=\"LM weight in decoding\")\n parser.add_argument(\"-lm_path\", default=\"/share/data/speech/shtoshni/research/asr_multi/\"\n \"code/lm/models/best_models/run_id_301/lm.ckpt-250000\", type=str,\n help=\"LM ckpt path\")\n parser.add_argument(\"-cov_penalty\", default=0.0, type=float,\n help=\"Coverage penalty\")", "def get_extra_options(self):\n # Options change depending on the pdf generator..\n try:\n transform_module = getattr(transforms, self.pdf_generator)\n except AttributeError:\n return []\n\n options = []\n tool_options = self.pdf_tool.make_options()\n adapter_options, adapter_overrides = self._get_adapter_options()\n\n opts_order = [self.request, tool_options]\n if adapter_overrides:\n opts_order.insert(0, adapter_options)\n else:\n opts_order.append(adapter_options)\n\n # First we check the options for which no value is\n # needed.\n # For each one, it is possible to 
define a --no-xxx\n # option.\n for opt_name in transform_module.simple_options:\n for opts in opts_order:\n if opts.get('--no-%s' % opt_name):\n break\n\n if opts.get(opt_name, None):\n options.append('--%s' % opt_name)\n break\n # Then we check values that expect a value.\n for opt_name in transform_module.valued_options:\n for opts in opts_order:\n opt_val = opts.get(opt_name, None)\n\n if opt_val is None:\n continue\n\n # Value is put before the option name as we\n # insert them after in another list using l.insert(2, opt)\n if isinstance(opt_val, list):\n for x in reversed(opt_val):\n options.append(str(x))\n else:\n options.append(str(opt_val))\n\n options.append('--%s' % opt_name)\n break\n\n return options", "def options(opt):\n #gropt = opt.get_option_group('configure options')\n #gropt.add_option('-e', '--engine', action='store', default='dojo', help='engine to configure the build for [default: \\'dojo\\']', dest='engine')\n #TODO : add option for the compiler", "def options(self, parser):\n pass", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'Feat', 'VGG', 'SSIM', 'PSNR']\n self.visual_names = ['fake_B', 'real_B']\n if self.isTrain:\n self.model_names = ['G', 'D']\n else:\n self.model_names = ['G']\n\n self.netG = generator.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids,\n not opt.no_transp_conv,\n opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,\n opt.n_blocks_local)\n\n if self.isTrain:\n self.netD = discriminator.define_D(opt.input_nc + opt.output_nc, opt.ndf, 'pix2pixHD_multiscale',\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids,\n not (opt.gan_mode == 'lsgan'), opt.num_D)\n\n self.criterionGAN = loss.GANLoss(opt.gan_mode, multiscale_D=opt.netD == 'pix2pixHD_multiscale').to(\n self.device)\n self.criterionVGG = loss.VGGLoss().to(self.device)\n self.criterionFeat = loss.FeatureMatchingLoss(opt.n_layers_D, opt.num_D)\n\n self.criterionSSIM = loss.SkimageLoss(partial(ssim, multichannel=True))\n self.criterionPSNR = loss.SkimageLoss(psnr)\n\n if opt.netG.startswith('pix2pixHD') and (opt.n_epochs_fix_global > 0):\n params_dict = dict(self.netG.named_parameters())\n netG_params = []\n for key, value in params_dict.items():\n if key.startswith('model' + str(opt.n_local_enhancers)):\n netG_params += [value]\n else:\n netG_params = self.netG.parameters()\n\n self.optimizer_G = torch.optim.Adam(netG_params, lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n if opt.load_pretrain:\n pretrained_path = '' if not self.isTrain else opt.load_pretrain\n self.load_network(self.netG, 'G', opt.epoch, pretrained_path)\n if self.isTrain:\n self.load_network(self.netD, 'D', opt.epoch, pretrained_path)\n\n self.real_A = None\n self.real_B = None\n self.fake_A = None\n self.fake_B = None\n self.loss_D_real = None\n self.loss_D_fake = None\n self.loss_D = None\n self.loss_G_GAN = None\n self.loss_Feat = None\n self.loss_VGG = None\n self.loss_G = None\n self.loss_SSIM = None\n self.loss_PSNR = None", "def objective_options(self):\n return Optimizer.list_method_options(self.obj_creator.method_dict)", "def parser_dealer(parser: ArgumentParser, option: str):\r\n\r\n if option == \"sampling\":\r\n parser.add_argument(\r\n \"sampleset\",\r\n 
metavar=\"DS_NAME\",\r\n type=str,\r\n help=\"Name of sample dataset in active learning selecting algorithms\",\r\n )\r\n parser.add_argument(\r\n \"--load-state\",\r\n action=\"store_true\",\r\n default=False,\r\n help=\"Turn on if load state.\",\r\n )\r\n parser.add_argument(\r\n \"--state-suffix\",\r\n metavar=\"SE\",\r\n type=str,\r\n help=\"load selected samples from sample set\",\r\n required=False,\r\n default=\"\",\r\n )\r\n parser.add_argument(\r\n \"--partial\",\r\n metavar=\"N\",\r\n type=int,\r\n help=\"load partial set of sample set\",\r\n default=-1,\r\n )\r\n if option == \"blackbox\":\r\n parser.add_argument(\r\n \"blackbox_dir\",\r\n metavar=\"VIC_DIR\",\r\n type=str,\r\n help='Path to victim model. Should contain files \"model_best.pth.tar\" and \"params.json\"',\r\n )\r\n parser.add_argument(\r\n \"--argmax\",\r\n action=\"store_true\",\r\n help=\"Only consider argmax labels\",\r\n default=False,\r\n )\r\n parser.add_argument(\r\n \"--pseudoblackbox\",\r\n action=\"store_true\",\r\n help=\"Load prequeried labels as blackbox\",\r\n default=False,\r\n )\r\n parser.add_argument(\r\n \"--bydataset\",\r\n action=\"store_true\",\r\n help=\"Load prequeried labels as blackbox\",\r\n default=False,\r\n )\r\n parser.add_argument(\r\n \"--topk\", metavar=\"TK\", type=int, help=\"iteration times\", default=0\r\n )\r\n if option == \"train\":\r\n parser.add_argument(\r\n \"model_dir\",\r\n metavar=\"MODEL_DIR\",\r\n type=str,\r\n help=\"Destination directory of model to be trained\",\r\n )\r\n parser.add_argument(\r\n \"model_arch\", metavar=\"MODEL_ARCH\", type=str, help=\"Model name\"\r\n )\r\n parser.add_argument(\r\n \"input_size\",\r\n metavar=\"MODEL_SIZE\",\r\n type=int,\r\n help=\"The size of input image.\",\r\n choices=(32, 224),\r\n )\r\n parser.add_argument(\r\n \"dataset\",\r\n metavar=\"DS_NAME\",\r\n type=str,\r\n help=\"Name of test dataset. 
In the case of victim model training, \"\r\n \"this parameter refer to both training set and test set\",\r\n )\r\n # Optional arguments\r\n parser.add_argument(\r\n \"-e\",\r\n \"--epochs\",\r\n type=int,\r\n default=100,\r\n metavar=\"N\",\r\n help=\"number of epochs to train (default: 100)\",\r\n )\r\n # This is only useful when the model support this complexity settings\r\n parser.add_argument(\r\n \"-x\",\r\n \"--complexity\",\r\n type=int,\r\n default=-1,\r\n metavar=\"N\",\r\n help=\"Model conv channel size.\",\r\n )\r\n parser.add_argument(\r\n \"--lr\",\r\n type=float,\r\n default=0.01,\r\n metavar=\"LR\",\r\n help=\"learning rate (default: 0.01)\",\r\n )\r\n parser.add_argument(\r\n \"--momentum\",\r\n type=float,\r\n default=0.5,\r\n metavar=\"M\",\r\n help=\"SGD momentum (default: 0.5)\",\r\n )\r\n parser.add_argument(\r\n \"--log-interval\",\r\n type=int,\r\n default=50,\r\n metavar=\"N\",\r\n help=\"how many batches to wait before logging training status\",\r\n )\r\n parser.add_argument(\r\n \"--resume\",\r\n default=None,\r\n type=str,\r\n metavar=\"PATH\",\r\n help=\"path to latest checkpoint (default: none)\",\r\n )\r\n parser.add_argument(\r\n \"--lr-step\", type=int, default=60, metavar=\"N\", help=\"Step sizes for LR\"\r\n )\r\n parser.add_argument(\r\n \"--lr-gamma\", type=float, default=0.1, metavar=\"N\", help=\"LR Decay Rate\"\r\n )\r\n parser.add_argument(\r\n \"--pretrained\", type=str, help=\"Use pretrained network\", default=None\r\n )\r\n parser.add_argument(\r\n \"--weighted-loss\",\r\n action=\"store_true\",\r\n help=\"Use a weighted loss\",\r\n default=False,\r\n )\r\n parser.add_argument(\r\n \"--optimizer-choice\",\r\n type=str,\r\n help=\"Optimizer\",\r\n default=\"sgdm\",\r\n choices=(\"sgd\", \"sgdm\", \"adam\", \"adagrad\"),\r\n )\r\n parser.add_argument(\r\n \"--train-criterion\",\r\n type=str,\r\n help=\"Loss Function of training process\",\r\n default=\"SCE\",\r\n choices=[\"MSE\", \"CE\", \"L1\", \"NLL\", \"BCE\", \"SmoothL1\", \"SCE\"],\r\n )\r\n parser.add_argument(\r\n \"--test-criterion\",\r\n type=str,\r\n help=\"Loss Function of test process\",\r\n default=\"CE\",\r\n choices=[\"MSE\", \"CE\", \"L1\", \"NLL\", \"BCE\", \"SmoothL1\"],\r\n )\r\n parser.add_argument(\r\n \"--reduction\",\r\n type=str,\r\n help=\"Loss Function reduction type\",\r\n default=\"mean\",\r\n choices=[\"mean\", \"sum\"],\r\n )\r\n parser.add_argument(\r\n \"--freeze\", type=bool, help=\"Freeze the feature layers\", default=False\r\n )\r\n if option == \"common\":\r\n parser.add_argument(\r\n \"-b\",\r\n \"--batch-size\",\r\n type=int,\r\n default=64,\r\n metavar=\"N\",\r\n help=\"input batch size for training (default: 64)\",\r\n )\r\n parser.add_argument(\r\n \"-d\",\r\n \"--device-id\",\r\n metavar=\"D\",\r\n type=int,\r\n help=\"Device id. 
-1 for CPU.\",\r\n default=0,\r\n )\r\n parser.add_argument(\r\n \"-w\",\r\n \"--num-workers\",\r\n metavar=\"N\",\r\n type=int,\r\n help=\"# Worker threads to load data\",\r\n default=10,\r\n )", "def retrieve_options(env):\n\n options = []\n if env.core != -1:\n options.extend([\"--core {}\".format(env.core)])\n if env.mtor != 4:\n options.extend([\"--mtor {}\".format(env.mtor)])\n if env.n != 1000:\n options.extend([\"--n {}\".format(env.n)])\n if env.forcefield != \"OPLS2005\":\n options.extend([\"--force {}\".format(env.forcefield)])\n if env.mae_lig:\n options.extend([\"--mae_charges\"])\n if env.gridres != 10:\n options.extend([\"--gridres {}\".format(env.gridres)])\n return \" \".join(options)", "def configure_optimizers(self):\n model = self.model\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.hparams.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n if self.hparams.lamb:\n optimizer = FusedLAMB(\n optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)\n\n elif self.hparams.adafactor:\n optimizer = Adafactor(\n optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False\n )\n else:\n optimizer = FusedAdam(\n optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)\n self.opt = optimizer\n\n scheduler = self.get_lr_scheduler()\n\n return [optimizer], [scheduler]", "def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:\n norm_module_types = (\n torch.nn.BatchNorm1d,\n torch.nn.BatchNorm2d,\n torch.nn.BatchNorm3d,\n torch.nn.SyncBatchNorm,\n # NaiveSyncBatchNorm inherits from BatchNorm2d\n torch.nn.GroupNorm,\n torch.nn.InstanceNorm1d,\n torch.nn.InstanceNorm2d,\n torch.nn.InstanceNorm3d,\n torch.nn.LayerNorm,\n torch.nn.LocalResponseNorm,\n )\n params: List[Dict[str, Any]] = []\n memo: Set[torch.nn.parameter.Parameter] = set()\n for module in model.modules():\n for key, value in module.named_parameters(recurse=False):\n if not value.requires_grad:\n continue\n # Avoid duplicating parameters\n if value in memo:\n continue\n memo.add(value)\n lr = cfg.SOLVER.BASE_LR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY\n if isinstance(module, norm_module_types):\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_NORM\n elif key == \"bias\":\n # NOTE: unlike Detectron v1, we now default BIAS_LR_FACTOR to 1.0\n # and WEIGHT_DECAY_BIAS to WEIGHT_DECAY so that bias optimizer\n # hyperparameters are by default exactly the same as for regular\n # weights.\n lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS\n params += [{\"params\": [value], \"lr\": lr, \"weight_decay\": weight_decay}]\n\n optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)\n return optimizer", "def fetch_optimizer(args, model):\n optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)\n\n scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps + 100,\n pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')\n\n return optimizer, scheduler", "def add_model_specific_args(parent_parser):\n # MODEL specific\n parser = ArgumentParser(parents=[parent_parser])\n parser.add_argument(\"--learning_rate\", 
default=0.01, type=float)\n parser.add_argument(\"--batch_size\", default=1, type=int)\n parser.add_argument(\"--depth\", default=6, type=int)\n # parser.add_argument(\"--loss_type\", default=\"psnr\", type=str)\n # parser.add_argument(\"--loss_type\", default=\"mse\", type=str)\n # parser.add_argument(\"--loss_type\", default=\"weighted_psnr\", type=str)\n parser.add_argument(\"--loss_type\", default=\"weighted_mse\", type=str)\n parser.add_argument(\"--ckpt_path\", default=\"dip_model\", type=str)\n parser.add_argument(\"--use_gated_conv\", action=\"store_true\")\n\n # training specific (for this model)\n parser.add_argument(\"--max_nb_epochs\", default=5000, type=int)\n\n return parser", "def get_optional_params():\n return {\n 'regularizer': None, # any valid TensorFlow regularizer\n 'regularizer_params': dict,\n 'initializer': None, # any valid TensorFlow initializer\n 'initializer_params': dict,\n 'dtype': [tf.float32, tf.float16, 'mixed'],\n }", "def optimizer_creator(model, config):\n return torch.optim.SGD(model.parameters(), lr=config.get(\"lr\", 1e-4))", "def get_default_options():\n out = _SFrame({'name': ['method', 'feature_model', 'verbose'],\n 'default_value' : ['lsh', 'auto', 'True'],\n 'lower_bound': [None, None, 0],\n 'upper_bound': [None, None, 1],\n 'description': ['Method for searching reference data',\n 'Trained model for extracting features from raw data objects',\n 'Whether progress output is printed'],\n 'parameter_type': ['string', 'model', 'boolean']})\n\n return out", "def add_model_specific_args(parser): # pragma: no-cover\n # Model specification\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"path to pretrained model or model identifier from huggingface.co/models\",\n )\n parser.add_argument(\n \"--config_name\",\n default=\"\",\n type=str,\n help=\"pretrained config name or path if not the same as model_name\"\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"pretrained tokenizer name or path if not the same as model_name\",\n )\n\n # Cache settings\n parser.add_argument(\n \"--cache_dir\",\n default=\"\",\n type=str,\n help=\"where to store the pre-trained models downloaded from s3\",\n )\n\n # Optimizer settings\n parser.add_argument(\n \"--max_grad_norm\",\n default=1.0,\n type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"number of update steps to accumulate before performing a backward/update pass\",\n )\n\n parser.add_argument(\n \"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\"\n )\n parser.add_argument(\n \"--weight_decay\",\n default=0.0,\n type=float,\n help=\"Weight decay if we apply some.\"\n )\n parser.add_argument(\n \"--adam_epsilon\",\n default=1e-8,\n type=float,\n help=\"Epsilon for Adam optimizer.\"\n )\n parser.add_argument(\n \"--epochs\",\n default=3,\n type=int,\n help=\"Total number of training epochs to perform.\"\n )\n\n parser.add_argument(\n \"--train_batch_size\",\n default=8,\n type=int\n )\n parser.add_argument(\n \"--eval_batch_size\",\n default=8,\n type=int\n )\n\n return parser", "def _get_options(ret):\n attrs = {\"host\": \"host\", \"port\": \"port\", \"skip\": \"skip_on_error\", \"mode\": \"mode\"}\n\n _options = salt.returners.get_returner_options(\n __virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__\n )\n return _options", "def load_arguments(parser):\n\n\t# 
paths\n\tparser.add_argument('--train_path_src', type=str, required=True, help='train src dir')\n\tparser.add_argument('--train_path_tgt', type=str, required=True, help='train tgt dir')\n\tparser.add_argument('--path_vocab_src', type=str, required=True, help='vocab src dir')\n\tparser.add_argument('--path_vocab_tgt', type=str, required=True, help='vocab tgt dir')\n\tparser.add_argument('--dev_path_src', type=str, default=None, help='dev src dir')\n\tparser.add_argument('--dev_path_tgt', type=str, default=None, help='dev tgt dir')\n\tparser.add_argument('--save', type=str, required=True, help='model save dir')\n\tparser.add_argument('--load', type=str, default=None, help='model load dir')\n\tparser.add_argument('--load_embedding_src', type=str, default=None, help='pretrained src embedding')\n\tparser.add_argument('--load_embedding_tgt', type=str, default=None, help='pretrained tgt embedding')\n\tparser.add_argument('--train_attscore_path', type=str, default=None, help='train set reference attention scores')\n\tparser.add_argument('--dev_attscore_path', type=str, default=None, help='dev set reference attention scores')\n\n\t# model\n\tparser.add_argument('--embedding_size_enc', type=int, default=200, help='encoder embedding size')\n\tparser.add_argument('--embedding_size_dec', type=int, default=200, help='decoder embedding size')\n\tparser.add_argument('--hidden_size_enc', type=int, default=200, help='encoder hidden size')\n\tparser.add_argument('--num_bilstm_enc', type=int, default=2, help='number of encoder bilstm layers')\n\tparser.add_argument('--num_unilstm_enc', type=int, default=0, help='number of encoder unilstm layers')\n\tparser.add_argument('--hidden_size_dec', type=int, default=200, help='encoder hidden size')\n\tparser.add_argument('--num_unilstm_dec', type=int, default=2, help='number of encoder bilstm layers')\n\tparser.add_argument('--hard_att', type=str, default='False', help='use hard attention or not')\n\tparser.add_argument('--att_mode', type=str, default='bahdanau', \\\n\t\t\t\t\t\t\thelp='attention mechanism mode - bahdanau / hybrid / dot_prod')\t\n\tparser.add_argument('--hidden_size_att', type=int, default=1, \\\n\t\t\t\t\t\t\thelp='hidden size for bahdanau / hybrid attention')\n\tparser.add_argument('--hidden_size_shared', type=int, default=200, \\\n\t\t\t\t\t\t\thelp='transformed att output hidden size (set as hidden_size_enc)')\n\tparser.add_argument('--additional_key_size', type=int, default=0, \\\n\t\t\t\t\t\t\thelp='additional attention key size: keys = [values, add_feats]')\n\n\t# train \n\tparser.add_argument('--random_seed', type=int, default=666, help='random seed')\t\n\tparser.add_argument('--max_seq_len', type=int, default=32, help='maximum sequence length')\n\tparser.add_argument('--batch_size', type=int, default=64, help='batch size')\t\n\tparser.add_argument('--embedding_dropout', type=float, default=0.0, help='embedding dropout')\n\tparser.add_argument('--dropout', type=float, default=0.0, help='dropout')\n\tparser.add_argument('--num_epochs', type=int, default=10, help='number of training epoches')\n\tparser.add_argument('--learning_rate', type=float, default=0.001, help='learning rate')\n\tparser.add_argument('--residual', type=str, default='False', help='residual connection')\n\tparser.add_argument('--max_grad_norm', type=float, default=1.0, help='optimiser gradient norm clipping: max grad norm')\t\n\tparser.add_argument('--batch_first', type=str, default='True', help='batch as the first dimension')\n\tparser.add_argument('--use_gpu', type=str, 
default='False', help='whether or not using GPU')\n\tparser.add_argument('--eval_with_mask', type=str, default='True', help='calc loss excluding padded words')\n\tparser.add_argument('--scheduled_sampling', type=str, default='False', \\\n\t\t\t\t\t \t\thelp='gradually turn off teacher forcing \\\n\t\t\t\t\t \t\t(if True, use teacher_forcing_ratio as the starting point)')\n\n\t# teacher forcing / attention forcing / dual\n\tparser.add_argument('--train_mode', type=str, default='dual', help='train mode; multi | dual | afdynamic')\n\tparser.add_argument('--load_tf', type=str, default=None, help='used with train_mode=af; tf model load dir')\n\tparser.add_argument('--teacher_forcing_ratio', type=float, default=1.0, help='ratio of teacher forcing')\n\tparser.add_argument('--attention_forcing', type=str, default='False', help='whether or not using attention forcing')\n\tparser.add_argument('--attention_loss_coeff', type=float, default=1.0, \\\n\t\t\t\t\t\t\thelp='attention loss coeff, ignored if attention_forcing=False')\n\t\n\t# save and print\n\tparser.add_argument('--checkpoint_every', type=int, default=10, help='save ckpt every n steps')\t\n\tparser.add_argument('--print_every', type=int, default=10, help='print every n steps')\t\n\n\treturn parser", "def options(self):\n return self.__options", "def modify_commandline_options(parser, is_train=True):\n #parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset\n if is_train:\n parser.add_argument('--g_loss_mode', nargs='*', default=['nsgan','lsgan','vanilla'], help='lsgan | nsgan | vanilla | wgan | hinge | rsgan')\n parser.add_argument('--d_loss_mode', type=str, default='lsgan', help='lsgan | nsgan | vanilla | wgan | hinge | rsgan') \n parser.add_argument('--which_D', type=str, default='S', help='Standard(S) | Relativistic_average (Ra)') \n\n parser.add_argument('--lambda_f', type=float, default=0.1, help='the hyperparameter that balance Fq and Fd')\n parser.add_argument('--candi_num', type=int, default=2, help='# of survived candidatures in each evolutinary iteration.')\n parser.add_argument('--eval_size', type=int, default=64, help='batch size during each evaluation.')\n return parser", "def _override_opt(self, new_opt):\n model_args = {\n 'arch',\n 'encoder-embed-dim',\n 'encoder-layers',\n 'decoder-embed-dim',\n 'decoder-layers',\n 'decoder-out-embed-dim',\n 'decoder-attention',\n }\n\n for k, v in new_opt.items():\n if k not in model_args:\n # skip non-model args\n continue\n if k not in self.opt:\n print('Adding new option [ {k}: {v} ]'.format(k=k, v=v))\n elif self.opt[k] != v:\n print('Overriding option [ {k}: {old} => {v}]'.format(\n k=k, old=self.opt[k], v=v))\n self.opt[k] = v\n return self.opt", "def modify_commandline_options(parser, is_train=True):\n # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)\n parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')\n if is_train:\n parser.set_defaults(pool_size=0, gan_mode='vanilla')\n parser.add_argument('--lambda_S', type=float, default=1.0, help='weight for Shading loss')\n parser.add_argument('--lambda_BA', type=float, default=1.0, help='weight for Brightest area loss')\n # parser.add_argument('--lambda_BP', type=float, default=1.0, help='weight for Brightest pixel loss')\n parser.add_argument('--lambda_BC', type=float, default=1.0, help='weight for Brightest coordinate loss')\n parser.add_argument('--lambda_regLTM', 
type=float, default=1.0, help='weight for LTM regularization.')\n parser.add_argument('--latent_Ls', action='store_true', help='Input Ls as latent.')\n parser.add_argument('--latent_Lt', action='store_true', help='Input Lt as latent.')\n parser.add_argument('--in_Ls', action='store_true', help='Input Ls as Input.')\n parser.add_argument('--in_Lt', action='store_true', help='Input Lt as Input.')\n parser.add_argument('--LTM', action='store_true', help='Use LTM.')\n parser.add_argument('--cas', action='store_true', help='Cascade network.')\n parser.add_argument('--no_brightness', action='store_true', help='No to calc brightness')\n parser.add_argument('--no_latent_color', action='store_true', help='Not to extract latent color. (Not to use with LTM)')\n parser.add_argument('--cat_In', action='store_true', help='Concat Input')\n parser.add_argument('--reg_LTM', action='store_true', help='Regularizaiton LTM.')\n parser.add_argument('--enc_LTM', action='store_true', help='Encoding LTM.')\n parser.add_argument('--enc_ill_hid', type=int, default=-1, help='The hidden layer dimention of illumination encoder. if -1 no to use hidden layer.')\n parser.add_argument('--dim_LTM', type=int, default=5, help='Encoding LTM number.')\n \n return parser", "def _parser_options():\n #We have two options: get some of the details from the config file,\n import argparse\n from pydft import base\n pdescr = \"Numerical DFT code.\"\n parser = argparse.ArgumentParser(parents=[base.bparser], description=pdescr)\n for arg, options in script_options.items():\n parser.add_argument(arg, **options)\n \n args = base.exhandler(examples, parser)\n if args is None:\n return\n\n return args # pragma: no cover", "def _get_model(self):\n\n parameters = {keys._topology:self.topology,\n keys._size:self.size,\n keys._name:self.name,\n #keys._output_activation:self._outActiv_fun_key,\n #keys._hidden_activation:self._hiddenActiv_fun_key,\n keys._learning_rate:self.learningRate,\n keys._momentum:self.momentum}\n\n return parameters", "def Generator_Option_Parser(argv, extra_opt, ignore_infile = True):\n opts = [(\"states\" , int, None , True , True),\n (\"symbols\" , int, None , True , True),\n (\"tape\" , int, 10000, False, True),\n (\"steps\" , int, 10000, False, True),\n (\"infile\" , str, None , False, True),\n (\"outfile\" , str, None , False, True),\n (\"log_number\", int, None , False, True)] + extra_opt\n ignore_opts = []\n if ignore_infile:\n ignore_opts.append(\"infile\")\n opts, args = Option_Parser(argv, opts, help_flag = True, no_mult = True,\n ignore_opts = ignore_opts)\n\n # The furthest that the machine can travel in n steps is n+1 away from the\n # origin. 
It could travel in either direction so the tape need not be longer\n # than 2 * max_steps + 3\n if opts[\"tape\"] > 2 * opts[\"steps\"] + 3:\n opts[\"tape\"] = 2 * opts[\"steps\"] + 3\n\n # Default output filename is based off of parameters.\n if not opts[\"outfile\"]:\n opts[\"outfile\"] = \"%dx%d.out\" % (opts[\"states\"], opts[\"symbols\"])\n opts[\"outfilename\"] = opts[\"outfile\"]\n opts[\"outfile\"] = open_outfile(opts[\"outfilename\"])\n if not opts[\"outfile\"]:\n sys.exit(1)\n\n opts[\"infilename\"] = opts[\"infile\"]\n if not ignore_infile:\n opts[\"infile\"] = open_infile(opts[\"infilename\"])\n\n return opts, args", "def parse_args():\n parser = argparse.ArgumentParser('GACM')\n parser.add_argument('--pretrain', action='store_true',\n help='pretrain the model')\n parser.add_argument('--train', action='store_true',\n help='train the model')\n parser.add_argument('--test', action='store_true',\n help='test the model')\n parser.add_argument('--rank', action='store_true',\n help='rank on train set')\n parser.add_argument('--rank_cheat', action='store_true',\n help='rank on train set in a cheating way')\n parser.add_argument('--generate_click_seq', action='store_true',\n help='generate click sequence based on model itself')\n parser.add_argument('--generate_click_seq_cheat', action='store_true',\n help='generate click sequence based on ground truth data')\n parser.add_argument('--generate_synthetic_dataset', action='store_true',\n help='generate synthetic dataset for reverse ppl')\n parser.add_argument('--use_gpu', action='store_true',\n help='use gpu instead of cpu')\n parser.add_argument('--gpu_num', type=int, default=1,\n help='gpu_num')\n parser.add_argument('--data_parallel', action='store_true',\n help='data_parallel')\n parser.add_argument('--dataset_version', type=int, default=1,\n help='version number of the dataset that is used')\n parser.add_argument('--agent_version', type=int, default=1,\n help='version number of the agent that is used')\n\n train_settings = parser.add_argument_group('train settings')\n train_settings.add_argument('--optim', default='adam',\n help='optimizer type')\n train_settings.add_argument('--g_lr', type=float, default=0.001,\n help='learning rate of generator')\n train_settings.add_argument('--d_lr', type=float, default=0.01,\n help='learning rate of discriminator')\n train_settings.add_argument('--weight_decay', type=float, default=0,\n help='weight decay')\n train_settings.add_argument('--momentum', type=float, default=0.99,\n help='momentum')\n train_settings.add_argument('--dropout_rate', type=float, default=0.5,\n help='dropout rate')\n train_settings.add_argument('--alpha', type=float, default=0.5,\n help='policy_surr')\n train_settings.add_argument('--beta', type=float, default=0.5,\n help='policy entropy')\n train_settings.add_argument('--gamma', type=float, default=0.99,\n help='discount factor')\n train_settings.add_argument('--tau', type=float, default=0.95,\n help='gae')\n train_settings.add_argument('--clip_epsilon', type=float, default=0.2,\n help='ppo')\n train_settings.add_argument('--batch_size', type=int, default=20,\n help='train batch size')\n train_settings.add_argument('--num_steps', type=int, default=200000,\n help='number of training steps')\n train_settings.add_argument('--num_train_files', type=int, default=1,\n help='number of training files')\n train_settings.add_argument('--num_dev_files', type=int, default=1,\n help='number of dev files')\n train_settings.add_argument('--num_test_files', type=int, default=1,\n 
help='number of test files')\n train_settings.add_argument('--num_label_files', type=int, default=1,\n help='number of label files')\n train_settings.add_argument('--minimum_occurrence', type=int, default=1,\n help='minimum_occurrence for NDCG')\n train_settings.add_argument('--g_step', type=int, default=4,\n help='generator is updated g_step times during one epoch')\n train_settings.add_argument('--d_step', type=int, default=1,\n help='synthetic trajectory is generated d_step times during one epoch')\n train_settings.add_argument('--k', type=int, default=1,\n help='discriminator is updated k times during one epoch')\n\n model_settings = parser.add_argument_group('model settings')\n model_settings.add_argument('--algo', default='GACM',\n help='choose the algorithm to use')\n model_settings.add_argument('--embed_size', type=int, default=100,\n help='size of the embeddings')\n model_settings.add_argument('--gru_hidden_size', type=int, default=64,\n help='size of LSTM hidden units')\n model_settings.add_argument('--critic_hidden_size', type=int, nargs='+', default=[64, 32],\n help='size of critic hidden units')\n model_settings.add_argument('--max_d_num', type=int, default=10,\n help='max number of docs in a session')\n\n path_settings = parser.add_argument_group('path settings')\n path_settings.add_argument('--train_dirs', nargs='+',\n default=['./data/train_per_query.txt'],\n help='list of dirs that contain the preprocessed train data')\n path_settings.add_argument('--dev_dirs', nargs='+',\n default=['./data/dev_per_query.txt'],\n help='list of dirs that contain the preprocessed dev data')\n path_settings.add_argument('--test_dirs', nargs='+',\n default=['./data/test_per_query.txt'],\n help='list of dirs that contain the preprocessed test data')\n path_settings.add_argument('--label_dirs', nargs='+',\n default=['data/human_label_for_GACM.txt'],\n help='list of dirs that contain the preprocessed label data')\n path_settings.add_argument('--human_label_dir', default='./data/human_label.txt',\n help='the dir to Human Label txt file')\n path_settings.add_argument('--load_dir', default='./outputs/models/',\n help='the dir to load models')\n path_settings.add_argument('--save_dir', default='./outputs/models/',\n help='the dir to save models')\n path_settings.add_argument('--result_dir', default='./outputs/results/',\n help='the dir to output the results')\n path_settings.add_argument('--summary_dir', default='./outputs/summary/',\n help='the dir to write tensorboard summary')\n path_settings.add_argument('--log_dir', default='./outputs/log/',\n help='path of the log file. 
If not set, logs are printed to console')\n\n path_settings.add_argument('--eval_freq', type=int, default=10,\n help='the frequency of evaluating on the dev set when training')\n path_settings.add_argument('--check_point', type=int, default=1000,\n help='the frequency of saving model')\n path_settings.add_argument('--patience', type=int, default=3,\n help='lr half when more than the patience times of evaluation\\' loss don\\'t decrease')\n path_settings.add_argument('--lr_decay', type=float, default=0.5,\n help='lr decay')\n path_settings.add_argument('--load_model', type=int, default=-1,\n help='load model at global step')\n path_settings.add_argument('--load_pretrain_model', type=int, default=-1,\n help='load the pretrained model at global step')\n\n return parser.parse_args()", "def __init__(self, opt: argparse.Namespace) -> None:\n super().__init__(opt)\n\n self.gpu_ids = opt.gpu_ids\n self.is_train = opt.is_train\n self.output_nch = opt.output_nch\n self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')\n\n # generator module\n self._generator_module = generator_modules[opt.generator_module_name](opt)\n apply_init_weight(self._generator_module, opt, init_weight=init_weights[opt.init_weight_name])\n if self.is_train:\n # discriminator module\n self._discriminator_module = discriminator_modules[opt.discriminator_module_name](opt)\n apply_init_weight(self._discriminator_module, opt, init_weight=init_weights[opt.init_weight_name])\n # generator optimizer\n self._generator_optimizer = optimizers[opt.generator_optimizer_name](self._generator_module.parameters(), opt)\n # discriminator optimizer\n self._discriminator_optimizer = optimizers[opt.discriminator_optimizer_name](self._discriminator_module.parameters(), opt)\n # generator scheduler\n self._generator_scheduler = schedulers[opt.generator_scheduler_name](self._generator_optimizer, opt)\n # discriminator scheduler\n self._discriminator_scheduler = schedulers[opt.discriminator_scheduler_name](self._discriminator_optimizer, opt)\n\n # register\n if not self.is_train:\n self.modules['generator'] = self._generator_module\n else:\n self.modules['generator'] = self._generator_module\n self.modules['discriminator'] = self._discriminator_module\n self.optimizers['generator'] = self._generator_optimizer\n self.optimizers['discriminator'] = self._discriminator_optimizer\n self.schedulers['generator'] = self._generator_scheduler\n self.schedulers['discriminator'] = self._discriminator_scheduler\n\n self.module_transfer_to_device()", "def options(self):\r\n return self._options", "def test_opt_presets(self):\n pp = ParlaiParser(True, False)\n pp.add_argument(\"-m\", \"--model\")\n # hardcoded example\n opt = pp.parse_args(['--model', 'transformer/generator', '-o', 'gen/meena'])\n assert opt['beam_size'] == 20\n assert opt['inference'] == 'topk'\n assert opt['topk'] == 40\n # and preference for command line over opt presets\n pp = ParlaiParser(True, False)\n pp.add_argument(\"-m\", \"--model\")\n opt = pp.parse_args(\n ['--model', 'transformer/generator', '-o', 'gen/meena', '--topk', '7']\n )\n assert opt['beam_size'] == 20\n assert opt['inference'] == 'topk'\n assert opt['topk'] == 7\n # double check ordering doesn't matter\n pp = ParlaiParser(True, False)\n pp.add_argument(\"-m\", \"--model\")\n opt = pp.parse_args(\n ['--model', 'transformer/generator', '--topk', '8', '-o', 'gen/meena']\n )\n assert opt['beam_size'] == 20\n assert opt['inference'] == 'topk'\n assert opt['topk'] == 8\n # check 
composability\n pp = ParlaiParser(True, False)\n pp.add_argument(\"-m\", \"--model\")\n opt = pp.parse_args(['-o', 'arch/blenderbot_3B,gen/meena'])\n assert opt['beam_size'] == 20\n assert opt['inference'] == 'topk'\n assert opt['model'] == 'transformer/generator'\n assert opt['n_encoder_layers'] == 2", "def get_flags():\n flags.DEFINE_string(\n 'model_name',\n help='MobileNet version name: mobilenet_v1, mobilenet_v2, '\n 'mobilenet_v3_small and mobilenet_v3_large',\n default='mobilenet_v1'\n )\n flags.DEFINE_string(\n 'dataset_name',\n help='Dataset name from TDFS to train on: imagenette, imagenet2012',\n default='imagenette'\n )\n flags.DEFINE_string(\n 'model_dir',\n help='Working directory.',\n default='./tmp'\n )\n flags.DEFINE_string(\n 'data_dir',\n help='Directory for training data.',\n default=None\n )\n flags.DEFINE_bool(\n 'resume_checkpoint',\n help='Whether resume training from previous checkpoint.',\n default=False\n )\n flags.DEFINE_string(\n 'optimizer_name',\n help='Name of optimizer.',\n default='rmsprop'\n )\n flags.DEFINE_string(\n 'learning_scheduler_name',\n help='Name of learning rate scheduler.',\n default='exponential'\n )\n # for hyperparameter tuning\n flags.DEFINE_float(\n 'op_momentum',\n help='Optimizer momentum.',\n default=0.9\n )\n flags.DEFINE_float(\n 'op_decay_rate',\n help='Optimizer discounting factor for gradient.',\n default=0.9\n )\n flags.DEFINE_float(\n 'lr',\n help='Base learning rate.',\n default=0.008\n )\n flags.DEFINE_float(\n 'lr_decay_rate',\n help='Magnitude of learning rate decay.',\n default=0.97\n )\n flags.DEFINE_float(\n 'lr_decay_epochs',\n help='Frequency of learning rate decay.',\n default=2.4\n )\n flags.DEFINE_float(\n 'label_smoothing',\n help='The amount of label smoothing.',\n default=0.0,\n )\n flags.DEFINE_float(\n 'ma_decay_rate',\n help='Exponential moving average decay rate.',\n default=None\n )\n flags.DEFINE_float(\n 'dropout_rate',\n help='Dropout rate.',\n default=0.2\n )\n flags.DEFINE_float(\n 'std_weight_decay',\n help='Standard weight decay.',\n default=0.00004\n )\n flags.DEFINE_float(\n 'truncated_normal_stddev',\n help='The standard deviation of the truncated normal weight initializer.',\n default=0.09\n )\n flags.DEFINE_float(\n 'batch_norm_decay',\n help='Batch norm decay.',\n default=0.9997\n )\n flags.DEFINE_integer(\n 'batch_size',\n help='Training batch size.',\n default=4 # for testing purpose\n )\n flags.DEFINE_integer(\n 'epochs',\n help='Number of epochs.',\n default=5\n )", "def extra_options():\n extra_vars = {\n 'PrgEnv': [None, 'PrgEnv module to load, e.g., cray to load PrgEnv-cray, or None for automatic determination', CUSTOM],\n 'PrgEnv_load': [True, 'Load the PrgEnv module (if True) or just set the corresponding environment variable (if False)', CUSTOM],\n 'PrgEnv_family': [None, 'Declare to be a member of the PrgEnv family (if \\'PrgEnv\\), of the cpeToolchain family (if \\'cpeToolchain\\') or manually unload all known PrgEnv and cpe* modules (if None, needed when LMOD is not used)', CUSTOM],\n 'CPE_compiler': [None, 'Versionless compiler module to load, or None for automatic determination', CUSTOM],\n 'CPE_version': [None, 'Version of the CPE, if different from the version of the module', CUSTOM],\n 'CPE_load': [ 'first', 'First load the cpe module (if \\'first\\'), after the PrgEnv module (if \\'after\\'), load it at the end (if \\'last\\'), or do not load the cpe module (if None)', CUSTOM],\n 'cray_targets': [[], 'Targetting modules to load', CUSTOM],\n #'optional_example_param': [None, 
\"Example optional custom parameter\", CUSTOM],\n }\n return Bundle.extra_options(extra_vars)", "def init_params(options):\n\tparams = OrderedDict()\n\n\t# embedding\n\tparams['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])\n\n\t# encoder: GRU\n\tparams = get_layer(options['encoder'])[0](options, params, prefix='encoder',\n\t\t\t\t\t\t\t\t\t\t\t nin=options['dim_word'], dim=options['dim'])\n\treturn params", "def init_params_bi(options):\n\tparams = OrderedDict()\n\n\t# embedding\n\tparams['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])\n\n\t# encoder: GRU\n\tparams = get_layer(options['encoder'])[0](options, params, prefix='encoder',\n\t\t\t\t\t\t\t\t\t\t\t nin=options['dim_word'], dim=options['dim'])\n\tparams = get_layer(options['encoder'])[0](options, params, prefix='encoder_r',\n\t\t\t\t\t\t\t\t\t\t\t nin=options['dim_word'], dim=options['dim'])\n\treturn params", "def get_model_params(self):\n params_dict = vars(self).copy()\n exclude_params = ['input_size',\n 'model',\n 'train_generator',\n 'val_generator',\n 'callbacks',\n 'save_to_dir',\n 'keras_logs_folder',\n 'samples_seen',\n 'params_filepath',\n 'session_number',\n 'params_file_name',\n 'weights_file_name',\n 'checkpoint_filename',\n 'curr_folder'\n ]\n\n for key in exclude_params:\n params_dict.pop(key)\n return params_dict", "def get_model_config(self, model_num=0):\n return [], resources.get_file(\n \"config/tests/methods/unsupervised/train_test.gin\")", "def get_cmd(j): #SKIP\n d = {k:v for k,v in j.items() if k in Build.TARGET_OPTIONS}\n if OS_KEY in j:\n os_d = {k:v for k,v in j.get(OS_KEY, {}).items() if k in Build.TARGET_OPTIONS}\n d.update(os_d)\n return d", "def rl_modelrl_ae_short():\n hparams = rl_modelrl_ae_base()\n hparams.autoencoder_train_steps //= 10\n hparams.true_env_generator_num_steps //= 5\n hparams.model_train_steps //= 10\n hparams.ppo_epochs_num //= 10\n return hparams", "def base_model_config():\n return {\n # TFRecord file pattern containing Example protos.\n \"input_file_pattern\": \"\",\n\n # Number of examples to keep in the input queue.\n \"input_queue_capacity\": 5 * 640000, # 5 shards of the BookCorpus.\n\n # Number of threads for prefetching TFRecord values.\n \"num_input_reader_threads\": 1,\n\n # Whether to shuffle the input data.\n \"shuffle_input_data\": True,\n\n # Scale of the random uniform initializer.\n \"uniform_init_scale\": 0.1,\n\n # Number of unique words in the vocab.\n \"vocab_size\": 20000,\n\n # Batch size (training and evaluation only).\n \"batch_size\": 128,\n\n # Word embedding dimension.\n \"word_embedding_dim\": 620,\n\n # Whether to use a bidirectional or unidirectional encoder RNN.\n \"bidirectional_encoder\": False,\n\n # Number of output dimensions of the sentence encoder.\n \"encoder_dim\": 2400,\n\n # Operation for combining the final states of the encoder GRU\n \"pooling_operation\": \"last\",\n }", "def _argParser():\n parser = argparse.ArgumentParser(\n prog='modelc',\n description='Compile the given sources.')\n if FULL_INTERFACE:\n for (parameter,label) in OPTIONS:\n parser.add_argument(\n '--'+parameter,\n dest=parameter,\n const=1,\n # default=0,\n type=int,\n nargs='?',\n help='debug: control %s' % label)\n parser.add_argument(\n '-bw',\n dest='bw',\n action='store_true',\n help='black and white output',\n default=False)\n parser.add_argument(\n '-dg',\n dest='dg',\n action='store_true',\n default=False)\n parser.add_argument(\n '--issues', '-i',\n dest='issues',\n const='top',\n default='inline',\n help='choose 
location of issues wrt to the listing.',\n choices=['top', 'inline', 'bottom'],\n type=str,\n nargs='?')\n parser.add_argument(\n '--list', '-l',\n dest='listing',\n const='source',\n default='no',\n help='display a listing of model.',\n choices=['no', 'source', 'model'],\n type=str,\n nargs='?')\n parser.add_argument(\n '--summary', '-s',\n dest='summary',\n const='bottom',\n default='no',\n help='display a summary of model.',\n choices=['no', 'top', 'bottom'],\n type = str,\n nargs = '?')\n parser.add_argument(\n '-mode', '-m',\n dest='mode',\n const='full',\n default='full',\n help='choose the checking level.',\n choices=['justAST', 'justASTDep', 'full'],\n type=str,\n nargs='?')\n parser.add_argument(\n '--verbose', '-v',\n dest='verbose',\n action='store_true',\n default=False,\n help='make output verbose.')\n parser.add_argument(\n '--quiet', '-q',\n dest='quiet',\n action='store_true',\n help='create minimal amount of output.',\n default=False)\n parser.add_argument(\n '--version', '-V',\n dest='version',\n action='store_true',\n help='display modelscript version.',\n default=False)\n parser.add_argument(\n 'sources',\n metavar='source',\n nargs='*',\n help='A model source file or a directory.')\n return parser", "def init_opt(self):\n raise NotImplementedError", "def init_opt(self):\n raise NotImplementedError", "def init_opt(self):\n raise NotImplementedError", "def init_opt(self):\n raise NotImplementedError", "def setup_optims(self):\n lr = self.train_config['lr']\n b1 = self.train_config['b1']\n b2 = self.train_config['b2']\n weight_decay = self.train_config['weight_decay']\n self.opt = torch.optim.Adam(self.network.parameters(), lr=lr, betas=(b1, b2),\n weight_decay=weight_decay)", "def setting(self):\n\n if ADVERSARIAL_FLAG:\n return {'Net': self.net,\n \"AdvNet\": self.adv_net,\n 'TrainSet': self.train_dataset,\n 'ValSet': self.val_dataset,\n 'Optimizer': self.optimizer,\n 'AdvOptimizer': self.adv_optimizer,\n 'StatsManager': self.stats_manager,\n 'BatchSize': self.batch_size,\n 'PerformValidationDuringTraining': self.perform_validation_during_training}\n\n return {'Net': self.net,\n # \"AdvNet\": self.adv_net,\n 'TrainSet': self.train_dataset,\n 'ValSet': self.val_dataset,\n 'Optimizer': self.optimizer,\n # 'AdvOptimizer': self.adv_optimizer,\n 'StatsManager': self.stats_manager,\n 'BatchSize': self.batch_size,\n 'PerformValidationDuringTraining': self.perform_validation_during_training}", "def get_options(self):\n\t\treturn self.options", "def get_options():\n parser = argparse.ArgumentParser(\n description=\"view the aria2 queue on localhost:6800\",\n )\n # parser.add_argument() calls here\n options = parser.parse_args()\n # extra processing of options here\n return options", "def _setup_param_groups(self, model, config):\n encoder_opts = config['optimizer']['encoder']\n decoder_opts = config['optimizer']['decoder']\n\n encoder_weight_params = []\n encoder_bias_params = []\n decoder_weight_params = []\n decoder_bias_params = []\n\n for name, param in model.encoder.named_parameters():\n if name.endswith('bias'):\n encoder_bias_params.append(param)\n else:\n encoder_weight_params.append(param)\n\n for name, param in model.decoder.named_parameters():\n if name.endswith('bias'):\n decoder_bias_params.append(param)\n else:\n decoder_weight_params.append(param)\n\n self.logger.info(f'Found {len(encoder_weight_params)} encoder weight params')\n self.logger.info(f'Found {len(encoder_bias_params)} encoder bias params')\n self.logger.info(f'Found {len(decoder_weight_params)} 
decoder weight params')\n self.logger.info(f'Found {len(decoder_bias_params)} decoder bias params')\n\n params = [\n {'params': encoder_weight_params, **encoder_opts},\n {'params': decoder_weight_params, **decoder_opts},\n {'params': encoder_bias_params,\n 'lr': encoder_opts['lr'],\n 'weight_decay': encoder_opts['weight_decay']},\n {'params': decoder_bias_params,\n 'lr': decoder_opts['lr'],\n 'weight_decay': decoder_opts['weight_decay']},\n ]\n return params" ]
[ "0.6497855", "0.6085382", "0.57732934", "0.56216425", "0.55810773", "0.5563721", "0.54467785", "0.54459274", "0.54262084", "0.54096705", "0.53853124", "0.5346946", "0.5331329", "0.53296965", "0.53257495", "0.5312863", "0.5310399", "0.52858835", "0.52766055", "0.52655095", "0.5258684", "0.5227333", "0.5224458", "0.52107024", "0.5205455", "0.5203974", "0.5193468", "0.51817644", "0.51801366", "0.51705194", "0.5138714", "0.5128861", "0.51281434", "0.51243526", "0.5109916", "0.5095284", "0.50894654", "0.5080021", "0.5076423", "0.5075134", "0.5066456", "0.50657743", "0.5064596", "0.506246", "0.50594884", "0.5057694", "0.503541", "0.5029708", "0.5024999", "0.5009607", "0.500458", "0.5001459", "0.49983957", "0.49957225", "0.49947214", "0.4993258", "0.49875563", "0.49823055", "0.49783486", "0.4970767", "0.49565554", "0.49501094", "0.49415728", "0.49397844", "0.49389595", "0.49383134", "0.493242", "0.49276793", "0.4927486", "0.49249431", "0.49239397", "0.4918463", "0.49065694", "0.48942432", "0.4893856", "0.48930806", "0.48864096", "0.48845878", "0.4881419", "0.48745432", "0.48714444", "0.48711708", "0.48639", "0.48562622", "0.48475692", "0.48386246", "0.48366162", "0.48345762", "0.48312283", "0.4828738", "0.482826", "0.48276037", "0.48276037", "0.48276037", "0.48276037", "0.48256475", "0.4818988", "0.48178428", "0.48168075", "0.48168024" ]
0.6829646
0
Update the state of the hash object.
def update(self, data):
    self._total_length += len(data)
    self._buffer += data
    # A digest calculated for 240 bytes or less of data will use
    # self._seed and self._secret (at least one of which is the
    # default) directly whereas the digest calculated for more than
    # 240 bytes will use only self._secret. However, if a
    # non-default seed was provided (and not discarded because both
    # a seed and a secret were mistakenly provided) then
    # self._secret must be redefined to a secret generated from
    # self._seed (but only for more than 240 bytes of input data).
    #
    # Because of this, update() does nothing but store the data
    # until more than 240 bytes have been added. Then, it redefines
    # self._secret (if self.seed != 0) before continuing. So as to
    # do this only during the first call to update() in which there
    # is sufficient data, self._acc is also initialized at that time,
    # and the process is skipped if self._acc is already initialized.
    if self._total_length <= 240:
        return
    if self._acc is None:
        # There is sufficient data that _update_hashlong() will be
        # used and this is the first call to update that ensures
        # this. So, do setup for _update_hashlong().
        self._acc = [
            self._P32_3,
            self._P64_1,
            self._P64_2,
            self._P64_3,
            self._P64_4,
            self._P32_2,
            self._P64_5,
            self._P32_1,
        ]
        self._last_stripe = b""
        if self._seed != 0:
            self._secret = self._customsecret(self._seed)
    # _update_hashlong() will consume as much of self._buffer
    # as possible.
    self._update_hashlong()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n self._state = self._state", "def hash(self, hash):\n\n self._hash = hash", "def hash(self, hash):\n\n self._hash = hash", "def update(self):\n self.write_state(bytes([]))", "def update(self):\n self._state = 23", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def update(self):\n self._data.update()\n\n self._state = self._data.get_value(self._type)", "def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n \r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def update(self, new_content: dict):\n self.__init__(new_content, self.__previous_hash)", "def update_hashes(self, ret=False) -> None:\n self.hash = self.calculate_hash()\n self.stub_hash = self.calculate_hash(include_md=False)", "def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n\r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def update_hash(self, h):\n # Generate a sequence of fragments that add up to the canonical\n # version of the expression.\n fragments = []\n self.collect_str_fragments(fragments)\n # Update the hash. Wrapping with 'node<...>' prevents the hash\n # from being extended in a way that would clash with something we can\n # generate. 
(Probably not an important concern but it doesn't hurt.)\n h.update(\"node<\")\n for f in fragments:\n h.update(f)\n h.update(\">\")", "def update_state(self, result):\n for i,k in enumerate(self._current_state.keys()):\n self._current_state[k] = result[i]", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def _update_value(self, value):\n old_hash = get_hash(self._value)\n new_hash = get_hash(value)\n self._value = value\n if old_hash is None or new_hash is None or (old_hash != new_hash):\n self.is_dirty = True", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops caused by a constantly changing state value at each run.\n # Example: state.value += 1\n\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None)", "def init_hash_state(self) -> None:\n self.hash_states = [hashlib.sha1()]", "def dup_hash_state(self) -> None:\n assert len(self.hash_states) > 0\n self.hash_states.append(self.hash_states[-1].copy())", "def __setstate__(self, state):\n self.__dict__.update(state)", "def update(self, key):\n return self.state", "def async_update_state(self, state):\n _LOGGER.debug(\"state=%s\", state)\n self._state = state\n self.async_write_ha_state()", "def updateSimState(self):\n self.sim_state = {k: v for k,v in self.state.iteritems()}", "def current_hash(self):", "def update_state(self, new_state):\n self.__state = new_state", "def update_state(self, context):\n pass", "def update(self):\n self.data_service.update()\n self._state = self.data_service.data[self._json_key]", "def calculate_hash(self, hash_fn=Crypto.calculate_hash):\n\n self.hash = hash_fn(\n self.index, self.previous_hash, self.timestamp, self.payload, self.nonce\n )", "def update(self, new_gameStateData):\r\n pass", "def update(self, data):\n logging.info('update state', data)\n self._client.update_state(data)\n\n # Also locally update our state so things aren't out of sync\n self._state.update(data)", "def update_state(self, dstate):\n pass", "def update(self):\n\n self._state = get_balance(self.addresses)", "def set_hash(self, hash_name, data):\n self.hashes[hash_name] = data", "def update_state(self, *args, **kwargs):\n raise NotImplementedError('Must be 
implemented in subclasses.')", "def update(self, *args, **kwargs) -> None:\n self.update_state(args[0])\n super().update(*args, **kwargs)", "def progression_hash(self, progression_hash):\n\n self._progression_hash = progression_hash", "def update_state(self, progress, policy_state=None):\n raise NotImplementedError", "def update_hash(hasher, obj):\r\n hasher.update(str(type(obj)))\r\n if isinstance(obj, (tuple, list)):\r\n for e in obj:\r\n update_hash(hasher, e)\r\n elif isinstance(obj, dict):\r\n for k in sorted(obj):\r\n update_hash(hasher, k)\r\n update_hash(hasher, obj[k])\r\n else:\r\n hasher.update(repr(obj))", "def update_to_state(self, game_state):\n pass", "def update_hash(cls, filelike, digest):\r\n block_size = digest.block_size * 1024\r\n for chunk in iter(lambda: filelike.read(block_size), b''):\r\n digest.update(chunk)", "def __setstate__(self, state):\n state['_lock'] = Lock()\n self.__dict__.update(state)", "def __hash__(self):\n return hash(self.hash)", "def __hash__(self):\n return hash((self.name, self.state))", "def update(self, state):\n self.states.append(state)", "def set_primary_object_hash(self, hsh):\n self.hash = hsh", "def __setstate__(self, state):\n # Restore instance attributes\n try: \n obj = Thing.ID_dict[state['id']] # is this obj already in dict?\n dbg.debug(\"Note: %s already in Thing.ID_dict, maps to %s\" % (state['id'], obj))\n except KeyError: # Not already in dict\n Thing.ID_dict[state['id']] = self\n if 'has_beat' in state:\n Thing.game.register_heartbeat(self)\n self.__dict__.update(state)", "def rehash(self):\n new_cap = self._get_new_capacity() # Choose not to handle the ValueError thrown by _get_new_capacity()\n new_table = HashTable(new_cap) # Create a new hash table directly\n for i in range(self.size):\n if self.keys[i] is not None: # Only put() when there exists a key (no Nones)\n new_table[self.keys[i]] = self.values[i] # Rehash and insert into the new table\n\n self.keys = new_table.keys # Update instance variables\n self.values = new_table.values\n self.size = new_cap # Update N\n self.count_rehashes += 1 # Increment total rehashes", "def __setstate__(self, state):\n\n for key, value in state.items():\n if key in self.__slots__:\n setattr(self, key, value)", "def update(self, data):\n # TODO: try not to use setattr\n for key, item in data.items():\n if key == \"password\":\n new_password = self.__generate_hash(item)\n setattr(self, key, new_password)\n else:\n setattr(self, key, item)\n\n super().update(data)\n db.session.commit()", "def __setstate__(self, state: dict) -> None: # pragma: no cover\n self.__dict__.update(state)\n self.rFp = {}\n self.wFp = {}\n self.Fp = ChainMap(self.rFp, self.wFp)\n self.open(mode=self.mode)", "def set_new_hash(court_id, new_hash):\n seals_data[court_id]['hash'] = new_hash", "def update(self) -> None:\n pass", "def update(self) -> None:\n pass", "def _update(self):\n pass", "async def update(self):\n resp = await self._request('get', 'state')\n if resp:\n for line in resp.splitlines():\n key, val = line.strip().split(None, 1)\n if val == 'on' or val == 'off':\n val = (val == 'on')\n self.state_data[key] = val\n else:\n self.state_data[key] = val", "def update_states(self) -> None:\n self.set_states()\n self.async_write_ha_state()", "def update(self, key, val):\n state_dict = self.todict()\n assert key in state_dict\n state_dict[key] = val\n return self.state_factory.build(state_dict)", "def update(self, cache_key):\r\n self._write_sha(cache_key)", "def update(self):\n self.data_service.update()\n 
self._state = self.data_service.data[self._json_key]\n self._attributes = self.data_service.attributes[self._json_key]", "def update(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def update(self,ztate):\n \n if not ztate: return\n assert isinstance(ztate,State) , 'must update with another State-type'\n for key in self.keys():\n if isinstance(ztate[key],dict):\n self[key].update( ztate[key] )\n elif ztate[key]:\n self[key] = ztate[key]\n \n self.set_timestamp()", "def hash_id(self, hash_id):\n\n self._hash_id = hash_id", "def update(self):\n\n pass", "def _hash(self, item):\r\n pass # TODO\r", "def hash_state(self):\n return hash(self.board.tostring())", "def update(self):\r\n self._state = self._dev.state", "def update(self):\r\n if self._block.info_values is not None:\r\n self._state = self._block.info_values.get(self._sensor_name, None)", "def update_hash(hash_obj, file_root, file_path):\n\n relative_path = os.path.relpath(file_path, file_root)\n hash_obj.update(relative_path.encode())\n\n with open(file_path, 'rb') as open_file:\n while True:\n data = open_file.read(1024)\n if not data:\n break\n hash_obj.update(data)", "def hash(self):\n raise NotImplementedError() # To be subclassed", "def store(self, hash, original_url):\n self.r.set(hash, original_url)", "def rehash(self):\n old = list()\n # use iteration to record existing items\n for i in range(self.capacity // 2):\n if self.table[i] is not None:\n old.append(self.table[i])\n self.table = self.capacity * [None] # then reset table to desired capacity\n self.size = 0\n for i in old:\n index = self.quadratic_probe(i.key)\n self.table[index] = i\n self.size += 1", "def update(self):\n try:\n self._state = self.pushbullet.data[self._element]\n self._state_attributes = self.pushbullet.data\n except (KeyError, TypeError):\n pass", "def em_update_h(self):\n with self.elbo_check('h'):\n self.update_h()", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self) -> None:\n ...", "def update(self, game_state):\n # print str(game_state.to_json())\n pass", "def __setstate__(self, state: Dict[str, Any]) -> None:\n self.__dict__ = state.copy()\n # Once state is ingested - repopulate, NOT recursing.\n # Child segments will do it for themselves on unpickling.\n self.set_as_parent(recurse=False)", "def update_job_state(self, job):", "def update_dict(new,old):", "def _hash(self, hashKey):\n return hashKey % self.size", "def update(self):\n raise NotImplementedError", "def update(self, dict):\n with self.__rlock:\n UserDict.update(self, dict)\n for key in dict.keys():\n if key not in self._keys:\n self._keys.append(key)", "def update(self, new_gameStateData):\r\n self.data = new_gameStateData\r\n self._refresh()", "def __Hash(self):\n return self._Hash()", "def _update_object(self, data_dict):\r\n pass", "def update(self, obj):\n self._updater.update(obj)", "def _rehash(self, hashKey, integer):\n return ( hashKey + integer * integer ) % self.size" ]
[ "0.6944641", "0.6727618", "0.6727618", "0.66924864", "0.6656745", "0.6560295", "0.65587795", "0.6553051", "0.65319693", "0.6484831", "0.6480291", "0.6452417", "0.6441518", "0.6440476", "0.64331573", "0.6417873", "0.6360725", "0.6358439", "0.6283338", "0.62017864", "0.6175439", "0.612766", "0.60906535", "0.6066099", "0.6062357", "0.604157", "0.6031779", "0.60302436", "0.60126984", "0.5998002", "0.59960604", "0.5982639", "0.5980567", "0.5958645", "0.59555274", "0.5922857", "0.5897502", "0.58697665", "0.5867111", "0.5860965", "0.58381873", "0.58320546", "0.5827481", "0.582585", "0.58029056", "0.57919204", "0.5780319", "0.5778524", "0.57769585", "0.57742643", "0.5769962", "0.5748755", "0.5748755", "0.57373536", "0.57329893", "0.57177585", "0.5698371", "0.5697073", "0.56842", "0.5681469", "0.56772697", "0.56692713", "0.5669172", "0.56684464", "0.5655118", "0.5642882", "0.56390125", "0.56374574", "0.56340694", "0.56333697", "0.5632681", "0.56178904", "0.5608874", "0.5599445", "0.5599445", "0.5599445", "0.5599445", "0.5599445", "0.5599445", "0.5599445", "0.5599445", "0.5599445", "0.5599445", "0.5599445", "0.5599445", "0.5599445", "0.5599445", "0.5599445", "0.5595922", "0.5595713", "0.559099", "0.5588113", "0.5573724", "0.55611014", "0.5557277", "0.5556265", "0.5549208", "0.55481464", "0.55462843", "0.5534879", "0.55338734" ]
0.0
-1
Return the hash digest as a 64bit unsigned integer. This is the typical output format of the `reference implementation`_.
def intdigest(self):
    if self._total_length <= 240:
        if self._total_length == 0:
            return self._len_0()
        elif self._total_length <= 3:
            return self._len_1to3()
        elif self._total_length <= 8:
            return self._len_4to8()
        elif self._total_length <= 16:
            return self._len_9to16()
        elif self._total_length <= 128:
            return self._len_17to128()
        elif self._total_length <= 240:
            return self._len_129to240()
    # self._update_hashlong() has consumed as much of self._buffer
    # as possible. self._finalize_hashlong() will complete the
    # hash process
    return self._finalize_hashlong()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hash64bits(*args):\n # 64 bits hexdigest\n h = hashlib.sha1(bytes(repr(args), \"utf-8\")).hexdigest()[:16]\n # Convert to an integer and return\n return int(h, 16)", "def digest(self):\n # For discussion of big-endian vs little-endian for the hash\n # digest of XXHASH algorithms, see\n # https://github.com/Cyan4973/xxHash/issues/45\n return struct.pack(\">Q\", self.intdigest())", "def digest(self):\n d = MegaCrypto.str_to_a32(self.hash)\n return (d[0] ^ d[1], d[2] ^ d[3])", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def _digest(self):\n return self._hasher.hexdigest()", "def digest(self):\n return self._hash", "def hash(self) -> types.UInt256:\n with serialization.BinaryWriter() as bw:\n bw.write_uint32(settings.network.magic)\n self.serialize_unsigned(bw)\n data_to_hash = bytearray(bw._stream.getvalue())\n data = hashlib.sha256(hashlib.sha256(data_to_hash).digest()).digest()\n return types.UInt256(data=data)", "def b64hash(s):\n _hash = hashlib.sha256()\n _hash.update(str2bytes(s))\n return bytes2str(b64encode(_hash.digest()))", "def read_uint64(self):\n return self.read(BitTypes.UINT_64.value)", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"!f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.signature.encode()\r\n block_data += self.choice.encode()\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def hash_str_length(self):\n return self.hash_byte_length() * 2", "def hash_2(self):\n return self.unpack_qword(0x20)", "def uint64_t(n):\n return int(n).to_bytes(8, byteorder='little', signed=False)", "def _sha_byte_len(self):\n if self is HashType.SHA1:\n return 20\n if self is HashType.SHA224:\n return 28\n if self is HashType.SHA256:\n return 32\n if self is HashType.SHA384:\n return 48\n if self is HashType.SHA512:\n return 64\n return 0", "def hash_int(c, hash_length):\n if isinstance(c, float):\n if numpy.isnan(c):\n return c\n else:\n raise ValueError(f\"numpy.nan expected, not {c}\")\n else:\n b = struct.pack(\"i\", c)\n m = hashlib.sha256()\n m.update(b)\n r = m.hexdigest()\n if len(r) >= hash_length:\n r = r[:hash_length]\n return int(r, 16) % (10 ** 8)", "def digest(self):\r\n\r\n H0 = self.H0\r\n H1 = self.H1\r\n H2 = self.H2\r\n H3 = self.H3\r\n H4 = self.H4\r\n inputdata = [] + self.inputdata\r\n count = [] + self.count\r\n\r\n index = (self.count[1] >> 3) & 0x3fL\r\n\r\n if index < 56:\r\n padLen = 56 - index\r\n else:\r\n padLen = 120 - index\r\n\r\n padding = ['\\200'] + ['\\000'] * 63\r\n self.update(padding[:padLen])\r\n\r\n # Append length (before padding).\r\n bits = _sha_bytelist2longBigEndian(self.inputdata[:56]) + count\r\n\r\n self._transform(bits)\r\n\r\n # Store state in digest.\r\n digest = _sha_long2bytesBigEndian(self.H0, 4) + \\\r\n _sha_long2bytesBigEndian(self.H1, 4) + \\\r\n _sha_long2bytesBigEndian(self.H2, 4) + \\\r\n _sha_long2bytesBigEndian(self.H3, 4) + \\\r\n _sha_long2bytesBigEndian(self.H4, 4)\r\n\r\n self.H0 = H0 \r\n self.H1 = H1 \r\n self.H2 = H2\r\n self.H3 = H3\r\n self.H4 = H4\r\n self.inputdata = inputdata \r\n self.count = count \r\n\r\n return digest", "def _hash(self):\r\n MAX = 
sys.maxint\r\n MASK = 2 * MAX + 1\r\n n = len(self)\r\n h = 1927868237 * (n + 1)\r\n h &= MASK\r\n for x in self:\r\n hx = hash(x)\r\n h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167\r\n h &= MASK\r\n h = h * 69069 + 907133923\r\n h &= MASK\r\n if h > MAX:\r\n h -= MASK + 1\r\n if h == -1:\r\n h = 590923713\r\n return h", "def hash_byte_length(self):\n if self.is_crc():\n return self._crc_byte_len()\n if self.is_md():\n return 16\n if self.is_sha():\n return self._sha_byte_len()\n return 0", "def _get_u64(property_value):\n size = struct.calcsize('!Q')\n return struct.unpack('!Q', property_value[:size])[0], property_value[size:]", "def pack_ssh_uint64(i):\n if not isinstance(i, int):\n raise TypeError(\"Must be an int\")\n elif i.bit_length() > 64:\n raise ValueError(\"Must be a 64bit value.\")\n\n return struct.pack('>Q', i)", "def hexdigest(self):\n return \"\".join(\"%02x\" % ord(x)\n for x in MegaCrypto.a32_to_str(self.digest()))", "def hash(self):\n return Hash.dhash(bytes(self))", "def calculate_hash_id(self):\n return get_md5_hash(f'{self.type}{self.get_primary_id()}')", "def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.public_key.public_bytes(serialization.Encoding.X962,\r\n serialization.PublicFormat.CompressedPoint)\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()", "def calculateHash(self):\n hashData = str(self.index) + str(self.data) + self.timestamp + self.previousHash + str(self.nonce)\n return hashlib.sha256(hashData.encode(encoding='UTF-8',errors='strict')).hexdigest()", "def get_hash(data, n):\n import hashlib\n message_hash = hashlib.sha512(data).digest()\n e = int.from_bytes(message_hash, 'big')\n\n # FIPS 180 says that when a hash needs to be truncated, the rightmost bits\n # should be discarded.\n z = e >> (e.bit_length() - n.bit_length())\n\n assert z.bit_length() <= n.bit_length()\n\n return z", "def hexdigest(self):\r\n return ''.join(['%02x' % ord(c) for c in self.digest()])", "def get_hash(self) -> str:\n return self.__hash.hexdigest()", "def hexdigest(self):\n return self.hashObject.hexdigest()", "def hash(self) -> bytes:", "def _Hash(self):\n fullhash = util.PrefixHash(self.key_bytes)\n return util.Base64WSEncode(fullhash[:constants.KEY_HASH_SIZE])", "def digest(self):\n val = (self.numerator * pow(self.denominator, -1, self.MODULUS)) % self.MODULUS\n bytes384 = val.to_bytes(384, 'little')\n return hashlib.sha256(bytes384).digest()", "def _actual_hash(self):\n return hash_of_file(join(self._temp_path, self._downloaded_filename()))", "def computeHash(string):\n\tif isBytes(string):\n\t\tstring = string.decode(\"latin-1\")\n\thash_ = 63689\n\tfor char in string:\n\t\thash_ = hash_ * 378551 + ord(char)\n\treturn hash_ % 65536", "def digest(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"digest\", _args)\n return _ctx.execute_sync(str)", "def digest(o):\n ser = serialize(o)\n return _truncated_digest(ser.encode(enc)).decode(enc)", "def digest(self, message):\n\n hasher = hashlib.md5()\n hasher.update(message)\n digest = hasher.digest()[0:self.HASHLEN]\n\n return binascii.hexlify(digest)", "def hash_string(self):\n return self._hash_string", "def get_hash(link):\n return hashlib.sha256(link.encode('utf-8')).hexdigest()", "def hash(self) -> str:\n return pulumi.get(self, \"hash\")", "async def get_hash(identifier):\n return hashlib.md5(identifier.encode('utf8')).hexdigest()", 
"def hash(self):\n block = 1024 * 1024 * 4 # 4 MB.\n hasher = hashlib.sha256()\n\n with open(self.path, \"rb\") as f:\n while True:\n chunk = f.read(block)\n if not chunk:\n break\n hasher.update(hashlib.sha256(chunk).digest())\n\n digest = hasher.hexdigest()\n pdbox.debug(\"Hash for %s: %s\" % (self.path, digest))\n return digest", "def id_to_hash(self, id):\n mm = hashlib.sha256(struct.pack('>q', id))\n vv = struct.unpack(\">q\", mm.digest()[0:8])\n return vv[0] & 0x7fffffffffffffff # Don't be negative", "def make_hash(self, long_url: str, hash_length: int):\n hasher = hashlib.md5(long_url.encode())\n bytes_hash = base64.urlsafe_b64encode(hasher.digest())[:hash_length]\n str_hash = bytes_hash.decode()\n return str_hash", "def nextRandom(self):\n # Apply SHA-256, interpreting digest output as integer\n # to yield 256-bit integer (a python \"long integer\")\n hash_output = self.basehash.digest()\n self.next()\n return hash_output", "def getHash():\n return str(uuid.uuid4())[-17:].replace(\"-\", \"\")", "def hash(self):\n return self._hash", "def hex_hash(s):\n if not s:\n return '0'\n s = s.encode('utf-8')\n return '{:x}'.format(adler32(s) & 0xffffffff)", "def get_hash_code(s):\n h = 0\n n = len(s)\n for i, c in enumerate(s):\n h = h + ord(c) * 31 ** (n - 1 - i)\n return StrUtil.convert_4_bytes(h)", "def HashAlgorithm(self) -> _n_7_t_0:", "def digest(self):\n return digest_tools.sha256_digest(self._payload.as_encoded_str())", "def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def _CalculateDigestHash(self, file_entry, data_stream_name):\n file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)\n if not file_object:\n return\n\n try:\n file_object.seek(0, os.SEEK_SET)\n\n hasher_object = hashers_manager.HashersManager.GetHasher(u'sha256')\n\n data = file_object.read(self._READ_BUFFER_SIZE)\n while data:\n hasher_object.Update(data)\n data = file_object.read(self._READ_BUFFER_SIZE)\n\n finally:\n file_object.close()\n\n return hasher_object.GetStringDigest()", "def digest_size(self):\n\n return self.__digest_size", "def _electrum_script_hash(script: bytes) -> str:\n bytes = bytearray(scripts.sha256(script))\n bytes.reverse()\n return bytes.hex()", "def _guid64():\n return _base91(random.randint(0, 2**64 - 1))", "def calc_statistics_hash(self) -> bytes:\n raise NotImplementedError()", "def get_binary_sha256_hash(hash: str) -> str:\n result = \"\"\n\n for character in hash:\n character_number = int(character, base=16)\n binary_number = bin(character_number)\n # CAVEAT: each hash character is 4 bit size since SHA256 hash is hexidecimal string, so 4 * 64 = 256 bit\n formatted_binary_number = binary_number[2:].ljust(4, \"0\")\n result += formatted_binary_number\n\n return result", "def u64(value: bytes, endian: str = \"little\", sign: bool = False) -> int:\n return unpack(value, 64, endian, sign)", "def hash_1(self):\n return self.unpack_qword(0x18)", "def digest(self):\n return self._parsed[\"digest\"]", "def hash(self):\n return hashlib.sha256(self.to_json().encode()).hexdigest()", "def pack_uint64(data: int) -> bytes:\n return struct.pack(\">Q\", data)", "def pack_uint64(data: int) -> bytes:\n return struct.pack(\">Q\", data)", "def hash(self):\n return xxhash.xxh64(self._pwm_to_str(3)).hexdigest()", "def hexdigest(self):\n # bytes.hex() is simpler, but 
not available For Python <= 3.4\n return \"\".join(\"{0:0>2x}\".format(b) for b in self.digest())", "def get_hash(self):\r\n return", "def digest(self, seq):\n\n h = hashlib.new(self._hash_algorithm)\n h.update(seq)\n dig = h.hexdigest()\n\n return dig", "def hash(self) -> str:\r\n ...", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def get_last_hash(self):\n return self.get_last().hash_block()", "def elf_hash(s):\n h = 0\n for c in s:\n h = (h << 4) + ord(c)\n t = (h & 0xF0000000)\n if t != 0:\n h = h ^ (t >> 24)\n h = h & ~t\n return h", "def incore_digest(self):\n return hasher(self.content).hexdigest()", "def SHA256(self) -> _n_0_t_3[_n_0_t_9]:", "def get_digest(self) -> Digest:\n ...", "def digest(self):\n pass", "def digest(self):\n pass", "def digest(self):\n pass", "def compute_dhash(im):\n return imagehash.dhash(ensure_pil(im))", "def create_hash(self):\n return os.urandom(32).encode('hex')", "def hash_float(c, hash_length):\n if numpy.isnan(c):\n return c\n else:\n b = struct.pack(\"d\", c)\n m = hashlib.sha256()\n m.update(b)\n r = m.hexdigest()\n if len(r) >= hash_length:\n r = r[:hash_length]\n i = int(r, 16) % (2 ** 53)\n return float(i)", "def u64(d):\n return unpack('<Q', d)[0]", "def get_hash(self, params):\n return self.sha", "def get_file_hash(fname, hash_length):\n hash_sha = hashlib.sha256()\n with open(fname, 'rb') as infile:\n for chunk in iter(lambda: infile.read(4096), b''):\n hash_sha.update(chunk)\n hash_sha = hash_sha.hexdigest()\n hash_sha = int(hash_sha, 16) % (2 ** (4 * hash_length))\n return hex_encode(hash_sha, hash_length)", "def get_digest_hash(response_data: dict, force: bool = False) -> str:\n if force:\n return 'forced-{}'.format(time.time())\n\n r = response_data.copy()\n r['timestamp'] = None\n serialized = json.dumps(r, cls=ComplexJsonEncoder)\n func = getattr(hashlib, 'blake2b', hashlib.sha256)\n return func(serialized.encode()).hexdigest()", "def HexDigest(self, name, truncation_length=None):\n\n if truncation_length is None:\n truncation_length = 64\n name_bytes = name.encode('UTF-8')\n return hashlib.sha256(name_bytes).hexdigest()[:truncation_length]", "def strhash(s: str) -> int:\n h = hashlib.md5(s.encode('utf-8'))\n h = int(h.hexdigest(), base=16)\n return h", "def printable_hash(h):\n return int(h).to_bytes(32, byteorder='big', signed=False).hex()", "def h(x):\n\n hasher = hashlib.sha256()\n hasher.update(x)\n return hasher.digest()", "def seed_hash(*args):\n args_str = str(args)\n return int(hashlib.md5(args_str.encode(\"utf-8\")).hexdigest(), 16) % (2**31)", "def get_hash(self):\n return self.__hash", "def encoded_hash(sha):\n return urlsafe_b64encode(sha.digest()).decode('ascii').rstrip('=')", "def hash(self, text):\n hashval = 0\n for i in xrange(0, len(text)):\n hashval += ord(text[i])**i\n return hashval", "def to_win_64_hex(self):\n try:\n dt_obj = duparser.parse(timestamp)\n minus_epoch = dt_obj - self.epoch_1601\n calculated_time = minus_epoch.microseconds + (minus_epoch.seconds * 1000000) + (minus_epoch.days * 86400000000)\n self.out_windows_hex_64 = str(hex(int(calculated_time)*10))[2:].zfill(16)\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.out_windows_hex_64 = False\n return self.out_windows_hex_64", "def ReadUInt64(self, endian=\"<\"):\n return self.unpack('%sQ' % endian, 
8)", "def get_hash(hash_function, x: str):\n hash_function.update(x.encode())\n return int.from_bytes(hash_function.digest(), byteorder=\"big\")" ]
[ "0.76398647", "0.7091548", "0.6616426", "0.66112274", "0.66112274", "0.65616864", "0.6554457", "0.6453391", "0.63296676", "0.6313555", "0.63109", "0.6277919", "0.6270182", "0.6270182", "0.62685853", "0.62674755", "0.6248589", "0.624666", "0.622734", "0.61998284", "0.61998194", "0.6195548", "0.6183146", "0.6164658", "0.6123467", "0.61212355", "0.6106043", "0.60724705", "0.6031556", "0.60084194", "0.6007206", "0.59880066", "0.5978985", "0.5977758", "0.59587604", "0.5950699", "0.5919066", "0.59124446", "0.5894648", "0.588245", "0.5874103", "0.5873536", "0.587335", "0.5861652", "0.58515763", "0.5844978", "0.5842109", "0.58416355", "0.5840062", "0.5840017", "0.5837233", "0.58238864", "0.5820622", "0.5810748", "0.58105284", "0.58027697", "0.58027697", "0.5801429", "0.57758725", "0.5773341", "0.57728106", "0.57684976", "0.576332", "0.5761812", "0.57527614", "0.57507753", "0.5749478", "0.5748944", "0.5748944", "0.57454973", "0.5745336", "0.57425344", "0.57412577", "0.5735636", "0.5733542", "0.57321525", "0.5732093", "0.572274", "0.57215315", "0.5713109", "0.57117987", "0.57117987", "0.57117987", "0.57049716", "0.5701814", "0.5685651", "0.5684015", "0.56838185", "0.56763935", "0.56762296", "0.5672189", "0.5667569", "0.56549287", "0.56502646", "0.56469935", "0.5628428", "0.56251675", "0.5620405", "0.5618767", "0.561762", "0.5614482" ]
0.0
-1
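Taken together, the rows above and the digest() row that follows describe the streaming interface of a pure-Python XXH3-64 hash object: feed bytes with update(), then read the result either as a 64-bit unsigned integer via intdigest() or as canonical big-endian bytes via digest(). The sketch below is illustrative only and rests on an assumption: it exercises the same update()/intdigest()/digest() call pattern against the C-backed `xxhash` PyPI package, which is not the pure-Python implementation these rows document.

import struct
import xxhash  # assumption: pip-installed C-backed package, not the class documented in these rows

h = xxhash.xxh3_64(seed=0)   # seed=0 is the default seed
h.update(b"hello ")          # data is consumed incrementally, exactly as update() above describes
h.update(b"world")

value = h.intdigest()        # digest as a 64-bit unsigned integer
canonical = h.digest()       # canonical big-endian bytes of that integer
assert canonical == struct.pack(">Q", value)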
Return the hash digest as a bytes object. This is the big-endian representation of the value returned by ``intdigest()`` and is equivalent to the output of the ``XXH64_canonicalFromHash()`` function in the `reference implementation`_ applied to the value returned by ``intdigest()``.
def digest(self):
    # For discussion of big-endian vs little-endian for the hash
    # digest of XXHASH algorithms, see
    # https://github.com/Cyan4973/xxHash/issues/45
    return struct.pack(">Q", self.intdigest())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hexdigest(self):\n return self.hashObject.hexdigest()", "def digest(self):\n return self._hash", "def hash(self) -> bytes:", "def digest(self):\n return digest_tools.sha256_digest(self._payload.as_encoded_str())", "def hash(self):\n return Hash.dhash(bytes(self))", "def hexdigest(self):\n return \"\".join(\"%02x\" % ord(x)\n for x in MegaCrypto.a32_to_str(self.digest()))", "def hexdigest(self):\r\n return ''.join(['%02x' % ord(c) for c in self.digest()])", "def _digest(self):\n return self._hasher.hexdigest()", "def digest(self):\n d = MegaCrypto.str_to_a32(self.hash)\n return (d[0] ^ d[1], d[2] ^ d[3])", "def hash(self) -> bytes:\n block_string = json.dumps(self.serialize(), sort_keys=True).encode()\n return bytes.fromhex(hashlib.sha256(block_string).hexdigest())", "def hexdigest(self):\n # bytes.hex() is simpler, but not available For Python <= 3.4\n return \"\".join(\"{0:0>2x}\".format(b) for b in self.digest())", "def digest(self) -> bytes:\n # items in data MUST be byte-like objects\n data = []\n\n for key, value in self.items():\n data.append(key)\n if value is not None:\n data.append(value)\n\n return hashlib.sha3_256(b'|'.join(data)).digest()", "def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"!f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.signature.encode()\r\n block_data += self.choice.encode()\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def _Hash(self):\n fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)), self.key_bytes)\n return util.Encode(fullhash[:keyczar.KEY_HASH_SIZE])", "def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.public_key.public_bytes(serialization.Encoding.X962,\r\n serialization.PublicFormat.CompressedPoint)\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()", "def digest(self, message):\n\n hasher = hashlib.md5()\n hasher.update(message)\n digest = hasher.digest()[0:self.HASHLEN]\n\n return binascii.hexlify(digest)", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def _Hash(self):\n fullhash = util.PrefixHash(self.key_bytes)\n return util.Base64WSEncode(fullhash[:constants.KEY_HASH_SIZE])", "def get_hash(self) -> str:\n return self.__hash.hexdigest()", "def incore_digest(self):\n return hasher(self.content).hexdigest()", "def digest(self):\r\n\r\n H0 = self.H0\r\n H1 = self.H1\r\n H2 = self.H2\r\n H3 = self.H3\r\n H4 = self.H4\r\n inputdata = [] + self.inputdata\r\n count = [] + self.count\r\n\r\n index = (self.count[1] >> 3) & 0x3fL\r\n\r\n if index < 56:\r\n padLen = 56 - index\r\n else:\r\n padLen = 120 - index\r\n\r\n padding = ['\\200'] + ['\\000'] * 63\r\n self.update(padding[:padLen])\r\n\r\n # Append length (before padding).\r\n bits = _sha_bytelist2longBigEndian(self.inputdata[:56]) + count\r\n\r\n self._transform(bits)\r\n\r\n # Store state in digest.\r\n digest = _sha_long2bytesBigEndian(self.H0, 4) + \\\r\n _sha_long2bytesBigEndian(self.H1, 4) + \\\r\n _sha_long2bytesBigEndian(self.H2, 4) + \\\r\n _sha_long2bytesBigEndian(self.H3, 4) + \\\r\n _sha_long2bytesBigEndian(self.H4, 4)\r\n\r\n self.H0 = H0 \r\n self.H1 = H1 \r\n self.H2 = H2\r\n self.H3 = 
H3\r\n self.H4 = H4\r\n self.inputdata = inputdata \r\n self.count = count \r\n\r\n return digest", "def digest(o):\n ser = serialize(o)\n return _truncated_digest(ser.encode(enc)).decode(enc)", "def get_binary_sha256_hash(hash: str) -> str:\n result = \"\"\n\n for character in hash:\n character_number = int(character, base=16)\n binary_number = bin(character_number)\n # CAVEAT: each hash character is 4 bit size since SHA256 hash is hexidecimal string, so 4 * 64 = 256 bit\n formatted_binary_number = binary_number[2:].ljust(4, \"0\")\n result += formatted_binary_number\n\n return result", "def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def compute_hash(self):\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def hash(self):\n return self._hash", "def sign_hash(self, private_key, hash_id, digest):\n d_digest = Data(digest)\n signature = Buffer(self.signature_len(private_key=private_key))\n status = self._lib_vscf_ecc.vscf_ecc_sign_hash(self.ctx, private_key.c_impl, hash_id, d_digest.data, signature.c_buffer)\n VscfStatus.handle_status(status)\n return signature.get_bytes()", "def encoded_hash(sha):\n return urlsafe_b64encode(sha.digest()).decode('ascii').rstrip('=')", "def digest(self) -> Digest:\n return self.exe.digest", "def printable_hash(h):\n return int(h).to_bytes(32, byteorder='big', signed=False).hex()", "def hash_bytes_256(b: bytes) -> str:\n return hashlib.sha256(b).hexdigest()", "def hash(self):\n return hashlib.sha256(self.to_json().encode()).hexdigest()", "def digest(self):\n return self._parsed[\"digest\"]", "def get_hash(self, params):\n return self.sha", "def getBytes(self, hashV):\n results = []\n remaining = int(hashV, 16)\n while remaining > 0:\n results.append(remaining & 0xff)\n remaining >>= 8\n return results", "def get_hash(self):\r\n return", "def getFingerprint(self):\r\n return b2a_hex(SHA1(self.bytes))", "def digest(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"digest\", _args)\n return _ctx.execute_sync(str)", "def get_hash(self):\n return freeze_dict(self.get_hash_params())", "def get_digest(self) -> Digest:\n ...", "def hash(self):\n return os.popen('git rev-parse HEAD').read().strip()", "def hex(self) -> str:\n return self.__hash.hexdigest()", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def hash_string(self):\n return self._hash_string", "def get_hash(self, data: Optional[bytes] = None) -> str:\n return self.__handle__.hash", "def hash(self) -> str:\n return pulumi.get(self, \"hash\")", "def digest(self):\n val = (self.numerator * pow(self.denominator, -1, self.MODULUS)) % self.MODULUS\n bytes384 = val.to_bytes(384, 'little')\n return hashlib.sha256(bytes384).digest()", "def calculateHash(self):\n hashData = str(self.index) + str(self.data) + self.timestamp + self.previousHash + str(self.nonce)\n return hashlib.sha256(hashData.encode(encoding='UTF-8',errors='strict')).hexdigest()", "def hash(self) -> types.UInt256:\n with serialization.BinaryWriter() as bw:\n bw.write_uint32(settings.network.magic)\n self.serialize_unsigned(bw)\n data_to_hash = bytearray(bw._stream.getvalue())\n data = hashlib.sha256(hashlib.sha256(data_to_hash).digest()).digest()\n return types.UInt256(data=data)", "def sha(self):\n 
return self._sha", "def sha(self):\n return self._sha", "def sha(self):\n return self._sha", "def get_digest_hash(response_data: dict, force: bool = False) -> str:\n if force:\n return 'forced-{}'.format(time.time())\n\n r = response_data.copy()\n r['timestamp'] = None\n serialized = json.dumps(r, cls=ComplexJsonEncoder)\n func = getattr(hashlib, 'blake2b', hashlib.sha256)\n return func(serialized.encode()).hexdigest()", "def get_digest(self, message):\n to_digest_message = blackjack_pb2.DigestMessage(ToDigest=message)\n return self.stub.GetDigestor(to_digest_message)", "def get_hash(thing):\n n = hashlib.sha256()\n \n if isinstance(thing,str):\n n.update(thing.encode('utf-8' ))\n elif isinstance(thing, bytes):\n n.update(thing)\n elif isinstance(thing,BeautifulSoup):\n n.update(get_hash(str(thing)))\n else:\n raise RuntimeError(\"unknown type: {}\".format(str(type(thing))))\n \n return(n.digest())", "def hash(self):\n return hashlib.sha1(str(self._dict))", "def getHash(self):\n if self.chash:\n return self.chash\n else:\n self.setHash()\n return self.chash", "def generate_hash(self):\n if not self.public_key:\n raise ValueError('Requires a public publicKey')\n return self.public_key.encode(encoding='bytes')", "def compute_hash(self) -> str:\r\n #block_dict = self.__dict__.pop('hash', None) # Remove hash field value before calculating hash\r\n block_dict = self.__dict__.copy()\r\n block_dict.pop('hash', None) # Remove hash field value before calculating hash\r\n block_string = json.dumps(block_dict, sort_keys=True).encode('utf-8')\r\n return sha256(block_string).hexdigest()", "def _Hash(content: bytes) -> str:\n return hashlib.sha256(content).hexdigest()", "def hashable(obj):\n return bytes(str(obj), \"utf-8\")", "def get_hash(self):\n if self.contributes:\n return hash_from_values(self.iter_values())", "def get_hash(self):\n return self.__hash", "def hash(self):\n block = 1024 * 1024 * 4 # 4 MB.\n hasher = hashlib.sha256()\n\n with open(self.path, \"rb\") as f:\n while True:\n chunk = f.read(block)\n if not chunk:\n break\n hasher.update(hashlib.sha256(chunk).digest())\n\n digest = hasher.hexdigest()\n pdbox.debug(\"Hash for %s: %s\" % (self.path, digest))\n return digest", "def toHashable(self) -> str:\r\n\r\n return self.toHashBase().encode('utf-8')", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def calc_statistics_hash(self) -> bytes:\n raise NotImplementedError()", "def get_hash(cert_file):\n # Use OpenSSL to extract hash\n command = 'openssl x509 -noout -in %s -hash'\n command = command % cert_file\n command = shlex.split(command)\n\n pipe = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n hash = pipe.stdout.read()\n hash = hash.strip()\n\n return hash", "def hash(self):\n return self.hash_by_id(self.id)", "def _get_hex_digest(cls, message, secret):\n hmac_digester = hmac.new(secret.encode('utf-8'), message.encode('utf-8'), digestmod='sha512')\n return hmac_digester.hexdigest()", "def h(x):\n\n hasher = hashlib.sha256()\n hasher.update(x)\n return hasher.digest()", "def hash(self):\n hash_properties = self.artifacts\n return hashlib.md5(','.join(hash_properties).encode()).hexdigest()", "def get_results_hash(self, data):\n data = json.dumps(data, sort_keys=True)\n result = hashlib.sha512(data.encode())\n result_hash = result.hexdigest()\n return result_hash", "def internal_hash(self):\r\n return _TripleCanonicalizer(self).to_hash()", "def 
generichash_blake2b_final(state: Blake2State) -> bytes:\n\n ensure(\n isinstance(state, Blake2State),\n \"State must be a Blake2State object\",\n raising=exc.TypeError,\n )\n\n _digest = ffi.new(\"unsigned char[]\", crypto_generichash_BYTES_MAX)\n rc = lib.crypto_generichash_blake2b_final(\n state._statebuf, _digest, state.digest_size\n )\n\n ensure(rc == 0, \"Unexpected failure\", raising=exc.RuntimeError)\n return ffi.buffer(_digest, state.digest_size)[:]", "def __bytes__(self):\n return (\n pack_u32(self.version) +\n bytes(self.prev_block_hash) +\n bytes(self.merkle_root_hash) +\n pack_u32(self.time) +\n pack_u32(self.bits) +\n pack_u32(self.nonce)\n )", "def hash_cipher(self):\n return self._digest_cipher", "def _electrum_script_hash(script: bytes) -> str:\n bytes = bytearray(scripts.sha256(script))\n bytes.reverse()\n return bytes.hex()", "def coerce(self, value):\n if isinstance(value, bytes) and len(value) == self.bit_length:\n return HashString.from_b256(value)\n elif len(value) == self.b16_length:\n return HashString.from_b16(value)\n elif self.b64_length - len(value) <= 4:\n return HashString.from_b64(value)", "def hash(self):\n return self.__hash__()", "def get_hash(self):\n context = aq_inner(self.context)\n # Light guard\n if ICommunity.providedBy(context):\n return sha1(context.absolute_url()).hexdigest()", "def hash_digest(data: bytes, hash_algo: str=\"sha256\") -> bytes:\n if hash_algo == \"sha256\":\n digester = sha256()\n digester.update(data)\n return digester.hexdigest()", "def sha256(self):\n return self._sha256", "def get_hash(self):\n source = \"\"\n for cell in self.original_cells:\n source += \"\\n\".join(get_source(cell))\n return hashlib.sha256(source.encode(\"utf-8\")).hexdigest()", "def to_h(self):\n return str(self).encode('hex')", "def to_bytes(self) -> bytes:\n parent_hash_bytes = self.parent_hash.encode(\"utf-8\") if \\\n self.parent_hash else \\\n b\"\"\n return self.DELIMITER.join((self.contents, parent_hash_bytes))", "def get_hash(dictionary):\n dhash = hashlib.md5()\n # We need to sort arguments so {'a': 1, 'b': 2} is\n # the same as {'b': 2, 'a': 1}\n encoded = json.dumps(dictionary, sort_keys=True).encode()\n dhash.update(encoded)\n return dhash.hexdigest()", "def hash64bits(*args):\n # 64 bits hexdigest\n h = hashlib.sha1(bytes(repr(args), \"utf-8\")).hexdigest()[:16]\n # Convert to an integer and return\n return int(h, 16)", "def digest(self):\n pass", "def digest(self):\n pass", "def digest(self):\n pass", "def _get_hash(self, query):\n return hashlib.sha1(str(query)).hexdigest()", "def extended_hash(self):\n return self._extended_hash", "def hash(self):\n return self.block_header.hash", "def to_script_hash(data_bytes: Any) -> bytes:\n pass", "def innerHash(self) -> str:\r\n\r\n return self.__inner_hash", "def _calculate_hash(self) -> str:\n data_str = str(self.version) + str(self.index) + self.pre_hash + str(self.timestamp) + str(self.data)\n return sha256(data_str.encode('utf-8')).hexdigest()" ]
[ "0.69930434", "0.69264513", "0.67882836", "0.6712462", "0.66841334", "0.66802466", "0.6673555", "0.6600667", "0.6553306", "0.6533125", "0.6499518", "0.6499283", "0.6481292", "0.6456057", "0.6456057", "0.64141536", "0.6401427", "0.6399107", "0.638602", "0.63651985", "0.62907857", "0.6269256", "0.626924", "0.6265182", "0.6249195", "0.62076014", "0.62076014", "0.6181992", "0.61363965", "0.61119395", "0.60841185", "0.60690624", "0.60367525", "0.60262376", "0.6026044", "0.601964", "0.6010211", "0.6005593", "0.6002102", "0.60006493", "0.5994497", "0.59787583", "0.59757084", "0.5975286", "0.59630257", "0.59577006", "0.5956934", "0.59516144", "0.59437376", "0.5933905", "0.59197193", "0.59177697", "0.59177697", "0.59177697", "0.59133005", "0.5855986", "0.5849705", "0.58462065", "0.5836185", "0.58293414", "0.5821235", "0.5807271", "0.5798172", "0.5790281", "0.5780137", "0.57769734", "0.5774707", "0.5773881", "0.5773881", "0.5762735", "0.57580316", "0.5756429", "0.5740933", "0.5733191", "0.57158804", "0.57133806", "0.57109505", "0.5696969", "0.5695386", "0.5691578", "0.56911284", "0.56714404", "0.56683177", "0.5664571", "0.56575584", "0.56532127", "0.56506664", "0.5646852", "0.5646113", "0.563894", "0.5637001", "0.5635211", "0.5635211", "0.5635211", "0.5627779", "0.56262064", "0.5617735", "0.56033105", "0.55974466", "0.55818737" ]
0.78258497
0
Return the hash digest as a string of hexadecimal digits. This is the value returned by ``digest()`` expressed as a printable hex string for easy display.
def hexdigest(self): # bytes.hex() is simpler, but not available For Python <= 3.4 return "".join("{0:0>2x}".format(b) for b in self.digest())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hexdigest(self):\r\n return ''.join(['%02x' % ord(c) for c in self.digest()])", "def hexdigest(self):\n return \"\".join(\"%02x\" % ord(x)\n for x in MegaCrypto.a32_to_str(self.digest()))", "def hexdigest(self):\n return self.hashObject.hexdigest()", "def hex(self) -> str:\n return self.__hash.hexdigest()", "def digest(self):\n # For discussion of big-endian vs little-endian for the hash\n # digest of XXHASH algorithms, see\n # https://github.com/Cyan4973/xxHash/issues/45\n return struct.pack(\">Q\", self.intdigest())", "def __str__(self: Hash) -> str:\n return self.to_hex()", "def printable_hash(h):\n return int(h).to_bytes(32, byteorder='big', signed=False).hex()", "def digest(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"digest\", _args)\n return _ctx.execute_sync(str)", "def hex_form(hash):\n final_hash = ''\n for i in range(len(hash)):\n final_hash += format(hash[i], '02x')\n return final_hash", "def _digest(self):\n return self._hasher.hexdigest()", "def digest(self):\n return self._hash", "def stringify(self):\n hexcode = \"#\"\n for x in self.value:\n part = hex(x)[2:]\n if len(part) < 2: part = \"0\" + part\n hexcode += part\n return hexcode", "def HexDigest(self, name, truncation_length=None):\n\n if truncation_length is None:\n truncation_length = 64\n name_bytes = name.encode('UTF-8')\n return hashlib.sha256(name_bytes).hexdigest()[:truncation_length]", "def hex_str (self):\n return \"#%02X%02X%02X\"%(self._intern[0],self._intern[1],self._intern[2])", "def get_hash(self) -> str:\n return self.__hash.hexdigest()", "def as_hex(self):\n return binascii.hexlify(self.as_bytes()).decode('ascii')", "def toHex(self):\r\n rgb = self.toRGB()\r\n return ('#%02s%02s%02s' % (hex(rgb[0])[2:], hex(rgb[1])[2:],\r\n hex(rgb[2])[2:])).replace(' ', '0')", "def printsha(self):\n print(self.sha256.hex())", "def digest(self, seq):\n\n h = hashlib.new(self._hash_algorithm)\n h.update(seq)\n dig = h.hexdigest()\n\n return dig", "def createHashcodeString(digest):\n map_num2hex = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n hashcodelist = [None] * len(digest)\n \n for i1 in range(0, len(digest)):\n digest_i = digest[i1] # Extracts the number from the digest.\n hashcodelist[i1] = map_num2hex[digest_i] # Turns the number to a hex value and assigns it to the hashcodelist.\n \n hashcodestring = \"\"\n \n for i1 in range(0, len(hashcodelist)):\n hashcodestring = hashcodestring + hashcodelist[i1] # Appends the characters to form a string.\n \n return hashcodestring", "def hash_string(self):\n return self._hash_string", "def getFingerprint(self):\r\n return b2a_hex(SHA1(self.bytes))", "def as_hex(self, *, align='left'):\n return self.as_bytes(align=align).hex()", "def fingerprint(self) -> str:\n fp = self.sha256.hex()\n return fp", "def digest(self, message):\n\n hasher = hashlib.md5()\n hasher.update(message)\n digest = hasher.digest()[0:self.HASHLEN]\n\n return binascii.hexlify(digest)", "def hash_str(self):\n return '___'.join([self.key.kind(), self.key.string_id(),\n self._Hash()])", "def digest(self):\n d = MegaCrypto.str_to_a32(self.hash)\n return (d[0] ^ d[1], d[2] ^ d[3])", "def to_h(self):\n return str(self).encode('hex')", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def hex_str (self):\n return 
\"#%02X%02X%02X\"%(self.r, self.g, self.b)", "def hash(self):\n hash_properties = self.artifacts\n return hashlib.md5(','.join(hash_properties).encode()).hexdigest()", "def digest(self):\n return digest_tools.sha256_digest(self._payload.as_encoded_str())", "def int2hex(n: int) -> str:", "def requirements_hex_digest(self) -> str:\n return calculate_invalidation_digest(self.requirements)", "def hash(self) -> str:\r\n ...", "def get_hash_string(self) -> str:\n\t\ts = ''\n\t\tfor i in range(self.size):\n\t\t\ts += ''.join(map(str,self.tiles[i]))\n\t\treturn s", "def __str__(self) -> str:\n return self.hash", "def hash(self):\n return Hash.dhash(bytes(self))", "def hash(self) -> str:\n return pulumi.get(self, \"hash\")", "def hex_digest(x):\r\n\r\n global hashlib\r\n if hashlib is None:\r\n try:\r\n import hashlib\r\n except ImportError:\r\n raise RuntimeError(\"Can't run hex_digest because hashlib is not available.\")\r\n assert isinstance(x, np.ndarray)\r\n rval = hashlib.md5(x.tostring()).hexdigest()\r\n # hex digest must be annotated with strides to avoid collisions\r\n # because the buffer interface only exposes the raw data, not\r\n # any info about the semantics of how that data should be arranged\r\n # into a tensor\r\n rval = rval + '|strides=[' + ','.join(str(stride) for stride in x.strides) + ']'\r\n rval = rval + '|shape=[' + ','.join(str(s) for s in x.shape) + ']'\r\n return rval", "def __repr__(self) -> str:\n return \"DHash(hash=%s, hash_hex=%s, path=%s)\" % (\n self.hash,\n self.hash_hex,\n self.path,\n )", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s)\n return h.hexdigest()", "def hex_hash(s):\n if not s:\n return '0'\n s = s.encode('utf-8')\n return '{:x}'.format(adler32(s) & 0xffffffff)", "def to_hex6_string(self):\n def c(x):\n return int(x * 255.0)\n return '#{:02x}{:02x}{:02x}'.format(c(self.r), c(self.g), c(self.b))", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode())\n return h.hexdigest()", "def w__format_hex(self, string):\n d = map(None, string)\n d = map(ord, d)\n d = map(lambda x: \"%02x\" % x, d)\n return ' '.join(d)", "def toHashable(self) -> str:\r\n\r\n return self.toHashBase().encode('utf-8')", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def hex(self):\n return binascii.hexlify(self.data)", "def digest(self):\n val = (self.numerator * pow(self.denominator, -1, self.MODULUS)) % self.MODULUS\n bytes384 = val.to_bytes(384, 'little')\n return hashlib.sha256(bytes384).digest()", "def hash(self) -> bytes:", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode('utf-8'))\n return h.hexdigest()", "def toHex(self):\n return hexlify(self.serialize()).decode(\"utf-8\")", "def get_hex_color(self) -> str:\n return f'#{self.color.hex()}'", "def create_hash(self):\n return os.urandom(32).encode('hex')", "def hash(self):\n return hashlib.sha256(self.to_json().encode()).hexdigest()", "def hashhex(s):\n h = hashlib.sha1()\n h.update(s.encode('utf-8'))\n return h.hexdigest()", "def toString(self):\r\n str = \"\"\r\n for i in range(len(self.Data)):\r\n str += (self.__hexLookup[int(self.Data[i] / 16)]).decode()\r\n str += (self.__hexLookup[int(self.Data[i] % 16)]).decode()\r\n \r\n return str", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def get_hash_str():\r\n\tli = \"\"\r\n\tfor i in range(5):\r\n\t\tli += str(int(int((6 * random.random()) + 1)))\r\n\treturn li", "def incore_digest(self):\n return hasher(self.content).hexdigest()", 
"def sha256_hexoutput(in_str):\r\n return sha256(in_str.encode('ascii')).hexdigest()", "def hexdigest(jsonable):\n string = json.dumps(jsonable, sort_keys=True).encode()\n return hashlib.sha1(string).hexdigest()", "def digest(self) -> bytes:\n # items in data MUST be byte-like objects\n data = []\n\n for key, value in self.items():\n data.append(key)\n if value is not None:\n data.append(value)\n\n return hashlib.sha3_256(b'|'.join(data)).digest()", "def md5hash(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"md5hash\")", "def __get_hashstr(_config_object: dict):\n hashobj = hashlib.md5()\n json_str = json.dumps(_config_object, sort_keys=True).encode('utf-8')\n hashobj.update(json_str)\n dig = hashobj.hexdigest()\n return dig\n # return hashobj.update(json.dumps(_config_object, sort_keys=True).encode('utf-8')).hexdigest()", "def hash(password):\n result = hashlib.sha1(password.encode())\n # return a hexadecimal digits\n return result.hexdigest()", "def calculateHash(self):\n hashData = str(self.index) + str(self.data) + self.timestamp + self.previousHash + str(self.nonce)\n return hashlib.sha256(hashData.encode(encoding='UTF-8',errors='strict')).hexdigest()", "def hashcode(self) -> str:\n spreadsheet_spec_dict = self.to_dict()\n sorted_keys = dict(sorted(spreadsheet_spec_dict.items()))\n return md5(json.dumps(sorted_keys).encode(\"utf-8\")).hexdigest()", "def get_digest_hash(response_data: dict, force: bool = False) -> str:\n if force:\n return 'forced-{}'.format(time.time())\n\n r = response_data.copy()\n r['timestamp'] = None\n serialized = json.dumps(r, cls=ComplexJsonEncoder)\n func = getattr(hashlib, 'blake2b', hashlib.sha256)\n return func(serialized.encode()).hexdigest()", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def getHash():\n return str(uuid.uuid4())[-17:].replace(\"-\", \"\")", "def encoded_hash(sha):\n return urlsafe_b64encode(sha.digest()).decode('ascii').rstrip('=')", "def __repr__(self):\n return self._hex", "def toHex(self):\n \n t=self.boolVals[:]\n t.reverse()\n \n string=str(self)\n \n \n string=hex(int(string,2))\n string=string[2:]\n\n d=ceil(self.n/4)-len(string)\n string=d*\"0\"+string\n return string", "def digest(self) -> Digest:\n return self.exe.digest", "def _get_hex_digest(cls, message, secret):\n hmac_digester = hmac.new(secret.encode('utf-8'), message.encode('utf-8'), digestmod='sha512')\n return hmac_digester.hexdigest()", "def hash(self) -> bytes:\n block_string = json.dumps(self.serialize(), sort_keys=True).encode()\n return bytes.fromhex(hashlib.sha256(block_string).hexdigest())", "def hash_str(c, hash_length):\n if isinstance(c, float):\n if numpy.isnan(c):\n return c\n raise ValueError(f\"numpy.nan expected, not {c}\")\n m = hashlib.sha256()\n m.update(c.encode(\"utf-8\"))\n r = m.hexdigest()\n if len(r) >= hash_length:\n return r[:hash_length]\n return r", "def get_binary_sha256_hash(hash: str) -> str:\n result = \"\"\n\n for character in hash:\n character_number = int(character, base=16)\n binary_number = bin(character_number)\n # CAVEAT: each hash character is 4 bit size since SHA256 hash is hexidecimal string, so 4 * 64 = 256 bit\n 
formatted_binary_number = binary_number[2:].ljust(4, \"0\")\n result += formatted_binary_number\n\n return result", "def hash(self):\n return xxhash.xxh64(self._pwm_to_str(3)).hexdigest()", "def get_report_hash(self, consolidated):\n jsonstr = json.dumps(consolidated, sort_keys=True)\n hashobj = hashlib.sha1(jsonstr)\n hexval = hashobj.hexdigest()\n return hexval", "def hash_value(self, value):\n h = hashlib.sha256()\n h.update(str(value))\n return h.hexdigest()", "def _electrum_script_hash(script: bytes) -> str:\n bytes = bytearray(scripts.sha256(script))\n bytes.reverse()\n return bytes.hex()", "def __str__(self) -> str:\n return \"scapacity of hash: {}, current size of hash: {}\".format(\n self.capacity, self.length\n )", "def hashname(self):\n return hashlib.md5(self.name.encode('utf-8')).hexdigest()", "def digest(self):\n return self._parsed[\"digest\"]", "def nice_hash(*args):\n h = sha1()\n for item in args:\n h.update(unicode(item))\n return b32encode(h.digest())", "def digest(self):\n pass", "def digest(self):\n pass", "def digest(self):\n pass", "def get_hash(self):\r\n block_data = self.prev_hash\r\n block_data += bytearray(struct.pack(\"!f\", self.time))\r\n block_data += self.user_id.encode()\r\n block_data += self.signature.encode()\r\n block_data += self.choice.encode()\r\n\r\n digest = hashes.Hash(hashes.SHA256())\r\n digest.update(block_data)\r\n return digest.finalize()", "def hash(self):\n return self._hash", "def get_hash(self):\n return \"%03d_%03d_%03d\" % (self.chest_region, self.chest_type, self.feature_type)" ]
[ "0.8446957", "0.8241606", "0.8034233", "0.7728154", "0.7493876", "0.72516394", "0.7215683", "0.70375633", "0.6966864", "0.69554013", "0.6895401", "0.6893466", "0.6879853", "0.6794466", "0.6791125", "0.6765434", "0.67632526", "0.67488396", "0.6718266", "0.6668175", "0.6663794", "0.66539264", "0.6644642", "0.6637669", "0.6594874", "0.6577905", "0.6569104", "0.6555183", "0.6544478", "0.65430665", "0.65359366", "0.65169287", "0.6508787", "0.6494875", "0.6479136", "0.6471283", "0.6435679", "0.6427293", "0.6426841", "0.6388365", "0.63735783", "0.6364939", "0.6360892", "0.6339879", "0.6338865", "0.6338788", "0.6327831", "0.6303999", "0.6303999", "0.6300749", "0.62824214", "0.6272809", "0.625504", "0.62524664", "0.62521094", "0.624635", "0.62456083", "0.6238843", "0.6228027", "0.6220834", "0.6220344", "0.61983746", "0.6197756", "0.61862564", "0.61838925", "0.6138359", "0.6137509", "0.6129034", "0.6126266", "0.6122026", "0.6119209", "0.61167103", "0.61167103", "0.61167103", "0.61167103", "0.61167103", "0.61167103", "0.6097184", "0.609505", "0.60924184", "0.6091856", "0.6089345", "0.6089166", "0.60830104", "0.60790783", "0.6060079", "0.6056771", "0.6045045", "0.60434204", "0.6037772", "0.60299915", "0.60250294", "0.60174847", "0.60027176", "0.6001148", "0.6001148", "0.6001148", "0.59987515", "0.5973065", "0.59700006" ]
0.8364801
1
Return a copy (clone) of the hash object.
def copy(self): cp = self.__class__() # create a new instance of the subclass # copy current state to the new instance cp._acc = self._acc cp._seed = self._seed cp._secret = self._secret cp._last_stripe = self._last_stripe cp._total_length = self._total_length cp._buffer = self._buffer return cp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clone(self):\n memo = dict()\n c = self._clone(memo)\n c._clone_rip(memo)\n return c", "def clone(self):\n return self.copy()", "def clone(self):\n clone = super(LongObjectHashMap, self).clone()\n clone.clear()\n clone.initialize()\n for key in keySet():\n value = self.get(key)\n clone.put(key, value)\n return clone", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def copy(self):\n return self.__class__(dict(self))", "def clone(self):\n from copy import deepcopy\n return deepcopy(self)", "def clone(self):\n return copy.deepcopy(self)", "def copy(self):\n\t\treturn pythoncopy.deepcopy(self)", "def clone(self):\n return shallow_clone(self)", "def copy(self):\n return pdict(dict.copy(self))", "def copy(self):\r\n return copy.deepcopy(self)", "def copy(self):\n import copy as pcopy\n return pcopy.deepcopy(self)", "def clone(self) -> Any:\n clone = super().clone()\n clone.clear()\n return clone", "def copy(self):\n return self.from_dict(self.to_dict(True))", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\r\n return copy.copy(self)", "def get_mutable_clone(self, block_hash: Hash32) -> 'MutableSnapshot':\n return MutableSnapshot(\n signers=list(self.signers),\n block_hash=block_hash,\n votes=list(self.votes),\n tallies=self.tallies.copy()\n )", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def __copy__(self):\n return self.copy()", "def clone(self):\n return self", "def copy (self):\n import copy\n return copy.copy(self)", "def copy(self):\r\n new = WeakKeyIdentityDict()\r\n for key, value in self.data.items():\r\n o = key()\r\n if o is not None:\r\n new[o] = value\r\n return new", "def deepcopy(self):\n return copymod.deepcopy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def clone(self) -> Any:\n return cp.copy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n from copy import deepcopy\n return 
deepcopy(self)", "def __deepcopy__(self, memodict=None):\n return self.copy()", "def _copy_(self):\n return copy.copy(self)", "def deepcopy(self):\n return self.copy()", "def clone(self):\n return self.__class__(self.name, *self)", "def copy(self):\n \n return deepcopy(self)", "def copy(self):\n return copy(self)", "def copy(self):\n return copy(self)", "def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n to_copy = {\"_cache\", \"_buffers\", \"_parameters\", \"_modules\"}\n result.__dict__.update(\n {k: v.copy() if k in to_copy else v for k, v in self.__dict__.items()}\n )\n return result", "def copy(self):\n return object.__new__(type(self))", "def copy (self):\n return self.__class__(self.name, self[:])", "def copy(self):\n return self.update({})", "def copy(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result", "def copy(self):\n out = type(self).__new__(self.__class__)\n out.__dict__.update(self.__dict__)\n # make sure the copy has its own unique random number generator\n seed_seq = self.rng._bit_generator._seed_seq.spawn(1)[0]\n out.__dict__['rng'] = get_generator(seed_seq)\n return out", "def clone(self):\n return None", "def deepcopy(self):\n return copy.deepcopy(self)", "def copy(self):\n return self.mutate().simple_copy()", "def clone(self):\n return self.__class__(self, self.spectrum, wallet=self.wallet)", "def clone(self):", "def __deepcopy__(self, memo):\n return self.copy()", "def copy(self):\n new = self.__class__()\n new.values = self.values.copy()\n return new", "def copy(self):\n return self.__class__(self)", "def copy(self):\n return self.__class__(self)", "def copy(self):\r\n # This way of initializing the copy means it works for subclasses, too.\r\n obj = self.__class__(self)\r\n obj.keyOrder = self.keyOrder[:]\r\n return obj", "def clone(self):\n raise NotImplementedError", "def copy(self):\n copy = Node(self.ident)\n for k, v in self.iteritems():\n copy[k] = v\n return copy", "def copy(self):\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp", "def __copy__(self):\n d = dict()\n d.update(self.items())\n return d", "def clone(self) -> Self:\n return clone(self, safe=True)", "def clone(self):\n return type(self)(iterator=self)", "def copy(self):\n\n return deepcopy(self)", "def copy(self):\n\n return deepcopy(self)", "def copy(self):\n\n return deepcopy(self)", "def copy(self):\n return defaultdict.copy(self)", "def copy(self):\n try:\n return self.__class__(self, copy=True)\n except TypeError:\n new = self.__class__(copy.deepcopy(self))\n return new", "def __deepcopy__(self, memo):\n copy = self.__class__()\n copy.wvalues = self.wvalues\n return copy", "def __deepcopy__(self, memo):\n from copy import deepcopy\n return self.__class__(deepcopy(self.items(), memo), self.strict)", "def copy(self):\n new = self\n return new" ]
[ "0.76727754", "0.7490999", "0.7473199", "0.74517614", "0.74517614", "0.74517614", "0.74517614", "0.74502987", "0.7433837", "0.74067515", "0.73838836", "0.72835857", "0.72783124", "0.72249395", "0.72157496", "0.7209674", "0.71724945", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71661943", "0.71578956", "0.7152161", "0.7149331", "0.7149331", "0.7149331", "0.7149331", "0.7131434", "0.7122499", "0.7122253", "0.7121962", "0.7113026", "0.71020484", "0.71020484", "0.71020484", "0.70913625", "0.70887756", "0.70887756", "0.70887756", "0.70887756", "0.70887756", "0.70887756", "0.70887756", "0.70887756", "0.70887756", "0.70887756", "0.70887756", "0.70887756", "0.70887756", "0.70887756", "0.70887756", "0.7076383", "0.7065067", "0.70517206", "0.704203", "0.7015894", "0.7001071", "0.69918376", "0.69918376", "0.6991763", "0.6987039", "0.6982711", "0.6978443", "0.697732", "0.69640654", "0.6962972", "0.6957332", "0.69531053", "0.69319296", "0.6929266", "0.69226944", "0.6892067", "0.68893665", "0.68893665", "0.68738097", "0.68637514", "0.68635327", "0.6849049", "0.68306494", "0.6828243", "0.68109924", "0.6803843", "0.6803843", "0.6803843", "0.6795117", "0.67824984", "0.6772981", "0.67647326", "0.6763099" ]
0.0
-1
Prepare the batch export of tag definitions to GetResponse
def tag_export_batch(session, model_name, backend_id, domain=None, fields=None, delay=False, **kwargs): connector_env = get_environment(session, model_name, backend_id) # Get the exporter connector unit batch_exporter = connector_env.get_connector_unit(TagBatchExporter) # Start the batch export batch_exporter.batch_run(domain=domain, fields=fields, delay=delay, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_response_objects(self) -> list[JsonDict]:\n responses = []\n for feat_type, feat_name, _ in self.features:\n if feat_type.is_array():\n feat_name = cast(str, feat_name) # can only be string since it's an array type\n responses.append(SentinelHubRequest.output_response(feat_name, MimeType.TIFF))\n elif feat_type.is_meta():\n responses.append(SentinelHubRequest.output_response(\"userdata\", MimeType.JSON))\n else:\n # should not happen as features have already been validated\n raise ValueError(f\"{feat_type} not supported!\")\n\n return responses", "def get_objects(self) -> Response:\n tags = [tag for tag in request.args.get(\"tags\", \"\").split(\",\") if tag]\n # filter types\n types = [type_ for type_ in request.args.get(\"types\", \"\").split(\",\") if type_]\n\n try:\n tagged_objects = TagDAO.get_tagged_objects_for_tags(tags, types)\n result = [\n self.object_entity_response_schema.dump(tagged_object)\n for tagged_object in tagged_objects\n ]\n return self.response(200, result=result)\n except TagInvalidError as ex:\n return self.response_422(message=ex.normalized_messages())\n except TagCreateFailedError as ex:\n logger.error(\n \"Error creating model %s: %s\",\n self.__class__.__name__,\n str(ex),\n exc_info=True,\n )\n return self.response_422(message=str(ex))", "def respuesta(response):\n for report in response.get('reports', []):\n\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get(\n 'metricHeader', {}).get('metricHeaderEntries', [])\n\n return_data = []\n\n for row in report.get('data', {}).get('rows', []):\n\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n pipeline_insert = {}\n for header, dimension in zip(dimensionHeaders, dimensions):\n pipeline_insert[header] = dimension\n\n for i, values in enumerate(dateRangeValues):\n\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n pipeline_insert[metricHeader.get('name')] = value\n return_data.append(pipeline_insert)\n\n return return_data", "def get_auto_anno_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n languages = ['English','english']\n batch = Report.objects.filter(name=use_obj,language__in = languages).exclude(institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def get_auto_anno_PUBMED_batch_list(request):\n\n json_resp = {}\n usecase = request.GET.get('usecase')\n # print(usecase)\n languages = ['English', 'english']\n use_obj = UseCase.objects.get(name=usecase)\n json_resp['batch_list'] = []\n batch = Report.objects.filter(name=use_obj,language__in = languages,institute = 'PUBMED').values('batch')\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def _create_tag_response(response):\n if 'errortext' in response:\n if 'Unable to find resource by id' in response['errortext']:\n errors.invalid_resource_id()\n\n return {\n 'template_name_or_list': 'status.xml',\n 'response_type': 
'CreateTagsResponse',\n 'return': 'true'\n }", "def all_responses(csv_links_response, csv_response, metadata_response):\n return", "def get_tag_batch(self, tag, filenames):\n data = self.get_tags_batch([tag], filenames)\n result = []\n for d in data:\n d.pop(\"SourceFile\")\n result.append(next(iter(d.values()), None))\n return result", "def archive_bag_api_messages(bags, bucket):\n for bag in bags:\n yield {\n 'type': 'Ingest',\n 'ingestType': {\n 'id': 'create',\n 'type': 'IngestType'\n },\n 'uploadUrl': f's3://{bucket}/{bag}'\n }", "def _describe_tags_response(response):\n return {\n 'template_name_or_list': 'tags.xml',\n 'response_type': 'DescribeTagsResponse',\n 'response': response['listtagsresponse']\n }", "def preprocess(self, requests):\r\n input_batch = None\r\n for idx, data in enumerate(requests):\r\n text = data.get(\"data\")\r\n if text is None:\r\n text = data.get(\"body\")\r\n input_text = text.decode('utf-8')\r\n\r\n ################input处理\r\n question = input_text\r\n entity = self.NER(question)\r\n print('your question:{}\\nentity:{}'.format(question,entity))\r\n ################处理完毕\r\n return [entity]", "def receiver():\n def generate(entities_to_proceed):\n \"\"\"Process list of entities populating them with altitude data\"\"\"\n yield \"[\"\n for index, entity in enumerate(entities_to_proceed):\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.debug(\"processing entity : %s\", entity)\n else:\n logging.info(\"processing entity : %s\", entity.get(GUID_STR))\n\n if index > 0:\n yield \",\"\n booking_guid = entity.get(GUID_STR)\n iata = entity.get(IATA_STR)\n api_key = resolve_api_key(API_KEYS, iata)\n\n if not isinstance(api_key, str):\n entity[PROP] = []\n yield json.dumps(entity)\n continue\n url = URL_TEMPLATE.render(entity) + booking_guid + \"?api_key=\" + api_key\n if METHOD == \"get\":\n entity[PROP] = requests.get(url, headers=HEADERS).json()\n else:\n entity[PROP] = requests.request(METHOD, url, data=entity.get(\"payload\"),\n headers=HEADERS).json()\n yield json.dumps(entity)\n yield \"]\"\n\n # get entities from request\n entities = request.get_json()\n\n # create the response\n logging.debug(\"Processing %i entities\", len(entities))\n return Response(generate(entities), mimetype='application/json')", "def get_all_tags():\n try:\n data = ReadTag().run()\n except Exception as ex:\n return jsonify({'code': '500','message':'Internal server error'})\n else:\n return jsonify({'code': '200','data': data})", "def export_tags(self, filename):\n all_tags = self.sql_query(\"\"\"select from QueryResult\"\"\").node_objs\n\n query_results = {}\n for tag in all_tags:\n props = tag.get_props()\n results = tag.out()\n records = {}\n try:\n color = props['color'].copy()\n visibility = props['visibility'].copy()\n pinned = props['pinned']\n except KeyError:\n print('tag {} is incomplete and thus not exported'.format(tag.tag))\n continue\n for n in results:\n if isinstance(n, models.Neuron):\n try:\n morphology = [n for n in n.out('HasData') if isinstance(n, models.MorphologyData)][0]\n except IndexError:\n print('neuron {} not exported in tag {}'.format(n.name if n.uname is None else n.uname, tag.tag))\n continue\n records[n.uname] = {'type': 'Neuron',\n 'referenceId': n.referenceId,\n 'visible': visibility.pop(morphology._id, True),\n 'color': color.pop(morphology._id, [1.0, 0., 0.]),\n 'pinned': morphology._id in pinned}\n elif isinstance(n, (models.Synapse, models.InferredSynapse)):\n try:\n morphology = [n for n in n.out('HasData') if isinstance(n, 
models.MorphologyData)][0]\n except IndexError:\n print('synapse {} not exported in tag {}'.format(n.name if n.uname is None else n.uname, tag.tag))\n continue\n records[n.uname] = {'type': n.element_type,\n 'pre': n.in_('SendsTo')[0].referenceId,\n 'post': n.out('SendsTo')[0].referenceId,\n 'visible': visibility.pop(morphology._id, True),\n 'color': color.pop(morphology._id, [1.0, 0., 0.]),\n 'pinned': morphology._id in pinned}\n else:\n raise TypeError(\"type of record not understood: {}\".format(n.element_type))\n\n for n in list(visibility.keys()):\n if n.startswith('#'):\n visibility.pop(n)\n for n in list(color.keys()):\n if n.startswith('#'):\n color.pop(n)\n\n neuropils = {'visibility': visibility, 'color': color}\n query_results[tag.tag] = {'target': props['target'],\n 'camera': props['camera'],\n 'records': records,\n 'neuropils': neuropils}\n settings = props.get('settings', None)\n if settings is not None:\n query_results[tag.tag]['settings'] = settings\n\n with open(filename, 'w') as f:\n json.dump(query_results, f)", "def collect_outputs(self):\n # collect submex output hdf urls and add them to top mex outputs section\n top_mex = self.bqSession.fetchxml(self.options.mexURL, view='deep')\n outputTag = top_mex.xpath('/mex/tag[@name=\"outputs\"]')\n if not outputTag:\n # no \"outputs\" tag in mex => add it now\n etree.SubElement(top_mex, 'tag', name='outputs') \n top_mex = self.bqSession.postxml(url=top_mex.get('uri'), xml=top_mex, view='deep')\n outputTag = top_mex.xpath('/mex/tag[@name=\"outputs\"]')\n outputTag = outputTag[0]\n output_hdfs = top_mex.xpath('/mex/mex/tag[@name=\"outputs\"]/tag[@name=\"output_hdf\"]/@value')\n etree.SubElement(outputTag, 'tag', name='all_outputs', value=';'.join([ohdf.split('/')[-1] for ohdf in output_hdfs]))\n self.bqSession.postxml(url=outputTag.get('uri'), xml=outputTag)", "def generate(entities_to_proceed):\n yield \"[\"\n for index, entity in enumerate(entities_to_proceed):\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.debug(\"processing entity : %s\", entity)\n else:\n logging.info(\"processing entity : %s\", entity.get(GUID_STR))\n\n if index > 0:\n yield \",\"\n booking_guid = entity.get(GUID_STR)\n iata = entity.get(IATA_STR)\n api_key = resolve_api_key(API_KEYS, iata)\n\n if not isinstance(api_key, str):\n entity[PROP] = []\n yield json.dumps(entity)\n continue\n url = URL_TEMPLATE.render(entity) + booking_guid + \"?api_key=\" + api_key\n if METHOD == \"get\":\n entity[PROP] = requests.get(url, headers=HEADERS).json()\n else:\n entity[PROP] = requests.request(METHOD, url, data=entity.get(\"payload\"),\n headers=HEADERS).json()\n yield json.dumps(entity)\n yield \"]\"", "def get_response(self):\n res = IODWriteMultipleRes()\n for field in [\"seqNum\", \"ARUUID\", \"API\", \"slotNumber\",\n \"subslotNumber\", \"index\"]:\n res.setfieldval(field, self.getfieldval(field))\n\n # append all block response\n res_blocks = []\n for block in self.getfieldval(\"blocks\"):\n res_blocks.append(block.get_response())\n res.setfieldval(\"blocks\", res_blocks)\n return res", "def __load_tags(self) -> None:\n self.tags = TagHelper.TagHelper.generate_tag_object(self)\n self.tag_helper = TagHelper.TagHelper(self)\n self.tag_helper.fetch()", "def compute(self, batch: Dataset) -> List[TaggingResponse]: # type: ignore\n syntax_options: SyntaxOptions = assert_not_none(self.config.syntax)\n spacy_model = spacy.load(syntax_options.spacy_model)\n\n utterances = batch[self.config.columns.text_input]\n records: List[TaggingResponse] = []\n\n for 
utterance in utterances:\n tag: Dict[Tag, bool] = {\n smart_tag: False\n for family in [SmartTagFamily.extreme_length, SmartTagFamily.partial_syntax]\n for smart_tag in SMART_TAGS_FAMILY_MAPPING[family]\n }\n\n doc = spacy_model(clean_utterance(utterance))\n # Remove punctuation for word count and smart tags\n tokens = [token.text for token in doc if not token.is_punct]\n\n if len(tokens) >= syntax_options.long_utterance_min_word:\n tag[SmartTag.long] = True\n if len(tokens) <= syntax_options.short_utterance_max_word:\n tag[SmartTag.short] = True\n\n sub_toks = [tok for tok in doc if (tok.dep_ in syntax_options.subj_tags)]\n obj_toks = [tok for tok in doc if (tok.dep_ in syntax_options.obj_tags)]\n vrb_toks = [tok for tok in doc if (tok.pos_ in self.verb_tags)]\n if not sub_toks:\n tag[SmartTag.no_subj] = True\n if not obj_toks:\n tag[SmartTag.no_obj] = True\n if not vrb_toks:\n tag[SmartTag.no_verb] = True\n\n # Some issues occur with other languages such as french if using doc.sents directly.\n # Hence, we use an English sentencizer that seems to work better for similar languages.\n doc_sentencizer_en = self.spacy_sentencizer_en(clean_utterance(utterance))\n sentence_count = len(list(doc_sentencizer_en.sents))\n if sentence_count > 1:\n tag[SmartTag.multi_sent] = True\n\n adds = {DatasetColumn.word_count: len(tokens)}\n records.append(TaggingResponse(tags=tag, adds=adds))\n\n return records", "def after_parse(self, response):\n\n extraction_requests = []\n\n for container in response.xpath('//tr[@align=\"center\"]'):\n detail_url = container.xpath('./td[1]/a/ @href').extract()[0]\n\n l = BusinessLoader(selector=container, response=response)\n l.add_xpath('telephone', './td[1]/span/ text()')\n l.add_xpath('website', './td[2]/a/ @href')\n l.add_xpath('email', \"substring-after(./td[4]/a/ @href,'mailto:')\")\n l.add_xpath('legalName', './td[1]/a/ text()')\n item = l.load_item()\n\n log.msg('business details extracted from index: {0}'.format(item))\n\n extraction_requests.append(Request(url = urljoin(response.url, detail_url), meta={'item':item}, callback=self.extract))\n\n return extraction_requests", "def _extract_data(self, eopatch: EOPatch, responses: list[Any], shape: tuple[int, ...]) -> EOPatch:", "def get_all(context, namespace_name, session, filters=None, marker=None,\n limit=None, sort_key='created_at', sort_dir='desc'):\n\n namespace = namespace_api.get(context, namespace_name, session)\n query = (session.query(models.MetadefTag).filter_by(\n namespace_id=namespace['id']))\n\n marker_tag = None\n if marker is not None:\n marker_tag = _get(context, marker, session)\n\n sort_keys = ['created_at', 'id']\n sort_keys.insert(0, sort_key) if sort_key not in sort_keys else sort_keys\n\n query = paginate_query(query=query,\n model=models.MetadefTag,\n limit=limit,\n sort_keys=sort_keys,\n marker=marker_tag, sort_dir=sort_dir)\n metadef_tag = query.all()\n metadef_tag_list = []\n for tag in metadef_tag:\n metadef_tag_list.append(tag.to_dict())\n\n return metadef_tag_list", "def parse(self, response):", "def post_build(self, manager):\n if not self.output_files_dir.exists():\n return\n\n output_file_dirs = [\n d for d in self.output_files_dir.rglob(\"*\") if d.is_dir()\n ] + [self.output_files_dir]\n for output_file_dir in output_file_dirs:\n stem = output_file_dir.relative_to(self.output_files_dir)\n api_path = self.api_dir / stem / ALL_JSON\n\n yield self.task(\n name=f\"contents:{stem}\",\n doc=f\"create a Jupyter Contents API response for {stem}\",\n actions=[\n (self.one_contents_path, 
[output_file_dir, api_path]),\n (self.maybe_timestamp, [api_path]),\n ],\n file_dep=[p for p in output_file_dir.rglob(\"*\") if not p.is_dir()],\n targets=[api_path],\n )", "def get(self, request):\n serializer = self.serializer_class(self.queryset.all(), many=True)\n return Response({'tags':serializer.data}, status=status.HTTP_200_OK)", "def tags():", "def _extract_data(self, eopatch: EOPatch, responses: list[Any], shape: tuple[int, ...]) -> EOPatch:\n if len(self.requested_bands) + len(self.requested_additional_bands) == 1:\n # if only one band is requested the response is not a tar so we reshape it\n only_band = (self.requested_bands + self.requested_additional_bands)[0]\n responses = [{only_band.name + \".tif\": image} for image in responses]\n\n if self.additional_data:\n self._extract_additional_features(eopatch, responses, shape)\n\n if self.bands_feature:\n self._extract_bands_feature(eopatch, responses, shape)\n\n return eopatch", "def create_auto_annotations(request): # post\n\n request_body_json = json.loads(request.body)\n usecase_list = request_body_json['usecase']\n fields_list = request_body_json['selected']\n report_key = request_body_json['report_type']\n batch = request_body_json['batch']\n\n # check existence of examode labels and concepts\n\n if report_key == 'reports':\n for usecase in usecase_list:\n fields = []\n if fields_list != {}:\n if usecase in fields_list.keys():\n fields = list(set(fields_list[usecase]))\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json'),\n 'r') as use_outfile:\n json_to_ret = json.load(use_outfile)\n json_to_ret['extract_fields'][usecase] = fields\n # print(json_to_ret)\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json'), 'w') as use_outfile:\n json.dump(json_to_ret,use_outfile)\n\n # print(fields)\n\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n # output_concepts_dir = os.path.join(workpath, './sket/outputs')\n # for root, dirs, files in os.walk(output_concepts_dir):\n # for f in files:\n # os.unlink(os.path.join(root, f))\n # for d in dirs:\n # shutil.rmtree(os.path.join(root, d))\n\n bool_val,error = create_auto_gt_1(usecase,fields,report_key,batch)\n if bool_val == False:\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json'),\n 'r') as use_outfile:\n json_to_ret = json.load(use_outfile)\n json_to_ret['extract_fields'][usecase] = []\n # print(json_to_ret)\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json'),\n 'w') as use_outfile:\n json.dump(json_to_ret, use_outfile)\n json_resp = {'error': error}\n return JsonResponse(json_resp)\n\n elif report_key == 'pubmed':\n for usecase in usecase_list:\n fields = ['title','abstract']\n # workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n # output_concepts_dir = os.path.join(workpath, './sket/outputs')\n # for root, dirs, files in os.walk(output_concepts_dir):\n # for f in files:\n # os.unlink(os.path.join(root, f))\n # for d in dirs:\n # shutil.rmtree(os.path.join(root, d))\n\n bool_val, error = create_auto_gt_1(usecase, fields, report_key, batch)\n if bool_val == False:\n json_resp = {'error': error}\n return JsonResponse(json_resp)\n\n json_resp = {'msg':'ok'}\n return JsonResponse(json_resp)", "def gen_tag_addlist(self) -> tp.List[xml.TagAddList]:\r\n if not 
self.tag_adds:\r\n robot_config = self.main_config['ros']['robots'][self.robot]\r\n prefix = robot_config['prefix']\r\n model_base = robot_config['model']\r\n model_variant = robot_config.get('model_variant', '')\r\n\r\n if model_variant != '':\r\n model = f\"{model_base}_{model_variant}\"\r\n else:\r\n model = model_base\r\n\r\n desc_cmd = f\"$(find xacro)/xacro $(find {model_base}_description)/urdf/{model}.urdf.xacro\"\r\n for s in self.sizes:\r\n exp_adds = xml.TagAddList()\r\n pos_i = random.randint(0, len(self.positions) - 1)\r\n\r\n exp_adds.append(xml.TagAdd(\".\",\r\n \"master\",\r\n {},\r\n True))\r\n exp_adds.append(xml.TagAdd(\"./master\",\r\n \"group\",\r\n {\r\n 'ns': 'sierra'\r\n },\r\n False))\r\n exp_adds.append(xml.TagAdd(\"./master/group/[@ns='sierra']\",\r\n \"param\",\r\n {\r\n 'name': 'experiment/n_robots',\r\n 'value': str(s)\r\n },\r\n False))\r\n\r\n for i in range(0, s):\r\n\r\n ns = f'{prefix}{i}'\r\n pos = self.positions[pos_i]\r\n pos_i = (pos_i + 1) % len(self.positions)\r\n spawn_cmd_args = f\"-urdf -model {model}_{ns} -x {pos.x} -y {pos.y} -z {pos.z} -param robot_description\"\r\n\r\n exp_adds.append(xml.TagAdd(\"./robot\",\r\n \"group\",\r\n {\r\n 'ns': ns\r\n },\r\n True))\r\n\r\n exp_adds.append(xml.TagAdd(f\"./robot/group/[@ns='{ns}']\",\r\n \"param\",\r\n {\r\n \"name\": \"tf_prefix\",\r\n \"value\": ns\r\n },\r\n True))\r\n\r\n # These two tag adds are OK to use because:\r\n #\r\n # - All robots in Gazebo are created using spawn_model\r\n # initially.\r\n #\r\n # - All robots in Gazebo will provide a robot description\r\n # .urdf.xacro per ROS naming conventions\r\n exp_adds.append(xml.TagAdd(f\"./robot/group/[@ns='{ns}']\",\r\n \"param\",\r\n {\r\n \"name\": \"robot_description\",\r\n \"command\": desc_cmd\r\n },\r\n True))\r\n\r\n exp_adds.append(xml.TagAdd(f\"./robot/group/[@ns='{ns}']\",\r\n \"node\",\r\n {\r\n \"name\": \"spawn_urdf\",\r\n \"pkg\": \"gazebo_ros\",\r\n \"type\": \"spawn_model\",\r\n \"args\": spawn_cmd_args\r\n },\r\n True))\r\n\r\n self.tag_adds.append(exp_adds)\r\n\r\n return self.tag_adds", "def build_response(self, data_list):\n raise NotImplementedError(\"build_response method is not implemented.\")", "def get_tag_and_body(tagged_events):\n tag_events = []\n for tag_event in tagged_events:\n body = tag_event[1].get(\"body\")\n tb = {\"body\": body}\n tb[\"tags\"] = []\n tb[\"display_name\"] = tag_event[1].get(\"display_name\")\n for tag in tag_event[1][\"tag\"][\"labels\"]:\n tb[\"tags\"].append(tag)\n tag_events.append(tb)\n return tag_events", "def extract_and_tag_test():\n test_untagged_path = os.getcwd() + \"/data/test/test_untagged/\"\n test_untagged_directory = os.fsencode(test_untagged_path)\n\n print(\"Tagging text. Please wait...\")\n for file in os.listdir(test_untagged_directory):\n filename = os.fsdecode(file)\n try:\n if filename.endswith(\".txt\"):\n text = entity_process.read_data(test_untagged_path, file)\n text = text.lower()\n header,body = entity_process.split_text(text)\n header_array = header.splitlines()\n\n\n start_time, end_time = entity_process.extract_time(header)\n location = entity_process.extract_location(header_array, body)\n speaker = entity_process.extract_speaker(header_array, body)\n\n entity_tagger.tag_all(filename, text, start_time, end_time, location, speaker)\n except Exception as e:\n raise e\n return \"No files found here!\"\n print(\"Tagging complete! 
Text saved to\" + os.getcwd() + \"/out\")", "def test_export_wo_tags(self):\r\n self._get_good_request_wo_tags()\r\n\r\n res = self.app.get(\r\n '/api/v1/admin/bmarks/export?api_key={0}'.format(\r\n self.api_key),\r\n status=200)\r\n\r\n self.assertTrue(\r\n \"bmark.us\" in res.body,\r\n msg='Bmark is in the exported body: ' + res.body)\r\n data = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n 1,\r\n data['count'],\r\n \"Should be one result: \" + str(data['count']))", "def download_all_reports(request):\n\n request_body_json = json.loads(request.body)\n report_list = request_body_json['report_list']\n mode = request_body_json['format']\n action = request_body_json['action']\n annot = request_body_json['annotation_mode']\n\n if annot == 'Manual':\n annot = 'Human'\n elif annot == 'Automatic':\n annot = 'Robot'\n\n try:\n response = HttpResponse(content_type='text/csv')\n resp = download_report_gt(report_list, action, annot, mode, response)\n if mode == 'biocxml' or mode == 'biocjson':\n return HttpResponse(resp, content_type='application/xml')\n elif mode == 'csv':\n return resp\n elif mode == 'json':\n return JsonResponse(resp)\n\n except Exception as e:\n print(e)\n json_error = {'error': e}\n return JsonResponse(json_error)", "def get_events_batch() -> PayloadDictList:\n ...", "def __prepare_wsdl_objects(self):\r\n pass", "def simulate_response(self, documents):", "def pre_pipeline(self, results):\n results[\"img_prefix\"] = self.img_prefix\n results[\"seg_prefix\"] = self.seg_prefix\n results[\"proposal_file\"] = self.proposal_file\n results[\"bbox_fields\"] = []\n results[\"mask_fields\"] = []\n results[\"seg_fields\"] = []\n results[\"site_fields\"] = []\n results[\"label_fields\"] = []", "def response_helper(self, response, **kwargs):\n self.resolve_schema(response)\n if \"headers\" in response:\n for header in response[\"headers\"].values():\n self.resolve_schema(header)\n return response", "def parse_response(self):\n pass", "def bulk(self) -> None:\n helpers.bulk(self.client, self.gen_business_data(BUSINESS_FP))\n helpers.bulk(self.client, self.gen_review_data(REVIEW_FP))\n helpers.bulk(self.client, self.gen_tip_data(TIP_FP))", "def processed_bulk(self, pipeline):\n docs = [Document([], text=t) for t in EN_DOCS]\n return pipeline(docs)", "def build_parameters(self):\n for key in entity_map:\n if key in self.sample_frame:\n parameter = {\n \"id\": str(uuid.uuid4()),\n \"required\": True,\n \"name\": entity_map[key]['entity_type'],\n \"dataType\": \"@{}\".format(entity_map[key]['entity_type']),\n \"value\": \"${}\".format(entity_map[key]['entity_type']),\n \"isList\": False\n }\n self.frame['responses'][0]['parameters'].append(parameter)", "def on_get(self, req, resp):\n resp.set_header('Content-Type', 'text/json')\n tif_paths = encode.get_files_in_directory(DIARIES_TO_ENCODE_DIR, \".tif\")\n zip_paths = encode.get_files_in_directory(DIARIES_TO_ENCODE_DIR, \".zip\")\n diaries_paths = tif_paths + zip_paths\n def extract_file_name(path): return os.path.basename(path)\n resp.body = json.dumps({\"diaries\": list(map(extract_file_name, diaries_paths)),\n \"diaries_paths\": diaries_paths})", "def build_expected_bq_response(self):\n response = [\n { \n \"json\": {\n \"batch_id\": self.batch_id, \n \"metric\": {\n \"labels\": [\n {\n \"key\": \"response_code\", \n \"value\": \"0\"\n }\n ], \n \"type\": \"agent.googleapis.com/agent/request_count\"\n }, \n \"metric_kind\": \"DELTA\", \n \"point\": {\n \"interval\": {\n \"end_time\": \"2019-02-18T22:09:53.939194Z\", \n \"start_time\": 
\"2019-02-18T21:09:53.939194Z\"\n }, \n \"value\": {\n \"int64_value\": \"62\"\n }\n }, \n \"resource\": {\n \"labels\": [\n {\n \"key\": \"instance_id\", \n \"value\": \"9113659852587170607\"\n }, \n {\n \"key\": \"project_id\", \n \"value\": \"YOUR_PROJECT_ID\"\n }, \n {\n \"key\": \"zone\", \n \"value\": \"us-east4-a\"\n }\n ], \n \"type\": \"gce_instance\"\n }, \n \"value_type\": \"INT64\"\n }\n }, \n {\n \"json\": {\n \"batch_id\": self.batch_id, \n \"metric\": {\n \"labels\": [\n {\n \"key\": \"response_code\", \n \"value\": \"0\"\n }\n ], \n \"type\": \"agent.googleapis.com/agent/request_count\"\n }, \n \"metric_kind\": \"DELTA\", \n \"point\": {\n \"interval\": {\n \"end_time\": \"2019-02-18T21:09:53.939194Z\", \n \"start_time\": \"2019-02-18T20:09:53.939194Z\"\n }, \n \"value\": {\n \"int64_value\": \"61\"\n }\n }, \n \"resource\": {\n \"labels\": [\n {\n \"key\": \"instance_id\", \n \"value\": \"9113659852587170607\"\n }, \n {\n \"key\": \"project_id\", \n \"value\": \"YOUR_PROJECT_ID\"\n }, \n {\n \"key\": \"zone\", \n \"value\": \"us-east4-a\"\n }\n ], \n \"type\": \"gce_instance\"\n }, \n \"value_type\": \"INT64\"\n }\n }\n ]\n return response", "def test_tag_list(self):\n self.seed_static_data()\n params = {\n 'event_id': 1,\n 'language': 'en'\n }\n\n response = self.app.get('/api/v1/tags', headers=self.user1_headers, data=params)\n data = json.loads(response.data)\n\n self.assertEqual(len(data), 2)\n self.assertEqual(data[0]['id'], 1)\n self.assertEqual(data[0]['event_id'], 1)\n self.assertEqual(data[0]['tag_type'], 'RESPONSE')\n self.assertEqual(data[0]['name'], 'English Tag 1 Event 1')\n self.assertEqual(data[0]['description'], 'English Tag 1 Event 1 Description')\n self.assertEqual(data[1]['id'], 2)\n self.assertEqual(data[1]['event_id'], 1)\n self.assertEqual(data[1]['tag_type'], 'RESPONSE')\n self.assertEqual(data[1]['name'], 'English Tag 2 Event 1')\n self.assertEqual(data[1]['description'], 'English Tag 2 Event 1 Description')\n\n params = {\n 'event_id': 1,\n 'language': 'fr'\n }\n\n response = self.app.get('/api/v1/tags', headers=self.user1_headers, data=params)\n data = json.loads(response.data)\n\n self.assertEqual(len(data), 2)\n self.assertEqual(data[0]['id'], 1)\n self.assertEqual(data[0]['event_id'], 1)\n self.assertEqual(data[0]['tag_type'], 'RESPONSE')\n self.assertEqual(data[0]['name'], 'French Tag 1 Event 1')\n self.assertEqual(data[0]['description'], 'French Tag 1 Event 1 Description')\n self.assertEqual(data[1]['id'], 2)\n self.assertEqual(data[1]['event_id'], 1)\n self.assertEqual(data[1]['tag_type'], 'RESPONSE')\n self.assertEqual(data[1]['name'], 'French Tag 2 Event 1')\n self.assertEqual(data[1]['description'], 'French Tag 2 Event 1 Description')", "def on_get(self, req, resp):\n resp.set_header('Content-Type', 'text/json')\n diaries_paths = encode.get_files_in_directory(DIARIES_TO_CREATE_DIR, \".pdf\")\n\n def extract_file_name(path): return os.path.basename(path)\n resp.body = json.dumps({\"templates_file_names\": list(map(extract_file_name, diaries_paths)),\n \"templates_paths\": diaries_paths})", "def identify_datasets(self, language_objects, context):\n\n datasets, new_sen = self.extractor.extract_all_templates(\n language_objects, context\n )\n context[\"datasets\"] = datasets\n return {'type': 'result', 'result': (new_sen, context[\"datasets\"])}", "def content(tmp_loc, ref_names_dict, order):\n \n fl = '[Content_Types].xml'\n inp_path = '/'.join([tmp_loc, fl])\n out_path = '/'.join([output_path, fl])\n \n cnt_lst = []\n asset_lst = []\n 
def_att = []\n d = dict()\n \n root1,tree1 = gen_tree(inp_path)\n root2,tree2 = gen_tree(out_path)\n \n # get all the extensions belongs to \"Default\" tag\n for relation in root2:\n if 'Default' in relation.tag:\n def_att.append(relation.attrib['Extension'])\n else:\n break\n \n for relation in root1:\n if 'Override' in relation.tag:\n attrib = relation.attrib['PartName'][1:]\n try:\n cnt = attrib.split('ppt/')[-1]\n ini = '/ppt/'\n except:\n cnt = attrib\n ini = '/'\n if cnt in ref_names_dict.keys():\n relation.attrib['PartName'] = f'{ini}{ref_names_dict[cnt]}'\n cnt_lst.append(relation)\n # asset_lst.append(relation.attrib['PartName'])\n else:\n cnt_lst.append(relation)\n if relation.attrib['PartName'] not in asset_lst:\n asset_lst.append(relation.attrib['PartName'])\n else:\n attrib = relation.attrib['Extension']\n if attrib not in def_att:\n cnt_lst.append(relation)\n # asset_lst.append(relation.attrib['Extension'])\n # deal with the assest_lst\n # print(\"AA: \", asset_lst)\n cnt_lst = natsort.natsorted(cnt_lst)\n for ele in cnt_lst:\n prev = tree2.find(ele.tag)\n prev.addnext(ele)\n \n tree2.write(out_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n \n unq_attr = []\n for relation in root2:\n if 'Override' in relation.tag:\n if relation.attrib['PartName'] not in unq_attr:\n unq_attr.append(relation.attrib['PartName'])\n else:\n root2.remove(relation)\n tree2.write(out_path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)", "def convert(self, tag=\"Data\", delimiter=\",\", noheader=False,\n limit=-1, buffer_size=1000):\n\n\n file_ctr = 0\n item_ctr = 0\n for dirName, subdirList, fileList in os.walk(self.input_directory):\n print('Found directory: %s' % dirName)\n for fname in fileList:\n print('\\t%s' % fname)\n # open the xml file for iteration\n if not fname.endswith(\".xml\"):\n continue\n #pdb.set_trace()\n \n input_file = dirName + \"/\" + fname\n self.context = ETree.iterparse(input_file, events=(\"start\", \"end\"))\n\n # iterate through the xml\n items = [{}]\n\n depth = 0\n min_depth = 0\n row_depth = -1\n n = 0\n for event, elem in self.context:\n if event == \"start\":\n depth += 1\n continue\n else:\n depth -= 1\n if depth < min_depth:\n min_depth = depth\n\n if depth < row_depth and items:\n if noheader:\n noheader = False\n else:\n # new line\n self.output_buffer.append(items)\n items = []\n # flush buffer to disk\n if len(self.output_buffer) > buffer_size:\n self._write_buffer(delimiter)\n\n plain_tag = elem.tag\n last_delim = max(elem.tag.rfind('}'), elem.tag.rfind(':'))\n if 0 < last_delim < len(elem.tag) - 1:\n plain_tag = elem.tag[last_delim + 1:]\n if tag == plain_tag:\n if n == 0:\n min_depth = depth\n elif n == 1:\n row_depth = min_depth\n n += 1\n if 0 < limit < n:\n break\n elem_name = elem.get(\"name\")\n if elem_name in self.output_dict[0].keys():\n if elem_name == 'SamS.ArchivedURL':\n if hash(elem.text) in self.item_titles.keys() and self.item_titles[hash(elem.text)] == elem.text:\n #item is repetative\n self.output_dict[item_ctr]={}\n #item_ctr-=1\n break\n else:\n self.item_titles[hash(elem.text)] = elem.text\n self.output_dict[item_ctr][elem_name]= elem.text and elem.text.encode('utf8') or ''\n\n #if (len(self.output_dict[item_ctr]) > 0 ) :\n if ('SamS.ArchivedURL' in self.output_dict[item_ctr]):\n item_ctr+=1\n self.output_dict.append({})\n else:\n self.output_dict[item_ctr] = {}\n \n file_ctr+=1 #next row in the dictionary array\n print \"processing file no \", file_ctr, \" item no\", 
item_ctr\n\n #pdb.set_trace()\n self._write_buffer(delimiter) # write rest of the buffer to file\n\n return n", "def get_response(self, response, pack):\n\n pass", "def parse_response(response):\n data = []\n \n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n \n row_count = 0 \n for row in rows:\n #print '\\n\\n', 'ROW_COUNT: ', row_count, '\\n'\n data.append({}) \n\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n #print header + ': ' + dimension\n data[row_count][header[3:]] = dimension\n \n for i, values in enumerate(dateRangeValues):\n #print 'Date range (' + str(i) + ')'\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n #print metricHeader.get('name') + ': ' + value\n data[row_count][metricHeader.get('name')[3:]] = value\n \n row_count += 1 \n \n return data", "def cleanup(self):\n self.result.extend(self.endTagList)", "def setup_response_collector(self):\n pass", "def parse(self, response):\n return super().parse(response)", "def parse(self, response):\n return super().parse(response)", "def _postprocess(self, responses):\n for idx, response in enumerate(responses):\n responses[idx] = {'id': response[0],\n 'text': self.target_test[response[0]]}\n\n for jdx, score in enumerate(response[1:]):\n responses[idx]['score_' + str(jdx)] = response[1:][jdx]\n\n return responses", "def process_response(self, response: Dict) -> Iterator[dict]:", "def post_process_result(self, result: np.ndarray) -> np.ndarray:\n to_cut = len(\"_tag\")\n return np.asarray([[tag[:-to_cut] for tag in list_of_tags] for list_of_tags in result])", "def save_tags(context):\n items = context.response.json()['items']\n tags = set()\n for item in items:\n for tag in item['tags']:\n tags.add(tag)\n context.tags = list(tags)\n logging.debug('Saved all tags in context.tags:\\n%s', pformat(sorted(context.tags)))", "def getTags(number=None):", "def postprocess(self, inference_output):\n ret = []\n quantiles = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n # for each request\n for inference_output_request in inference_output:\n ret_request = []\n # for each time series\n for i in inference_output_request:\n l = {}\n l[\"item_id\"] = i.item_id\n l[\"quantiles\"] = {}\n for q in quantiles:\n l[\"quantiles\"][str(q)] = i.quantile(q).tolist()\n l[\"mean\"] = i.mean.tolist()\n ret_request.append(json.dumps(l))\n ret.append('\\n'.join(ret_request) + '\\n')\n return ret", "def tag_extract_result(self, request, result):\n return request, map(lambda x: util.build_recursive(x[0], suppress=[]), \n result)", "def to_requests(self):\n pass", "def process_multi_body_format(commands):", "def _prepare_wsdl_objects(self):\r\n self.DeletionControlType = self.client.factory.create('DeletionControlType')\r\n self.TrackingId = self.client.factory.create('TrackingId')\r\n self.TrackingId.TrackingIdType = self.client.factory.create('TrackingIdType')", "def process_metadata(self):\n\n self._responses = self._get_responses()\n\n (\n self._request_body_parameter,\n self._request_body_class,\n self._request_body_content_types,\n ) = self._get_request_body_parameter()\n\n if self._request_body_content_types is None:\n self._request_body_content_types = [\"application/json\"]\n\n 
self._request_body_file_type = self._get_request_body_file_type()\n if self._request_body_parameter is not None and self._request_body_file_type is not None:\n raise TypeError(\"An endpoint cannot accept both a file and a model\")\n\n self._query_parameters = dict(self._get_query_string_parameters())\n self._path_parameters = dict(self._get_path_parameters())\n\n self._security = [*self._get_security_requirements()]\n self._tags = [*self._get_tags()]", "def _generate_entities(data):\n\n i = 0\n while i < len(data):\n release_date = datetime.strptime(data[i].text, '%m/%d/%Y')\n release_date = release_date.strftime('%Y-%m-%d')\n name = data[i+1].text\n url = data[i+1].find_element_by_tag_name('a').get_attribute(\"/href\")\n\n href = data[i+2].find_element_by_tag_name('a').get_attribute(\"/href\")\n related = []\n if href:\n doc = BeautifulSoup(helpers.fetch_string(href), \"html.parser\")\n tds = doc.find_all(\"td\", class_='ms-vb')\n for td in tds:\n try:\n related.append(td.find('a')['href'])\n except AttributeError:\n pass\n \n related_documents = ' '.join(related) \n fields = [{\"name\": \"Release date\", \"value\": release_date},\n {\"tag\": \"url\", \"value\": url},\n {\"name\": \"Related documents\", \"value\": related_documents}]\n i += 3\n\n my_id = helpers.make_id(name)\n if len(my_id) > 99:\n my_id = my_id[:99]\n\n entity = {\n \"_meta\": {\n \"id\": my_id,\n \"entity_type\": \"company\"\n },\n \"fields\": fields,\n \"name\": name,\n }\n\n helpers.emit(entity)", "def teardown(self):\n self.bqSession.update_mex('Returning results...')\n log.debug('Returning results...')\n tag_list = self.model_xml.xpath('tag[@name=\"Classes\"]/tag[@value=\"%s\"]'%str(self.class_number))[0]\n \n outputTag = etree.Element('tag', name='outputs')\n outputSubTag = etree.SubElement(outputTag, 'tag', name='summary')\n \n if self.options.model_url:\n etree.SubElement(outputSubTag, 'tag',name='Model File', value=self.options.model_url, type='url')\n else:\n etree.SubElement(outputSubTag, 'tag',name='Model File', value='Internal Model File')\n \n etree.SubElement(outputSubTag, 'tag',name='Class', value=str(self.class_number))\n \n query = []\n for t in tag_list:\n etree.SubElement(outputSubTag, 'tag', name=t.attrib['name'], value=t.attrib['value'])\n query.append('\"%s\":\"%s\"'%( t.attrib['name'], t.attrib['value']))\n query = ' & '.join(query)\n etree.SubElement( outputSubTag, 'tag', name='confidence', value=str(self.confidence))\n\n etree.SubElement(outputTag, 'tag', name='similar_images', value=query, type='browser')\n\n self.bqSession.finish_mex(tags = [outputTag])\n log.debug('FINISHED')\n self.bqSession.close()", "def parse(self, response, **kwargs):\n title = response.xpath('//*[@id=\"wrap\"]/h1/text()').extract_first()\n if title:\n url_to_full_version = response._get_url()\n first_160 = ''.join(response.xpath('//*[@id=\"woe\"]/section/div/p/text()').extract())[:160]\n base_date = response.xpath('//*[@id=\"wrap\"]/div/div[2]/text()').extract_first()\n date_formatted = conf.exec_func_chain(base_date,\n [conf.clean_records_regex,\n lambda v: v[0:-2],\n lambda v: conf.parse_dtts(v, '%b %d, %Y')])\n\n tags = response.xpath('//*[@id=\"woe\"]/section[3]/div/div/a/text()').extract()\n authors_section = response.xpath('//*[@id=\"wrap\"]/div/div[1]/div/span/a')\n for row in authors_section:\n full_author_url = Selector(text=row.extract()).xpath('///@href') \\\n .extract_first()\n author_fullname = conf.clean_records_regex(\n Selector(text=row.extract()).xpath('///span/text()').extract_first())\n if 
date_formatted >= conf.crawl_date[0].get('LastExecutionDate'):\n conf.write_data_append('articles.json', json.dumps({'title': title,\n 'urlFullVersion': url_to_full_version,\n 'first160': first_160,\n 'dateFormatted': date_formatted,\n 'tags': tags,\n 'authorUrl': f\"{conf.gd_base_url}\"\n f\"{full_author_url}\",\n 'authorName': author_fullname,\n 'author_key': full_author_url.rsplit('/')[-2]\n }))", "def _init_sequences(self, resp: Response) -> List[Dict[str, str]]:\n\n time_token = build_time_token(arrow.utcnow(), UPDATE_INTERVAL)\n\n sequences_data = []\n for sequence in resp.json()['value']:\n self._logger.info(self._thread_name + \" sequence:\\n\" + str(json.dumps(sequence)))\n prefix = self._url_sequences + \"/\" + sequence[\"sequenceId\"] + \"/data\"\n\n sequence_name = sequence[\"name\"]\n data = {\"valueType\": sequence_name, \"time\": time_token}\n if sequence_name == \"LAeq\" or sequence_name == \"LCeq\":\n data[\"url_prefix\"] = prefix + \"/single\"\n elif sequence_name == \"Annoyance\":\n data[\"url_prefix\"] = prefix + \"/single\"\n\n # min2_ago_in_seconds = arrow.utcnow() - timedelta(seconds=120)\n # data[\"time\"] = build_time_token(min2_ago_in_seconds, 60)\n\n min10_ago_in_seconds = arrow.utcnow() - timedelta(seconds=600)\n data[\"time\"] = build_time_token(min10_ago_in_seconds, 60)\n elif sequence_name == \"Avg5minLAeq\":\n data[\"url_prefix\"] = prefix + \"/single\"\n\n min10_ago_in_seconds = arrow.utcnow() - timedelta(seconds=600)\n data[\"time\"] = build_time_token(min10_ago_in_seconds, MIN5_IN_SECONDS)\n elif sequence_name == \"CPBLZeq\":\n data[\"url_prefix\"] = prefix + \"/array\"\n else:\n self._logger.info(sequence_name + \" not yet integrated!\")\n continue\n\n sequences_data.append(data)\n\n return sequences_data", "def task_fetch_tag_posts_and_comments(\n tag_name,\n count=100,\n posts_out='data/tag_posts_data.xlsx',\n comments_out='data/tag_comments_data.xlsx'):\n\n # Create query instances for posts and comments\n post_query = Query(TagPostParser)\n comment_query = Query(CommentParser)\n\n # Query posts data\n post_data = post_query.query_all(TAG_POSTS_QUERY_HASH_PARAM, {\n \"tag_name\": tag_name,\n \"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n logger.info(\"Save the posts data to %s.\" % posts_out)\n\n # Query comments data of posts\n comment_data = []\n for i, post in enumerate(post_data):\n logger.info(\"Get comment of %d %s\" % (i, post['short_code']))\n comment_data_of_one_post = comment_query.query_all(COMMENTS_QUERY_HASH_PARAM, {\n \"shortcode\": post['short_code'],\n \"first\": 50,\n }, 100)\n for comment in comment_data_of_one_post:\n comment['post_short_code'] = post['short_code']\n comment_data.extend(comment_data_of_one_post)\n logger.info(\"Count of comment_data: %d\" % len(comment_data))\n\n # Save the comments data\n comment_data_df = pd.DataFrame(comment_data)\n comment_data_df.to_excel(comments_out, encoding='utf-8', index=False)\n logger.info(\"Save the comments data to %s.\" % comments_out)", "def gen_report(self):\n # type: () -> Optional[List[Dict[AnyStr, Any]]]\n if isinstance(self.report_count, int) and self.report_count <= 0:\n return None\n if self.bbox is None or self.true is None:\n return None\n # flatten batches/samples\n pack = list(zip(*[(*pack,)\n for packs in zip(self.bbox, self.true, *self.meta.values())\n for pack in zip(*packs)]))\n data = {key: val 
for key, val in zip([\"detect\", \"target\", *self.meta.keys()], pack)}\n assert all([len(val) == len(data[\"target\"]) for val in data.values()]), \"messed up unpacking\"\n\n # flatten targets per batches/samples\n all_targets = []\n sample_count = len(data[\"target\"])\n for sample_idx in range(sample_count):\n if isinstance(self.report_count, int) and self.report_count >= len(all_targets):\n logger.warning(f\"report max count {self.report_count} reached at {len(all_targets)} targets \"\n f\"(sample {sample_idx}/{sample_count} processed)\")\n break\n sample_targets = data[\"target\"][sample_idx]\n sample_detects = data[\"detect\"][sample_idx]\n sample_report = self.group_bbox(sample_targets, sample_detects)\n for target in sample_report:\n for k in self.meta:\n target[k] = self.meta[k][sample_idx]\n target[\"target\"] = {\n \"bbox\": target[\"target\"],\n \"class_name\": self.class_names[target[\"target\"].class_id]\n }\n all_targets.extend(sample_report)\n # format everything nicely as json\n for item in all_targets:\n if isinstance(item[\"target\"][\"bbox\"], BoundingBox):\n item[\"target\"].update(item[\"target\"].pop(\"bbox\").json())\n item[\"target\"][\"class_name\"] = self.class_names[item[\"target\"][\"class_id\"]]\n for det in item[\"detect\"]:\n if isinstance(det[\"bbox\"], BoundingBox):\n det.update(det.pop(\"bbox\").json())\n det[\"class_name\"] = self.class_names[det[\"class_id\"]]\n return all_targets", "def setup():\n global definitions\n\n # fully replace dict with AttrDict\n complete = AttrDict({})\n for k, v in definitions.items():\n data = AttrDict({})\n data.create = v['create'] if 'create' in v else 'create_' + v['stem']\n data.fetch = v['fetch'] if 'fetch' in v else v['stem'] + 's'\n data.destroy = v['destroy'] if 'destroy' in v else 'delete'\n complete[k] = data\n definitions = complete\n\n global _existing\n for r in definitions.keys():\n _existing[r] = _fetch(r)", "def _prepare_wsdl_objects(self):\r\n self.TrackPackageIdentifier = self.client.factory.create('TrackPackageIdentifier')\r\n # Default to tracking number.\r\n self.TrackPackageIdentifier.Type = 'TRACKING_NUMBER_OR_DOORTAG'", "def handle_tags(self, request):\n \"\"\"\n @api {get} /tags List tags\n @apiName GetTags\n @apiGroup Misc\n @apiVersion 1.0.0\n\n @apiDescription List currenty used tags\n\n @apiSuccessExample {json} Example response:\n [\n \"tag1\",\n \"tag2\"\n ]\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n tags = []\n\n for task in self.cluster.config.get('tasks').values():\n if 'tags' in task:\n tags += task['tags']\n\n tags = list(set(tags))\n\n return HTTPReply(code = 200, body = json.dumps(tags), headers = headers)", "def boldExtract(genera):\r\n # Prepare Web Service Endpoint for BOLD's Public Data Portal API\r\n # Appending BOLD's base URL to each genera from the NSR list\r\n base_url = 'http://v4.boldsystems.org/index.php/API_Public/combined?taxon='\r\n source_urls = list(map(lambda x: \"{}{}{}\".\r\n format(base_url, x, '&format=tsv'), genera))\r\n\r\n # Download sequence data from BOLD using list of url's\r\n print('Beginning sequence data retrieval...')\r\n counter = 0\r\n for url in source_urls:\r\n r = http.request('GET', url)\r\n name = genera[counter]\r\n counter += 1\r\n with open(args.outdir1+\"/\"+name+\".tsv\", \"wb\") as fcont:\r\n fcont.write(r.data)", "def retrieve_all (self, user, pwd, vector_file, tiles, product, startdate, enddate, cloud_max) :\n q_param = (SciHubMetadataExtractor.\n 
__compose_q_param(vector_file,tiles, product,startdate,enddate,cloud_max))\n if (q_param=='') :\n print (\"ERROR: can't compose query string\")\n return list()\n\n start = 0\n list_result = list()\n while True :\n query_base = SciHubMetadataExtractor.base_url\n query_base+='&start='+str(start) + '&rows='+str(SciHubMetadataExtractor.page_num)\n r = requests.post(query_base,{\"q\":q_param},auth=(user,pwd))\n if (r.status_code!=200) :\n print ('ERROR: ' + str(r.status_code))\n return ''\n json_response = json.loads(r.text)\n total = int(json_response[\"feed\"][\"opensearch:totalResults\"])\n if (total == 0) :\n return list_result\n \n raw_entities = json_response[\"feed\"][\"entry\"]\n if total == 1:\n t = list()\n t.append(raw_entities)\n raw_entities = t.copy()\n\n for re in raw_entities :\n list_result.append(SciHubMetadataExtractor.__convert_raw_entity(re)) \n \n if (start + SciHubMetadataExtractor.page_num >= total) :\n break\n else :\n start+=SciHubMetadataExtractor.page_num\n \n return list_result", "def extract():\n queries = querylist_builder()\n \n pathlib.Path('/tmp/street_data').mkdir(parents=True, exist_ok=True) \n for i,q in enumerate(queries):\n print(\"running extract query\")\n url = ENDPOINT + \"?CommandData=\" + q\n print(url)\n r = requests.get(url)\n text_file = open(\"/tmp/street_data/\" + str(i) + \".xml\", 'w')\n data = r.text\n print(data)\n text_file.write(data) \n print(\"data saved for {}\".format(str(i)))\n text_file.close()", "def get_tags_batch(self, tags, filenames):\n # Explicitly ruling out strings here because passing in a\n # string would lead to strange and hard-to-find errors\n if isinstance(tags, basestring):\n raise TypeError(\"The argument 'tags' must be \"\n \"an iterable of strings\")\n if isinstance(filenames, basestring):\n raise TypeError(\"The argument 'filenames' must be \"\n \"an iterable of strings\")\n params = [\"-\" + t for t in tags]\n params.extend(filenames)\n return self.execute_json(*params)", "def do_tag(self, project_id):\n page_token = None\n more_results = True\n while more_results:\n try:\n response = self.bigquery.datasets().list(\n projectId=project_id, pageToken=page_token).execute()\n except errors.HttpError as e:\n logging.error(e)\n return\n if 'datasets' in response:\n for dataset in response['datasets']:\n self.tag_one_dataset(dataset)\n table_page_token = None\n table_more_results = True\n while table_more_results:\n tresponse = self.bigquery.tables().list(\n projectId=project_id,\n datasetId=dataset['datasetReference']['datasetId'],\n pageToken=table_page_token).execute()\n if 'tables' in tresponse:\n for t in tresponse['tables']:\n t['location'] = dataset['location']\n self.tag_one_table(t)\n if 'nextPageToken' in tresponse:\n table_page_token = tresponse['nextPageToken']\n table_more_results = True\n else:\n table_more_results = False\n if 'nextPageToken' in response:\n page_token = response['nextPageToken']\n else:\n more_results = False", "def download_batch_respones(self, response_body_url: str,\n keep_files: bool = False) -> List[str]:\n raise NotImplementedError", "def write_response(response):\n logging.info(\"Parsing analytics response...\")\n\n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n\n pages = {'popular': []}\n\n for row in report.get('data', {}).get('rows', []):\n dimensions = row.get('dimensions', [])\n metrics = 
row.get('metrics', [])\n metricValues = []\n if metrics:\n metricValues = metrics[0]['values']\n\n for header, dimension in list(zip(dimensionHeaders, dimensions)):\n if dimension == '/' or dimension.startswith('/categories') or dimension.startswith('/series') or dimension.startswith('/about'):\n continue\n all_metrics = zip(metricHeaders, metricValues) \n page_views_metric = next((metricValue for metricHeader, metricValue in all_metrics if metricHeader['name'] == 'ga:pageviews'), None)\n \n pages['popular'].append({\n 'path': dimension,\n 'views': int(page_views_metric)\n })\n\n logging.info(\"Writing response...\")\n if OUTPUT_FORMAT == 'json':\n write_json_response(pages)\n elif OUTPUT_FORMAT == 'yaml' or OUTPUT_FORMAT == 'yml':\n write_yaml_response(pages)\n else:\n print(pages)", "def setup_response(self, system, location, definition, descriptor):\r\n pass", "def getDataBatch(self, batch_size):\n for i in range(batch_size):\n params.offset = params.offset+i #increment by 1 for the next set of batch\n url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json'\n url_params = {'q': self.args.query.replace(' ', '+'),'api-key': self.args.api_key,'page': params.offset}\n response = requests.get(url, params=url_params)\n r = response.json()\n\n #start by checking call was successful\n if response.ok:\n if r['status'] != 'OK':\n log.error(\"Error with API call, NYT status not ok\")\n return None\n\n # TODO: implement - this dummy implementation returns one batch of data\n list_of_art = []\n for art in r['response']['docs']:\n list_of_art.append(functions.flatten_json(art)) #attach to list returned in call\n yield list_of_art\n else:\n log.error(\"Error during API call on request side\")", "def create_entities(self, entity_type):\n data = self.read_file(entity_type)\n base_url = data['url']\n for entity in data['entities']:\n url = base_url + entity['url']\n for data in entity['entities']:\n r = requests.post(url, json.dumps(data))\n print(r.text)", "def complete_xml_parsing(self):\n for item in self.entities:\n item.severity = self.parsed_severity\n item.cwes.extend(self.parsed_cwes)\n item.advisory_id = self.parsed_advisory_id\n item.attack_vector = self.parsed_attack_vector\n if self.parsed_cvss_base != '' and is_correct_score(self.parsed_cvss_base):\n cvss_v3 = CvssV3(base_sc=self.parsed_cvss_base)\n if self.parsed_cvss_temporal != '' \\\n and is_correct_score(self.parsed_cvss_temporal):\n cvss_v3.temporal_sc = self.parsed_cvss_temporal\n item.cvss_v3 = cvss_v3\n item.cvss_base_sc_v3 = self.parsed_cvss_base\n item.cvss_temporal_score_v3 = self.parsed_cvss_temporal\n item.published = self.parsed_date", "def task_fetch_tag_posts(\n tag_name,\n count=100,\n posts_out='data/tag_posts_data.xlsx'):\n # Create query instances for posts\n post_query = Query(TagPostParser)\n\n # Query posts data\n post_data = post_query.query_all(TAG_POSTS_QUERY_HASH_PARAM, {\n \"tag_name\": tag_name,\n \"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n logger.info(\"Save the posts data to %s.\" % posts_out)", "def build_tags_cache(self):\n tags_fetch_success = False\n tags_by_arn_cache = {}\n get_resources_paginator = resource_tagging_client.get_paginator(\"get_resources\")\n\n try:\n for page in get_resources_paginator.paginate(\n ResourceTypeFilters=[GET_RESOURCES_LAMBDA_FILTER], ResourcesPerPage=100\n ):\n 
send_forwarder_internal_metrics(\"get_resources_api_calls\")\n page_tags_by_arn = parse_get_resources_response_for_tags_by_arn(page)\n tags_by_arn_cache.update(page_tags_by_arn)\n tags_fetch_success = True\n\n except ClientError as e:\n logger.exception(\n \"Encountered a ClientError when trying to fetch tags. You may need to give \"\n \"this Lambda's role the 'tag:GetResources' permission\"\n )\n additional_tags = [\n f\"http_status_code:{e.response['ResponseMetadata']['HTTPStatusCode']}\"\n ]\n send_forwarder_internal_metrics(\n \"client_error\", additional_tags=additional_tags\n )\n tags_fetch_success = False\n\n logger.debug(\n \"Built this tags cache from GetResources API calls: %s\", tags_by_arn_cache\n )\n\n return tags_fetch_success, tags_by_arn_cache", "def process_documents(session, endpoint, docs, id_map):\n for doc in docs:\n original_asset = doc['asset']\n\n if original_asset['name'] == '' or original_asset['name'] is None:\n LOG.warn('Skipping asset {} with empty name'.format(original_asset['id']))\n\n asset = {}\n asset.update(original_asset)\n del asset['id'] # since it is going to be different\n report = {'source_id': original_asset['id'], 'type': 'upload'}\n\n dest_id = id_map.get(original_asset['id'])\n\n already_exists = dest_id is not None\n if already_exists:\n url = endpoint + dest_id + '/'\n r = session.get(url)\n if r.status_code == 404:\n already_exists = False\n LOG.warn('asset {} not found (original id {})'.format(\n dest_id, original_asset['id']))\n\n if already_exists:\n report['method'] = 'PUT'\n report['url'] = url\n r = session.put(url, json=asset)\n else:\n report['method'] = 'POST'\n r = session.post(endpoint, json=asset)\n\n try:\n r.raise_for_status()\n except requests.HTTPError:\n LOG.error('Saving asset failed: %s', r.content)\n LOG.error('Original asset: %s', asset)\n report['error'] = r.content\n yield report\n continue\n\n response = r.json()\n LOG.info('Saved asset: %s as %s', original_asset['id'], response['id'])\n report['dest_id'] = response['id']\n yield report", "def _pullbundle2extraprepare(pullop, kwargs):", "def gen_multi_v0(self, namespace, count):\n conn = self.pick_conn()\n retries = self.max_retries\n url = \"/v0/gen?ns=%s&count=%d\" % (namespace, count)\n while 1:\n try:\n r = conn.request(\"GET\", url)\n content = r.data\n assert r.status == 200, \"http status(%d) != 200 : %s\" % (\n r.status, content\n )\n return [int(i) for i in content.split(\",\")]\n except Exception as e:\n logger.warn(\"%s %s %s\", conn, url, e)\n conn = self.pick_conn(new=True)\n retries -= 1\n if retries < 0:\n raise", "def register_requests(fn):\n @wraps(fn)\n def inner(self, *args, **kwargs):\n if not self._was_setup_called:\n self.dm_setup()\n\n response = self._get_response(args, kwargs)\n response_id = self._get_response_id(response)\n response.meta['__id'] = response_id\n\n result = fn(self, *args, **kwargs)\n if not result:\n return\n\n # Save original type to return the same results from ``fn``\n original_type = type(result)\n\n if isinstance(result, Request):\n result = [result]\n\n request_list = []\n for r in result:\n if isinstance(r, Request):\n r = self._add_identifiers_to_request(r, response_id)\n self._increase_counter(response)\n\n request_list.append(r)\n\n if original_type in (list, types.GeneratorType):\n return request_list\n else:\n return request_list[0]\n\n return inner", "def prepare(self):\n for scenario_result, scenario_pass, case_pass in self.iterate():\n for step_result in scenario_result.step_results:\n step_pass = 
step_result.success\n url, method = step_result.fetch.url, step_result.fetch.method\n params = step_result.fetch.kwargs.get(\"params\")\n method_report = self.get_method_report(url, method)\n if method_report:\n method_report.add(\n case_pass, scenario_pass, step_pass, params\n )", "def add_responses(self, response):\n self.responses = self.responses.union(set(response) if type(response) is not set else response)\n # return Post(self.title, self.timestamp, self.subject, self.content, self.resto,\n # self.responses.union(set(response) if type(response) is not set else response))", "def tags(self, request, tag_list, group):\n return tag_list", "def artifact_get_prep_req(user_id, artifact_ids):\n samples = {}\n\n for aid in sorted(artifact_ids):\n artifact = Artifact(aid)\n access_error = check_access(artifact.study.id, user_id)\n if access_error:\n return access_error\n\n samples[aid] = list(chain(\n *[sorted(pt.keys()) for pt in Artifact(aid).prep_templates]))\n\n return {'status': 'success', 'msg': '', 'data': samples}", "def prepare_data(self):", "def _transform_data(self):\n parser = self.parse_xml(self._data_src, 'Chemical')\n for chemical in parser:\n if 'displayName' not in chemical.attrib:\n continue\n\n # initial setup and get label\n display_name = chemical.attrib['displayName']\n if not display_name or not re.search(TAGS_REGEX, display_name):\n continue\n label = re.sub(TAGS_REGEX, '', display_name)\n params = {\n 'label': label\n }\n\n # get concept ID\n reg_no = chemical.find('NumberList').find(\"CASRegistryNumber\")\n if not reg_no:\n continue\n params['concept_id'] = \\\n f'{NamespacePrefix.CASREGISTRY.value}:{reg_no.text}'\n\n # get aliases\n aliases = []\n label_l = label.lower()\n name_list = chemical.find('NameList')\n if name_list:\n for name in name_list.findall('NameOfSubstance'):\n text = name.text\n if text != display_name and text.lower() != label_l:\n aliases.append(re.sub(TAGS_REGEX, '', text))\n params['aliases'] = aliases\n\n # get xrefs and associated_with\n params['xrefs'] = []\n params['associated_with'] = []\n locator_list = chemical.find('LocatorList')\n if locator_list:\n for loc in locator_list.findall('InternetLocator'):\n if loc.text == 'DrugBank':\n db = f'{NamespacePrefix.DRUGBANK.value}:' \\\n f'{loc.attrib[\"url\"].split(\"/\")[-1]}'\n params['xrefs'].append(db)\n elif loc.text == 'FDA SRS':\n fda = f'{NamespacePrefix.FDA.value}:' \\\n f'{loc.attrib[\"url\"].split(\"/\")[-1]}'\n params['associated_with'].append(fda)\n\n # double-check and load full record\n assert Drug(**params)\n self._load_therapy(params)", "def get_batch_list(request):\n\n\n json_resp = {}\n json_resp['batch_list'] = []\n\n usecase = request.GET.get('usecase',None)\n # print(usecase)\n if usecase is None:\n batch = Report.objects.all().exclude(institute='PUBMED').values('batch')\n else:\n use_obj = UseCase.objects.get(name=usecase)\n batch = Report.objects.filter(name=use_obj).exclude(institute = 'PUBMED').values('batch')\n\n for el in batch:\n if el['batch'] not in json_resp['batch_list']:\n json_resp['batch_list'].append( el['batch'])\n # print(json_resp['batch_list'])\n json_resp['batch_list'] = sorted(json_resp['batch_list'])\n # print(json_resp)\n return JsonResponse(json_resp)", "def get_items_from_response(self, response):\n raise NotImplementedError" ]
[ "0.5429554", "0.5371432", "0.52643687", "0.5249134", "0.5248237", "0.52403224", "0.52280647", "0.52162", "0.51670784", "0.5164601", "0.5159841", "0.51428187", "0.5142124", "0.5130368", "0.51142645", "0.5101176", "0.50706816", "0.5053283", "0.504867", "0.5047648", "0.50421995", "0.5023641", "0.4978361", "0.49763533", "0.49462476", "0.49134943", "0.4908626", "0.48917386", "0.4890182", "0.48839635", "0.48721257", "0.48588854", "0.48493612", "0.48475844", "0.48437408", "0.48234308", "0.4821815", "0.4819585", "0.48160425", "0.47954953", "0.47944403", "0.47815737", "0.47453168", "0.47437376", "0.4741724", "0.47390383", "0.47300616", "0.47264928", "0.4724343", "0.46991223", "0.469755", "0.46972173", "0.46906206", "0.46884418", "0.46876127", "0.46876127", "0.4683426", "0.46794873", "0.4676435", "0.4671283", "0.4670898", "0.46706286", "0.4666058", "0.4644374", "0.46412262", "0.46386984", "0.46367848", "0.46354702", "0.46321183", "0.46300673", "0.4616628", "0.46070752", "0.46024087", "0.46023092", "0.45942765", "0.45898536", "0.45895666", "0.45744273", "0.45724276", "0.45713165", "0.45706227", "0.45675567", "0.45618898", "0.45618412", "0.45600995", "0.45570177", "0.45521417", "0.45469552", "0.45429173", "0.45417413", "0.4539784", "0.4536555", "0.45342204", "0.45306367", "0.4529855", "0.45273742", "0.4526602", "0.45264506", "0.45221767", "0.45183352", "0.4516654" ]
0.0
-1
Check if the specified instance matches the service's model.
def _isinstance(self, instance, raise_error=True): if isinstance(instance, self.__model__): return True elif raise_error: raise ValueError('{} is not of type {}.'.format( instance, self.__model__, )) else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkModel(self, model):\n # TODO", "def test_valid_model(self):\n model_cls = ModelContainer(APP_LABEL, TestModel2._meta.db_table).model_cls\n self.assertTrue(model_cls.__class__.__name__ is models.Model.__class__.__name__)", "def have_this_instance(self, instance):\n for i in self.all_instances:\n if i == instance:\n print(\"YES ITS ME!\")\n return True\n print(\"NO S.B. ELSE\")\n return False", "def test_instance_BaseModel(self):\n self.assertTrue(isinstance(self.my_object, BaseModel))", "def hasModel(self, model):\n if model in self.models:\n return S_OK()\n else:\n return S_ERROR(\"Model %s is not defined, use any of %s\" % (model, self.models.keys()))", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False", "def is_model(self):\n return self.model_name() is not None", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.conformsToProtocol(self))\n else:\n return False", "def test_instance(self):\n self.assertEqual(True, type(self.Test.defined_associations['thing']) is pyperry.association.HasOne)", "def test_instance(self):\n b = Review()\n self.assertIsInstance(b, Review)\n self.assertTrue(issubclass(type(b), BaseModel))", "def test_instance(self):\n self.assertIsInstance(self.test1, BaseModel)", "def conforms(self, instance, format):\r\n\r\n try:\r\n self.check(instance, format)\r\n except FormatError:\r\n return False\r\n else:\r\n return True", "def is_valid(self, data_model: DataModel) -> bool:", "def __eq__(self, other):\n if not isinstance(other, LookmlModel):\n return False\n\n return self.__dict__ == other.__dict__", "def match(self, cls):\n return isinstance(self, cls)", "def is_for(self, model_type: str, version: Version):\n return model_type == self.model_type and version in self.version_spec", "def instance_valid(instance):\n return zope.interface.verify.verifyObject(IKeyValueDB, instance)", "def match(self, data_instance: Dict[str, Any]) -> bool:", "def __contains__(self, instance: object) -> bool:\n try:\n state = attributes.instance_state(instance)\n except exc.NO_STATE as err:\n raise exc.UnmappedInstanceError(instance) from err\n return self._contains_state(state)", "def check_model(expected_model, actual_model):\n assert (expected_model == actual_model), \\\n \"Not Compare model: Expected model:\\n {0}\\nActual model:\\n {1}\".format(expected_model, actual_model)", "def test_instance_equality(self):\n class EqualityModel(Model):\n pk = columns.Integer(primary_key=True)\n\n m0 = EqualityModel(pk=0)\n m1 = EqualityModel(pk=1)\n\n self.assertEqual(m0, m0)\n self.assertNotEqual(m0, m1)", "def __eq__(self, other):\n if not isinstance(other, ServerModel):\n return False\n\n return self.__dict__ == other.__dict__", "def __is_type_instance( self, instance_type ):\n for index, instance in enumerate(INSTANCE_TYPES):\n if instance == instance_type:\n return True\n return False", "def model_is_valid(self, model: OscalBaseModel) -> bool:\n oscal_version = model.metadata.oscal_version.__root__\n p = re.compile(OSCAL_VERSION_REGEX)\n matched = p.match(oscal_version)\n return matched is not None", "def _isinstance(self, obj, raise_error=True):\n rv = isinstance(obj, self.__model__)\n if not rv and raise_error:\n raise ValueError('%s is not of type %s' % (obj, self.__model__))\n return rv", "def __eq__(self, other):\n if not isinstance(other, Service):\n return False\n\n return self.__dict__ == other.__dict__", "def 
test_instance_equality(self):\r\n class EqualityModel(Model):\r\n pk = columns.Integer(primary_key=True)\r\n\r\n m0 = EqualityModel(pk=0)\r\n m1 = EqualityModel(pk=1)\r\n\r\n self.assertEqual(m0, m0)\r\n self.assertNotEqual(m0, m1)", "def is_instance(self,instance):\n\t\tinst_attributes = instance.getAttributes()\n\t\tfor attribute in self.utility.av_counts.keys():\n\t\t\tif attribute not in inst_attributes:\n\t\t\t\treturn False\n\t\t\tif type(inst_attributes[attribute]) == dict:\n\t\t\t\tfor value in self.utility.av_counts[attribute]:\n\t\t\t\t\tif (self.utility.av_counts[attribute][value] / self.utility.count) != 1.0:\n\t\t\t\t\t\treturn False\n\t\t\t\t\tif inst_attributes[attribute] != value:\n\t\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\t\tif inst_attributes[attribute] != self.utility.av_counts[attribute]['numerically_valued_attribute'] / self.utility.count:\n\t\t\t\t\t\treturn False\n\t\t\n\t\tfor attribute in instance:\n\t\t\tif attribute not in self.utility.av_counts:\n\t\t\t\treturn False\n\t\t\tif type(inst_attributes[attribute]) == dict:\n\t\t\t\tif inst_attributes[attribute] not in self.utility.av_counts[attribute]:\n\t\t\t\t\treturn False\n\t\t\t\tif ((self.utility.av_counts[attribute][inst_attributes[attribute]] / self.utility.count) != 1.0):\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tif len(self.utility.av_counts[attribute].keys()) != 1 or self.utility.av_counts[attribute].get('numerically_valued_attribute', 0) == 0:\n\t\t\t\t\treturn False\n\t\t\n\t\treturn True", "def is_instance(instance, expected_types):\n for expected_type in expected_types:\n if isinstance(instance, expected_type):\n return True\n\n return False", "def is_peewee_model(obj) -> bool:\n return (inspect.isclass(obj) and\n issubclass(obj, peewee.Model) and\n not obj == peewee.Model and\n not obj.__name__.startswith('_'))", "def is_model_type(obj: Any) -> bool: # pragma: no cover\n pass", "def __eq__(self, other):\n if not isinstance(other, VirtualService):\n return False\n\n return self.__dict__ == other.__dict__", "def verify_instance(**kwargs):\n instance = kwargs.get('instance')\n\n from amelie.claudia.models import Mapping\n\n transaction.on_commit(lambda: tasks.verify_instance.delay(Mapping.get_type(instance), instance.id))", "def test_user_model():\n assert isinstance(user_model_instance, UserModel), \"user model not correctly instatiating\"\n # TODO add more tests here to confirm user_model_instance", "def __eq__(self, other):\n if not isinstance(other, InstanceCreate):\n return False\n\n return self.__dict__ == other.__dict__", "def test_instance(self):\n self.assertEqual(True, type(self.Test.defined_associations['something']) is pyperry.association.BelongsTo)", "def test_issubclass(self):\n self.assertTrue(issubclass(self.rev.__class__, BaseModel), True)", "def __contains__(self, key):\n if isinstance(key, Model):\n key = key.get_id()\n return (str(key) in self.get_models())", "def __eq__(self, other):\n if not isinstance(other, V1InstancetypeMatcher):\n return False\n\n return self.__dict__ == other.__dict__", "def check(cls, control_instance):\n pass", "def __eq__(self, other):\n if not isinstance(other, ModelDto):\n return False\n\n return self.__dict__ == other.__dict__", "def hasMetaModel(self, metaModel):\r\n if self.getClass() == metaModel: return 1\t\t\t\t# if the meta model is the actual class\r\n for mmodels in self.mergedASG:\t\t\t\t\t# else check the merged meta-models\r\n if mmodels.getClass() == metaModel: return 1\r\n return 0", "def test_instance_Review(self):\n obj = Review()\n 
self.assertIsInstance(obj, BaseModel)", "def is_instance(self, thing: Any) -> bool:\n return isinstance(thing, self.underlying)", "def is_model_valid(self):\n try:\n payload = {\n \"modelurl\": self.model_builder_url + self.model_uuid,\n \"api_key\": self.web2nl_api_key\n }\n\n response = requests.get(self.web2nl_url + \"validations\", params=payload)\n if response.status_code is requests.codes.no_content:\n return True\n elif response.status_code is requests.codes.bad_request:\n self.logger.error(\"Model validation failed: \" + response.text)\n return False\n else:\n self.logger.error(\"Failed while validating model. Will retry in some time\")\n raise RuntimeError(\"Failed while validating model\")\n except requests.exceptions.ConnectionError as errc:\n self.logger.error(\"Error Connecting:\", errc)", "def check_model_exists(class_name):\n if path.exists(settings.get('FALAFEL_DIR') + settings.get('MODELS_DIR') + '/' + class_name + '.py'):\n return True\n else:\n return False", "def test_instance_created(self):\n\n base_model = BaseModel()\n self.assertIsInstance(base_model, BaseModel)\n self.assertTrue(hasattr(base_model, \"created_at\"))\n self.assertTrue(hasattr(base_model, \"updated_at\"))", "def models_compatible(model_a: ModuleModel, model_b: ModuleModel) -> bool:\n if model_a == model_b:\n return True\n return model_b.value in _load_v2_module_def(model_a)['compatibleWith']", "def assert_valid(self, instance, value=None):\n valid = super(Instance, self).assert_valid(instance, value)\n if not valid:\n return False\n if value is None:\n value = instance._get(self.name)\n if isinstance(value, HasProperties):\n value.validate()\n return True", "def __instancecheck__(cls, instance):\r\n # Inline the cache checking when it's simple.\r\n subclass = getattr(instance, '__class__', None)\r\n if subclass in cls._abc_cache:\r\n return True\r\n subtype = type(instance)\r\n if subtype is subclass or subclass is None:\r\n if (cls._abc_negative_cache_version ==\r\n ABCMeta._abc_invalidation_counter and\r\n subtype in cls._abc_negative_cache):\r\n return False\r\n # Fall back to the subclass check.\r\n return cls.__subclasscheck__(subtype)\r\n return (cls.__subclasscheck__(subclass) or\r\n cls.__subclasscheck__(subtype))", "def __contains__(self, obj):\n if isinstance(obj, self):\n query = self.where(**obj.data).select()\n result = query.execute()\n if result.count:\n return True\n return False", "def test_instance_method(self):\n self.assertEqual(self.Test.save.im_class, self.Test)", "def __eq__(self, other):\n return (self.app_id == other.app_id and\n dict.__eq__(self._model_sigs, other._model_sigs))", "def test_isinstance(self):\n obj = BaseModel()\n self.assertIsInstance(obj, BaseModel)", "def test_instance(self):\n self.assertEqual(True, type(self.Test.defined_associations['things']) is pyperry.association.HasMany)", "def is_model(thing):\n return (isinstance(thing, sqlalchemy.ext.declarative.api.DeclarativeMeta)\n and hasattr(thing, '__table__')) # disard sqlalchemy.ext.declarative.declarative_base()", "def is_live(self, obj):\n most_appropriate_object = get_appropriate_object_from_model(self.model)\n if most_appropriate_object == obj:\n return True\n return False", "def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass", "def test_is_instance(self):\n self.assertIsInstance(self.obj, Rectangle, \"created obj is not an \" +\n \"instance of Rectangle class.\")", "def should_update(self, instance, **kwargs):\n return True", "def test_User_instance(self):\n obj = User()\n 
self.assertIsInstance(obj, User)\n self.assertIsInstance(obj, BaseModel)", "def __eq__(self, other):\n if not isinstance(other, ShowInstanceDetailResponse):\n return False\n\n return self.__dict__ == other.__dict__", "def compare_response_to_model_instance(self, response, model_instance):\n parsed_response = json_decode(response)\n headers = parsed_response['headers']\n data = parsed_response['data']\n self.assertEquals(len(data), len(model_instance))\n for i in range(len(data)):\n datum = self.deserialize(headers, data[i])\n self.compare_model_instance(datum, model_instance[i])", "def is_distributed_model(model):\n try:\n get_tenant_field(model)\n return True\n except ValueError:\n return False", "def supports(self, model: str) -> bool:\n model = model.lower().replace(\"#slash#\", \"/\")\n\n if self._model.lower() == model:\n return True\n\n # @todo implement Regex/Json path\n for alias in self.aliases:\n if alias.lower() == model:\n return True\n\n # Also try to match model ID between parentheses.\n if match := re.search(r\"\\(([^\\(\\)]+)\\)$\", model):\n return self.supports(match.group(1))\n\n return False", "def test_basemodel_basic_instance(self):\n B = BaseModel()\n self.assertIsInstance(B, BaseModel)\n self.assertEqual(BaseModel, type(BaseModel()))\n self.assertTrue(hasattr(B, \"id\"))\n self.assertTrue(hasattr(B, \"created_at\"))\n self.assertTrue(hasattr(B, \"updated_at\"))", "def __eq__(self, other):\n if not isinstance(other, SolidModel):\n return False\n\n return self.to_dict() == other.to_dict()", "def check_model(self, model):\n self.check_initial_conditions(model)\n self.check_variables(model)", "def __is_hard_match(self, obj):\n for attr in self.list:\n try:\n if getattr(obj, attr) != getattr(self, attr):\n return False\n except AttributeError:\n pass\n return True", "def __eq__(self, other):\n if not isinstance(other, ViewResourceModel):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n return (\n isinstance(other, Query)\n and self.constraints == other.constraints\n and self.model == other.model\n )", "def pickling_check(instance):\n pkled_instance = pickle.loads(pickle.dumps(instance))\n equality_check(instance, pkled_instance)", "def issub_class(self):\n insta = Amenity()\n self.assertIsInstance(insta, BaseModel)\n self.assertTrue(hasattr(insta, \"id\"))\n self.assertTrue(hasattr(insta, \"created_at\"))\n self.assertTrue(hasattr(insta, \"update_at\"))", "def singularity_exists(self):\n instances = Client.instances(quiet=self.quiet)\n for instance in instances:\n if self.pid in instance.name:\n return True\n return False", "def test_matcher_on_instance(self):\n\n skill = _TestSkill(None, None)\n self.assertTrue(hasattr(skill.hello_skill, \"matchers\"))", "def verify_is_instance(self, obj, cls, msg=\"\"):\r\n try:\r\n self.assert_is_instance(obj, cls, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def __eq__(self, other):\n if not isinstance(other, LandslideViewModel):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, ModelTrainingInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def matches(self, aModel):\n\n params_bis = list(map(aModel.string_to_param,self.grid_params))\n for param1, param2 in zip(self.params, params_bis):\n if (abs(param1/param2 - 1.0) > eps): return False\n return True", "def __eq__(self, other) -> bool:\n return (\n 
isinstance(other, DataModel)\n and self.name == other.name\n and self.attributes == other.attributes\n )", "def device_matches_object(self, obj=None):\n\n\t\treturn self.device_is_configured and self.config_match(obj=obj)", "def check_get_query_result_if_exists(self, model, *args, **kwargs):\n # Allows dynamic get querysets\n queryset = _get_queryset(model)\n\n try:\n # Put the args and kwargs in the filter for filtering\n exists = queryset.get(*args, **kwargs)\n return True\n except queryset.model.DoesNotExist as e:\n # If queryset does not exist. Return False\n return False", "def __eq__(self, other):\n if not isinstance(other, Model):\n return False\n return self.graph == other.graph", "def test_model_equality(self):\n class EqualityModel0(Model):\n pk = columns.Integer(primary_key=True)\n\n class EqualityModel1(Model):\n kk = columns.Integer(primary_key=True)\n\n m0 = EqualityModel0(pk=0)\n m1 = EqualityModel1(kk=1)\n\n self.assertEqual(m0, m0)\n self.assertNotEqual(m0, m1)", "def modelfor(model, table):\n try:\n return model.__tablename__ == table\n except AttributeError:\n return False", "def _old_request_model(self, instance, success):\n coll = self.get_collection('_model')\n callback = partial(self._do_validate,\n instance=instance,\n success=success)\n try:\n instance['_model']\n except KeyError:\n raise tornado.web.HTTPError(400, 'Missing model key')\n coll.find_one({'_id': instance['_model']},\n callback=callback)", "def is_satisfied(self, item: Any) -> bool:", "def is_instance_of(self, rule, instantiation_map=None):\n\n if len(rule.assumptions) != len(self.assumptions):\n return False\n\n instantiation_map = {} if instantiation_map is None else instantiation_map\n for i in range(len(rule.assumptions)):\n if not InferenceRule._update_instantiation_map(self.assumptions[i], rule.assumptions[i], instantiation_map):\n return False\n\n if not InferenceRule._update_instantiation_map(self.conclusion, rule.conclusion, instantiation_map):\n instantiation_map.clear()\n return False\n\n return True", "def is_valid(self, data_model: DataModel) -> bool:\n return self.constraint.is_valid(data_model)", "def __eq__(self, other: 'ModelParameters') -> bool:\n if not isinstance(other, ModelParameters) or len(self) != len(other):\n return False\n else:\n return all(torch.equal(p_self, p_other) for p_self, p_other in zip(self.parameters, other.parameters))", "def test_issubclass(self):\n self.assertTrue(issubclass(User()), BaseModel)", "def test_model_endpoint(request, client, fixture_name, detail_attr, comparison_attr):\n instance = request.getfixturevalue(fixture_name)\n model = instance.__class__\n resource_name = model.__name__.lower()\n\n # test list endpoint\n response = client.get(api_reverse(\"%s-list\" % resource_name))\n check_response(response)\n results = response.json()['results']\n assert results\n assert len(results) == model.objects.count()\n\n # test detail endpoint\n response = client.get(api_reverse(\"%s-detail\" % resource_name, args=[getattr(instance, detail_attr)]))\n check_response(response)\n results = response.json()\n assert results[comparison_attr] == getattr(instance, comparison_attr)", "def __eq__(self, other):\n if not isinstance(other, ServiceAvailabilityOption):\n return False\n\n return self.__dict__ == other.__dict__", "def validate_instance(instance: Any) -> Any:\n attr.validate(instance)", "def _check_vm_record(self):\n instances = self.conn.list_instances()\n self.assertEquals(len(instances), 1)\n\n # Get Nova record for VM\n vm_info = self.conn.get_info(1)\n\n # 
Get record for VM\n vms = vmwareapi_fake._get_objects(\"VirtualMachine\")\n vm = vms[0]\n\n # Check that m1.large above turned into the right thing.\n mem_kib = long(self.type_data['memory_mb']) << 10\n vcpus = self.type_data['vcpus']\n self.assertEquals(vm_info['max_mem'], mem_kib)\n self.assertEquals(vm_info['mem'], mem_kib)\n self.assertEquals(vm.get(\"summary.config.numCpu\"), vcpus)\n self.assertEquals(vm.get(\"summary.config.memorySizeMB\"),\n self.type_data['memory_mb'])\n\n # Check that the VM is running according to Nova\n self.assertEquals(vm_info['state'], power_state.RUNNING)\n\n # Check that the VM is running according to vSphere API.\n self.assertEquals(vm.get(\"runtime.powerState\"), 'poweredOn')", "def has_api(instance, T):\n rtn = False\n if instance is not None and T is not None:\n if inspect.isclass(instance):\n if hasattr(instance, \"__implements\"):\n if T in instance.__implements:\n rtn = True\n else:\n if hasattr(instance.__class__, \"__implements\"):\n if T in instance.__class__.__implements:\n rtn = True\n return rtn", "def test_instance_method(self):\n self.assertEqual(self.Test.update_attributes.im_class, self.Test)", "def __eq__(self, other):\n if not isinstance(other, UpdateVehicleRequest):\n return False\n\n return self.to_dict() == other.to_dict()", "def test_model_equality(self):\r\n class EqualityModel0(Model):\r\n pk = columns.Integer(primary_key=True)\r\n\r\n class EqualityModel1(Model):\r\n kk = columns.Integer(primary_key=True)\r\n\r\n m0 = EqualityModel0(pk=0)\r\n m1 = EqualityModel1(kk=1)\r\n\r\n self.assertEqual(m0, m0)\r\n self.assertNotEqual(m0, m1)", "def __eq__(self, other):\n if not isinstance(other, GroupModel):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.6658003", "0.6365422", "0.63516694", "0.59845364", "0.59756815", "0.59461135", "0.5938544", "0.59184885", "0.5915191", "0.59148186", "0.5906111", "0.59015507", "0.5884941", "0.585603", "0.5847129", "0.58132994", "0.57774276", "0.57701457", "0.5730501", "0.57222885", "0.57215434", "0.57187676", "0.5705217", "0.5691113", "0.56890965", "0.5675528", "0.56570476", "0.5643482", "0.56290567", "0.5621421", "0.561771", "0.56096655", "0.56021893", "0.55726606", "0.5564059", "0.5550342", "0.55455434", "0.5527465", "0.5514005", "0.5503802", "0.549288", "0.5491399", "0.54768085", "0.5472506", "0.54358876", "0.5435616", "0.5417704", "0.54174924", "0.5412963", "0.5412873", "0.5401246", "0.53949475", "0.5392349", "0.5356484", "0.53536195", "0.5349261", "0.5344781", "0.5344317", "0.53334457", "0.5328952", "0.5324539", "0.53199214", "0.53159", "0.53000003", "0.52997845", "0.52948004", "0.5292631", "0.5278795", "0.5275653", "0.52700853", "0.52672875", "0.526369", "0.5262645", "0.5257359", "0.52561957", "0.5255734", "0.52323496", "0.52317643", "0.5219822", "0.521245", "0.5212253", "0.52079767", "0.52037036", "0.52028", "0.52020234", "0.5199712", "0.51988083", "0.5186385", "0.51837206", "0.5178875", "0.51710415", "0.5168029", "0.51462805", "0.51396686", "0.5134994", "0.51336175", "0.513157", "0.513052", "0.5127984", "0.5124074" ]
0.650044
1
Return a generator containing all instances of the model.
def all(self):
    return self.__model__.query.all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_model_instances(self) -> Iterator['panda_core_data.model.Model']:\n for current_type in self.all_models:\n for current_instance in current_type.all_instances:\n yield current_instance", "def fetchall(self):\n rows = self.cursor.fetchall()\n\n if self.model.single:\n for row in rows:\n yield self.__instance_from_db(self.model, row)\n else:\n for row in rows:\n yield tuple(self.__instance_from_db(m, row) for m in self.model.models)", "def __iter__(self):\n return iter(self.model)", "def iter_models(self):\n return iter(self.model_list)", "def instances(self):\n for d in os.listdir(self.directory):\n yield self.instance(self.directory, d)", "def getinstances(cls):\n\t\t\tdead = set()\n\t\t\tfor ref in cls._instances:\n\t\t\t\tobj = ref()\n\t\t\t\tif obj is not None:\n\t\t\t\t\tyield obj\n\t\t\t\telse:\n\t\t\t\t\tdead.add(ref)\n\t\t\tcls._instances -= dead", "def __iter__(self):\n return self.new_generator()", "def __iter__(self):\n if self.default_model:\n return iter(self.default_model.chain_list)\n return iter(list())", "def get_instances(cls):\n raise NotImplementedError", "def all(cls):\n for x in cls._dbag:\n yield cls(**cls._dbag[x])", "def models(self, model=None):\n for query in self.__queries:\n if isinstance(query, orb.Query):\n yield query.model(model)\n else:\n for model in query.models(model):\n yield model", "def __iter__(self):\n\n return [self]", "def __iter__(self):\n\n return [self]", "def __iter__(self):\n batch = []\n for i_batch in range(self.episode_num):\n classes = torch.randperm(len(self.idx_list))[: self.way_num]\n for c in classes:\n idxes = self.idx_list[c.item()]\n pos = torch.randperm(idxes.size(0))[: self.image_num]\n batch.append(idxes[pos])\n if len(batch) == self.episode_size * self.way_num:\n batch = torch.stack(batch).reshape(-1)\n yield batch\n batch = []", "def __iter__(self):\n yield from self.gen", "def _get_all_records(self) -> List[DBModelInstance]:\n return self.model.query.all()", "def batch(model: Type[Model], num: int, **kwargs: Any) -> Iterator[Model]:\n for _i in range(num):\n yield modelfactory_factory(model)(**kwargs)", "def __iter__(self):\n for classresult in self.classresults:\n yield classresult", "def iterator(self):\n yield", "def __iter__(self) -> Generator:\n\t\treturn (article for article in self._articles)", "def __iter__(self):\n yield self", "def _get_model_iterator(self, model=None):\n if model is None:\n model = self.model\n\n return model._sa_class_manager.mapper.iterate_properties", "def __iter__(self):\n for instresult in self.instresults:\n yield instresult", "def __iter__(self):\n for o in self._iter:\n yield o", "def find_all(cls):\n return cls.dbm().modelclass_find_all(cls)", "def __iter__(self):\n counter = 0\n while True:\n if counter < len(self.all_records):\n yield self.all_records[counter]\n else:\n yield self.next()\n counter += 1", "def iter_all_atoms(self):\n for model in self.iter_models():\n for atm in model.iter_all_atoms():\n yield atm", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def __iter__(self):\n return self._database.select(self.as_sql(), self._model_cls)", "def __iter__(self):\n for benchinst in sorted(self.instances.values()):\n yield benchinst", "def __iter__(self):\n for index in range(len(self)):\n yield self[index]", "def __iter__(self):\n for batch in self.iterator:\n yield 
Batch.from_iterator_batch(batch, self.pad_index, self.sos_index, self.eos_index)", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n\n return self", "def __iter__(self):\n return self._product_generator()", "def __iter__(self):\n for i in range(len(self)):\n yield self[i]", "def __iter__(self):\n for i in range(len(self)):\n yield self[i]", "def iter(self):\n return []", "def __iter__(self) -> object:\n return self", "def __iter__(self):\n items = self._fetch()\n for item in items:\n yield item", "def all(self):\n return (self.__objects)", "def Vrep_generator(self):\n for V in self.Vrepresentation():\n yield V", "def examples(self):\n for obj_ind in range(len(self.objects)):\n yield self.get_object_intent_by_index(obj_ind)", "def __iter__(self):\n for atom in self.iter_atoms():\n yield atom", "def fetch_all(self):\n return list(iter(self))", "def __iter__(self):\n for r in self.cursor:\n yield r", "def GetObjects(self): \r\n return self.model.GetObjects()", "async def get_instances(self, **kwargs) -> List[ApiResource]:\n raw_resources = await self.get_resources(**kwargs)\n _instances = [\n self._resource_factory(_raw)\n for _raw in self._loop_raw(raw_resources)\n ]\n return _instances", "def __iter__(self):\r\n return self", "def get_all_cached_instances(cls):\n return list(cls.__dbclass__.__instance_cache__.values())", "def __iter__(self):\n return iter(self.to_list())", "def __iter__(self):\n return iter(self.__iter())", "def __iter__(self) -> Generator:\r\n yield from self.sequence", "def __iter__(self):\n return (self.get_node(node_id) for node_id in self._collection.all_keys())", "def __iter__(self):\n for value in self.__dict__.values():\n yield value", "def empty_model() -> Model:\n yield Model()", "def get_all(cls):\n return DataStore.get_all_instance(cls)", "def __iter__(self) -> Iterator[Batch]:\n return self.get_iterator()", "def list_instances(self):\n # list instances\n self._list_instances()", "def generators(self):\n return self._generators", "def __iter__(self):\n for run in self.runs:\n yield run", "def enum_instances(self, env, model, keys_only):\n\n logger = env.get_logger()\n logger.log_debug('Entering %s.enum_instances()' % self.__class__.__name__)\n \n # Prime model.path with knowledge of the keys, so key values on\n # the CIMInstanceName (model.path) will automatically be set when\n # we set property values on the model. 
\n model.path.update({'CreationClassName': None, 'SettingID': None,\n 'SystemName': None, 'SystemCreationClassName': None})\n \n while False: # TODO more instances?\n # TODO fetch system resource\n # Key properties \n #model['SettingID'] = '' # TODO (type = unicode) \n #model['SystemName'] = '' # TODO (type = unicode) \n model['CreationClassName'] = 'RPATH_Configuration' \n #model['SystemCreationClassName'] = '' # TODO (type = unicode)\n if keys_only:\n yield model\n else:\n try:\n yield self.get_instance(env, model)\n except pywbem.CIMError, (num, msg):\n if num not in (pywbem.CIM_ERR_NOT_FOUND, \n pywbem.CIM_ERR_ACCESS_DENIED):\n raise", "def peak_all_models(self) -> List:\n models = list(self.meta.name)\n print(models)\n return models", "def vnf_instances(self) -> Iterator[VnfInstance]:\n return self._get_related_instance(VnfInstance, \"generic-vnf\")", "def fetchObjects(self):\n try:\n for i in service.Service.get_workers():\n yield i\n except Exception as e:\n Events.Status.emit(f\"unable to fetch worker information: {e}\")", "def __next__(self):\n for child in self.children:\n yield child", "def __iter__(self):\n self._fetch_all()\n return iter(self._result_cache)", "def __iter__(self):\n return iter(vars(self.obj))", "def iter_recursive_objects(self):\n from noc.inv.models.interface import Interface\n\n for i in Interface.objects.filter(managed_object=self.id):\n yield i", "def __iter__(self):\n for this_document in self.documents:\n yield this_document", "def __iter__(self):\n for child in self.children:\n yield child", "def all(self):\n return list(self.iterator())", "def __iter__(self):\n for feature in self.features:\n yield feature", "def _objects(self):\n for d in self._dicts_with_ids():\n yield d['id'], tuple(d[k] for k in self.fields)" ]
[ "0.8044509", "0.7411966", "0.734941", "0.7186237", "0.71022946", "0.70422894", "0.68495035", "0.66786873", "0.66777647", "0.6615131", "0.653405", "0.65155786", "0.65155786", "0.64561963", "0.6453592", "0.64051664", "0.6363701", "0.6355945", "0.6348062", "0.6272981", "0.62439245", "0.6234747", "0.6209201", "0.6202444", "0.6187548", "0.61691314", "0.616544", "0.61638653", "0.61638653", "0.61638653", "0.61638653", "0.61638653", "0.61638653", "0.6161022", "0.61439985", "0.61421275", "0.6115856", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61122864", "0.61070764", "0.61042", "0.6084426", "0.6084426", "0.60817975", "0.60763615", "0.60743904", "0.60592306", "0.60517013", "0.6051394", "0.60466194", "0.6032683", "0.60181767", "0.6016713", "0.6012402", "0.6009354", "0.6006035", "0.6003147", "0.6001329", "0.5996949", "0.59832233", "0.5982698", "0.5979878", "0.59769416", "0.5973323", "0.59690666", "0.59623826", "0.5962132", "0.5945816", "0.5944706", "0.59351474", "0.59165657", "0.5895405", "0.589472", "0.5893322", "0.58853275", "0.5872264", "0.58694226", "0.58684343", "0.5868299", "0.58661944" ]
0.5920896
90
Commit the instance to the database and return it.
def save(self, instance, commit=True):
    self._isinstance(instance)
    db.session.add(instance)
    if commit:
        db.session.commit()
    return instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commit(self):\n if not getattr(self, '_id', None):\n return self._create()\n res = self._update()\n self._dirty = False\n return res", "def commit(self):\n try:\n DBSESSION.add(self)\n DBSESSION.commit()\n return self\n except IntegrityError:\n DBSESSION.rollback()\n raise", "def commit(self):\n self.create()\n return self.refresh()", "def commit(self):\n return self.conn.commit()", "def commit(self):\n return self.connection.commit", "def commit(self):\n #main.signals['exit'].disconnect(self.commit)\n self._dirty = False\n with self._lock:\n self._db.commit()", "def save(self):\n\t\tdb.session.add(self)\n\t\tdb.session.commit()\n\t\treturn self", "def commit(self):\n return self.connection.commit()", "def commit(self):\n db.session.add(self)\n db.session.commit()", "def _write_to_db(self, instance: DBModelInstance) -> None:\n self.db.session.add(instance)\n self.db.session.commit()", "def save(self):\n db.session.add(self)\n self.__commit()\n return self", "def save(self):\r\n s = self.get_session()\r\n s.add(self)\r\n s.commit()\r\n return self", "def commit(self):\n raise NotImplementedError", "def commit(self):\n self.DB.commit()", "def commit(self):\n self.lock.acquire()\n self.__Session.commit()\n self.lock.release()", "def save(self):\n self.__db.commit()", "def commit(self):\n self.session.commit()", "def commit(self):\n try:\n db.session.commit()\n except:\n db.session.rollback()\n raise", "def commit(self):\n return", "def commit(self):\n self.db.commit()", "def Save(self) -> None:\n self.__conn.commit()", "def commit(self):\n self.getSession().commit()", "def save(self):\n db.session.commit()", "def _commit(self):\n if self.__session is not None:\n self.__session.commit()", "def save(self):\n try:\n self.__session.commit()\n except Exception:\n self.__session.rollback()\n finally:\n self.__session.close()", "def save(self):\n # type: () -> bool\n\n return self.query.commit(self.id, self)", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def commit(self):", "def save(self):\n self.db.commit()", "def save(self):\n try:\n db.session.add(self)\n db.session.flush()\n except Exception:\n db.session.rollback()\n raise Exception", "def save(self):\n\n self.__session.commit()", "def save(self):\n\n self.__session.commit()", "def __commit(self):\n from sqlalchemy.exc import IntegrityError\n\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()", "async def commit(self):\n if await self.is_valid():\n await self.update(committed=True).apply()", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n self._session.commit()\n self._session.close()\n\n return True", "def save(self):\n\t\tdb.session.add(self)\n\t\tdb.session.commit()", "def commit(self):\n model_handle = ctypes.c_void_p()\n _check_call(_LIB.TreeliteModelBuilderCommitModel(\n self.handle,\n ctypes.byref(model_handle)))\n return Model(model_handle)", "def save(self):\n if self._deleted:\n raise DBObjectSaveError, \"Cannot save a previously deleted object.\"\n\n def _save(isValid):\n if self.id is None and isValid:\n return self._create()\n elif isValid:\n return self._update()\n return 
self\n return self.isValid().addCallback(_save)", "def save(self, commit=True):\n \n \n try:\n\n db = getDatabase()\n connection = db.connect()\n\n connection.add(self)\n except Exception as e:\n raise e\n finally:\n db.dispose()", "def commit(self):\n conn = self.threadingLocal.connection\n if isinstance(conn, Transaction):\n self.threadingLocal.connection.commit()", "def save(self):\n if not connection.connected:\n raise Exception('Not connected to the database.')\n if not self._retrieved:\n self.insert()\n self._retrieved = True\n else:\n self.update()", "def save(self):\n \n db.session.add(self)\n db.session.commit()", "def commit(self) -> None:\n pass", "def commit_transaction(self):\n cursor = self._cursor()\n cursor.close()\n self._db.commit()\n self._end_transaction()", "def commit(self):\n\t\t#firstly, get all variables and values of this model\n\t\tcontent = self.__dict__.copy() \n\t\t#if '_rev' is one of the variables of this model instance,\n\t\t#it means this user is retrived from database. \n\t\t#We are actually going to update the model document in database\n\t\t#instead of creating a new user document.\n\t\tres = dbop.update_create_user_in_database(self._id, content) \n\t\tself._id = res['id']\n\t\tself._rev = res['rev']", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()\n # try:\n # db.session.add(self)\n # db.session.commit()\n # except exc.IntegrityError:\n # db.session.rollback()", "def save(self):\n self.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def commit(obj):\n db.session.add(obj)\n db.session.commit()\n db.session.refresh(obj)\n return obj", "def save(self, instance):\n return instance", "def commit(self):\n self.__connection.commit()", "def commit(self):\n try:\n self.session.commit()\n except:\n self.session.rollback()\n raise\n finally:\n self.session.remove()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def commit(self):\n self.success = True\n self.close()", "def __call__(self):\n session = self._session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def save(self):\n logging.debug(\"sychronizing db\")\n self._db.sync()", "def save(self):\r\n db.session.add(self)\r\n db.session.commit()", "def save(self) -> \"BaseModel\":\n try:\n db.session.add(self)\n db.session.commit()\n return self\n\n except Exception as e:\n db.session.rollback()\n descripted_exception_logger(e)\n raise", "def commit(self):\n if self.transaction:\n self.conn.commit()\n self.transaction = False", "def save(self, commit=True):\n db.session.add(self)\n if commit:\n db.session.commit()\n logger.info(\n 
'{} {} saved'.format(self.__class__.__name__, self.id))\n return self", "def save(self):\n try:\n db.session.add(self)\n db.session.commit()\n return True\n except SQLAlchemyError as e:\n db.session.rollback()\n logger.error(\"database operation error: \", e)\n return False", "def save(self):\n if self.get('_id'):\n return self.connection.update({'_id': self.get('_id')}, {'$set': self._export(without_id=True)})\n else:\n return self.connection.insert(self._export())", "def commit(self):\n self.sql_session.commit()", "def save(self):\n return api.put([self])", "def save(self, instance: BaseModel):\n # If this is a new unsaved object, it'll likely have an\n # id of None, which RethinkDB won't like. So if it's None,\n # generate a UUID for it. If the save fails, we should re-set\n # it to None.\n if instance.id is None:\n instance.id = str(uuid.uuid4())\n elif isinstance(instance.id, uuid.UUID):\n instance.id = str(instance.id)\n\n instance = self._fix_uuids(instance)\n with rconnect() as conn:\n try:\n query = self.q.insert(\n instance.to_primitive(),\n conflict=\"replace\"\n )\n rv = query.run(conn) # NOQA\n # console.debug(rv)\n except Exception as e:\n console.error(e)\n instance.id = None\n raise\n else:\n return instance", "def save_db(self) -> None:\n self.connection.commit()", "def commit(self, transaction):\n raise NotImplementedError", "def save(self):\n \n logger.debug(\"saving self to the database\")\n \n # there should be a session\n # because a Sequence can not be created\n # without an already created Project instance\n \n if self not in db.session:\n db.session.add(self)\n \n db.session.commit()", "def commit(self):\n self.connection.commit()", "def commit(self):\n self.conn.commit()", "def commit(self):\n self.execute_sql(sql.commit)\n self.under_transaction = False", "def _commit_now(self):\n self._database.commit()", "def commit():\n get_db().commit()", "def save(self):\n data = self.serialize()\n\n self.validate(data)\n\n saved_data = DATABASE_CONNECTION.insert(self.__class__.__name__, data)\n\n self.__dict__.update(saved_data)", "def commit(self) -> None:\n trans = self._transaction\n if trans is None:\n trans = self._autobegin_t()\n\n trans.commit(_to_root=True)" ]
[ "0.8033429", "0.7885448", "0.7634681", "0.73922426", "0.7373556", "0.7361513", "0.7327505", "0.7292507", "0.72648495", "0.71835274", "0.7170746", "0.71401006", "0.71388215", "0.71369076", "0.71341854", "0.7112524", "0.70495015", "0.70453817", "0.700578", "0.6989802", "0.6986537", "0.6977208", "0.69718766", "0.6967655", "0.6960081", "0.69557035", "0.694341", "0.694341", "0.694341", "0.694341", "0.694341", "0.694341", "0.694341", "0.694341", "0.69399464", "0.69354105", "0.6924078", "0.69195855", "0.69195855", "0.691913", "0.6897971", "0.68910086", "0.68910086", "0.68910086", "0.68910086", "0.68910086", "0.6865123", "0.68631935", "0.68583965", "0.68190134", "0.67974013", "0.6791662", "0.67916304", "0.67857", "0.6785072", "0.6782623", "0.67785776", "0.6772731", "0.6769653", "0.66896594", "0.66896594", "0.66896594", "0.66896594", "0.6685546", "0.66654223", "0.66556066", "0.6652138", "0.66444284", "0.66444284", "0.66444284", "0.66444284", "0.66444284", "0.66444284", "0.66444284", "0.66444284", "0.66444284", "0.66444284", "0.66444284", "0.6632503", "0.66319", "0.6623084", "0.66113484", "0.66075903", "0.6595847", "0.6585509", "0.65683717", "0.65645665", "0.6555282", "0.65548617", "0.6513649", "0.6496549", "0.6491094", "0.64885515", "0.6465814", "0.64564914", "0.6444114", "0.64394575", "0.6437974", "0.6437777", "0.643181" ]
0.7144451
11
Reads the spectral thermal conductivity information
def read_spectral_k(filename="tc_dos_l.dat"):
    # column headers for the data
    #tcdosl_labels = [
    #    "wavelength",
    #    "k_xx_raw","k_xx_smooth",
    #    "k_yy_raw","k_yy_smooth",
    #    "k_zz_raw","k_zz_smooth"]
    tcdosl_labels = [
        "wavelength",
        "k_xx_raw","k_yy_raw","k_zz_raw",
        "k_xx_smooth","k_yy_smooth","k_zz_smooth"]

    def subselect_table_block(i_start,lines):
        i = i_start + 1
        table = []
        while(lines[i].strip() != ""):
            args = lines[i].split()
            args = [arg.strip() for arg in args]
            args = [float(arg) for arg in args]
            table.append(args)
            i += 1
        return np.array(table)

    line = None # initialize
    with open(filename,'r') as f:
        lines = f.readlines()
    lines = [s.strip() for s in lines]

    temperatures = []
    tcdosl_dict = OrderedDict()
    for il,line in enumerate(lines):
        if line.startswith('# Temp:'):
            args = line.split(':')
            T = int(float(args[1].strip()))
            temperatures.append(T)
            tcdosl_dict[T] = subselect_table_block(il,lines)

    tcdosl_df_dict = OrderedDict()
    for temp in temperatures:
        tcdosl_df_dict[temp] = pd.DataFrame(
            copy.deepcopy(tcdosl_dict[temp]),
            columns=list(tcdosl_labels))

    return {k:v.copy() for k,v in tcdosl_df_dict.items()}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_chip_temperature(self):\n self.check_validity()\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_GET_CHIP_TEMPERATURE, (), '', 10, 'h')", "def ccdtemp(n=2):\n temp = camera.GetTemperature()\n camera.status.update()\n mesg, f1, f2, f3, f4 = camera.GetTemperatureStatus()\n print \"Sensor Temp=%6.1f, TargetTemp=%6.1f, AmbientTemp=%6.1f, CoolerVolts=%6.2f\" % (f1,f2,f3,f4)\n return temp", "def cpsd(self):\r\n self.welch_method = self.method\r\n self.welch_method['this_method'] = 'welch'\r\n self.welch_method['Fs'] = self.input.sampling_rate\r\n f, spectrum_welch = tsa.get_spectra(self.input.data,\r\n method=self.welch_method)\r\n\r\n return f, spectrum_welch", "def thermalConductivity(self, Tk=None, Tc=None):\n Tk = getTk(Tc, Tk)\n self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n thermalConductivity = (\n 2.13014e-08 * Tk**3\n - 6.31916e-05 * Tk**2\n + 1.11629e-01 * Tk\n - 2.00043e00\n )\n return thermalConductivity * 1e-3", "def read_T(self):\n # Read the temerature low-pass filtered with a time constant of 1000 milisecond\n tc = 1000\n raw_t = self._raw_T()\n t = time.ticks_ms()\n e = math.exp(time.ticks_diff(self._filter_time, t)/tc)\n self._filter = (e * self._filter) + ((1-e) * raw_t)\n self._filter_time = t\n return self._filter", "def read_core_temp(self) -> float:", "def thermal(isatom, freq, scalfac,linnonlin,T):\n if isatom != \"true\":\n nfreq = len(freq)\n\n vib_temp = []\n for ifreq in range(nfreq):\n freq[ifreq] = float(freq[ifreq]) * float(scalfac)\n vib_temp_new = c * 100.0 * h * float(freq[ifreq]) / kB\n vib_temp.append(vib_temp_new)\n\n dE_vib = 0\n for ifreq in range(nfreq):\n dE_vib = dE_vib + kB * vib_temp[ifreq] * j2au * ( 0.5 + 1 / ( np.exp(vib_temp[ifreq]/T) - 1) )\n\n dE_ZPE = 0.5 * sum(freq) * cmi2au\n\n if linnonlin == \"L\":\n dE_rot = kB * T * j2au\n elif linnonlin == \"NL\":\n dE_rot = kB * T * j2au * (3.0/2.0)\n else:\n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"ERROR: unknown entry for linear/nonlinear\")\n else:\n dE_ZPE = 0\n dE_vib = 0\n dE_rot = 0\n\n dE_tra = kB * T * j2au * (3.0/2.0)\n dE_thermal = (dE_vib - dE_ZPE) + dE_rot + dE_tra\n\n return(dE_ZPE, dE_vib, dE_rot, dE_tra, dE_thermal)", "def read_temperature(self):\n data = self.ag.read_bytes(Register.OUT_TEMP_L, 2)\n return lsm9ds1.to_int16(data)", "def readtemperature(self, cTemp):\r\n\t\tdata = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_STATUS)\r\n\t\twhile (data & 0x01) != 0 :\r\n\t\t\tdata = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_STATUS)\r\n\t\tdata1 = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_DATAH)\r\n\t\tdata2 = bus.read_byte_data(SI7015_DEFAULT_ADDRESS, SI7015_REG_DATAH)\r\n\t\t\r\n\t\t# Convert the data to 14-bits\r\n\t\tcTemp = (((data1 * 256.0) + data2) / 4.0)\r\n\t\t\r\n\t\tif cTemp < 0x0140 :\r\n\t\t\tcTemp = 0x0140\r\n\t\telif cTemp > 0x12C0 :\r\n\t\t\tcTemp = 0x12C0\r\n\t\telse :\r\n\t\t\tcTemp = cTemp\r\n\t\t\r\n\t\tcTemp = (cTemp / 32.0) - 50.0\r\n\t\tfTemp = cTemp * 1.8 + 32\r\n\t\t\r\n\t\treturn {'c' : cTemp, 'f' : fTemp}", "def getCl(filename):\n powSpec = pf.getdata(filename,1)\n temps = powSpec.field('TEMPERATURE')\n ell = np.arange(temps.size)\n return ell,temps", "def read_temp(self, ctrl_pin):\n bytes_ = self.read_bytes(ctrl_pin)\n int_ = struct.unpack('>H', bytes_)[0]\n if int_ & 0x04 > 1:\n temp_celsius = -1\n else:\n temp_celsius = (int_ >> 3) * 0.25\n return temp_celsius", "def read_ambient_temperatureC(self, ):\n return 
self._read_temperature(MLX90614_TA)", "def read_temperature(self):\n self._force_read(False)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n\n return float(self._compensate_temperature(tempADC))", "def read_spectrum(specfile):\n hdu = pyfits.open(specfile)\n w = [a[0] for a in hdu[0].data]\n f = [a[1] for a in hdu[0].data]\n if 'cassis' in specfile.name:\n ef = [a[2] for a in hdu[0].data]\n colS = 'b'\n elif 'sws' in specfile.name:\n ef = [a[3] for a in hdu[0].data]\n colS = 'g'\n \n f2, ef2 = [], []\n for i in range(0, len(f)):\n f2.append(JyToLamFlam(f[i],ef[i],w[i]*1e-6)[0])\n ef2.append(JyToLamFlam(f[i],ef[i],w[i]*1e-6)[1])\n \n wvlen = [a[0] for a in sorted(zip(w,f2))]\n flux = [a[1] for a in sorted(zip(w,f2))]\n eflux = [a[1] for a in sorted(zip(w,ef2))]\n \n return wvlen,flux,eflux,colS", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def read_temperature(self):\n tRaw = self._read_multiple_bytes_as_array(self.BME280_TEMP_MSB, 3)\n\n return float(self._compensate_temperature((tRaw[0] << 12) + (tRaw[1] << 4) + (tRaw[2] >> 4)))", "def get_temperature(self):\r\n\r\n\t# get current resolution\r\n\r\n\tconf = self.read_config()\r\n\tmask = 0x60 # 0110 0000\r\n\tres = conf & mask # extract resolution from config register\r\n\t# get temperature from register\r\n \r\n self.write('\\x00')\r\n data = self.read(2)\r\n t_raw = struct.unpack('>h', data)\r\n\tt_raw = t_raw[0]\r\n\r\n#\tmsb = 0b11110101\r\n#\tlsb = 0b11100000\r\n#\tdata = struct.pack('BB', msb, lsb)\r\n # t_raw = struct.unpack('>h', data)\r\n#\tt_raw = t_raw[0]\r\n#\tprint t_raw\r\n\t\r\n # return t_raw\r\n\t# t_raw = ((msb << 8) + lsb) # convert to 2 Byte Integer\r\n\r\n\tif (res == 0x00): # 9 bit resolution 0.5 degree\r\n\t print \"res: 0.5\"\r\n\t return (t_raw >> 7) * 0.5\r\n\r\n\tif (res == 0x20): # 10 bit resolution 0.25 degree\r\n\t print \"res: 0.25\"\r\n\t return (t_raw >> 6) * 0.25\r\n\r\n\tif (res == 0x40): # 11 bit resolution 0.125 degree\r\n\t print \"res: 0.125\"\r\n\t return (t_raw >> 5) * 0.125\r\n\r\n\tif (res == 0x60): # l2 bit resolution 0.0625 degree\r\n\t print \"res: 0.0625\"\r\n\t return (t_raw >> 4) * 0.0625", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def thermal_conductivity(temperature):\n a0 = -4.1236\n a1 = 13.788\n a2 = -26.068\n a3 = 26.272\n a4 = -14.663\n a5 = 4.4954\n a6 = -0.6905\n a7 = 0.0397\n log_t = math.log10(temperature)\n f_exp = a0 + a1*log_t + a2*log_t**2.0 + a3*log_t**3.0 + a4*log_t**4.0 + \\\n a5*log_t**5.0 + a6*log_t**6.0 + a7*log_t**7\n g10_thermal_conductivity = 10.0**f_exp\n return g10_thermal_conductivity", "def get_d65_spectrum():\n\n filename = os.path.dirname(os.path.abspath(__file__))\\\n + os.path.normpath(\"/data/d65_spectrum.csv\")\n data = np.loadtxt(filename, delimiter=',', skiprows=1).T\n\n return np.uint16(data[0]), data[1]", "def present_temperature(self):\n return self._read(MX_PRESENT_TEMPERATURE)", "def f2c_file_read_function():\n with open('data.txt', 'r') as infile:\n data = [i.strip().split() for i in infile] # store data as list\n\n F = float(data[-1][-1]) # last item in data should be value\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n 
self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def read_temperature():\n temp = 0.0\n with open(\"daily_temp.txt\", \"r\") as f:\n temp = float(f.readline())\n\n return temp", "def _read_raw_temperature():\n with open(device_file, 'r') as f:\n content = f.readlines()\n return content", "def C_T(self):\n return self.generic_getter(\n get_sound_speed_temperature, \"C_T\", \"convert_sound_speed\"\n )", "def get_ft_sensor_data(self):\r\n return self._arm.get_ft_sensor_data()", "def get_spectrum(self):\n\n self.sock.send('Q')\n self.sock.send(str(100 * self.center_wl))\n\n response = self.sock.recv(7)\n if not response:\n raise InstrumentError(\n 'No response from Labview client, try reconnecting')\n\n datalen = int(response)\n data = ''\n\n while datalen > 0:\n # read data in chunks\n dt = self.sock.recv(datalen)\n data += dt\n datalen -= len(dt)\n\n data = data.split(\"\\n\")[:-1]\n for i in range(len(data)):\n data[i] = data[i].split(\"\\t\")\n\n data = n.array(data,dtype=float)\n\n wl = data[0]\n ccd = data[1:]\n\n return wl,ccd\n\n #self.sock.close()", "def get_cpu_temperature():\n process = Popen(['vcgencmd', 'measure_temp'], stdout=PIPE)\n output, _error = process.communicate()\n return float(output[output.index('=') + 1:output.rindex(\"'\")])", "def get_freq_details(diagnostics_dir, verbose=False):\n metafile_science = find_metadata_file(diagnostics_dir, 'mslist-scienceData*txt', verbose=False)\n if not metafile_science:\n return None, None, None\n\n with open(metafile_science, 'r') as mslist_file:\n lines = mslist_file.readlines()\n\n in_spw_block = False\n for line in lines:\n if in_spw_block:\n parts = line.split()\n chan_width = float(parts[10])*1000. # convert kHz to Hz\n cfreq = parts[12] #MHz\n nchan = parts[7]\n break\n else:\n in_spw_block = line.find('Frame') >= 0\n\n return chan_width, cfreq, nchan", "def get_TESS_data(filename, fluxtype = 'PDCSAP_FLUX'):\n # Manipulate the fits file:\n data = fits.getdata(filename)\n\n # Identify zero-flux values to take them out of the data arrays:\n idx = np.where((data[fluxtype]!=0.)&(~np.isnan(data[fluxtype])))[0]\n\n # Return median-normalized flux:\n return data['TIME'][idx],data[fluxtype][idx]/np.median(data[fluxtype][idx]), \\\n data[fluxtype+'_ERR'][idx]/np.median(data[fluxtype][idx])", "def read_tph(self):\n resultsTPH = [ 0.0, 0.0, 0.0 ]\n\n self._force_read(False)\n\n tempADC = (self._read_register_1ubyte(self.BME680_TEMP_MSB) << 12) | (self._read_register_1ubyte(self.BME680_TEMP_LSB) << 4) | (self._read_register_1ubyte(self.BME680_TEMP_XLSB) >> 4)\n presADC = (self._read_register_1ubyte(self.BME680_PRESS_MSB) << 12) | (self._read_register_1ubyte(self.BME680_PRESS_LSB) << 4) | (self._read_register_1ubyte(self.BME680_PRESS_XLSB) >> 4)\n humADC = (self._read_register_1ubyte(self.BME680_HUM_MSB) << 8) | (self._read_register_1ubyte(self.BME680_HUM_LSB))\n\n resultsTPH[0] = float(self._compensate_temperature(tempADC))\n resultsTPH[1] = float(self._compensate_pressure(presADC))\n resultsTPH[2] = float(self._compensate_humidity(humADC))\n\n return resultsTPH", "def __getRawTemperature(self):\n t1 = self.read_byte_data(self.address, 0x03)\n t2 = self.read_byte_data(self.address, 0x04)\n t3 = self.read_byte_data(self.address, 0x05)\n t = (t1 << 16) | (t2 << 8) | t3\n t = getTwosComplement(t, 24)\n return t", "def test_str_thermal_conductivity(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx,\n \"TestSensor\",\n group_address_state=\"1/2/3\",\n 
value_type=\"thermal_conductivity\",\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0xC5,\n 0x9C,\n 0x4D,\n 0x22,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), -5001.6416015625)\n self.assertEqual(sensor.unit_of_measurement(), \"W/mK\")\n self.assertEqual(sensor.ha_device_class(), None)", "def read_electrons(self):\n for iline, line in enumerate(self.lines):\n fstring = 'number_of_electrons'\n if line.find(fstring) >= 0:\n index_ele = iline + 1\n break\n return float(self.lines[index_ele].split('\\n')[0])", "def read_object_temperatureC(self, ):\n return self._read_temperature(MLX90614_TOBJ1)", "def readWaveform(self):\n # prepare data holder\n y = [ 0 for j in range(4) ]\n # in case of previous errors\n self.flushInput()\n for ch in self.chs:\n # mostly for TDS\n self.setCh(ch)\n # calibration factor we will need soon\n (vmult, voff) = self.calibV()\n # read and calibrate data\n data = (numpy.array(self.readData()) - voff) * vmult\n # This is from the formula in TDS manual, without the\n # \"vzero\" in it---I couldn't figure out when that wouldn't\n # be exactly zero.\n y[ch-1]=data[:]\n\n (hstep, hoff) = self.calibH()\n # initialize time array\n t = numpy.array(range(len(y[0])))\n t = (t * hstep) + hoff\n\n # update the sequence number (... for isUpdated())\n self.seq = self.readSeq()\n\n return (t, y)", "def readAmesDustySpectrum(fname=''):\n print('Reading : ', fname)\n\n # Get the effective temperature, logg and metallicity from the file name\n ind = fname.find('lte')\n fname_tags = fname[ind+3:ind+13].split('-')\n teff = np.float(fname_tags[0]) * 100.\n logg = np.float(fname_tags[1]) * 100.\n mph = np.float(fname_tags[2]) * 100.\n\n wav = []\n inu = []\n bnu = []\n with open(fname, 'r') as rfile:\n dum = rfile.readline()\n while dum != '':\n dum = str(dum).replace('D', 'E')\n sdum = dum.split()\n wav.append(np.float(sdum[0]))\n inu.append(np.float(sdum[1]))\n bnu.append(np.float(sdum[2]))\n dum = rfile.readline()\n\n wav = np.array(wav)\n inu = np.array(inu)\n bnu = np.array(bnu)\n ii = wav.argsort()\n\n wav = wav[ii]\n inu = inu[ii]\n bnu = bnu[ii]\n\n # \"Decode\" the intensity arrays\n inu = 10.**(inu - 8.0) * wav\n bnu = 10.**(bnu - 8.0) * wav\n\n # Convert the wavelength to micron from Angstrom\n wav /= 1e4\n nwav = wav.shape[0]\n\n return {'teff': teff, 'logg': logg, 'mph': mph, 'nwav': nwav, 'wav': wav, 'inu': inu, 'bnu': bnu}", "def getTemperatureMeasurements(self):\n # self.board.readline()\n self.stop = False\n times = []\n temps = [[], [], []]\n \n # A synchronisation string containing the characters tx is sent before each set of measurements,\n # we ensure correct reading of the measurements by waiting for this string\n while str(self.board.readline()).strip('b\\'\\\\rn') != 'tx':\n pass\n \n while not self.stop:\n # A synchronisation string containing the characters tx is sent before each set of measurements\n tx = self.board.readline()\n if str(tx).strip('b\\'\\\\rn') == 'tx':\n rawData1 = self.board.readline()\n rawData2 = self.board.readline()\n rawData3 = self.board.readline()\n rawData4 = self.board.readline()\n \n \n timeStamp = str(rawData1).strip('b\\'\\\\rn')\n temp1 = str(rawData2).strip('b\\'\\\\rn')\n temp2 = str(rawData3).strip('b\\'\\\\rn')\n temp3 = str(rawData4).strip('b\\'\\\\rn')\n try:\n times.append(float(timeStamp) / 1000)\n temps[0].append(float(temp1) / 128)\n temps[1].append(float(temp2) / 128)\n temps[2].append(float(temp3) / 128)\n # print(f'\\rtime: {float(timeStamp) / 1000:.2f} s, Temperature measured on sensor 1: {float(temp1) / 
128:.2f} °C,'\n # f'sensor 2: {float(temp2) / 128:.2f} °C, sensor 3: {float(temp3) / 128:.2f} °C', sep='', end='', flush=True)\n except:\n print(rawData1, rawData2, rawData3, rawData4)\n \n \n if self.stop:\n print('\\nMeasurement finished...')\n \n self.data_stack[self.fetch_kinds[0]] = times\n self.data_stack[self.fetch_kinds[1]] = temps[0]\n self.data_stack[self.fetch_kinds[2]] = temps[1]\n self.data_stack[self.fetch_kinds[3]] = temps[2]\n \n if (len(self.data_stack['Sensor 1 Temp']) != len(times) or len(self.data_stack['Sensor 2 Temp']) != len(times) or len(self.data_stack['Sensor 3 Temp']) != len(times)):\n print(\"Warning: There may be some missing values!\")", "def __getTemperatureCalibrationCoefficients(self):\n src10 = self.read_byte_data(self.address, 0x10)\n src11 = self.read_byte_data(self.address, 0x11)\n src12 = self.read_byte_data(self.address, 0x12)\n c0 = (src10 << 4) | (src11 >> 4)\n c0 = getTwosComplement(c0, 12)\n c1 = ((src11 & 0x0F) << 8) | src12\n c1 = getTwosComplement(c1, 12)\n return c0, c1", "def read_object_temperatureF(self, ):\n return self.read_object_temperatureC() * (9.0/5.0) + 32.0", "def GetTempCPU():\n tPath = \"/sys/class/thermal/thermal_zone0/temp\"\n tFile = open(tPath)\n temp = tFile.read()\n tFile.close()\n return (float(temp)*0.0018 + 32)", "def read_tph(self):\n resultsTPH = [ 0.0, 0.0, 0.0 ]\n\n tRaw = self._read_multiple_bytes_as_array(self.BME280_TEMP_MSB, 3)\n pRaw = self._read_multiple_bytes_as_array(self.BME280_PRESS_MSB, 3)\n hRaw = self._read_multiple_bytes_as_array(self.BME280_HUM_MSB, 2)\n\n resultsTPH[0] = float(self._compensate_temperature((tRaw[0] << 12) + (tRaw[1] << 4) + (tRaw[2] >> 4)))\n resultsTPH[1] = float(self._compensate_pressure((pRaw[0] << 12) + (pRaw[1] << 4) + (pRaw[2] >> 4)))\n resultsTPH[2] = float(self._compensate_humidity(hRaw[0] << 8) + hRaw[1])\n\n return resultsTPH", "def read_core_vbat(self) -> float:", "def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646", "def internal_temp_c(self) -> int:\n return int(self._device_info[\"Temperature\"])", "def Temperature(self):\n try:\n self.__acqiris_QuantroDLL1.Temperature(self.__instrumentID,byref(self.__temperature),c_bool(True))\n except:\n print \"Could not read temperature\"\n self.__temperature=c_int32(-1)\n #self.notify(\"temperature\",self.__temperature.value) # possible automatic notification to a Frontpanel\n return self.__temperature.value", "def read_ambient_temperatureF(self, ):\n return self.read_ambient_temperatureC() * (9.0/5.0) + 32.0", "def readsrfdis96_old_stable(stdout, waves, types, modes, freqs):\n\n # try:\n # A = np.genfromtxt(StringIO(unicode(stdout)), dtype = float)\n # except:\n # raise CPiSError('could not understand stdout \\n%s' % stdout)\n\n A = np.asarray((\" \".join(stdout.strip().rstrip('\\n').split('\\n'))).split(), float)\n A = A.reshape((len(A) / 6, 6))\n\n # A[:, 0] (itst) wave type 1 = Love, 2 = Rayleigh\n # A[:, 1] (iq-1) mode number, 0 = fundamental\n # A[:, 2] (t1a) t1 if phase only else lower period = t1/(1+h), in s\n # A[:, 3] (t1b) 0. 
if phase only else upper period = t1/(1-h), in s;\n # A[:, 4] (cc0) phase velocity at t1 if phase only else at t1a, in km/s;\n # A[:, 5] (cc1) phase velocity at t1 if phase only else at t1b, in km/s;\n\n W = A[:, 0]\n M = A[:, 1]\n I = A[:, 3] == 0. # True means phase only\n nI = ~I\n n = A.shape[0]\n T, C, U = np.zeros(n, float), np.zeros(n, float), np.zeros(n, float) * np.nan\n\n if I.any():\n T[I] = A[I,2]\n C[I] = A[I,4]\n\n if nI.any():\n # np.sqrt(A[nI,2] * A[nI,3]) #Jeffreys average #A[nI,2:4].mean(axis = 1)\n T[nI] = A[nI, 2] * A[nI, 3] / (A[nI, 2:4].mean(axis=1))\n\n C[nI] = np.sqrt(A[nI,4] * A[nI,5]) # Jeffreys average # A[nI,4:6].mean(axis = 1)\n\n LnI = (log(A[nI,5]) - log(A[nI,4])) / (log(A[nI,2]) - log(A[nI,3]))\n U[nI] = C[nI] / (1. - LnI)\n\n L, R = (W == 1), (W == 2)\n umodes = np.arange(max(modes) + 1)\n RMs = [R & (M == m) for m in umodes]\n LMs = [L & (M == m) for m in umodes]\n RMs = [rms if rms.any() else None for rms in RMs]\n LMs = [lms if lms.any() else None for lms in LMs]\n\n values = np.zeros(len(waves), float) * np.nan\n for n, (w, t, m, f) in enumerate(zip(waves, types, modes, freqs)):\n\n if w == \"R\":\n S = RMs[m]\n else:\n S = LMs[m] # elif w == \"L\"\n if S is None:\n continue\n\n p = 1. / f\n TS = T[S]\n iS = np.abs(TS - p).argmin()\n per = TS[iS]\n if abs(per - p) / p > 0.01: continue\n\n if t == 'C':\n val = C[S][iS]\n\n else:\n val = U[S][iS] #elif t == \"U\"\n # else:\n # raise ValueError('')\n # if val <= 0: #NON, je met une penalite dans la fonction cout\n # raise CPiSError('encountered negative dispersion velocity')\n\n values[n] = val\n return values", "def get_cpu_temperature():\n\n output = \"\"\n\n try:\n process = Popen(['vcgencmd', 'measure_temp'], stdout=PIPE)\n output, _error = process.communicate()\n output = str(output)\n except Exception:\n logger.error(\"Exception while reading cpu temp\")\n\n float_temp = 0\n\n try:\n float_temp = float(output[output.index('=') + 1:output.rindex(\"'\")])\n if debug==True:\n logger.info(\"Temp: \" + str(float_temp))\n except ValueError:\n logger.error(\"Temp value is not float\")\n\n return float_temp", "def getTEMP(self):\r\n\t\ttemp_H = self.read(0x41)\r\n\t\ttemp_L = self.read(0x42)\r\n\t\ttemp = self.twos_comp(val = (temp_H*256 + temp_L),bits=16)\r\n\t\ttempC = (temp/340.0)+36.53\r\n\t\ttempF = tempC*(9.0/5) + 32\r\n\t\treturn tempC,tempF ,temp", "def readHAL_refTemp(self):\r\n return self.hal['ref-temp']", "def _read_coefficients(self):\r\n coeff = self._read_register(_BME280_REGISTER_DIG_T1, 24)\r\n coeff = list(struct.unpack('<HhhHhhhhhhhh', bytes(coeff)))\r\n coeff = [float(i) for i in coeff]\r\n self._temp_calib = coeff[:3]\r\n self._pressure_calib = coeff[3:]\r\n\r\n self._humidity_calib = [0]*6\r\n self._humidity_calib[0] = self._read_byte(_BME280_REGISTER_DIG_H1)\r\n coeff = self._read_register(_BME280_REGISTER_DIG_H2, 7)\r\n coeff = list(struct.unpack('<hBBBBb', bytes(coeff)))\r\n self._humidity_calib[1] = float(coeff[0])\r\n self._humidity_calib[2] = float(coeff[1])\r\n self._humidity_calib[3] = float((coeff[2] << 4) | (coeff[3] & 0xF))\r\n self._humidity_calib[4] = float((coeff[4] << 4) | (coeff[3] >> 4))\r\n self._humidity_calib[5] = float(coeff[5])", "def get_dtc(self):\n r = self.sensor(1)\n num = r[0]\n # get all DTC, 3 per mesg response\n self.send_command(GET_DTC_COMMAND)\n #for i in range(0, ceil(num/3.0)):\n res = self.get_result()\n print res\n return res\n # fixme: finish", "def thermodynamic_temperature(frequency, T_cmb=None):\n nu = frequency.to(si.GHz, spectral())\n\n if 
T_cmb is None:\n from astropy.cosmology import default_cosmology\n\n T_cmb = default_cosmology.get().Tcmb0\n\n def f(nu, T_cmb=T_cmb):\n x = _si.h * nu / _si.k_B / T_cmb\n return x**2 * np.exp(x) / np.expm1(x) ** 2\n\n def convert_Jy_to_K(x_jybm):\n factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(\n astrophys.Jy\n )\n return x_jybm / factor\n\n def convert_K_to_Jy(x_K):\n factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(\n si.K\n )\n return x_K / factor\n\n return Equivalency(\n [(astrophys.Jy / si.sr, si.K, convert_Jy_to_K, convert_K_to_Jy)],\n \"thermodynamic_temperature\",\n {\"frequency\": frequency, \"T_cmb\": T_cmb},\n )", "def read_spe(spefilename, verbose=False):\n \n # open SPE file as binary input\n spe = open(spefilename, \"rb\")\n \n # Header length is a fixed number\n nBytesInHeader = 4100\n\n # Read the entire header\n header = spe.read(nBytesInHeader)\n \n # version of WinView used\n swversion = struct.unpack_from(\"16s\", header, offset=688)[0]\n \n # version of header used\n # Eventually, need to adjust the header unpacking\n # based on the headerVersion. \n headerVersion = struct.unpack_from(\"f\", header, offset=1992)[0]\n \n # which camera controller was used?\n controllerVersion = struct.unpack_from(\"h\", header, offset=0)[0]\n if verbose:\n print (\"swversion = \", swversion)\n print (\"headerVersion = \", headerVersion)\n print (\"controllerVersion = \", controllerVersion)\n \n # Date of the observation\n # (format is DDMONYYYY e.g. 27Jan2009)\n date = struct.unpack_from(\"9s\", header, offset=20)[0]\n \n # Exposure time (float)\n exp_sec = struct.unpack_from(\"f\", header, offset=10)[0]\n \n # Intensifier gain\n pimaxGain = struct.unpack_from(\"h\", header, offset=148)[0]\n\n # Not sure which \"gain\" this is\n gain = struct.unpack_from(\"H\", header, offset=198)[0]\n \n # Data type (0=float, 1=long integer, 2=integer, 3=unsigned int)\n data_type = struct.unpack_from(\"h\", header, offset=108)[0]\n\n comments = struct.unpack_from(\"400s\", header, offset=200)[0]\n\n # CCD Chip Temperature (Degrees C)\n detectorTemperature = struct.unpack_from(\"f\", header, offset=36)[0]\n\n # The following get read but are not used\n # (this part is only lightly tested...)\n analogGain = struct.unpack_from(\"h\", header, offset=4092)[0]\n noscan = struct.unpack_from(\"h\", header, offset=34)[0]\n pimaxUsed = struct.unpack_from(\"h\", header, offset=144)[0]\n pimaxMode = struct.unpack_from(\"h\", header, offset=146)[0]\n\n ########### here's from Kasey\n #int avgexp 2 number of accumulations per scan (why don't they call this \"accumulations\"?)\n #TODO: this isn't actually accumulations, so fix it... 
\n accumulations = struct.unpack_from(\"h\", header, offset=668)[0]\n if accumulations == -1:\n # if > 32767, set to -1 and \n # see lavgexp below (668) \n #accumulations = struct.unpack_from(\"l\", header, offset=668)[0]\n # or should it be DWORD, NumExpAccums (1422): Number of Time experiment accumulated \n accumulations = struct.unpack_from(\"l\", header, offset=1422)[0]\n \n \"\"\"Start of X Calibration Structure (although I added things to it that I thought were relevant,\n like the center wavelength...\"\"\"\n xcalib = {}\n \n #SHORT SpecAutoSpectroMode 70 T/F Spectrograph Used\n xcalib['SpecAutoSpectroMode'] = bool( struct.unpack_from(\"h\", header, offset=70)[0] )\n\n #float SpecCenterWlNm # 72 Center Wavelength in Nm\n xcalib['SpecCenterWlNm'] = struct.unpack_from(\"f\", header, offset=72)[0]\n \n #SHORT SpecGlueFlag 76 T/F File is Glued\n xcalib['SpecGlueFlag'] = bool( struct.unpack_from(\"h\", header, offset=76)[0] )\n\n #float SpecGlueStartWlNm 78 Starting Wavelength in Nm\n xcalib['SpecGlueStartWlNm'] = struct.unpack_from(\"f\", header, offset=78)[0]\n\n #float SpecGlueEndWlNm 82 Starting Wavelength in Nm\n xcalib['SpecGlueEndWlNm'] = struct.unpack_from(\"f\", header, offset=82)[0]\n\n #float SpecGlueMinOvrlpNm 86 Minimum Overlap in Nm\n xcalib['SpecGlueMinOvrlpNm'] = struct.unpack_from(\"f\", header, offset=86)[0]\n\n #float SpecGlueFinalResNm 90 Final Resolution in Nm\n xcalib['SpecGlueFinalResNm'] = struct.unpack_from(\"f\", header, offset=90)[0]\n\n # short BackGrndApplied 150 1 if background subtraction done\n xcalib['BackgroundApplied'] = struct.unpack_from(\"h\", header, offset=150)[0]\n BackgroundApplied=False\n if xcalib['BackgroundApplied']==1: BackgroundApplied=True\n\n # float SpecGrooves 650 Spectrograph Grating Grooves\n xcalib['SpecGrooves'] = struct.unpack_from(\"f\", header, offset=650)[0]\n\n # short flatFieldApplied 706 1 if flat field was applied.\n xcalib['flatFieldApplied'] = struct.unpack_from(\"h\", header, offset=706)[0]\n flatFieldApplied=False\n if xcalib['flatFieldApplied']==1: flatFieldApplied=True\n \n #double offset # 3000 offset for absolute data scaling */\n xcalib['offset'] = struct.unpack_from(\"d\", header, offset=3000)[0]\n\n #double factor # 3008 factor for absolute data scaling */\n xcalib['factor'] = struct.unpack_from(\"d\", header, offset=3008)[0]\n \n #char current_unit # 3016 selected scaling unit */\n xcalib['current_unit'] = struct.unpack_from(\"c\", header, offset=3016)[0]\n\n #char reserved1 # 3017 reserved */\n xcalib['reserved1'] = struct.unpack_from(\"c\", header, offset=3017)[0]\n\n #char string[40] # 3018 special string for scaling */\n xcalib['string'] = struct.unpack_from(\"40c\", header, offset=3018)\n \n #char reserved2[40] # 3058 reserved */\n xcalib['reserved2'] = struct.unpack_from(\"40c\", header, offset=3058)\n\n #char calib_valid # 3098 flag if calibration is valid */\n xcalib['calib_valid'] = struct.unpack_from(\"c\", header, offset=3098)[0]\n\n #char input_unit # 3099 current input units for */\n xcalib['input_unit'] = struct.unpack_from(\"c\", header, offset=3099)[0]\n \"\"\"/* \"calib_value\" */\"\"\"\n\n #char polynom_unit # 3100 linear UNIT and used */\n xcalib['polynom_unit'] = struct.unpack_from(\"c\", header, offset=3100)[0]\n \"\"\"/* in the \"polynom_coeff\" */\"\"\"\n\n #char polynom_order # 3101 ORDER of calibration POLYNOM */\n xcalib['polynom_order'] = struct.unpack_from(\"c\", header, offset=3101)[0]\n\n #char calib_count # 3102 valid calibration data pairs */\n xcalib['calib_count'] = 
struct.unpack_from(\"c\", header, offset=3102)[0]\n\n #double pixel_position[10];/* 3103 pixel pos. of calibration data */\n xcalib['pixel_position'] = struct.unpack_from(\"10d\", header, offset=3103)\n\n #double calib_value[10] # 3183 calibration VALUE at above pos */\n xcalib['calib_value'] = struct.unpack_from(\"10d\", header, offset=3183)\n\n #double polynom_coeff[6] # 3263 polynom COEFFICIENTS */\n xcalib['polynom_coeff'] = struct.unpack_from(\"6d\", header, offset=3263)\n\n #double laser_position # 3311 laser wavenumber for relativ WN */\n xcalib['laser_position'] = struct.unpack_from(\"d\", header, offset=3311)[0]\n\n #char reserved3 # 3319 reserved */\n xcalib['reserved3'] = struct.unpack_from(\"c\", header, offset=3319)[0]\n\n #unsigned char new_calib_flag # 3320 If set to 200, valid label below */\n #xcalib['calib_value'] = struct.unpack_from(\"BYTE\", header, offset=3320)[0] # how to do this?\n\n #char calib_label[81] # 3321 Calibration label (NULL term'd) */\n xcalib['calib_label'] = struct.unpack_from(\"81c\", header, offset=3321)\n\n #char expansion[87] # 3402 Calibration Expansion area */\n xcalib['expansion'] = struct.unpack_from(\"87c\", header, offset=3402)\n ########### end of Kasey's addition\n\n if verbose:\n print (\"date = [\"+date+\"]\")\n print (\"exp_sec = \", exp_sec)\n print (\"pimaxGain = \", pimaxGain)\n print (\"gain (?) = \", gain)\n print (\"data_type = \", data_type)\n print (\"comments = [\"+comments+\"]\")\n print (\"analogGain = \", analogGain)\n print (\"noscan = \", noscan)\n print (\"detectorTemperature [C] = \", detectorTemperature)\n print (\"pimaxUsed = \", pimaxUsed)\n\n # Determine the data type format string for\n # upcoming struct.unpack_from() calls\n if data_type == 0:\n # float (4 bytes)\n dataTypeStr = \"f\" #untested\n bytesPerPixel = 4\n dtype = \"float32\"\n elif data_type == 1:\n # long (4 bytes)\n dataTypeStr = \"l\" #untested\n bytesPerPixel = 4\n dtype = \"int32\"\n elif data_type == 2:\n # short (2 bytes)\n dataTypeStr = \"h\" #untested\n bytesPerPixel = 2\n dtype = \"int32\"\n elif data_type == 3: \n # unsigned short (2 bytes)\n dataTypeStr = \"H\" # 16 bits in python on intel mac\n bytesPerPixel = 2\n dtype = \"int32\" # for numpy.array().\n # other options include:\n # IntN, UintN, where N = 8,16,32 or 64\n # and Float32, Float64, Complex64, Complex128\n # but need to verify that pyfits._ImageBaseHDU.ImgCode cna handle it\n # right now, ImgCode must be float32, float64, int16, int32, int64 or uint8\n else:\n print (\"unknown data type\")\n print (\"returning...\")\n sys.exit()\n \n # Number of pixels on x-axis and y-axis\n nx = struct.unpack_from(\"H\", header, offset=42)[0]\n ny = struct.unpack_from(\"H\", header, offset=656)[0]\n \n # Number of image frames in this SPE file\n nframes = struct.unpack_from(\"l\", header, offset=1446)[0]\n\n if verbose:\n print (\"nx, ny, nframes = \", nx, \", \", ny, \", \", nframes)\n \n npixels = nx*ny\n npixStr = str(npixels)\n fmtStr = npixStr+dataTypeStr\n if verbose:\n print (\"fmtStr = \", fmtStr)\n \n # How many bytes per image?\n nbytesPerFrame = npixels*bytesPerPixel\n if verbose:\n print (\"nbytesPerFrame = \", nbytesPerFrame)\n\n # Create a dictionary that holds some header information\n # and contains a placeholder for the image data\n spedict = {'data':[], # can have more than one image frame per SPE file\n 'IGAIN':pimaxGain,\n 'EXPOSURE':exp_sec,\n 'SPEFNAME':spefilename,\n 'OBSDATE':date,\n 'CHIPTEMP':detectorTemperature,\n 'COMMENTS':comments,\n 'XCALIB':xcalib,\n 
'ACCUMULATIONS':accumulations,\n 'FLATFIELD':flatFieldApplied,\n 'BACKGROUND':BackgroundApplied\n }\n \n # Now read in the image data\n # Loop over each image frame in the image\n if verbose:\n print (\"Reading image frames number \"),\n for ii in range(nframes):\n iistr = str(ii)\n data = spe.read(nbytesPerFrame)\n if verbose:\n print (iistr,\" \",)\n \n # read pixel values into a 1-D numpy array. the \"=\" forces it to use\n # standard python datatype size (4bytes for 'l') rather than native\n # (which on 64bit is 8bytes for 'l', for example).\n # See http://docs.python.org/library/struct.html\n dataArr = np.array(struct.unpack_from(\"=\"+fmtStr, data, offset=0),\n dtype=dtype)\n\n # Resize array to nx by ny pixels\n # notice order... (y,x)\n dataArr.resize((ny, nx))\n #print dataArr.shape\n\n # Push this image frame data onto the end of the list of images\n # but first cast the datatype to float (if it's not already)\n # this isn't necessary, but shouldn't hurt and could save me\n # from doing integer math when i really meant floating-point...\n spedict['data'].append( dataArr.astype(float) )\n\n if verbose:\n print (\"\")\n \n return spedict", "def tetracam_modtran(tape7, reflectance, transmission1, transmission2, transmission3, transmission4, transmission5, transmission6):\n\n #Read in tape7.scn\n f = open(tape7, 'r')\n wavelengths = []\n TOTAL_RAD = []\n\n i=0\n for line in f:\n if i < 11:\n pass\n elif i > 224:\n pass\n else:\n data = line.rstrip().split(' ')\n wavelengths.append(data[4])\n TOTAL_RAD.append(data[23])\n i += 1\n\n #Make gathered data into arrays that can be used for calculations\n TOTAL_RAD = numpy.asarray(TOTAL_RAD).astype(numpy.float64)\n wavelengths = numpy.asarray(wavelengths).astype(numpy.float64) * 1000 #get to [nm]\n\n #Read in reflectance spectra\n spectrum = color.SpectrumFactory.create_from_file(reflectance,\n color.SAMPLE_REFLECTANCE)\n\n reflectance = spectrum.values\n refWavelengths = spectrum.wavelengths\n\n #Read in filter 1 transmission\n f3 = open(transmission1, 'r')\n t1Wavelengths = []\n t1Transmission = []\n lines = f3.readlines()\n data = lines[0].rstrip().split('\\r')\n for i in range(len(data)):\n d = data[i].rstrip().split(',')\n t1Wavelengths.append(d[0])\n t1Transmission.append(d[1])\n\n #Make gathered data into arrays that can be used for calculations\n t1Wavelengths = numpy.asarray(t1Wavelengths).astype(numpy.float64)\n t1Transmission = numpy.asarray(t1Transmission).astype(numpy.float64)\n\n #Read in filter 2 transmission\n f4 = open(transmission2, 'r')\n t2Wavelengths = []\n t2Transmission = []\n lines = f4.readlines()\n data = lines[0].rstrip().split('\\r')\n for i in range(len(data)):\n d = data[i].rstrip().split(',')\n t2Wavelengths.append(d[0])\n t2Transmission.append(d[1])\n\n #Make gathered data into arrays that can be used for calculations\n t2Wavelengths = numpy.asarray(t2Wavelengths).astype(numpy.float64)\n t2Transmission = numpy.asarray(t2Transmission).astype(numpy.float64)\n\n #Read in filter 3 transmission\n f5 = open(transmission3, 'r')\n t3Wavelengths = []\n t3Transmission = []\n lines = f5.readlines()\n data = lines[0].rstrip().split('\\r')\n for i in range(len(data)):\n d = data[i].rstrip().split(',')\n t3Wavelengths.append(d[0])\n t3Transmission.append(d[1])\n\n #Make gathered data into arrays that can be used for calculations\n t3Wavelengths = numpy.asarray(t3Wavelengths).astype(numpy.float64)\n t3Transmission = numpy.asarray(t3Transmission).astype(numpy.float64)\n\n #Read in filter 4 transmission\n f6 = 
open(transmission4, 'r')\n t4Wavelengths = []\n t4Transmission = []\n lines = f6.readlines()\n data = lines[0].rstrip().split('\\r')\n for i in range(len(data)):\n d = data[i].rstrip().split(',')\n t4Wavelengths.append(d[0])\n t4Transmission.append(d[1])\n\n #Make gathered data into arrays that can be used for calculations\n t4Wavelengths = numpy.asarray(t4Wavelengths).astype(numpy.float64)\n t4Transmission = numpy.asarray(t4Transmission).astype(numpy.float64)\n\n #Read in filter 5 transmission\n f7 = open(transmission5, 'r')\n t5Wavelengths = []\n t5Transmission = []\n lines = f7.readlines()\n data = lines[0].rstrip().split('\\r')\n for i in range(len(data)):\n d = data[i].rstrip().split(',')\n t5Wavelengths.append(d[0])\n t5Transmission.append(d[1])\n\n #Make gathered data into arrays that can be used for calculations\n t5Wavelengths = numpy.asarray(t5Wavelengths).astype(numpy.float64)\n t5Transmission = numpy.asarray(t5Transmission).astype(numpy.float64)\n\n #Read in filter 6 transmission\n f8 = open(transmission6, 'r')\n t6Wavelengths = []\n t6Transmission = []\n lines = f8.readlines()\n data = lines[0].rstrip().split('\\r')\n for i in range(len(data)):\n d = data[i].rstrip().split(',')\n t6Wavelengths.append(d[0])\n t6Transmission.append(d[1])\n\n #Make gathered data into arrays that can be used for calculations\n t6Wavelengths = numpy.asarray(t6Wavelengths).astype(numpy.float64)\n t6Transmission = numpy.asarray(t6Transmission).astype(numpy.float64)\n\n masterWavelengths = numpy.arange(334.0, 2510.0, 0.1)\n\n TOTAL_RAD = numerical.interpolate.interp1(wavelengths,\n TOTAL_RAD,\n masterWavelengths,\n order=1,\n extrapolate=True)\n\n reflectance = numerical.interpolate.interp1(refWavelengths,\n reflectance,\n masterWavelengths,\n order=1,\n extrapolate=True)\n\n t1Transmission = numerical.interpolate.interp1(t1Wavelengths,\n t1Transmission,\n masterWavelengths,\n order=1,\n extrapolate=False)\n\n t2Transmission = numerical.interpolate.interp1(t2Wavelengths,\n t2Transmission,\n masterWavelengths,\n order=1,\n extrapolate=False)\n\n t3Transmission = numerical.interpolate.interp1(t3Wavelengths,\n t3Transmission,\n masterWavelengths,\n order=1,\n extrapolate=False)\n\n t4Transmission = numerical.interpolate.interp1(t4Wavelengths,\n t4Transmission,\n masterWavelengths,\n order=1,\n extrapolate=False)\n\n t5Transmission = numerical.interpolate.interp1(t5Wavelengths,\n t5Transmission,\n masterWavelengths,\n order=1,\n extrapolate=False)\n\n t6Transmission = numerical.interpolate.interp1(t6Wavelengths,\n t6Transmission,\n masterWavelengths,\n order=1,\n extrapolate=False)\n\n radiance = TOTAL_RAD * reflectance\n\n radiance1 = t1Transmission * radiance\n radiance2 = t2Transmission * radiance\n radiance3 = t3Transmission * radiance\n radiance4 = t4Transmission * radiance\n radiance5 = t5Transmission * radiance\n radiance6 = t6Transmission * radiance\n\n integratedRadiance1 = numpy.trapz(radiance1,x=masterWavelengths) #B\n integratedRadiance2 = numpy.trapz(radiance2,x=masterWavelengths) #G\n integratedRadiance3 = numpy.trapz(radiance3,x=masterWavelengths) #R\n integratedRadiance4 = numpy.trapz(radiance4,x=masterWavelengths) #IR 720\n integratedRadiance5 = numpy.trapz(radiance5,x=masterWavelengths) #IR 800\n integratedRadiance6 = numpy.trapz(radiance6,x=masterWavelengths) #IR 900\n\n ndvi = (integratedRadiance5 - integratedRadiance3) / \\\n float(integratedRadiance5 + integratedRadiance3)\n\n print ndvi\n\n return masterWavelengths, TOTAL_RAD, reflectance, radiance, radiance1, \\\n radiance2, 
radiance3, radiance4, radiance5, radiance6, \\\n t1Transmission, t2Transmission, t3Transmission, t4Transmission, \\\n t5Transmission, t6Transmission", "def get_brightnesstemperature(self, channel):\n K1 = {\n \"10\": 3040.136402, # Constant K1 [W m-2 um-1].\n \"11\": 2482.375199,\n \"12\": 1935.060183,\n \"13\": 866.468575,\n \"14\": 641.326517,\n }\n\n K2 = {\n \"10\": 1735.337945, # Constant K2 [K].\n \"11\": 1666.398761,\n \"12\": 1585.420044,\n \"13\": 1350.069147,\n \"14\": 1271.221673,\n }\n\n return K2[channel] / np.log((K1[channel] / self.get_radiance(channel)) + 1)", "def temperature() -> float:", "def temp(self):\n if self.temp_sensor is None:\n return None\n else:\n if self.temp_scale.lower() in ['f', 'fahrenheit']:\n return self.temp_sensor.temp_f\n elif self.temp_scale.lower() in ['c', 'celsius']:\n return self.temp_sensor.temp_c", "def get_temperature(self):\n pass", "def hp34401a_read_voltage(hp_meter):\n hp_meter.write(\"MEAS:VOLT:DC? DEF,DEF\")\n return float(hp_meter.read())", "def compute_td_spectral_function(self):\n nomegase = self.nomegase\n nkpt = self.nkpt\n nband = self.nband\n ntemp = self.ntemp\n\n self.spectral_function_T = np.zeros((nomegase, ntemp, nkpt, nband),\n dtype=float)\n\n omega = np.einsum('ijt,l->ijlt',\n np.ones((nkpt, nband, ntemp)), self.omegase)\n\n self.spectral_function_T = (\n (1 / np.pi) * np.abs(self.self_energy_T.imag) /\n ((omega - self.self_energy_T.real) ** 2\n + self.self_energy_T.imag ** 2)\n )", "def temperature(self):\n return _cantera.reactor_temperature(self.__reactor_id)", "def _therm_cond(self):\n xy = dict() # used to label the components e.g 1->CO2,2->N2\n for (i, j) in enumerate(self.component_list, 1):\n xy[i] = j\n\n k_vap = 0\n for i in range(1, len(self.component_list) + 1):\n sumij = 0\n for j in range(1, len(self.component_list) + 1):\n Aij = (1 + (self.visc_d_comp[xy[i]] / self.visc_d_comp[xy[j]])**0.5 *\n (self.mw_comp[xy[j]] / self.mw_comp[xy[i]])**0.25)**2 *\\\n (8 * (1 + self.mw_comp[xy[i]] / self.mw_comp[xy[j]]))**-0.5\n sumij += self.mole_frac_comp[xy[j]] * Aij\n k_vap += self.mole_frac_comp[xy[i]] * self.therm_cond_comp[xy[i]] / sumij\n\n try:\n self.therm_cond = Expression(expr=k_vap,\n doc='Vapor thermal'\n 'conductivity [J/(m.K.s)]')\n except AttributeError:\n self.del_component(self.therm_cond)\n raise", "def get_temperature(self):\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_GET_TEMPERATURE, (), '', 'i')", "def one_transition_spectrum_cd(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n rr = tr[\"rr\"] # transition dipole strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n # CD and fluorescence can be calculated in this step\n # TODO if rotatory strength defined calculate also circular dichroism spectra\n # TOOD calculate fluorescence spectra (for fluorescence there should be a switch because it should be calculated only for the first transition) \n \n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n# plt.figure()\n# plt.title(\"Absorption\")\n# plt.plot(ta.data,numpy.real(at))\n# plt.plot(ta.data,numpy.imag(at))\n \n 
\n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = rr*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def therm_Es(self):\n try:\n return self._therm_Es\n except AttributeError:\n try:\n self._therm_Es = pd.read_csv(\n os.path.join(self.loc, self.thermo_out),\n index_col=0)\n print(\"Reading thermodynamic values from %s.\"\n % self.thermo_out)\n except OSError:\n self._therm_Es = self.thermo_all()\n return self._therm_Es", "def get_thermo(self, structure):\n dir = IO(dir=os.path.join(self.loc, structure))\n values = dir.get_values(\n structure, \"freq.log\", self.freqvalid, self.thermo_vals)\n \n return values", "def read_voltage(self):\n self.write(':FETC?')\n msg = self.read()\n #print ('dmm msg = ', msg)\n v = msg.split(',')[0].rstrip('NVDC').strip()\n if v[-1] == 'R':\n return float(v[:-1])\n else:\n return float(v)", "def ambient_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_c\"))\r\n return kelvin_to_celsius(self._ambient_temperature)", "def get_internal_energy(filename):\n # --------------- helper functions --------------- #\n def parse_data(block):\n \"\"\"\n Parse the line(s) to get the data.\n \"\"\"\n rval = {\n 'Total' : None,\n 'Electronic' : None,\n 'Translational' : None,\n 'Rotational' : None,\n 'Vibrational' : None\n }\n for line in block.splitlines():\n if re.match(r'^\\s*Total', line):\n key = 'Total'\n elif re.match(r'^\\s*Electronic', line):\n key = 'Electronic'\n elif re.match(r'^\\s*Translational', line):\n key = 'Translational'\n elif re.match(r'^\\s*Rotational', line):\n key = 'Rotational'\n elif re.match(r'^\\s*Vibrational', line):\n key = 'Vibrational'\n else:\n key = None\n if key:\n words = line.strip().split()\n try:\n rval[key] = float(words[1])\n except ValueError:\n raise ValueError('Invalid thermodynamic format.')\n return rval\n # ------------- end helper functions ------------- #\n # open the file, if a string\n if isinstance(filename, str):\n ifs = open(filename, 'r')\n else:\n ifs = filename\n # extract the relevent lines\n start = r'^\\s*E\\s+\\(Thermal\\)'\n stop = r'^\\s*Vibrational'\n rre = RegexRangeExtractor(start, stop,\n include_start=True,\n include_stop=True)\n block = rre(ifs)[0]\n # close file\n if ifs is not filename:\n ifs.close()\n # parse data\n #+ single value/file\n rval = parse_data(block)\n return rval", "def temperature(self):\n return float(self._current_observation['temp_c'])", "def ReadData(self, tstep):\n fname = self.fname[tstep]\n t0 = self.tind[tstep]\n \n print 'Reading SUNTANS data at time: %s...'%datetime.strftime(self.timei[tstep],'%Y-%m-%d %H:%M:%S') \n nc = Dataset(fname)\n \n self.time = nc.variables['time'][t0]\n \n self.temp = nc.variables['temp'][t0,:,:]\n self.salt = nc.variables['salt'][t0,:,:]\n self.uc = nc.variables['uc'][t0,:,:]\n self.vc = nc.variables['vc'][t0,:,:]\n self.nu_v = nc.variables['nu_v'][t0,:,:]\n self.rho = nc.variables['rho'][t0,:,:]\n self.tau_x = nc.variables['tau_x'][t0,:]\n self.tau_y = nc.variables['tau_y'][t0,:]\n self.eta = 
nc.variables['eta'][t0,:]", "def read_temp(self):\n return 19.0\n data = self.read(_TEMP_REG, 2)\n temp = ((data[0] * 256) + data[1]) / 16\n if temp > 2047:\n temp -= 4096\n return temp * 0.0625", "def read_values(self):\n temp, acc, gyro = self.read_ag_data()\n tempc = lsm9ds1.TEMPC_0 + temp * lsm9ds1.TEMP_SENSOR_SCALE\n tempf = (tempc * 9/5) + 32\n acc = [c * lsm9ds1.ACC_SENSOR_SCALE for c in acc]\n gyro = [g * lsm9ds1.DPS_SENSOR_SCALE for g in gyro]\n return tempf, acc, gyro", "def test_get_dcTemperature(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, DC_TEMPERATURE_IDX, DC_TEMPERATURE_SUB, to_plc=True)\n param_obj = self.__dict__[servo_type]._get_dcTemperature()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in dcTemperature...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue", "def spectrum(self):\r\n f, spectrum = tsa.get_spectra(self.input.data, method=self.method)\r\n return spectrum", "def getTemperature(self):\n return self.temperature", "def get_data():\n loopstate = get_loopstate()\n loudness = grovepi.analogRead(LOUDNESS_SENSOR)\n [temp, hum] = grovepi.dht(TEMP_HUM_SENSOR, module_type=0)\n return [loopstate, loudness, temp, hum]", "def get_all_thermals(self):\n return self._thermal_list", "def temperature(self) -> float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def get_temps(self):\n try:\n cmos = self.cmos_temp\n except Exception:\n cmos = None\n try:\n pcb = self.pcb_temp\n except Exception:\n pcb = None\n return cmos, pcb", "def thermal_state(self, T):\n return unvectorize(\n np.diag(thermal_dist(t, self.ev)) \\\n .astype(settings.DTYPE_COMPLEX)\n for t in vectorize(T)\n )", "def digital_temp_data(self): # This function will give the initial digital format for temperature data \n self._bus.write_byte(self._addr, 0x58) \n time.sleep(0.05) \n tempadcbytes = self._bus.read_i2c_block_data(self._addr, 0x00) \n time.sleep(0.05) \n self.tempadc=tempadcbytes[0]*65536.0+tempadcbytes[1]*256.0+tempadcbytes[2]", "def read_ceilometer_file(self, calibration_factor: float | None = None) -> None:\n header, data_lines = self._read_common_header_part()\n header.append(self._read_header_line_3(data_lines[3]))\n self.metadata = self._handle_metadata(header)\n self.data[\"range\"] = self._calc_range()\n hex_profiles = self._parse_hex_profiles(data_lines[4:20])\n self.data[\"beta_raw\"] = self._read_backscatter(hex_profiles)\n self.data[\"calibration_factor\"] = calibration_factor or 1.0\n self.data[\"beta_raw\"] *= self.data[\"calibration_factor\"]\n self.data[\"zenith_angle\"] = np.median(self.metadata[\"zenith_angle\"])", "def getAllSpectrumMeasurements(self): \n return self.spectrum", "def temperature(self):\n temp = ct.c_float()\n self.lib.GetTemperatureF(ct.pointer(temp))\n return temp.value", "def _read_pha(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n header_for_livetime = hdul[0].header\n\n return data['channel'], data['counts'], header_for_livetime['LIVETIME']", "def get_kwh_reading(self):\n\n svc = \"urn:micasaverde-com:serviceId:EnergyMetering1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n\n return 
self.get_variable(svc, \"KWHReading\")", "def read_ceilometer_file(self, calibration_factor: float | None = None) -> None:\n header, data_lines = self._read_common_header_part()\n header.append(self._read_header_line_4(data_lines[-3]))\n self.metadata = self._handle_metadata(header)\n self.data[\"range\"] = self._calc_range()\n self.data[\"beta_raw\"] = self._read_backscatter(data_lines[-2])\n self.data[\"calibration_factor\"] = calibration_factor or 1.0\n self.data[\"beta_raw\"] *= self.data[\"calibration_factor\"]\n self.data[\"zenith_angle\"] = np.median(self.metadata[\"zenith_angle\"])\n self._store_ceilometer_info()\n self._sort_time()", "def getdata(self):\n return self.cwt", "def state(self):\n return self.device.device_data[self.device_id]['temperature']", "def get_ir_sensor_temperature(self) -> float:\n self.serial.write(b\"T!\")\n ir_sensor_temp = self.__extract_int(self.__read_response(1)[0], b\"!2\")\n\n return round(ir_sensor_temp / 100, 2)", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def get_conductivity(self) -> float:\n try:\n datalist = self.get_data()\n data = datalist[0]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'get_conductivity error: {err}')\n return -1", "def read_sdss(name):\n flux=py.getdata(name,0)\n wdel=py.getval(name,'CD1_1',0)\n w0=py.getval(name,'CRVAL1',0)\n wave= 10.0**(w0+wdel*np.arange(len(flux[0])))\n \n return(wave,flux[0]*1e-17)", "def get_temperature(self):\n \n # Get temp readings from both sensors\n humidity_temp = self._sense_hat.get_temperature_from_humidity()\n pressure_temp = self._sense_hat.get_temperature_from_pressure()\n \n # avg_temp becomes the average of the temperatures from both sensors\n # We need to check for pressure_temp value is not 0, to not ruin avg_temp calculation\n avg_temp = (humidity_temp + pressure_temp) / 2 if pressure_temp else humidity_temp\n \n # Get the CPU temperature\n cpu_temp = self._get_cpu_temp()\n \n # Calculate temperature compensating for CPU heating\n adj_temp = avg_temp - (cpu_temp - avg_temp) / 1.5\n \n # Average out value across the last three readings\n return self._get_smooth(adj_temp)", "def get_temperature(self, sensor: int = 0) -> float:\n\n return self.send(self.cmd.GET_HEATING_ACT)", "def get_temperature(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? (.*?) .*? .*? . .*? .*? . . . .*?'\n temperature = float(re.findall(pattern,summary).pop())\n return temperature" ]
[ "0.61936766", "0.6150327", "0.60563195", "0.60492754", "0.59827816", "0.5981612", "0.5974009", "0.5960181", "0.5955466", "0.5945766", "0.5935459", "0.5898904", "0.58918184", "0.58871585", "0.58717936", "0.58078927", "0.5802359", "0.5800536", "0.5767256", "0.5763488", "0.57244587", "0.56753165", "0.56577086", "0.56299245", "0.56284976", "0.5616202", "0.5591989", "0.55913717", "0.55902463", "0.5588386", "0.5576983", "0.5564847", "0.5558081", "0.55577695", "0.55574083", "0.5555966", "0.55546147", "0.555131", "0.55380595", "0.5534868", "0.5531146", "0.55290645", "0.552858", "0.5527072", "0.55201447", "0.55062646", "0.55017257", "0.54982656", "0.54877377", "0.5475504", "0.54552627", "0.54531866", "0.5438984", "0.54354674", "0.54302335", "0.5428069", "0.54155356", "0.54142994", "0.5395958", "0.5391488", "0.53881055", "0.538213", "0.5362634", "0.53621817", "0.53612816", "0.53584814", "0.5357859", "0.5354894", "0.5354621", "0.5352825", "0.53501195", "0.5346719", "0.5340611", "0.5333993", "0.53318214", "0.5330949", "0.5325893", "0.5315902", "0.5302759", "0.5299048", "0.5297869", "0.52951616", "0.5292743", "0.5289986", "0.52824706", "0.5278254", "0.5269865", "0.526843", "0.52682555", "0.5260503", "0.52498186", "0.5238926", "0.5237132", "0.5230524", "0.52143556", "0.5210871", "0.52086794", "0.5207829", "0.52037644", "0.5200334" ]
0.6091845
2
Verifies simple ordering. IE '1' < '2' < '10' < '11' < '20' < '21'
def test_sort(): data = ["filename_{}.py".format(i) for i in range(200)] temp = data[:] random.shuffle(temp) assert data == sort(temp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sorting():\n string = [\"A\", \"B\", \"C\"]\n sorted_string = bubble_sort(string)\n for small, large in zip(sorted_string[:-1], sorted_string[1:]):\n assert small <= large\n\n negative_numbers = [-3, -5, -1, -99, -34, -33]\n sorted_negative_numbers = bubble_sort(negative_numbers)\n for small, large in zip(sorted_negative_numbers[:-1],\n sorted_negative_numbers[1:]):\n assert small <= large\n\n odd_length_list = [3, 5, 1, 99, 34, 33, -2]\n odd_length_list_sorted = bubble_sort(odd_length_list)\n for small, large in zip(odd_length_list_sorted[:-1],\n odd_length_list_sorted[1:]):\n assert small <= large", "def test_lessThan(self):\n self.assertTrue(Comparable(0) < Comparable(3))\n self.assertFalse(Comparable(2) < Comparable(0))", "def test_version_sorting(self):\n assert natsort(['1', '5', '10', '50']) == ['1', '5', '10', '50']", "def __lt__(self, other):\n\n return self._ordinals < other.ordinal()", "def test_lessThan(self):\n self.assertEqual(cmp(0.1, 2.3), -1)\n self.assertEqual(cmp(b\"a\", b\"d\"), -1)", "def test_sort_all_equal():\n equal_data = [1, 1, 1, 1, 1]\n sorted_list = bubble_sort(equal_data)\n\n for small, large in zip(sorted_list[:-1], sorted_list[1:]):\n assert small <= large", "def test_sort_sorted():\n sorted_data = [1, 2, 3, 4, 5]\n sorted_list = bubble_sort(sorted_data)\n\n for small, large in zip(sorted_list[:-1], sorted_list[1:]):\n assert small <= large", "def test_python_3_compatibility(self):\n assert natsort(['1', 'a']) == ['1', 'a']", "def testSortOrder(self):\n timestamp = time.time()\n comment_id1 = Comment.ConstructCommentId(timestamp, 0, 0)\n comment_id2 = Comment.ConstructCommentId(timestamp + 1, 0, 0)\n self.assertGreater(comment_id2, comment_id1)", "def is_valid(n):\n\tif type(n) == int:\n\t\tn = str(n)\n\tfor index, c in enumerate(n):\n\t\tif index == 0:\n\t\t\tcontinue\n\t\tif n[index - 1] > n[index]:\n\t\t\treturn False\n\treturn True", "def test_quick(self):\n integers = quick_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_reversed_version_sorting(self):\n assert natsort(['1', '5', '10', '50'], reverse=True) == ['50', '10', '5', '1']", "def cmp_numcite( a, b ) :\n return cmp( int(b['Z9']), int(a['Z9']) )", "def cmp(a, b):\n return (a > b) - (a < b)", "def __le__(self, other):\n return self._ordinals <= other.ordinal()", "def cmp(x, y):\n return (x > y) - (x < y)", "def __ge__(self, other):\n return self._ordinals >= other.ordinal()", "def __gt__(self, other):\n return self._ordinals > other.ordinal()", "def comparator(a, b):\n a = re.split(\"[_=]\", a)[-2]\n b = re.split(\"[_=]\", b)[-2]\n if a > b:\n return 1\n elif a < b:\n return -1\n else:\n return 0", "def test_sort_order(self):\n obj = self.conn.search(self.basedn, 2, attrlist=['uidNumber'],\n sort_order=[\"-uidNumber\"])\n sort = [o['uidNumber'][0] for o in obj if 'uidNumber' in o]\n self.assertTrue((all(sort[i] >= sort[i+1]\n for i in range(len(sort)-1))), \"Not sorted\")", "def verify(array):\r\n for i in range(1, len(array)):\r\n if array[i - 1] > array[i]:\r\n raise ValueError('Unsorted list detected. 
(' + str(array[i - 1]) + ' > ' + str(array[i]) + ')')", "def _cmp(a, b): # pylint: disable=invalid-name\n return (a > b) - (a < b)", "def test_find_sequential_ordering():\n example = {\n \"C\": [],\n \"A\": [\"C\"],\n \"F\": [\"C\"],\n \"B\": [\"A\"],\n \"D\": [\"A\"],\n \"E\": [\"B\", \"D\", \"F\"],\n }\n assert find_sequential_ordering(example) == \"CABDFE\"", "def test_lt(self):\n assert self.app2 < self.app1\n assert self.app3 > self.app2", "def test_pre_order_0_4(bst_wiki):\n assert tuple(bst_wiki.pre_order()) == (7, 4, 2, 1, 3, 6, 5, 9, 8)", "def test_lessThanOrEqual(self):\n self.assertTrue(Comparable(3) <= Comparable(3))\n self.assertTrue(Comparable(0) <= Comparable(3))\n self.assertFalse(Comparable(2) <= Comparable(0))", "def is_sorted(seq):\n return all(seq[i-1] < seq[i] for i in range(1, len(seq)))", "def __le__(self, other):\n return other >= self._cmpkey()", "def test_sort_ids(self):\r\n\r\n mapping = {\"1\": [\"0\", \"2\", \"5\", \"6\"],\r\n \"3\": [],\r\n \"4\": [],\r\n \"11\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\r\n \"8\": [\"7\"]}\r\n\r\n self.assertEqual(sort_ids([\"1\", \"3\", \"4\", \"8\", \"11\"], mapping),\r\n [\"11\", \"1\", \"8\", \"4\", \"3\"])", "def test_sort_ids(self):\r\n\r\n mapping = {\"1\": [\"0\", \"2\", \"5\", \"6\"],\r\n \"3\": [],\r\n \"4\": [],\r\n \"11\": [1, 2, 3, 4, 5, 6, 7, 8, 9],\r\n \"8\": [\"7\"]}\r\n\r\n self.assertEqual(sort_ids([\"1\", \"3\", \"4\", \"8\", \"11\"], mapping),\r\n [\"11\", \"1\", \"8\", \"4\", \"3\"])", "def testCmp(self):\n\n item1 = models.Room(id=1,\n name=\"Test Room\",\n roomTypeId = 1)\n\n item2 = models.Room(id=1,\n name=\"Test Room\",\n roomTypeId = 1)\n \n self.assertEqual(item1,item2)\n \n #Order On Name\n item2.name = \"A_Test\"\n self.assertGreater(item1,item2)\n\n item2.name = \"Z_Test\"\n self.assertLess(item1,item2)\n\n item2.name = item1.name\n item2.roomTypeId = 0\n self.assertGreater(item1,item2)\n\n item2.roomTypeId = 2\n self.assertLess(item1,item2)", "def testin_order_0_4(bst_wiki):\n assert tuple(bst_wiki.in_order()) == (1, 2, 3, 4, 5, 6, 7, 8, 9)", "def cmp(x, y):\n return (x > y) - (x < y)", "def test_random_100_in_order(bst_100_rand):\n assert tuple(bst_100_rand.in_order()) == tuple(x for x in range(100))", "def test_primer_exceeds_mismatches(self):\r\n primers = ['AAAA', 'TTTT']\r\n exact = 'AAAA'\r\n mismatch_ok = 'AAAT'\r\n mismatch_bad = 'GGGG'\r\n self.assertEqual(primer_exceeds_mismatches(exact, primers, 0), False)\r\n self.assertEqual(primer_exceeds_mismatches(mismatch_ok, primers, 1),\r\n False)\r\n self.assertEqual(primer_exceeds_mismatches(mismatch_bad, primers, 2),\r\n True)", "def test_input_order_irrelevant(self):\n sorted_strings = ['1532-44349', '1534-44658', '1536-44582', '1536-44935', '1538-44874', '1538-44920']\n mutable_copy = list(sorted_strings)\n for i in range(10000):\n random.shuffle(mutable_copy)\n assert natsort(mutable_copy) == sorted_strings", "def __ge__(self, seq):\n return not self.__lt__(seq)", "def ascend(x):\n return alltrue(greater_equal(x[1:],x[0:-1]))", "def test_sort_all_equal():\n assert bubble_sort([1, 1, 1, 3, 4, 10, 2, 3]) == [1, 1, 1, 2, 3, 3, 4, 10]", "def _compare(a, b):\n a = _split(a)\n b = _split(b)\n if a[0] != b[0]:\n if a[0] > b[0]:\n return 1\n else:\n return -1\n max_len = max(len(a[1]), len(b[1]))\n for i in range(max_len):\n if i > len(b[1]):\n return 1\n elif i > len(a[1]):\n return -1\n schar = a[1][i]\n ochar = b[1][i]\n if schar > ochar:\n return 1\n elif schar < ochar:\n return -1", "def compare(self) -> int:", "def compare_entities(e1, e2):\n sp1 = 
e1.sorting_priority\n sp2 = e2.sorting_priority\n if sp1 > sp2:\n return 1\n elif sp1 == sp2:\n return 0\n else:\n return -1", "def checkPossibility_665_2(self, nums):\n opt_one = nums[:]\n opt_two = nums[:]\n\n for i in range(len(nums)-1):\n if nums[i] > nums[i+1]:\n opt_one[i] = nums[i+1]\n opt_two[i+1] = nums[i]\n break\n\n return opt_one == sorted(opt_one) or opt_two == sorted(opt_two)", "def test_graphid_operator_lt():\n for xstr, ystr in itertools.product([\"g1\", \"g2\", \"y7\", \"z123\"], repeat=2):\n x = _ir.GraphId(xstr)\n y = _ir.GraphId(ystr)\n\n x_le_y = x < y\n y_le_x = y < x\n\n # We can't violate assymetry\n assert not (x_le_y and y_le_x)\n\n if xstr == ystr:\n # Expect irreflexivity: neither x < y or y < x\n assert (not x_le_y) and (not y_le_x)\n else:\n # Expect totality: one of x < y or y < x\n assert x_le_y or y_le_x", "def order(num1, num2, num3):\n num123 = int(num1+num2+num3)\n num132 = int(num1+num3+num2)\n num213 = int(num2+num1+num3)\n num231 = int(num2+num3+num1)\n num312 = int(num3+num1+num2)\n num321 = int(num3+num2+num1)\n if num123 >= num132 and num123 >= num213 and num123 >= num231\\\n and num123 >= num312 and num123 >= num321:\n print(num123)\n elif num132 >= num123 and num132 >= num213 and num132 >= num231\\\n and num132 >= num312 and num132 >= num321:\n print(num132)\n elif num213 >= num132 and num213 >= num123 and num213 >= num231\\\n and num213 >= num312 and num213 >= num321:\n print(num213)\n elif num231 >= num132 and num231 >= num213 and num231 >= num123\\\n and num231 >= num312 and num231 >= num321:\n print(num231)\n elif num312 >= num132 and num312 >= num213 and num312 >= num231\\\n and num312 >= num123 and num312 >= num321:\n print(num312)\n elif num321 >= num132 and num321 >= num213 and num321 >= num231\\\n and num321 >= num312 and num321 >= num123:\n print(num321)\n elif num1 == \"0\" and num2 == \"0\" and num3 == \"0\":\n print(\"0\")", "def check_order(lst):\n order = ''\n\n prev = None\n for a in lst:\n if prev is not None and a > prev:\n if order == 'descending':\n return 'unknown'\n order = 'ascending'\n elif prev is not None and a < prev:\n if order == 'ascending':\n return 'unknown'\n order = 'descending'\n prev = a\n\n return order", "def test_token_order(self):\n tokens = [Token(1), Token(2), Token(3), Token(4)]\n tokens_equal = [Token(1), Token(1)]\n self._check_sequence_consistency(tokens)\n self._check_sequence_consistency(tokens_equal, equal=True)", "def is_sorted(x):\n l = len(x)\n for i in range(l-1):\n if x[i+1] < x[i]:\n return False\n return True", "def is_sorted(l: list):\n for idx, num in enumerate(l):\n if idx is 0:\n continue\n elif l[idx-1] <= num:\n continue\n\n return idx\n\n\n return \"SORTED\"", "def _canonical_order(node_chunk_a: node_chunk, node_chunk_b: node_chunk) -> int:\n na, prec_a, slotsA = node_chunk_a\n nb, prec_b, slotsB = node_chunk_b\n\n # compare based on node precedence\n if prec_a > prec_b:\n return -1\n elif prec_b > prec_a:\n return 1\n\n # compare based on slots\n else:\n # slots are equivalent\n if slotsA == slotsB:\n return 0\n\n # a is subset of b\n aWithoutB = slotsA - slotsB\n if not aWithoutB:\n return 1\n\n # b is subset of a\n bWithoutA = slotsB - slotsA\n if not bWithoutA:\n return -1\n\n # compare based on slots\n aMin = min(aWithoutB)\n bMin = min(bWithoutA)\n return -1 if aMin < bMin else 1", "def test_signed_sort(self):\r\n\r\n # an empty list must be returned when an empty list needs to be sorted\r\n self.assertEqual(signed_natsort([]), [])\r\n\r\n # tuples that can be sorted by 
type-casting the first element\r\n test_list = [('9', 'SampleA'), ('-1', 'SampleD'), ('7', 'SampleC'),\r\n ('-2', 'SampleE'), ('-0.11',\r\n 'SampleF'), ('17.11', 'SampleB'),\r\n ('100', 'SampleG'), ('13', 'SampleH')]\r\n expected_result = [('-2', 'SampleE'), ('-1', 'SampleD'),\r\n ('-0.11', 'SampleF'), ('7',\r\n 'SampleC'), ('9', 'SampleA'),\r\n ('13', 'SampleH'), ('17.11', 'SampleB'), ('100', 'SampleG')]\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # tuples that must be sorted alphabetically\r\n test_list = [('Cygnus', 'SampleA'), ('Cepheus', 'SampleD'),\r\n ('Auriga', 'SampleC'), ('Grus',\r\n 'SampleE'), ('Hydra', 'SampleF'),\r\n ('Carina', 'SampleB'), ('Orion', 'SampleG'), ('Lynx', 'SampleH')]\r\n expected_result = [('Auriga', 'SampleC'), ('Carina', 'SampleB'),\r\n ('Cepheus', 'SampleD'), ('Cygnus',\r\n 'SampleA'), ('Grus', 'SampleE'),\r\n ('Hydra', 'SampleF'), ('Lynx', 'SampleH'), ('Orion', 'SampleG')]\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # mixed case, tuples will be sorted alpha-numerically\r\n test_list = [('Cygnus', 'SampleA'), ('Cepheus', 'SampleD'),\r\n ('Auriga', 'SampleC'), ('Grus',\r\n 'SampleE'), ('-0.11', 'SampleF'),\r\n ('17.11', 'SampleB'), ('100', 'SampleG'), ('Lynx', 'SampleH')]\r\n expected_result = [('17.11', 'SampleB'), ('100', 'SampleG'),\r\n ('-0.11', 'SampleF'), ('Auriga',\r\n 'SampleC'), ('Cepheus', 'SampleD'),\r\n ('Cygnus', 'SampleA'), ('Grus', 'SampleE'), ('Lynx', 'SampleH')]\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # mixed case just a list\r\n test_list = ['foo', 'bar', '-100', '12', 'spam', '4', '-1']\r\n expected_result = ['4', '12', '-1', '-100', 'bar', 'foo', 'spam']\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # list of elements that can be type-casted\r\n test_list = ['0', '1', '14', '12', '-15', '4', '-1']\r\n expected_result = ['-15', '-1', '0', '1', '4', '12', '14']\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # mixed dict case\r\n test_dict = {\r\n 'foo': 'a', 'bar': 'b', '-100': '1', '12': '11', 'spam': 'q',\r\n '4': '11', '-1': 'e'}\r\n expected_result = ['4', '12', '-1', '-100', 'bar', 'foo', 'spam']\r\n\r\n output = signed_natsort(test_dict)\r\n self.assertEquals(output, expected_result)\r\n\r\n # dict where the keys can be type-casted\r\n test_dict = {\r\n '0': 'foo', '1': 'bar', '14': 'stand', '12': 'eggs', '-15': 'q',\r\n '4': 'b', '-1': 'h'}\r\n expected_result = ['-15', '-1', '0', '1', '4', '12', '14']\r\n\r\n output = signed_natsort(test_dict)\r\n self.assertEquals(output, expected_result)", "def testSortedNotes(self):\n for simple_score in self.simple_scores.values():\n notes = simple_score.sorted_notes\n assert all(notes[i].start_time <= notes[i + 1].start_time\n for i in range(len(notes) - 1))", "def test_random_100_in_order_again(bst_100_rand):\n assert tuple(bst_100_rand.in_order()) == tuple(x for x in range(100))", "def patched_cmp(self, other):\n # First: Obsolete test\n if self.obsolete != other.obsolete:\n if self.obsolete:\n return -1\n else:\n return 1\n # Work on a copy to protect original\n occ1 = sorted(self.occurrences[:])\n occ2 = sorted(other.occurrences[:])\n pos = 0\n if occ1 > occ2:\n return 1\n if occ1 < occ2:\n return -1\n # Compare context\n msgctxt = self.msgctxt or \"0\"\n othermsgctxt = other.msgctxt or \"0\"\n if msgctxt > othermsgctxt:\n 
return 1\n elif msgctxt < othermsgctxt:\n return -1\n # Compare msgid_plural\n msgid_plural = self.msgid_plural or \"0\"\n othermsgid_plural = other.msgid_plural or \"0\"\n if msgid_plural > othermsgid_plural:\n return 1\n elif msgid_plural < othermsgid_plural:\n return -1\n # Compare msgstr_plural\n msgstr_plural = self.msgstr_plural or \"0\"\n othermsgstr_plural = other.msgstr_plural or \"0\"\n if msgstr_plural > othermsgstr_plural:\n return 1\n elif msgstr_plural < othermsgstr_plural:\n return -1\n # Compare msgid\n if self.msgid > other.msgid:\n return 1\n elif self.msgid < other.msgid:\n return -1\n return 0", "def test_stability():\n from quick_sort import quick_sort\n lst = [(2, 'ab'), (1, 'ba'), (3, 'ab'), (2, 'ba'), (5, 'ab')]\n one = lst[0]\n two = lst[3]\n sort_lst = quick_sort(lst)\n assert sort_lst == [(1, 'ba'), (2, 'ab'), (2, 'ba'), (3, 'ab'), (5, 'ab')]\n assert sort_lst[1] is one\n assert sort_lst[2] is two", "def cmp(x, y):\n if x + y > y + x: return 1\n elif x + y == y + x: return 0\n else: return -1", "def is_sorted(items):\n # Check that all adjacent items are in order, return early if so\n\n # RANGE \n current = 0\n right = 1\n while right < len(items):\n if items[current] > items[right]:\n return False\n else:\n current += 1\n right += 1\n return True", "def _less_than_or_equal_to_op(spec):", "def natural_sort_comparison(value1, value2):\n return cmp(_natural_sort_key(value1), _natural_sort_key(value2))", "def __lt__(self, other):\n return other > self._cmpkey()", "def test_heap_sort(self):\n integers = heap_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_insertion(self):\n integers = insertion_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_order(self):\n\n # issue a valid query\n # Assure proper execution, and get results from quilt_history\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'out_of_order']))\n\n o = self.check_query_and_get_results3(o)\n\n # Check results\n # assure that results are in order\n l = []\n for i in xrange(1, 6):\n searchStr = \"{'timestamp': \" + str(i) + '}'\n index = o.find(searchStr)\n logging.debug(\"looking for string: \" + searchStr)\n self.assertTrue(index != -1)\n l.append(index)\n\n isSorted = all(l[i] <= l[i + 1] for i in xrange(len(l) - 1))\n self.assertTrue(isSorted)", "def assert_property_xsorted_produces_ordered_iterable(_xsorted, things, reverse):\n actual = list(_xsorted(things, reverse=reverse))\n actual = reversed(actual) if reverse else actual\n assert all(a <= b for a, b in sliding_window(2, actual))", "def is_ascending2(lst):\n for i in range(len(lst) - 1):\n if lst[i] > lst[i + 1]:\n return False\n return True", "def test03_comparison_operators(self):\n\n import _cppyy\n number = _cppyy.gbl.number\n\n assert (number(20) > number(10)) == True\n assert (number(20) < number(10)) == False\n assert (number(20) >= number(20)) == True\n assert (number(20) <= number(10)) == False\n assert (number(20) != number(10)) == True\n assert (number(20) == number(10)) == False", "def compare(*args: Any) -> int:\n length = len(args)\n\n if length == 2:\n return cmp(args[0], args[1], False)\n if length == 3:\n return cmp(args[0], args[1], args[2])\n if length == 4:\n return cmp(args[0], args[1], args[2])\n if length == 5:\n return cmp(\n args[0][args[1] : args[4] + 1], args[2][args[3] : args[4] + 1], False\n )\n if length == 6:\n return cmp(\n args[0][args[1] : args[4] + 1], args[2][args[3] : args[4] + 1], args[5]\n )\n if length == 7:\n return cmp(\n 
args[0][args[1] : args[4] + 1], args[2][args[3] : args[4] + 1], args[5]\n )\n raise Exception(\"String.compare: Unsupported number of parameters\")", "def test_number_compare(self):\n self.assertEqual(functions.number_compare(1, 1), \"Numbers are equal\")\n self.assertEqual(functions.number_compare(1, 0), \"First is greater\")\n self.assertEqual(functions.number_compare(2, 4), \"Second is greater\")", "def test_binarytree_in_order_correct_on_given(given_list, capsys):\n expected = [11, 12, 14, 18, 19, 20, 22, 31, 33, 40]\n given_list.in_order()\n out, err = capsys.readouterr()\n actual = [int(i) for i in out.split('\\n') if i != '']\n assert expected == actual", "def test_single():\n assert bubble_sort([1]) == [1]", "def test_single():\n assert bubble_sort([1]) == [1]", "def test_shell(self):\n integers = shell_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def is_sorted(items):\n # TODO: Check that all adjacent items are in order, return early if so\n for x in range(len(items)):\n if x < len(items)-1:\n if items[x+1] < items[x]:\n return False\n return True", "def test_ordered(self):\n o_list = [1, 2, 3, 4, 5]\n self.assertEqual(max_integer(o_list), 5)", "def _compile_order(self, orderings):\n to_apply = []\n for o in orderings:\n descending = False\n if o.startswith(\"-\"):\n descending = True\n o = o[1:]\n to_apply.append((o, descending))\n\n def compare(res1, res2):\n # res1 and res2 are attribute dictionaries\n # Apply each comparison in order\n # Note that we consider None to be bigger than anything else (i.e.\n # in an ascending sort, None comes after everything else)\n for attr, descending in to_apply:\n if descending:\n x, y = res2.get(attr, []), res1.get(attr, [])\n else:\n x, y = res1.get(attr, []), res2.get(attr, [])\n if x < y:\n return -1\n elif x > y:\n return 1\n return 0\n\n return compare", "def test_greaterThan(self):\n self.assertEqual(cmp(4, 0), 1)\n self.assertEqual(cmp(b\"z\", b\"a\"), 1)", "def test_binarytree_pre_order_on_given(given_list, capsys):\n expected = [20, 18, 12, 11, 14, 19, 40, 31, 22, 33]\n given_list.pre_order()\n out, err = capsys.readouterr()\n actual = [int(i) for i in out.split('\\n') if i != '']\n assert expected == actual", "def is_sorted(self):\n previous = 0 # Setting to 0 shouldn't be an issue aslong as MIN_VALUE is at least 0\n for value in self.data:\n if value < previous:\n return False\n previous = value\n return True", "def __lt__(self, other):\n return self.sequence < other.sequence", "def test_sort_all_equal():\n test_data = [1, 1, 1]\n sorted_data = bubble_sort(test_data)\n assert sorted_data == [1, 1, 1]", "def test_greaterThan(self):\n self.assertTrue(Comparable(2) > Comparable(1))\n self.assertFalse(Comparable(0) > Comparable(3))", "def test_equals(self):\n self.assertEqual(cmp(u\"a\", u\"a\"), 0)\n self.assertEqual(cmp(1, 1), 0)\n self.assertEqual(cmp([1], [1]), 0)", "def test_sort_reversed():\n sorted_data = [5, 4, 3, 2, 1]\n sorted_list = bubble_sort(sorted_data)\n\n for small, large in zip(sorted_list[:-1], sorted_list[1:]):\n assert small <= large", "def __lt__(self, other: OidValue) -> bool:\n return self.value < to_int_tuple(other)", "def cmp(x, y):\n if x == y:\n return 0\n elif x is None:\n if y is None:\n return 0\n else:\n return -1\n elif y is None:\n return 1\n else:\n # TODO: consider casting the values to string or int or floats?\n # note that this is the minimal replacement function\n return (x > y) - (x < y)", "def test_radix_sort_verbose():\n from radixsort import radixsort\n # test on 100 
lists\n for i in range(100):\n # generate random length of list\n list_length = random.randint(0, 100)\n unsorted_list = []\n for x in range(list_length):\n # generate random numbers for random length list\n unsorted_list.append(random.randint(0, 100))\n\n # test that list is sorted\n assert radixsort(unsorted_list) == sorted(unsorted_list)", "def testSort(self):\n numlist = [6,4.78,1.2,5]\n numlist.sort()\n self.assertEqual([1.2,4.78,5,6],numlist)\n \n strlist = [\"kgb\",\"mss\",\"cheka\"]\n strlist.sort()\n self.assertEqual([\"cheka\",\"kgb\",\"mss\"],strlist) \n \n # ------------ reverse sort\n numlist.sort(reverse = True)\n self.assertEqual([6,5,4.78,1.2],numlist)", "def test_string_unequal_comparisons():\n for (operands, operators) in (\n (['draft', u'draft'], [tools.assert_less]),\n (['published', u'published'], [tools.assert_less_equal, tools.assert_greater_equal]),\n (['archived', u'archived'], [tools.assert_greater]),\n ):\n for operand in operands:\n for operator in operators:\n yield (operator, operand, BlogPostStatus.published)", "def test_lt_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1.1, 'Q2.8')\n assert a < b", "def test_more_complex_versions(self):\n sorted_versions = ['1532-44349', '1534-44658', '1536-44582', '1536-44935', '1538-44874', '1538-44920']\n random_versions = ['1534-44658', '1536-44935', '1532-44349', '1538-44920', '1536-44582', '1538-44874']\n assert sorted_versions == natsort(random_versions)", "def __lt__(self, other):\r\n assert isinstance(other, Order)\r\n return self - other < 0", "def test_sorting():\n assert bubble_sort([8, 5, 3, 4, 2, 6, 1, 7]) == [1, 2, 3, 4, 5, 6, 7, 8]\n assert bubble_sort('sorted letters') == [' ', 'd', 'e', 'e', 'e', 'l', 'o',\n 'r', 'r', 's', 's', 't', 't', 't']\n assert bubble_sort(['s', 'o', 'r', 't', 'e', 'd']) == ['d', 'e', 'o', 'r',\n 's', 't']", "def test_radix(self):\n integers = radix_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def less(x1, x2):\n return compare_chararrays(x1, x2, '<', True)", "def check_valid(x2,x1):\r\n if x2 < x1 or x2==x1:\r\n return True", "def __lt__(self, other):\n return not (self.unsplitable or self.split_necessity < other.split_necessity)", "def test_cmp(self, tmpdir, treantclass):\n with tmpdir.as_cwd():\n c1 = treantclass('a')\n c2 = treantclass('b')\n c3 = treantclass('c')\n\n assert sorted([c3, c2, c1]) == [c1, c2, c3]\n assert c1 <= c2 < c3\n assert c3 >= c2 > c1", "def test_radix_sort():\n from radixsort import radixsort\n nums = [5, 3, 2, 7, 9, 4, 0, 1]\n assert radixsort(nums) == [0, 1, 2, 3, 4, 5, 7, 9]", "def compare(a,b):\r\n if a>b:\r\n return 1\r\n elif a==b:\r\n return 0\r\n else:\r\n return -1", "def check_sort(self):\n if self.list == []:\n return True\n seg_iter = iter(self.list)\n last = next(seg_iter)\n for segment in seg_iter:\n if last > segment:\n raise Exception('non trié')\n last = segment\n return True", "def eval_ordering(ordering, prefs):\n score=0\n cnt=len(ordering)\n for i in range(cnt-1):\n for j in range(i+1,cnt):\n e1,e2=ordering[i],ordering[j]\n if e1<e2:\n if (e1,e2) in prefs:\n score+=prefs[(e1,e2)]\n else:\n if (e2,e1) in prefs:\n score-=prefs[(e2,e1)]\n return score" ]
[ "0.66505706", "0.6519151", "0.64865875", "0.6440262", "0.6426011", "0.6351435", "0.63446116", "0.6320897", "0.6317372", "0.6254106", "0.6245488", "0.6240697", "0.62373954", "0.62215376", "0.62139773", "0.6196602", "0.6195784", "0.6189653", "0.6173531", "0.61667407", "0.61640793", "0.6154696", "0.6152839", "0.6136332", "0.6127491", "0.61247236", "0.6118849", "0.6087507", "0.60771143", "0.60771143", "0.6069604", "0.6066058", "0.6062663", "0.6054328", "0.6052449", "0.6046725", "0.60458577", "0.60394645", "0.603848", "0.6036046", "0.6019032", "0.60143495", "0.60079294", "0.6005291", "0.59815764", "0.5973358", "0.59657675", "0.59558004", "0.59539014", "0.5949627", "0.5942751", "0.59409374", "0.593626", "0.592195", "0.59210134", "0.59209275", "0.59120464", "0.5909308", "0.59067494", "0.58961254", "0.5888661", "0.58881944", "0.58856523", "0.5870125", "0.58655506", "0.58615345", "0.5855724", "0.5855353", "0.58497345", "0.5844161", "0.5844161", "0.5839885", "0.5829145", "0.5813196", "0.5812791", "0.58125293", "0.5812417", "0.57984644", "0.57829183", "0.57787466", "0.57774734", "0.5776723", "0.57742167", "0.5773962", "0.57687914", "0.5764324", "0.5762825", "0.57604045", "0.5744575", "0.5737428", "0.573532", "0.5733875", "0.57330924", "0.5730373", "0.5729224", "0.5721881", "0.5718559", "0.5717555", "0.5716244", "0.5709538", "0.5707537" ]
0.0
-1
Ensures proper order is preserved with multiple formats
def test_multi_template():
    data = []
    data.extend(["{}_data.json".format(i) for i in range(50)])
    data.extend(["{}_log.csv".format(i) for i in range(50)])
    data.extend(["filename_{}.py".format(i) for i in range(50)])
    data.extend(["stuff_{}.py".format(i) for i in range(50)])
    temp = data[:]
    random.shuffle(temp)
    assert data == sort(temp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _translate_fmts(self):\n fmt_info = []\n fmt_append = fmt_info.append\n \n isvalid = self._is_valid_fmt\n typlist = self._typlist\n isstrvar = self._isstrvar\n default_fmts = self._default_fmts\n \n for i, fmt in enumerate(self._fmtlist):\n fmt = fmt.strip()\n \n iscalendar = (fmt[1] == 't' or fmt[1:3] == '-t')\n \n if iscalendar or not isvalid(fmt):\n if isstrvar(i):\n wid = min(typlist[i], 10)\n fmt_append(('s', \"{{:>{}s}}\".format(wid), wid))\n continue\n else:\n fmt = default_fmts[typlist[i]]\n \n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n align, _, wid = m.group(1), m.group(2), m.group(3)\n new_align = (\"<\" if align == \"-\" \n else \"^\" if align == \"~\" else \">\")\n new = \"\".join((\"{:\", new_align, wid, \"s}\"))\n fmt_append(('s', new, int(wid)))\n elif last_char == 'H' or last_char == 'L': # binary\n fmt_append((last_char, fmt, int(fmt[1:-1])))\n elif last_char == 'x': # hexadecimal\n fmt_append(('x', fmt, 21))\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n align, _, wid, delim, prec, type, com = (m.group(1), m.group(2), \n m.group(3), m.group(4),\n m.group(5), m.group(6),\n m.group(7))\n aln = \"<\" if align == \"-\" else \">\"\n sep = \",\" if com is not None else \"\"\n if type == \"g\" and int(prec) == 0:\n new = \"\".join((\"{:\", aln, wid, sep, type, \"}\"))\n else:\n new = \"\".join((\"{:\", aln, wid, sep, \".\", prec, type, \"}\"))\n fmt_append((type, new, int(wid), delim, com))\n \n return fmt_info", "def test_match_entry_to_format(self):\n\n # matches valid entries with valid formats\n for valid_entry in test_case_data.get('valid_entries'):\n entry = [e.strip() for e in valid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertTrue(entry_dict, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [e.strip() for e in invalid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def test_is_entry_formatted(self):\n\n valid_formats = test_case_data.get('valid_formats')\n for i, valid_entry in enumerate(test_case_data.get('valid_entries')):\n entry = [value.strip() for value in valid_entry.split(',')]\n format_fields = valid_formats[i].split(',')\n valid = self.parser._is_entry_formatted(entry, format_fields)\n self.assertTrue(valid, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [value.strip() for value in invalid_entry.split(',')]\n for f in valid_formats:\n format_fields = f.split(',')\n entry_dict = self.parser._is_entry_formatted(entry, format_fields)\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def sortChoices(self):\n self.formatList.sort()", "def reset_format(self):\n ## Formatters\n self._format_setters(*self.format_set_info)\n self._format_getters(*self.format_get_info)\n self._format_joining_functions()", "def get_valid_output_format(self, idx, virt_format_list, explicit_format_list, specified_image_format):\n output_image_format = \"\"\n if self.debug_mode:\n print \"\\nwanted output image format is: \" + str(specified_image_format)\n if specified_image_format == \"VIRT\":\n virt_image_format = virt_format_list[0][idx] #Should be reduced to a unique list now (so we can index by 0)\n if self.debug_mode:\n print \"output virt resulting image 
format is: \" + virt_image_format\n output_image_format = virt_image_format.strip('VIRT->')\n if self.debug_mode:\n print \"output image format is: \" + output_image_format\n else:\n for image_format in explicit_format_list:\n if specified_image_format == image_format[idx]:\n output_image_format = specified_image_format #Should be unique value in the list\n if self.debug_mode:\n print \"(explicit) output image format is: \" + str(output_image_format)\n\n return output_image_format", "def test_format_variations(test_format, expect_format):\n format_schema = sd.DeploymentFormat()\n try:\n result_format = format_schema.deserialize(test_format)\n result_format.pop(\"$schema\", None)\n assert result_format == expect_format\n except colander.Invalid:\n pytest.fail(f\"Expected format to be valid: [{test_format}]\")", "def asformat(self, format):", "def validate_format(self):\n raise NotImplementedError()", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def _infer_undefined_format_fields(self, variant, defined_headers):\n # type: (vcfio.Variant, vcf_header_io.VcfHeader) -> Dict[str, Format]\n formats = {}\n for call in variant.calls:\n for format_key, format_value in call.info.iteritems():\n if not defined_headers or format_key not in defined_headers.formats:\n if format_key in formats:\n raise ValueError(\n 'Invalid VCF file. Duplicate FORMAT field in variant {}'.format(\n variant))\n formats[format_key] = Format(format_key,\n self._get_field_count(format_value),\n self._get_field_type(format_value),\n '') # NO_DESCRIPTION\n # No point in proceeding. All other calls have the same FORMAT.\n break\n return formats", "def check_archive_formats(formats):\n for format in formats:\n if format not in ARCHIVE_FORMATS:\n return format\n return None", "def initFormat(self):\n self.formatList = []", "def format_to_extension(self, format):", "def test_headerFormatter(self):\n cases = [\n ({'Header1': 'Value1', 'Header2': 'Value2'},\n b'Header2: Value2\\r\\nHeader1: Value1\\r\\n'),\n ]\n\n for (input, expected) in cases:\n output = imap4._formatHeaders(input)\n self.assertEqual(sorted(output.splitlines(True)),\n sorted(expected.splitlines(True)))", "def _processOutfmtArg(outfmt, stderr, gb_record_fmtdict, gb_cds_fmtdict) :\n outfmt_keys = outfmt.split(\",\")\n records_keys = set(gb_record_fmtdict.keys())\n cds_keys = set(gb_cds_fmtdict.keys())\n assert records_keys & cds_keys == set()\n if not all([x in records_keys | cds_keys for x in outfmt_keys]) :\n wrong_keys = [x for x in outfmt_keys if x not in records_keys | cds_keys]\n stderr.write(\"Bad outfmt specifier. 
You provided:\\n\")\n stderr.write(str(sorted(outfmt_keys)) + \"\\n\")\n stderr.write(\"Wrong specifier(s):\\n\")\n stderr.write(str(sorted(wrong_keys)) + \"\\n\")\n stderr.write(\"Allowed values are:\\n\")\n stderr.write(str(sorted(list(records_keys | cds_keys))) + \"\\n\")\n return outfmt_keys", "def check_validity_input_formats(input_formats):\n from invenio.search_engine import get_available_output_formats\n valid_formats = get_available_output_formats()\n\n # let's to extract the values of the available formats\n format_values = []\n for aformat in valid_formats:\n format_values.append(aformat['value'])\n\n invalid_format = ''\n for aformat in input_formats:\n if aformat.lower() not in format_values:\n invalid_format = aformat.lower()\n break\n return invalid_format", "def validate_format(self):\n return all(\n [\n self.validate_header_keyword(),\n self.validate_type_keyword(),\n self.validate_type_annotations(),\n self.validate_unique_header(),\n self.validate_against_header_count(),\n ]\n )", "def test_handle_deleted_metadata_format_deletes_metadata_format(self):\n # Arrange\n index = 2\n removed_metadata_formats = self.fixture.oai_metadata_formats[:index]\n metadata_formats_response = self.fixture.oai_metadata_formats[index:]\n metadata_formats_count = len(self.fixture.oai_metadata_formats)\n\n # Act\n oai_registry_api._handle_deleted_metadata_format(\n self.fixture.registry.id, metadata_formats_response\n )\n\n # Assert\n record_in_database = (\n oai_harvester_metadata_format_api.get_all_by_registry_id(\n self.fixture.registry.id\n )\n )\n self.assertTrue(\n len(record_in_database) == (metadata_formats_count - index)\n )\n self.assertTrue(\n x.metadata_prefix\n not in [y.metadata_prefix for y in record_in_database]\n for x in removed_metadata_formats\n )", "def test_xyz_file_format_to_xyz(self):\n xyz1 = converter.xyz_file_format_to_xyz(xyz_file=self.xyz1['file'])\n xyz2 = converter.xyz_file_format_to_xyz(xyz_file=self.xyz2['file'])\n xyz6 = converter.xyz_file_format_to_xyz(xyz_file=self.xyz6['file'])\n self.assertEqual(xyz1, self.xyz1['dict'])\n self.assertEqual(xyz2, self.xyz2['dict'])\n self.assertEqual(xyz6, self.xyz6['dict'])", "def reformat():\n toolkit.reformat()", "def test_validation_get_valid_formats(self):\n self.assertIsInstance(api.validation.fetch_formats(), dict)", "def test_available_output_formats():\n assert set([\"GTiff\", \"PNG\", \"PNG_hillshade\", \"GeoJSON\"]).issubset(\n set(available_output_formats())\n )", "def format(self):\n ...", "def initFormat(self):\n pass", "def getFormats(self):\n return self.formats", "def get_formats(self):\n return tuple(self._names.keys())", "def _set_real_format(self, fmt):\n # try to use the _nomax variant if available\n if not self._max and fmt + '_nomax' in self.formats:\n self._format = self.formats[fmt + '_nomax']\n elif fmt in self.formats:\n self._format = self.formats[fmt]\n else:\n self._format = fmt\n\n self._format_line_count = self._format.count('\\n')", "def reformat(self):\n\t\told_path = os.path.join( self.path, self.init_str )\n\t\tnew_path = os.path.join( self.path, self.reorder() )\n\t\tos.rename(old_path,new_path)", "def formats():\n return _FORMATS", "def separate_virt_from_explicit_formats(self, compatible_output_formats):\n virt_format_list = []\n explicit_format_list = []\n for output_image_format in compatible_output_formats:\n if output_image_format[0][0] == 'V':\n virt_format_list.append(output_image_format)\n else:\n explicit_format_list.append(output_image_format)\n\n return [explicit_format_list, 
virt_format_list]", "def get_format(self):\n pass", "def on_format_changed(self):\n\n format = self.format.currentText()\n compressions = lib.list_compressions(format)\n self.compression.clear()\n self.compression.addItems(compressions)", "def register_format(recipe):\n afr = AFMFormatRecipe(recipe)\n formats_available.append(afr)\n # suffix\n if afr.suffix not in formats_by_suffix:\n formats_by_suffix[afr.suffix] = []\n formats_by_suffix[afr.suffix].append(afr)\n # mode\n if afr.mode not in formats_by_mode:\n formats_by_mode[afr.mode] = []\n formats_by_mode[afr.mode].append(afr)\n # supported extensions\n if afr.suffix not in supported_extensions: # avoid duplucates\n supported_extensions.append(afr.suffix)\n supported_extensions.sort()", "def reformat(ctx):\n pass", "def test_unsupported_format():\n formatter = TabularOutputFormatter()\n\n with pytest.raises(ValueError):\n formatter.format_name = \"foobar\"\n\n with pytest.raises(ValueError):\n formatter.format_output((), (), format_name=\"foobar\")", "def fix_corpus_format(corpus):\n import copy\n fixed_format = copy.copy(corpus)\n testing = corpus['testing']\n training = corpus['training']\n validation = corpus['validation']\n del fixed_format['testing']\n del fixed_format['training']\n del fixed_format['validation']\n fixed_format['partition'] = {\n \"testing\": testing,\n \"training\": training,\n \"validation\": validation,\n }\n return fixed_format", "def _file_format_adapter(self):\n raise NotImplementedError", "def format_(self):\n return self.set_format or self.default_format or self.FALLBACK_FORMAT", "def _convert_formats(self, meta_dict, filename=None):\n meta_dict['SDATE'] = utils.get_format_from_datetime_obj(\n meta_dict['TIMESTAMP'], '%Y-%m-%d')\n meta_dict['STIME'] = utils.get_format_from_datetime_obj(\n meta_dict['TIMESTAMP'], '%H:%M')\n\n # meta_dict['SERNO'] = str(self._running_serno).zfill(4)\n meta_dict.setdefault('PROJ', 'NOS')\n meta_dict.setdefault('ORDERER', 'HAV')\n meta_dict.setdefault('SLABO', 'SMHI')\n meta_dict.setdefault('ALABO', 'SMHI')\n meta_dict.setdefault('POSYS', 'GPS')\n if filename:\n fid_info = self._extract_filename_information(filename)\n for item, value in fid_info.items():\n meta_dict[item] = value", "def _define_formats(self, workbook):\n self.format_title_main_center = workbook.add_format({\n 'bold': True,\n 'align': 'left',\n 'font_size': 14,\n 'border': True,\n 'font_name':'Arial',\n 'align': 'Center',\n 'bg_color': '#D8D7D7',\n })\n self.format_title = workbook.add_format({\n 'align': 'left',\n 'font_size': 12,\n 'border': True,\n 'font_name':'Arial',\n 'text_wrap': True\n })\n self.format_title_noborder = workbook.add_format({\n 'align': 'left',\n 'font_size': 12,\n 'border': False,\n 'font_name':'Arial'\n })\n self.format_title_noborder_bold = workbook.add_format({\n 'align': 'left',\n 'font_size': 12,\n 'bold': True,\n 'border': False,\n 'font_name':'Arial'\n })\n self.format_title_center = workbook.add_format({\n 'align': 'left',\n 'font_size': 12,\n 'border': True,\n 'align': 'Center',\n 'font_name':'Arial'\n })\n self.format_title_bold = workbook.add_format({\n 'align': 'left',\n 'font_size': 12,\n 'border': True,\n 'font_name':'Arial',\n 'bold': True,\n })\n self.format_title_center_bold = workbook.add_format({\n 'align': 'left',\n 'font_size': 12,\n 'border': True,\n 'font_name':'Arial',\n 'align': 'Center',\n 'bold': True,\n })\n self.format_title_number = workbook.add_format({\n 'align': 'right',\n 'font_size': 12,\n 'border': True,\n 'font_name':'Arial',\n 'num_format': 
'#,##0.00',\n })\n self.format_title_number_bold = workbook.add_format({\n 'align': 'right',\n 'font_size': 12,\n 'border': True,\n 'font_name':'Arial',\n 'num_format': '#,##0.00',\n 'bold': True,\n 'bg_color': '#D8D7D7',\n })\n \n self.format_header = workbook.add_format({\n 'bold': True,\n 'border': True,\n 'font_name':'Arial',\n 'font_size': 12,\n 'align': 'Center',\n 'bg_color': '#D8D7D7', \n })\n\n self.merge_format = workbook.add_format({\n 'bold': 1,\n 'border': 1,\n 'align': 'center',\n 'valign': 'vcenter',\n })", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def test_draftN_format_checker(self):\n\n with self.assertWarns(DeprecationWarning) as w:\n from asdf._jsonschema import draft202012_format_checker # noqa\n\n self.assertIs(\n draft202012_format_checker,\n validators.Draft202012Validator.FORMAT_CHECKER,\n )\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.draft202012_format_checker is \",\n ),\n msg=w.warning,\n )\n\n with self.assertWarns(DeprecationWarning) as w:\n from asdf._jsonschema import draft201909_format_checker # noqa\n\n self.assertIs(\n draft201909_format_checker,\n validators.Draft201909Validator.FORMAT_CHECKER,\n )\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.draft201909_format_checker is \",\n ),\n msg=w.warning,\n )\n\n with self.assertWarns(DeprecationWarning) as w:\n from asdf._jsonschema import draft7_format_checker # noqa\n\n self.assertIs(\n draft7_format_checker,\n validators.Draft7Validator.FORMAT_CHECKER,\n )\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.draft7_format_checker is \",\n ),\n msg=w.warning,\n )\n\n with self.assertWarns(DeprecationWarning) as w:\n from asdf._jsonschema import draft6_format_checker # noqa\n\n self.assertIs(\n draft6_format_checker,\n validators.Draft6Validator.FORMAT_CHECKER,\n )\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.draft6_format_checker is \",\n ),\n msg=w.warning,\n )\n\n with self.assertWarns(DeprecationWarning) as w:\n from asdf._jsonschema import draft4_format_checker # noqa\n\n self.assertIs(\n draft4_format_checker,\n validators.Draft4Validator.FORMAT_CHECKER,\n )\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.draft4_format_checker is \",\n ),\n msg=w.warning,\n )\n\n with self.assertWarns(DeprecationWarning) as w:\n from asdf._jsonschema import draft3_format_checker # noqa\n\n self.assertIs(\n draft3_format_checker,\n validators.Draft3Validator.FORMAT_CHECKER,\n )\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Accessing asdf._jsonschema.draft3_format_checker is \",\n ),\n msg=w.warning,\n )\n\n with 
self.assertRaises(ImportError):\n from asdf._jsonschema import draft1234_format_checker # noqa", "def test_available_input_formats():\n assert set([\"Mapchete\", \"raster_file\", \"vector_file\"]).issubset(\n set(available_input_formats())\n )", "def test_export_evokeds_unsupported_format(fmt, ext):\n evoked = read_evokeds(fname_evoked)\n errstr = fmt.lower() if fmt != \"auto\" else \"vhdr\"\n with pytest.raises(ValueError, match=f\"Format '{errstr}' is not .*\"):\n export_evokeds(f\"output.{ext}\", evoked, fmt=fmt)", "def reorder(self):\n\t\tself.version = \".\".join(tuple(self.l_version))\t\t\n\t\tself.audio = \".\".join(tuple(self.l_audio))\n\t\ttuplz = (self.release_year , self.version , self.language, self.quality , self.src_rip ,self.audio , self.codec + '-' + self.encoder, self.extension)\n\t\tstrz = self.title \n\t\tfor elem in tuplz:\n\t\t\tif elem != \"\":\n\t\t\t\tstrz+=\".\"+elem\n\t\t#strz=\".\".join((self.title ,self.release_year , self.version , self.language, self.quality , self.src_rip ,self.audio , self.codec + '-' + self.encoder, self.extension))\n\t\treturn strz", "def GetCaptureFileFormats(self): # real signature unknown; restored from __doc__\n pass", "def _define_formats(self, workbook):\n self.format_title = workbook.add_format({\n 'bold': True,\n 'align': 'center',\n 'font_size': 12,\n 'font': 'Arial',\n 'border': False\n })\n self.format_header = workbook.add_format({\n 'bold': True,\n 'font_size': 10,\n 'align': 'center',\n 'font': 'Arial',\n #'border': True\n })\n self.content_header = workbook.add_format({\n 'bold': False,\n 'font_size': 10,\n 'align': 'center',\n 'border': True,\n 'font': 'Arial',\n })\n self.content_header_date = workbook.add_format({\n 'bold': False,\n 'font_size': 10,\n 'border': True,\n 'align': 'center',\n 'font': 'Arial',\n })\n self.line_header = workbook.add_format({\n 'bold': True,\n 'font_size': 10,\n 'align': 'center',\n 'top': True,\n 'bottom': True,\n 'font': 'Arial',\n })\n self.line_header_light = workbook.add_format({\n 'bold': False,\n 'font_size': 10,\n 'align': 'center',\n 'text_wrap': True,\n 'font': 'Arial',\n 'valign': 'top'\n })\n self.line_header_light_date = workbook.add_format({\n 'bold': False,\n 'font_size': 10,\n 'align': 'center',\n 'font': 'Arial',\n })\n self.line_header_light_initial = workbook.add_format({\n 'italic': True,\n 'font_size': 10,\n 'align': 'center',\n 'bottom': True,\n 'font': 'Arial',\n 'valign': 'top'\n })\n self.line_header_light_ending = workbook.add_format({\n 'italic': True,\n 'font_size': 10,\n 'align': 'center',\n 'top': True,\n 'font': 'Arial',\n 'valign': 'top'\n })", "def makeNamesFromFormats(formats):\n i = getIter(formats)\n if not i:\n return\n\n try:\n c = 0\n item = i.next()\n while item:\n c = c +1\n name = 'c%s' % c\n if isinstance(item, str):\n yield name\n else:\n l = []\n for a in makeNamesFromFormats(item):\n l.append(a)\n yield (name, l)\n item = i.next()\n except StopIteration:\n pass", "def convert_format(self, new_format):\n if new_format not in [0, 1, 2, 3]:\n raise ValueError(\"Unknown format specified\")\n\n inp_format = new_format\n if inp_format == 3:\n new_format = 2\n\n for block in self.frd.blocks:\n if hasattr(block, 'format'):\n block.format = new_format\n\n self.frd.node_block.format = inp_format", "def test_xyz_to_xyz_file_format(self):\n xyzf1 = converter.xyz_to_xyz_file_format(xyz_dict=self.xyz1['dict'], comment='test methane xyz conversion')\n xyzf2 = converter.xyz_to_xyz_file_format(xyz_dict=self.xyz2['dict'], comment='test xyz2')\n xyzf6 = 
converter.xyz_to_xyz_file_format(xyz_dict=self.xyz6['dict'], comment='test xyz6')\n self.assertEqual(xyzf1, self.xyz1['file'])\n self.assertEqual(xyzf2, self.xyz2['file'])\n self.assertEqual(xyzf6, self.xyz6['file'])", "def _test_output_formatting_func(self, sample: Any):\n try:\n if not type(sample) == iter:\n self._formatting_func_return_types(format=sample)\n return True\n except Exception:\n raise ValueError(\n f\"formatting_func must return {self._formatting_func_return_types.__annotations__['format']}, not {type(sample)}\"\n )", "def set_unique_output_image_format_list(self, graph, node_info, output_image_specified_format_list, virt_format_list, explicit_format_list, PIN_ID, PIN_FORMAT):\n success = True\n for idx, specified_image_format in enumerate(output_image_specified_format_list):\n image_node = self.get_node_with_id(graph, node_info.output_image_node_ids[idx])\n output_image_format = self.get_valid_output_format(idx, virt_format_list, explicit_format_list, specified_image_format)\n if output_image_format == \"\":\n parse_common.set_text_on_node(self.validation_output_graph, image_node, \"Output image\\nformat\\n\" + specified_image_format + \"\\nerror.\", 'Red', False)\n success = False\n else:\n parse_common.set_text_on_node(self.validation_output_graph, image_node, output_image_format, 'Green', False)\n\n #Add format and image node id to processed lists.(indexing is the same as for the node_info still)\n if node_info.output_image_node_ids[idx] not in PIN_ID:\n PIN_ID.append(node_info.output_image_node_ids[idx])\n PIN_FORMAT.append(output_image_format)\n else:\n raise NameError('Duplicate ids in PIN_ID list.')\n\n return success", "def model_with_ordered_bytes(model_types, valid_bytes_128_before, valid_bytes_128_after,\n valid_bytes_80_before, valid_bytes_80_after,\n valid_bytes_48_before, valid_bytes_48_after):\n if model_types in (ulid.MemoryView, ulid.ULID):\n return model_types, valid_bytes_128_before, valid_bytes_128_after\n if model_types == ulid.Randomness:\n return model_types, valid_bytes_80_before, valid_bytes_80_after\n if model_types == ulid.Timestamp:\n return model_types, valid_bytes_48_before, valid_bytes_48_after", "def _phot_format_for_save(self, names = ('MJD', 'flux', 'flux_err', 'filter'), formats = ('.3f','.5g', '.5g', ''),\n filters = False, verbose = False, sort=False):\n\n\n if sort:\n save_table = self.phot[names]\n save_table = save_table[save_table.argsort()]\n else:\n save_table = self.phot[names]\n\n for z in zip(names, formats):\n save_table[z[0]].format = z[1]\n\n if filters:\n save_table = save_table[np.in1d(save_table[\"filter\"], filters)]\n\n if verbose:\n print(save_table)\n return save_table", "def format(self, data):", "def test_enforce_iterable():\n formatter = TabularOutputFormatter()\n loremipsum = (\n \"lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod\".split(\n \" \"\n )\n )\n\n for format_name in formatter.supported_formats:\n formatter.format_name = format_name\n try:\n formatted = next(formatter.format_output(zip(loremipsum), [\"lorem\"]))\n except TypeError:\n assert False, \"{0} doesn't return iterable\".format(format_name)", "def addIfMissing(self, format):\n self.setdefault(format.name, format)", "def test_mediatype_io_format_references(self):\n ns_json, type_json = get_cwl_file_format(CONTENT_TYPE_APP_JSON)\n namespaces = dict(list(ns_json.items()))\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n 
\"inputs\": [\n {\n \"id\": \"wps_format_mimeType\",\n \"formats\": [\n {\n \"mimeType\": CONTENT_TYPE_APP_JSON,\n \"default\": True,\n }\n ]\n },\n {\n \"id\": \"wps_format_mediaType\",\n \"formats\": [\n {\n \"mediaType\": CONTENT_TYPE_APP_JSON,\n \"default\": True,\n }\n ]\n },\n ],\n \"outputs\": [\n {\n \"id\": \"wps_format_mimeType\",\n \"formats\": [{\"mediaType\": CONTENT_TYPE_APP_JSON}],\n },\n {\n \"id\": \"wps_format_mediaType\",\n \"formats\": [{\"mediaType\": CONTENT_TYPE_APP_JSON}],\n },\n ],\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\n \"unit\": {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": [\n {\n \"id\": \"wps_format_mimeType\",\n \"type\": \"File\",\n \"format\": type_json,\n },\n {\n \"id\": \"wps_format_mediaType\",\n \"type\": \"File\",\n \"format\": type_json,\n },\n ],\n \"outputs\": [\n {\n \"id\": \"wps_format_mimeType\",\n \"type\": \"File\",\n \"format\": type_json,\n },\n {\n \"id\": \"wps_format_mediaType\",\n \"type\": \"File\",\n \"format\": type_json,\n },\n ],\n \"$namespaces\": namespaces\n }\n }]\n }\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n assert proc[\"inputs\"][0][\"id\"] == \"wps_format_mimeType\"\n assert proc[\"inputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert proc[\"inputs\"][1][\"id\"] == \"wps_format_mediaType\"\n assert proc[\"inputs\"][1][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert proc[\"outputs\"][0][\"id\"] == \"wps_format_mimeType\"\n assert proc[\"outputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert proc[\"outputs\"][1][\"id\"] == \"wps_format_mediaType\"\n assert proc[\"outputs\"][1][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n\n desc = self.describe_process(self._testMethodName, describe_schema=\"OGC\")\n assert desc[\"inputs\"][\"wps_format_mimeType\"][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert desc[\"inputs\"][\"wps_format_mediaType\"][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert desc[\"outputs\"][\"wps_format_mimeType\"][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert desc[\"outputs\"][\"wps_format_mediaType\"][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON", "def extension_to_format(self, extension):", "def flattenFormats(formats, check=False):\n i = getIter(formats)\n if not i:\n return\n\n try:\n item = i.next()\n while item:\n if isinstance(item, str):\n yield normalize_format(item)\n elif isinstance(item, list) or isinstance(item, tuple):\n for c in flattenFormats(item, check):\n yield c\n else:\n if check:\n yield None\n item = i.next()\n except StopIteration:\n pass", "def check_format(self, format, row):\n self.errors = []\n\n repo_name = row['repo_name']\n\n for key in [\"format\", \"modified\", \"size\", \"url\", \"signature\"]:\n if key not in format:\n self.log_error(\"Format container for '{0}' doesn't have '{1}'\".format(repo_name, key))\n if 'url' not in format or 'signature' not in format:\n return self.errors\n if not format['url'] or not self._url_exists(format['url']):\n self.log_error(\"{0}: url '{1}' does not exist\".format(repo_name, format['url']))\n valid_hosts = [self.cdn_bucket, self.api_bucket]\n url_info = urlparse.urlparse(format['url'])\n # TRICKY: only validate signatures on our servers\n if url_info.hostname in valid_hosts:\n if not format['signature']:\n self.log_error(\"{0}: url '{1}' has not 
been signed yet\".format(repo_name, format['url']))\n elif not self._url_exists(format['signature']):\n self.log_error(\"{0}: signature '{1}' does not exist\".format(repo_name, format['signature']))\n\n if 'chapters' in format and len(format['chapters']):\n # check format chapters\n for chapter in format['chapters']:\n self._check_format_chapter(chapter, row)\n\n return self.errors", "def test_noformat_tags():\n format = Format(lambda s: s.lower())\n xml = '<%s>Hello, World!</%s>'\n format_tags = 'address div h1 p quote span'.split()\n noformat_tags = 'code kbd math pre script textarea'.split()\n for tag in format_tags + noformat_tags:\n x = xml % (tag, tag)\n s = serialize(x, format)\n if tag in format_tags:\n x = x.lower()\n assert s.endswith(x)", "def test_friendly_exception_formatting_multiple_exceptions():\n ex1 = InsufficientCorrectSignatures(1, 2, {'6ouriXMZkLeHsuXrN1X1fd': '3GoEPiwhJUjALzrXmmE9tFTXAi7Emv8Y8jjSxQyQB'})\n ex2 = InsufficientSignatures(1, 3)\n ex2.__cause__ = ex1\n ex3 = SigningException()\n ex3.__cause__ = ex2\n\n expected = '{} [caused by {} [caused by {}]]'.format(ex3, ex2.reason, ex1.reason)\n formatted_exception = friendlyEx(ex3)\n\n assert formatted_exception == expected", "def test_augmentsXML(self):\n fileName = self.mktemp()\n fp = FilePath(fileName)\n fp.setContent(oldAugmentsFormat)\n upgradeAugmentsXML(fp)\n self.assertEquals(fp.getContent(), newAugmentsFormat)", "def test_incorrect_format_1(self):\n changelog = changelog_temp.format(\n before_changelog=\"## [Not yet released]\\n\\n### Added\\n\\n- Added a new feature\\n\"\n )\n with pytest.raises(ValueError):\n parse_changelog(changelog)", "def _fix_fmts(self, labname, mapping):\n default_fmt_widths = self._default_fmt_widths\n \n indexes = [i for i in range(self._nvar) if self._lbllist[i] == labname]\n if indexes == []: return\n lab_size = max([len(v) for k, v in mapping.items()])\n fmtlist = self._fmtlist\n typlist = self._typlist\n isstrvar = self._isstrvar\n for i in indexes:\n if isstrvar(i):\n continue # string values should not be labeled\n old_fmt = fmtlist[i]\n # check match agains numerical format\n match = NUM_FMT_RE.match(old_fmt)\n if match:\n fmt_width = int(match.group(3))\n if fmt_width < lab_size:\n prefix = ('%' + (match.group(1) or '') + \n (match.group(2) or ''))\n suffix = (match.group(4) + match.group(5) + \n match.group(6) + (match.group(7) or ''))\n new_fmt = prefix + str(lab_size) + suffix\n fmtlist[i] = new_fmt\n self._changed = True\n elif TIME_FMT_RE.match(old_fmt) or TB_FMT_RE.match(old_fmt):\n continue\n else: \n # Here, some garbled format must have been entered. 
\n # More effort could be made to identify intended format, \n # but instead we'll just paint over it.\n fmt_width = default_fmt_widths[typlist[i]]\n fmtlist[i] = '%' + str(max((lab_size,fmt_width))) + '.0g'\n self._changed = True", "def unsuported_format(self, msg):\n raise UnsupportedError(self.file.name+\" linker map format not supported by parser:\\n \"+ msg)", "def getFormatsFromDescr(descr):\n i = getIter(descr)\n if not i:\n return\n\n try:\n item = i.next()\n while item:\n item1 = item[1]\n if isinstance(item1, str):\n yield normalize_format(item1)\n else:\n l = []\n for j in getFormatsFromDescr(item1):\n l.append(j)\n yield l\n item = i.next()\n except StopIteration:\n pass", "def initFormat(self):\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList\n if sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '", "def test_default_read():\n # If new data formats are added to preprocess, they need to be tested\n tested_data_formats = [\"ASCII\", \"SU\", \"SAC\"]\n\n preprocess = Default()\n assert(set(tested_data_formats) ==\n set(preprocess._obs_acceptable_data_formats))\n\n st1 = preprocess.read(os.path.join(TEST_DATA, \"AA.S0001.BXY.semd\"),\n data_format=\"ascii\")\n\n st2 = preprocess.read(os.path.join(TEST_DATA, \"Uy_file_single_d.su\"),\n data_format=\"su\")\n\n st3 = preprocess.read(os.path.join(TEST_DATA, \"AA.S0001.BXY.sac\"),\n data_format=\"sac\")\n\n assert(st1[0].stats.npts == st2[0].stats.npts)\n assert(st3[0].stats.npts == st2[0].stats.npts)", "def _verifyParsing(self):\n for attrname, attr in self.__dict__.items():\n if attrname.endswith('records') and iterable(attr):\n ts = get_record_timestamps(attr)\n if not issorted(ts):\n print('Sorting %s' % attrname)\n if type(attr) == list:\n attr = list(np.asarray(attr)[ts.argsort()])\n else:\n attr = attr[ts.argsort()]\n ts = get_record_timestamps(attr)\n assert issorted(ts)\n self.__dict__[attrname] = attr # update", "def get_format_preserving(self, get_format_preserving):\n\n self._get_format_preserving = get_format_preserving", "def update_normalization_order(self):\n self._cache[\"input\"][\"order\"] = int(self.order.currentText())\n self.reset_input_style_defaults()\n self.fit_continuum(True)\n self.draw_continuum(True)\n return None", "def _fix_args(self, mdsfmt, orig_args):\n parts, args = [], []\n for arg in orig_args:\n head, sep, mdsfmt = mdsfmt.partition(self.mdsplaceholder)\n parts.append(head)\n if len(sep) == 0:\n break\n if arg is None:\n sep = '*'\n else:\n args.append(arg)\n parts.append(sep)\n\n parts.append(mdsfmt)\n mdsfmt = ''.join(parts)\n return mdsfmt, args", "def _format(self, groups):\n return [self._group_model(group) for group in self._sort(groups)]", "def guess_import_format(fname):\n\n if fname is None:\n return None, None, None, None\n\n extension = _get_extension(fname)\n\n content_encoding = _COMPRESSION_EXTENSIONS.get(extension)\n if content_encoding:\n # remove encoding extension\n extension = _get_extension(fname[: -len(extension) - 1])\n\n # get content type\n info = _IMPORT_EXTENSIONS.get(extension)\n\n content_type = info[0] if info else None\n input_type = info[1] if info else None\n separator = info[2] if info else None\n\n return content_encoding, content_type, input_type, separator", "def getConverter( format ):\n\n data = set(format.split(\"-\"))\n\n if \"one\" in data:\n if \"forward\" in data:\n if \"closed\" in data:\n return 
__one_forward_closed \n else:\n return __one_forward_open\n else:\n if \"closed\" in data:\n return __one_both_closed\n else:\n return __one_both_open\n else:\n if \"forward\" in data:\n if \"closed\" in data:\n return __zero_forward_closed\n else:\n return __zero_forward_open\n else:\n if \"closed\" in data:\n return __zero_both_closed\n else:\n return __zero_both_open", "def _trans_format(self):\n config_dict = vars(self._config)\n for item, value in config_dict.items():\n if value == 'None':\n config_dict[item] = None\n elif isinstance(value, str) and is_number(value):\n if value.isdigit():\n value = int(value)\n else:\n value = float(value)\n config_dict[item] = value", "def _validate_format(format_type):\n if format_type not in GeopandasWriter.formats:\n raise ValueError('Unsupported file format.')\n\n return True", "def update_placeholder_formats(self, format_string, placeholder_formats):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if (\n token.group(\"placeholder\")\n and (not token.group(\"format\"))\n and token.group(\"key\") in placeholder_formats\n ):\n output.append(f\"{{{token.group('key')}{placeholder_formats[token.group('key')]}}}\")\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)", "def path_formats():\n return set(path_loaders.keys())", "def _out_order(self, fname):\r\n # t = 1\r\n orderDict = {}\r\n order = []\r\n readWells = False\r\n lastBlock = False\r\n addOrder = False\r\n with open(fname, \"r\") as fp:\r\n for line in fp:\r\n item = line.split()\r\n if readWells:\r\n if lastBlock:\r\n line = line.split('++')[0]\r\n addOrder = True\r\n lastBlock = False\r\n item = list(map(str.strip, line.split('+')))\r\n item = [e.split() for e in list(filter(None, item))]\r\n order.extend([w[1] for w in item])\r\n readWells = False\r\n if addOrder:\r\n orderDict[t] = order\r\n order = []\r\n addOrder = False\r\n # t += 1\r\n elif len(item) > 0:\r\n head = ''.join(item[2:])\r\n if 'GEMFIELDSUMMARY' in head:\r\n t = item[1]\r\n\r\n elif 'No.' 
in line and 'Name' in line and '+' in line:\r\n if '++' in line:\r\n lastBlock = True\r\n readWells = True\r\n next(fp)\r\n continue\r\n return orderDict", "def get_existing_file_format(data, format):\n if format in XLS_EXTENSIONS:\n existing_file_format = data.name.split(\".\")[-1]\n return existing_file_format\n return format", "def _headercorrected(hdr):\n # COM*** -> COMMENT\n i = 1\n while 'COM%03d' % i in hdr:\n value = hdr['COM%03d' % i]\n comment = hdr.cards['COM%03d' % i].comment\n hdr['COMMENT'] = '[%s] %s' % (comment, value)\n del hdr['COM%03d' % i]\n i += 1\n # HIST*** -> HISTORY\n i = 1\n while 'HIST%03d' % i in hdr:\n value = hdr['HIST%03d' % i]\n comment = hdr.cards['HIST%03d' % i].comment\n hdr['HISTORY'] = '%s (%s)' % (value, comment)\n del hdr['HIST%03d' % i]\n i += 1\n # ORIGIN -> FROM\n if 'ORIGIN' in hdr.keys():\n hdr.rename_keyword('ORIGIN', 'FROM')\n if 'ORIGIN_V' in hdr.keys():\n hdr.rename_keyword('ORIGIN_V', 'FROM_V')\n # SOURCE_V -> FORMAT\n if 'SOURCE_V' in hdr.keys():\n hdr.rename_keyword('SOURCE_V', 'FORMAT')\n # SRC_VERS -> SRC_V\n if 'SRC_VERS' in hdr.keys():\n hdr.rename_keyword('SRC_VERS', 'SRC_V')", "def renameFormats(self, nameDict):\n for item in globalref.docRef.root.descendantGen():\n item.formatName = nameDict.get(item.formatName, item.formatName)", "def setup_fmt(self, ctx):\n ctx.implicit_errors = False", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def is_input_order_important(self):", "def _determine_format(self, request):\n return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)", "def create_compatible_io_lists(self, node, valid_input_formats, valid_output_formats, input_image_format_list):\n dim_check_ok = True\n compatible_input_formats = []\n compatible_output_formats = []\n for idx, item in enumerate(valid_input_formats):\n if self.equal_ignore_order(input_image_format_list, item):\n compatible_input_formats.append(input_image_format_list)\n compatible_output_formats.append(valid_output_formats[idx])\n\n #Length checks for io image formats. 
output must have at least two entries, one explicit and one virtual.\n if len(compatible_input_formats) < 1:\n parse_common.set_text_on_node(self.validation_output_graph, node, \"Input image format not valid.\", 'Red', True)\n dim_check_ok = False\n elif len(compatible_output_formats) < 2:\n parse_common.set_text_on_node(self.validation_output_graph, node, \"No compatible output image format found.\", 'Red', True)\n dim_check_ok = False\n\n return [compatible_input_formats, compatible_output_formats, dim_check_ok]", "def testDMARCMixedFormatting(self):\n examples = [\n \"v=DMARC1;p=ReJect\",\n \"v = DMARC1;p=reject;\",\n \"V=DMARC1;p=reject;\"\n ]\n\n for example in examples:\n parsed_record = checkdmarc.parse_dmarc_record(example, \"\")\n self.assertIsInstance(parsed_record, OrderedDict)", "def __normalize(self, ctx: commands.Context, format: str) -> str:\n\t\t# convert to lowercase\n\t\tlower_format = format.lower()\n\t\t# check if inputted format is recognized\n\t\tif lower_format in self.formats:\n\t\t\treturn lower_format\n\t\t# check for aliases\n\t\telif lower_format in self.aliases:\n\t\t\treturn self.aliases[lower_format]\n\t\t# format is not recognized\n\t\telse:\n\t\t\traise FriendlyError(\n\t\t\t\tf\"'{format}' is not a recognized format.\", ctx.channel, ctx.author\n\t\t\t)", "def test_invalid_to_output_format(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"invalid_to_output_format.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\n \"Output format type must be either html or html5.\", message\n )", "def test_format_files(self):\n shutil.copytree(\"testimages/\", \"testimages_to_format/\")\n os.chdir(\"testimages_to_format\")\n self.vimiv.quit()\n self.init_test([\"arch_001.jpg\"])\n self.vimiv[\"fileextras\"].format_files(\"formatted_\")\n files = [fil for fil in os.listdir() if \"formatted_\" in fil]\n files = sorted(files)\n expected_files = [\"formatted_001.jpg\", \"formatted_002\",\n \"formatted_003.bmp\", \"formatted_004.svg\",\n \"formatted_005.tiff\", \"formatted_006.png\"]\n self.assertEqual(files, expected_files)\n os.chdir(\"..\")\n # Should not work without a path\n self.vimiv.paths = []\n self.vimiv[\"fileextras\"].format_files(\"formatted_\")\n self.check_statusbar(\"INFO: No files in path\")\n # Should not work in library\n self.vimiv[\"library\"].focus(True)\n self.vimiv[\"fileextras\"].format_files(\"formatted_\")\n self.check_statusbar(\"INFO: Format only works on opened image files\")", "def getOrder(self, original):\n default = original.get('default', None)\n if default:\n index = default.index('image')\n if IMAGE_ALT_FIELD_NAME in default:\n default.remove(IMAGE_ALT_FIELD_NAME)\n default.insert(index+1, IMAGE_ALT_FIELD_NAME)\n return original", "def test_format_mapping_file(self):\r\n headers = ['SampleID', 'col1', 'col0', 'Description']\r\n samples =\\\r\n [['bsample', 'v1_3', 'v0_3', 'd1'],\r\n ['asample', 'aval', 'another', 'd2']]\r\n comments = ['this goes after headers', 'this too']\r\n self.assertEqual(format_mapping_file(headers, samples, comments),\r\n example_mapping_file)\r\n # need file or stringIO for roundtrip test\r\n # roundtrip = parse_mapping_file(format_mapping_file(headers,samples,comments))\r\n # 
self.assertEqual(roundtrip, [headers,samples,comments])\r", "def reset_structure(self, format_structure):\n assert(format_structure in pos_structure)\n _, aux1, aux2, aux3 = self.format_set_info\n self.format_set_info = format_structure, aux1, aux2, aux3\n self.reset_format()", "def extension (formatStr):\n assert False, \"TODO:\"", "def test_allow_unknown():\n template = 'name=\"{name}\" value=\"{value}\"'\n fmt = FormatTemplate(remove_unused=False)\n result = fmt(template)\n assert result == template" ]
[ "0.59663427", "0.5793017", "0.5785299", "0.57808274", "0.5741465", "0.5710367", "0.5705475", "0.5698985", "0.55878264", "0.5578431", "0.5562306", "0.55582625", "0.55492294", "0.55001897", "0.5494213", "0.5479568", "0.5436687", "0.5435134", "0.53975886", "0.53861207", "0.5359758", "0.53524786", "0.53389436", "0.5336767", "0.5322673", "0.52859336", "0.52674663", "0.5264634", "0.5256397", "0.52435094", "0.5238502", "0.523631", "0.5222628", "0.5218404", "0.5207744", "0.51928693", "0.5179071", "0.5177165", "0.5166276", "0.51563036", "0.5148043", "0.51428604", "0.5123909", "0.5119144", "0.5117274", "0.5115578", "0.50978774", "0.5094499", "0.5092104", "0.5082305", "0.50711536", "0.5065587", "0.50549775", "0.5052446", "0.50520486", "0.5051356", "0.504739", "0.50349766", "0.5029338", "0.5026998", "0.50265115", "0.5016377", "0.50150865", "0.50063807", "0.5005582", "0.499352", "0.49827093", "0.49822465", "0.49749562", "0.4968504", "0.49632332", "0.49579942", "0.4953422", "0.49506274", "0.49469292", "0.494462", "0.49442393", "0.4940336", "0.49399307", "0.49385843", "0.4932419", "0.49251983", "0.4922513", "0.49215811", "0.4910494", "0.49091157", "0.49080667", "0.4906823", "0.49048746", "0.4903438", "0.48904338", "0.48899844", "0.48870936", "0.4884485", "0.48681337", "0.4864091", "0.48631465", "0.48602608", "0.48588187", "0.4858689", "0.48509225" ]
0.0
-1
Converts the provided integer 'n' into a valid insertion point in the string 's', i.e., one of the current index locations or the position at the end
def gen_index_via_mod(s, n):
    if len(s) == 0:
        return 0
    return n % (len(s) + 1)
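A quick usage sketch of the record's document function above; the calls and expected values are illustrative additions, not part of the dataset record. For a string of length L the valid insertion points are 0 through L, so n is wrapped with modulo L + 1.

def gen_index_via_mod(s, n):             # repeated from the record above so the sketch runs standalone
    if len(s) == 0:
        return 0
    return n % (len(s) + 1)

assert gen_index_via_mod("", 7) == 0     # empty string: the only insertion point is index 0
assert gen_index_via_mod("abc", 3) == 3  # index len(s) means "insert at the end"
assert gen_index_via_mod("abc", 5) == 1  # 5 % (3 + 1) == 1, an existing index location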
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_to_end(s, n):\n first=s[0:n]\n return s[n:] + first", "def string(self,pos_0,pos_1,n):\r\n n=int(n)\r\n if pos_0 <10:\r\n pos_0=\"00\"+str(pos_0)\r\n elif pos_0<100:\r\n pos_0=\"0\"+str(pos_0)\r\n\r\n if n <10:\r\n n=\"0\"+str((n))\r\n \r\n\r\n\r\n if pos_1 <10:\r\n pos_1=\"00\"+str(pos_1)\r\n elif pos_1<100:\r\n pos_1=\"0\"+str(pos_1)\r\n\r\n\r\n\r\n\r\n #pos\r\n c=\"\"\r\n\r\n c=str(pos_0)+str(pos_1)+str(n)\r\n #print(\"c\",c)\r\n return c", "def InfIntToStr(s, i, n):\n if i == len(s):\n return \"\"\n elif i == 0:\n return str(int(s[i])) + InfIntToStr(s, i + 1, n)\n else:\n return str(int(s[i])).zfill(n) + InfIntToStr(s, i + 1, n)", "def _trans_string(self, n):\r\n return \"%s %d\" % (self.desc, n+1)", "def esrever2(n, s):\n if n == 0:\n return s\n else:\n result = esrever2(n // 10, s * 10 + n % 10)\n return result", "def d(s):\n return s + 1", "def add_space(s,n):\n t = \"\"\n for i in xrange(len(s)):\n # Add white space after every n characters.\n if i % n == 0 and i != 0:\n t += ' '\n t += s[i]\n\n return t", "def cmd_n(self,s):\n length = 0\n node = self.start\n while node is not None:\n line = node.element\n length += len(line)\n if line.find(s):\n self.cursor = node\n self.delta = line.find(s)\n break\n node = node.next\n self.get_text()", "def stoi(self, s):\n idx = self._stoi.get(s)\n return idx + 2 if idx else self.unk_idx", "def f(n):\n\tnstr = ''\n\tfor i in range(1, n + 1):\n\t\tnstr = nstr + str(i)\n\treturn nstr", "def fn(i, s=\"\", n=0):\n if i == len(word): return ans.append(s + (str(n) if n else \"\"))\n fn(i+1, s, n+1)\n fn(i+1, s + (str(n) if n else \"\") + word[i], 0)", "def insertnln(n=1):\r\n\tidx = 0\r\n\twhile idx < n:\r\n\t\tCONSOLE.insertln()\r\n\t\tidx = idx + 1", "def left_fill(s, n, x=\"0\"):\n sl = len(s)\n zn = n - sl\n if zn > 0:\n return zn*\"0\" + s\n else:\n return s", "def splitevery(s, n):\n\treturn [s[x:x+n] for x in range(0,len(s), n)]", "def missing_char(str, n):\r\n if n<=len(str):\r\n str = str.replace(str[n], \"\")\r\n return str", "def get_string(self, n):\n pad = self.get_pad(n)\n string = pad + self.word\n string += \"\\n\" + self.children[0].get_string(n + 1)\n string += \"\\n\" + self.children[1].get_string(n + 1)\n return string", "def shift_column(code, n, s):\n def shift(s, n):\n if n == 0 or len(s) == 1:\n return s\n else:\n return shift(s[-1] + s[:-1], n-1)\n\n if type(code) is not list:\n return code\n else:\n n = int(n)\n s = int(s) % len(code)\n if s > 0 and n < len(code[0]):\n column = select_column(code, n)\n column = shift(column, s)\n for i in range(0, len(column)):\n new = list(code[i])\n new[n] = column[i]\n code[i] = ''.join(new)\n return code\n else:\n return code", "def progress_string(i, n):\n width = len(str(n))\n string = \"({0:{width}d}/{1:d})\".format(i, n, width=width)\n return string", "def cd2p(s, N):\n letter = s[0].upper()\n number = s[1:]\n col = letter_coord.index(letter) + 1\n row = (N + 1) - int(number)\n # print('row:{} col:{}'.format(row,col))\n return col + (N + 1) * row", "def line(n, str):\n\n return_value = ''\n for _ in range(n):\n return_value += str\n return return_value", "def expanding(self,pos_0,pos_1,n):\r\n cnvt_front=self.string(pos_0,pos_1,n)\r\n if int(cnvt_front) in self.expanded:\r\n\r\n a=1\r\n else:\r\n self.expanded.append(int(cnvt_front))", "def sindex(string, row, col):\r\n n = 0\r\n for _ in range(row-1):\r\n n = string.find('\\n', n) + 1\r\n return n+col-1", "def generateParenthesis(self, n):\n sol = []\n \n def dfs(cur_str, o, c):\n if o==n and o==c:\n 
sol.append(cur_str)\n else:\n if o < n:\n dfs(cur_str + \"(\", o + 1, c)\n if c < o:\n dfs(cur_str + \")\", o, c+1)\n dfs(\"\", 0, 0)\n return sol", "def str_fill(i, n):\r\n return str(i).zfill(n)", "def recurse(n, s):\n print(f\"recurse n -> {n}\")\n print(f\"recurse s -> {s}\")\n if n == 0:\n print(s)\n else:\n recurse(n-1, n+s)", "def encode1(s,n):\n r = \"\"\n for l in s:\n l = ord(l) # convert to ascii\n l = l - 97 # 'a' is 97 so we want to reduce so 'a'=0 'b'=1 etc\n l = l + n # add the offset\n l=l%26 # use mod so that we wrap around back to 'a' if we go past 'z'\n l=l+97 # and add back the 97\n r = r + chr(l)\n return r", "def fo_shizzle_my_nizzle(n): \n if n < 0:\n n = \"fo\"\n elif n >= 1 and n < 50: \n n = \"shizzle\"\n elif n >= 50 and n <= 100:\n n = \"my\"\n elif n % 2 == 0 and n % 3 == 0 and n > 100:\n n = \"nizzle\"\n else:\n n = \"\"\n return n", "def _nth_letter(n):\r\n\treturn string.ascii_lowercase[n % len(string.ascii_lowercase)]", "def hex_string(s, n=32):\n # take first n characters, reverse them and get ascii codes with ord()\n return 'X\"{0:>0{1}}\"'.format(''.join(['{0:x}'.format(ord(c)) for c in s[:n][::-1]]), n * 2)", "def fn(i, n):\n if not (n <= len(s)-i <= 3*n): return \n if i == len(s): return ans.append(\".\".join(stack))\n k = i+1 if s[i] == \"0\" else i+3\n for j in range(i+1, min(k, len(s))+1): \n if j == i+3 and s[i:j] > \"255\": continue\n stack.append(s[i:j])\n fn(j, n-1)\n stack.pop()", "def index2str(index_input, num_char=4, prepend_char='0'):\n\n index_str = str(index_input)\n num_to_prepend = num_char - len(index_str)\n new_str_index = []\n\n for i in range(num_to_prepend):\n new_str_index.append(prepend_char)\n\n new_str_index.append(index_str)\n index_str = ''.join(new_str_index)\n\n return(index_str)", "def get_string(self, n):\n pad = self.get_pad(n)\n string = pad + self.c\n return string", "def formatSI(n: float) -> str:\n s = ''\n if n < 0:\n n = -n\n s += '-'\n if type(n) is int and n < 1000:\n s = str(n) + ' '\n elif n < 1e-22:\n s = '0.00 '\n else:\n assert n < 9.99e26\n log = int(math.floor(math.log10(n)))\n i, j = divmod(log, 3)\n for _try in range(2):\n templ = '%.{}f'.format(2 - j)\n val = templ % (n * 10 ** (-3 * i))\n if val != '1000':\n break\n i += 1\n j = 0\n s += val + ' '\n if i != 0:\n s += 'yzafpnum kMGTPEZY'[i + 8]\n return s", "def convert_numtoletter(n):\r\n L = seats[0][n-1] #letter\r\n return L", "def climbing_stairs(n):\n\tdico = {0:1, 1:1}\n\n\tfor i in range(2, n+1):\n\t\tdico[i] = dico[i-1] + dico[i-2]\n\n\treturn dico[n]", "def indent_lines(s, n):\n return \"\\n\".join(map(lambda line: \" \" * n + line, \n s.split('\\n')))", "def scol(string, i):\r\n return i - string.rfind('\\n', 0, max(0, i))", "def string_times(str, n):\n if n <= 0:\n return('n has to be non-negative')\n else:\n return(str * n)", "def nth_word(value: str, n: int) -> str:\n return value.split()[n]", "def StrToInfInt(s, n):\n\n if type(s) == list: # Inf int was provided as input\n return s\n\n if(len(s) <= n):\n return [int(s)]\n\n else:\n return StrToInfInt(s[:-n], n) + [int(s[-n:])]", "def number_to_mnemonic(n):\n indices = []\n while n:\n indices.append(n % RADIX)\n n >>= RADIX_BITS\n return \" \".join(words_from_indices(indices))", "def insert(self, n, pos):\n if pos == 0:\n self.cons(n)\n else:\n prev = self.index(pos-1)\n next = prev.next\n prev.next = n\n n.next = next\n self.len += 1", "def generation(state, first_n, rules):\n\n state = \"....\" + state + \"....\"\n first_n -= 2 # adding 4 dots, but only add 2 to the first_n because we 
start at 2 below\n\n new_state = \"\".join([rules.get(state[i:i+5],\".\") for i in range(len(state))])\n\n first_pot = new_state.find(\"#\")\n first_n += first_pot\n\n last_pot = new_state.rfind(\"#\")\n \n# print(f\"first pot {first_pot} in {new_state}\")\n return new_state[first_pot:last_pot+1], first_n", "def rangoli(n):\r\n alphabet = string.ascii_lowercase\r\n pad = 4*n-3\r\n filler = '-'\r\n initial = [alphabet[n-1]]\r\n top = [alphabet[n-1].center(pad, filler)]\r\n\r\n for i in range(n-2, -1, -1):\r\n initial.append(alphabet[i])\r\n sub_list = initial[:-1]+[alphabet[i]]+list(reversed(initial[:-1]))\r\n sub_seq = filler.join(sub_list).center(pad, filler)\r\n top.append(sub_seq)\r\n\r\n bot = list(reversed(top[:-1]))\r\n result = '\\n'.join(top + bot)\r\n print(result)\r\n return", "def generatehamiltoniantring(n, s, onestring=None, pos=None, pad=None):\n label = []\n if onestring is None:\n if isinstance(s, str):\n for i in range(0, n):\n strs = s\n strs = strs.ljust(n-i, '0')\n strs = strs.rjust(n, '0')\n label.append(strs)\n else:\n print('Please enter string for second variable and integer for first')\n return label\n else:\n strs = s\n strs = strs.ljust(n - pos, pad)\n strs = strs.rjust(n, pad)\n return strs", "def find_nth(self,string, substring, n) -> int:\n if n == 1:\n return string.find(substring)\n else:\n return string.find(substring, self.find_nth(string, substring, n - 1) + 1)", "def GenerateNormalExpression(n):\n if int(n) != n or n < 1:\n return None\n return '+'.join(['a[%d]' % ii for ii in range(n)])", "def polygonal_number(s, n):\n return (n*n*(s-2)-n*(s-4))/2", "def natsort_key(s: str) -> str:\n # generates four types of fragments:\n # 1) strings < \"0\", stay as-is\n # 2) numbers starting with 0, fragment starts with \"A\"..\"Z\"\n # 3) numbers starting with 1..9, fragment starts with \"a\"..\"z\"\n # 4) strings > \"9\", fragment starts with \"|\"\n if \"~\" in s:\n s = s.replace(\"~\", \"\\0\")\n key: List[str] = []\n key_append = key.append\n for frag in _rc.findall(s):\n if frag < \"0\":\n key_append(frag)\n key_append(\"\\1\")\n elif frag < \"1\":\n nzeros = len(frag) - len(frag.lstrip('0'))\n mag = str(nzeros)\n mag = str(10**len(mag) - nzeros)\n key_append(chr(0x5B - len(mag))) # Z, Y, X, ...\n key_append(mag)\n key_append(frag)\n elif frag < \":\":\n mag = str(len(frag))\n key_append(chr(0x60 + len(mag))) # a, b, c, ...\n key_append(mag)\n key_append(frag)\n else:\n key_append(\"|\")\n key_append(frag)\n key_append(\"\\1\")\n if not (key and key[-1] == \"\\1\"):\n key_append(\"\\1\")\n return \"\".join(key)", "def ordinal(n):\n ord_dict = {1: \"st\", 2: \"nd\", 3: \"rd\"}\n return str(n + 1) + ord_dict.get((n + 1) if (n + 1) < 20 else (n + 1) % 10, \"th\")", "def retrieve_sub(s, n):\n subs = []\n for idx, char in enumerate(s):\n sub = char\n c = 1\n for next_char in s[idx + 1:]:\n if c >= n:\n break\n else:\n sub += next_char\n c += 1\n subs.append(sub)\n return [x for x in subs if len(x) == n]", "def first_and_last_n_chars(s, n1=30, n2=30):\n first_len = min(len(s), n1)\n first = s[:first_len]\n last_len = min(len(s) - len(first), n2)\n last = s[-last_len:] if last_len > 0 else ''\n\n if first_len == len(s):\n return first\n elif first_len + last_len == len(s):\n return \"{}{}\".format(first, last)\n else:\n return \"{}...{}\".format(first, last)", "def n_char_generate(self,char,n):\n return char*n", "def repeatedString(s, n):\n\n count = 0\n s_count_a = s.count('a')\n\n count += math.floor(n / len(s)) * s_count_a\n for _ in range(n % len(s)):\n if s[_] == 
'a':\n count += 1\n\n return count", "def incr_indexstr(indexstr):\n index = int(indexstr)\n length = len(indexstr)\n\n index = index + 1\n\n # fill in leading zero's\n newindexstr = str(index).rjust(length, \"0\")\n\n # maintain original length, truncating on the right if needed\n return newindexstr[-length:]", "def _wnpos(pos: str) -> str:\n pos = pos.lower()\n wnpos = \"n\"\n\n if pos.startswith(\"j\"):\n wnpos = \"a\"\n elif pos[0] in ('n', 'r', 'v'):\n wnpos = pos[0]\n\n return wnpos", "def insert(self, n):\n # The distance from the ith cell to the jth probe.\n dij = n.XY.reshape((2,-1,1)) - self.points.reshape((2,1,-1))\n dij = (dij**2).sum(axis=0) / self.radius\n dij[dij < 1] = 1\n self.M = 1 / dij\n self.n = n", "def encode2(s,n):\n r = [ chr(((ord(x)-97+n)%26)+97) if x!=' ' else x for x in s]\n return \"\".join(r)", "def w(n):\n return '{' + n + '}'", "def split_stn_coords(U, stn):\n if stn == 101:\n return U[9], U[10], U[11], 9\n elif stn == 337:\n return U[12], U[13], U[14], 12\n elif stn == 394:\n return U[15], U[16], U[17], 15", "def indent(self, n):\n self._ind = max(0, self._ind + n)", "def strIdx(idx):\n if not isinstance(idx, (int, np.integer)):\n raise ValueError(\"Index must be an integer.\")\n\n return str(idx) if idx >= 0 else str(-idx) + u'\\u0305'", "def sumn(n):\n return n * (n + 1) // 2", "def minOperations(n):\n count = 0\n s = \"H\"\n if n < 2 or type(n) is not int:\n return 0\n while len(s) != n:\n if n % len(s) == 0:\n prev = s\n s = s * 2\n count += 2\n else:\n s = s + prev\n count += 1\n return count", "def srow(string, i):\r\n return string.count('\\n', 0, max(0, i)) + 1", "def refrm(s):\n s2 = s[5:10] + s[4] + s[0:4]\n return s2", "def naive_suffix_array(s, n):\n sa_tuple = sorted([(s[i:], i) for i in range(n)])\n return array(\"l\", map(lambda x: x[1], sa_tuple))", "def insert_at_offsets(text, offset2string):\n\n dict_as_list = list(offset2string.items())\n dict_as_list.sort(key=lambda t: t[0], reverse=True)\n\n for offset, s in dict_as_list:\n text = text[:offset] + s + text[offset:]\n\n return text", "def increment(self, n: int = 1):\n return Cursor(self.data, self.begin, self.end+n)", "def test_string_insertion(a_string, a_character):\n for position in range(0, len(a_string)+1):\n print a_string[:position] + a_character + a_string[position:]", "def look_ahead(self, n: int = 1):\n return self.data[self.end:self.end+n]", "def _generate(self, n):\n # See https://en.wikipedia.org/wiki/De_Bruijn_sequence\n\n k = len(self.alphabet)\n a = [0] * k * n\n sequence = []\n\n def db(t, p):\n if t > n:\n if n % p == 0:\n sequence.extend(a[1:p + 1])\n else:\n a[t] = a[t - p]\n db(t + 1, p)\n for j in range(a[t - p] + 1, k):\n a[t] = j\n db(t + 1, t)\n db(1, 1)\n return ''.join(self.alphabet[i] for i in sequence)", "def _get_interleving(self, index):\n try:\n index = self._char_indexes[index - 1]\n except IndexError:\n return \"\"\n s = \"\"\n while True:\n index += 1\n if index in self._char_indexes:\n break\n elif index in self._code_indexes:\n s += self._raw_string[index]\n else:\n break\n return s", "def transforme(n):\n if n<10 :\n return '0'+str(n)\n else :\n return str(n)", "def trieWithPosFromString(s, q):\n global gTrie\n\n # iterate over ref string\n for i in range(len(s) - q + 1):\n m = s[i:q]\n # if key is already there, append position\n if gTrie.has_key(m):\n gTrie[m].append(i)\n else:\n # create a list containing only current position\n gTrie[m] = [i]", "def line(n):\n\n return_value = ''\n for _ in range(n):\n return_value += '#'\n return return_value", 
"def spark_index(n):\n return int(round((clamp(n) - minimum) * coefficient))", "def insertion_sort(s):\n for k in range(1, len(s)):\n cur = s[k] # 建立游标\n j = k # 拿到当前游标值\n while j > 0 and s[j-1] > cur: # 当前一位值大于游标的值\n s[j] = s[j-1] # 交换位置\n j -= 1 # 游标向前移\n s[j] = cur # 获取游标值", "def find(n):\n tn = int(n / 2)\n s = 2 * tn + 1\n count = 0\n line_list = []\n for i in range(1, n + 1):\n for j in range(i + 1, n + 1):\n if i + j != s:\n line_list.append(str(i) + \" \" + str(j))\n count += 1\n\n return count, line_list", "def ordinal_label(n):\n n = int(n)\n return \"%d%s\" % (n,\"tsnrhtdd\"[(n/10%10!=1)*(n%10<4)*n%10::4])", "def split_into_n(s, n):\n return [s[k:k + n] for k in range(0, len(s), n)]", "def ordinal(n):\n if 11 <= n <= 19:\n return str(n) + \"th\"\n s = str(n)\n last = int(s[-1])\n if 1 <= last <= 3:\n return s + (\"st\", \"nd\", \"rd\")[last-1]\n return s + \"th\"", "def xo_convert(n):\n if n == 1:\n return \"X\"\n elif n == -1:\n return \"O\"\n else:\n return \" \"", "def __rshift__(self, n: int) -> 'SInt':\r\n if type(n) != int or n < 0:\r\n raise TypeError(\"Wrong type for n : positive integer needed\")\r\n n = min(n, len(self) - 1)\r\n S = SInt(self.nbBytes)\r\n S.binaire = self.signe + '0' * n + self.binaire[1:-n]\r\n return S", "def rotate(string, n):\r\n # default no change unless n is negative or positive\r\n rotated_string = string\r\n if n > 0:\r\n rotated_string = string[n:] + string[:n]\r\n elif n < 0:\r\n # calc how many letters remain after n characters are removed\r\n difference = len(string) - abs(n)\r\n # last n characters\r\n last_n = string[difference:]\r\n # remainder of string after n characters are chopped off end\r\n remainder_string = string[:difference]\r\n rotated_string = last_n + remainder_string\r\n return rotated_string", "def __replace_negative_for_n__(self, text):\n # | - __replace_negative_for_n__\n lst = [pos for pos, char in enumerate(text) if char == \"n\"]\n\n for lett in lst:\n if text[lett + 1].isdigit() is True:\n text = text[:lett] + \"-\" + text[lett + 1:]\n\n return(text)\n # __|", "def squash_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = ''.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = _expand_after_special_chars(newText)\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def n_char(self,char,n,w=1,h=1):\n for i in range(n):\n self.esprint(char,w,h)", "def _get_abs_string_index(self, idx):\r\n idx = operator.index(idx)\r\n if not (-len(self) <= idx < len(self)):\r\n raise IndexError('index {} is out of range'.format(idx))\r\n if idx < 0:\r\n idx += len(self)\r\n return str(idx)", "def encode_pos(i, j):\n return 3 * i + j", "def generateParenthesis(self, n):\n # Write your code here\n result = []\n\n def helper(cur, left, right):\n if left == right == 0:\n result.append(cur)\n if left > 0:\n helper(cur + '(', left - 1, right)\n if left < right:\n helper(cur + ')', left, right - 1)\n helper(\"\", n, n)\n return result", "def encipher(S, n):\n new = ''\n for i in S:\n c = rot(i, n)\n new = new + c\n return new", "def fn(n):\n digits = [int(x) for x in str(n)]\n for i in reversed(range(len(digits)//2+1)): \n if digits[i] < 9: break \n else: return 10*n + 11\n digits[i] = digits[~i] = digits[i] + 1\n for ii in 
range(i): \n digits[~ii] = digits[ii]\n for ii in range(i+1, len(digits)//2+1): \n digits[ii] = digits[~ii] = 0\n return int(\"\".join(map(str, digits)))", "def start_with_the_beggining(rna: str):\n return 0", "def calc_soma(n):\n \n # Comecamos por percorrer os caracteres de n, e juntamos a cada caracter o que estava à sua direira, do lado esquerdo, invertendo o numero. Caso um dos caracteres nao seja um algarismo, chamamos a atencao ao utilizador para o erro.\n # Seguidamente, percorremos a cadeia recem criada. OS caracteres nas posicoes impares da cadeia anterior (indices 0,2,4,..) vao ser multiplicados por 2. Se a multiplicacao der um resultado superior a 9, subtrai-se 9. Os caracteres nas posicoes pares vao para a nova cadeia sem qualquer alteracao.\n # Finalmente percorremos os elementos da cadeia e somamos, convertidos a inteiros.\n \n \n comp = len(n)\n num_invertido , num_invertido2 = '' , ''\n soma_luhn = 0\n \n for e in n:\n \n if '0' <= e <= '9': \n num_invertido = e + num_invertido\n \n else:\n raise ValueError ('function calc_soma(): O string recebido apenas pode conter digitos')\n \n \n for i in range(comp):\n \n if i%2 == 0:\n resultado = eval(num_invertido[i]) * 2\n \n if resultado > 9:\n num_invertido2 = num_invertido2 + str(resultado - 9)\n \n else:\n num_invertido2 = num_invertido2 + str(resultado)\n \n else:\n num_invertido2 = num_invertido2 + (num_invertido[i])\n \n\n for e in num_invertido2:\n soma_luhn = soma_luhn + eval(e)\n \n return soma_luhn", "def SolveInput(s, n):\n\n if len(s) == 0:\n return\n elif s[0] == \"\":\n SolveInput(s[1:], n)\n else:\n result = SolveLine(list(filter(lambda a: a not in ['(', ')', ','],\n LexLine(s[0].replace(' ', '')) or [])), 0, n)\n if result and not type(result) == str:\n # def TestInput(solved, inputString):\n # \"\"\"Evaluates input using the python interpreter directly. Used\n # exclusively for testing/debugging purposes to validate output\n # correctness.\n # \"\"\"\n # def multiply(a, b):\n # return a * b\n # def add(a, b):\n # return a + b\n # correctness = solved == str(eval(inputString))\n # return solved + \" \" + str(correctness)\n # print (s[0], \"=\", TestInput(InfIntToStr(result, 0, n), s[0]))\n print (s[0], \"=\", InfIntToStr(result, 0, n))\n else:\n print (\"Invalid expression:\", s[0])\n SolveInput(s[1:], n)", "def get_pad1(n):\n if n < 10:\n return \" \"\n if n < 100:\n return \" \"\n if n < 1000:\n return \" \"\n return \"\"", "def transform(s):\r\n return 'digit ' + str(s)", "def incr_id(id, n):\n return id[:-1] + (id[-1] + n,)", "def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret" ]
[ "0.63267386", "0.6178822", "0.61630845", "0.6155788", "0.6115236", "0.6103694", "0.6012176", "0.6011193", "0.594181", "0.593987", "0.5923727", "0.5884271", "0.580508", "0.5737656", "0.5725076", "0.5623249", "0.5594806", "0.55564487", "0.5525483", "0.55181396", "0.5496168", "0.54950446", "0.5493407", "0.54928535", "0.5476004", "0.54742867", "0.5470193", "0.54574436", "0.54181755", "0.5409476", "0.53863883", "0.53831005", "0.5383051", "0.5377475", "0.5356362", "0.5346113", "0.5309631", "0.5305091", "0.53044593", "0.5293283", "0.52917284", "0.52908546", "0.52882105", "0.52796984", "0.52749825", "0.52597356", "0.5242196", "0.5240301", "0.5235121", "0.5206714", "0.5204436", "0.51991755", "0.51947016", "0.519453", "0.51911545", "0.51867455", "0.5184386", "0.5174735", "0.51739824", "0.51552165", "0.51484066", "0.51454276", "0.5137882", "0.5137472", "0.5125001", "0.512436", "0.5112909", "0.5112634", "0.5112014", "0.5102518", "0.5099514", "0.50987214", "0.508586", "0.508304", "0.50819105", "0.5067127", "0.50441843", "0.50363946", "0.50343734", "0.50338626", "0.5022842", "0.5022809", "0.50139815", "0.5010225", "0.500806", "0.50027376", "0.49925092", "0.49918362", "0.4985444", "0.49607313", "0.49594173", "0.4957751", "0.49516046", "0.49499223", "0.49452943", "0.49443296", "0.49432662", "0.49311632", "0.4930647", "0.49239063" ]
0.6202475
1
Make sure we don't insert in adjacent locations; otherwise the numbers will join together and our created ordering will be invalid, failing the test.
def remove_adjacent_nums(n):
    output = []
    for e in n:
        if len(output) == 0 or output[-1][0] <= e[0] - 2:
            output.append(e)
    return output
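A hedged usage sketch of the function above. The element shape used here, (insertion_index, payload) pairs, is an assumption made for illustration; the record itself only requires that e[0] be a number.

def remove_adjacent_nums(n):             # repeated from the record above so the sketch runs standalone
    output = []
    for e in n:
        if len(output) == 0 or output[-1][0] <= e[0] - 2:
            output.append(e)
    return output

# Assumed element shape: (insertion_index, payload); kept indices end up at least 2 apart.
spots = [(0, "a"), (1, "b"), (3, "c"), (4, "d"), (7, "e")]
assert remove_adjacent_nums(spots) == [(0, "a"), (3, "c"), (7, "e")]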
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ordering(self):\r\n def verify_order(source_usage_key, parent_usage_key, source_position=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key)\r\n parent = self.get_item_from_modulestore(parent_usage_key)\r\n children = parent.children\r\n if source_position is None:\r\n self.assertFalse(source_usage_key in children, 'source item not expected in children array')\r\n self.assertEqual(\r\n children[len(children) - 1],\r\n usage_key,\r\n \"duplicated item not at end\"\r\n )\r\n else:\r\n self.assertEqual(\r\n children[source_position],\r\n source_usage_key,\r\n \"source item at wrong position\"\r\n )\r\n self.assertEqual(\r\n children[source_position + 1],\r\n usage_key,\r\n \"duplicated item not ordered after source item\"\r\n )\r\n\r\n verify_order(self.problem_usage_key, self.seq_usage_key, 0)\r\n # 2 because duplicate of problem should be located before.\r\n verify_order(self.html_usage_key, self.seq_usage_key, 2)\r\n verify_order(self.seq_usage_key, self.chapter_usage_key, 0)\r\n\r\n # Test duplicating something into a location that is not the parent of the original item.\r\n # Duplicated item should appear at the end.\r\n verify_order(self.html_usage_key, self.usage_key)", "def test_insert_gaps_order_invariant():\n gaps1 = insert_gaps(log)\n gaps2 = insert_gaps(log.iloc[[1,0]])\n\n get_gaps = lambda x: x[x['name'] == 'gap']['length'].reset_index(drop=True)\n assert (get_gaps(gaps1) == get_gaps(gaps2.iloc[::-1])).all()", "def _check_location_order(self, locations):\n strand = None\n last_start = 0\n for location in locations:\n if strand is None:\n strand = location[2]\n elif strand != location[2]:\n return warnings[\"both_strand_coordinates\"]\n if strand == \"-\":\n locations = reversed(locations)\n for location in locations:\n if last_start > location[1]:\n return warnings[\"out_of_order\"]\n else:\n last_start = location[1]\n return None", "def test_insert_will_not_duplicate_value(bst_balanced):\n bst_balanced.insert(6)\n assert bst_balanced.size() == 6", "def test_insertion(self):\n integers = insertion_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_reorder_coords_errors(self):\r\n\r\n m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\r\n in_sids = ['A', 'B', 'C']\r\n order = ['A', 'B', 'C', 'D']\r\n self.assertRaises(ValueError, reorder_coords, m, in_sids, order)", "def fixOrderBeforeInsert(cur,vID,orderNum):\n cur.execute(\"\"\"UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? 
and orderNum >= ?\"\"\",[vID, orderNum])", "def _ensure_order_consistent(self):\r\n if self.order_sum() != self.order_triangle() or \\\r\n self.force_reset_order is True:\r\n self._reset_order()\r\n self._have_reset_order = True\r\n else:\r\n self._have_reset_order = False\r\n return self._have_reset_order", "def test_insert(self):\n\n # test expected behavior for correctly formatted inputs\n int1 = interval('[1,2)')\n int2 = interval('(1,2]')\n int12 = interval('[1,2]')\n inserted12 = insert([int1], int2)\n self.assertEqual([int12], inserted12)\n int3 = interval('[3,3]')\n int13 = interval('[1,3]')\n self.assertEqual([int13], insert([int12], int3))\n int4 = interval('(3,4]')\n int58 = interval('[5,8]')\n inserted4 = insert([],int4)\n self.assertEqual([int4], inserted4)\n self.assertEqual([int13, int58], insert([int12, int3], int58))\n self.assertEqual([int13, int58], insert([int58], int13))\n self.assertEqual([int13], insert([int2, int3], int1))\n self.assertEqual([int13], insert([int1, int2, int2, int3], int12))\n self.assertEqual([int1], insert([int1], int1))\n\n # test expected behavior for incorrectly formatted inputs\n with self.assertRaises(ValueError):\n int1 = insert([int1], 4)\n with self.assertRaises(ValueError):\n int1 = insert([3], int1)\n with self.assertRaises(ValueError):\n int1 = insert([3], \"not an interval\")\n with self.assertRaises(ValueError):\n int1 = insert([3], \"[1,3]\")\n with self.assertRaises(ValueError):\n int1 = insert([[]], \"\")\n with self.assertRaises(ValueError):\n int1 = insert([[12, \"hi\"]], \"interval\")\n with self.assertRaises(ValueError):\n int1 = insert([int1], \"\")\n with self.assertRaises(ValueError):\n int1 = insert([[]], int2)\n print(\"insert test complete\")", "def insertion_sort(numbers):\n for index in range(0, len(numbers) - 1):\n current = numbers[index]\n j = index\n while j > 0 and numbers[j - 1] > current:\n numbers[j] = numbers[j - 1]\n j -= 1\n numbers[j] = current\n assert numbers == [1, 2, 5, 6, 7, 9], \"INSERTION SORT: The list was not sorted correctly.\"", "def testInsertLength(self):\n\n num = randint(60,180)\n for i in xrange(num):\n self.s.insert(i, None)\n self.assertEqual(len(self.s), num)\n\n #try to insert duplicates\n for i in xrange(num):\n self.s.insert(i, None)\n self.assertEqual(len(self.s), num)", "def insert(self, val):\n if val not in self.numSet:\n self.numSet.add(val)\n # add to tail first\n self.nums.append(val)\n # if the last few numbers are invalid, swap the new value to the end of the valid prefix\n self.swap(self.size, -1)\n self.valToIndex[val] = self.size\n self.size += 1\n return True\n else:\n return False", "def test_insert(empty_bucket): # pylint: disable=redefined-outer-name\n for idx in range(2):\n element_number = idx + 1\n empty_bucket.insert(f\"key {element_number}\", f\"value {element_number}\")\n\n elements = list(iter(empty_bucket))\n for idx, (key, value) in enumerate(reversed(elements)):\n element_number = idx + 1\n assert key == f\"key {element_number}\"\n assert value == f\"value {element_number}\"", "def _validate_positions():\n positions = set([field[\"position\"] for field in fields])\n if len(positions) != len(fields):\n raise IncorrectPredictionsTableOrder", "def test_insertion_recursive(self):\n integers = insertion_sort_recursive(self.actual)\n self.assertEqual(self.expected, integers)", "def insertion_optimized_alt(array):\n j = 0\n for i,val in enumerate(array):\n for j in range(i,-1,-1):\n if j>0 and val<array[j-1]:\n array[j] = array[j-1]\n else:\n break\n array[j] = val", 
"def test_index_geq_3(self):\n self.insert()\n data = self.tbl[6:]\n assert self.check(self.idata[2:], data)", "def insert(self, val):\n if val not in self.posFind or self.posFind[val] == -1:\n self.nums.append(val)\n self.posFind[val] = len(self.nums) - 1\n return True\n return False", "def insertion_sort(items):\n for i in range(1, len(items)):\n j = i\n while j > 0 and items[j] < items[j-1]:\n items[j], items[j-1] = items[j-1], items[j]\n j -= 1", "def check_order(current, hit, overlap = 200):\n prev_model = current[-1][2:4]\n prev_strand = current[-1][-2]\n hit_model = hit[2:4]\n hit_strand = hit[-2]\n # make sure they are on the same strand\n if prev_strand != hit_strand:\n return False\n # check for sequential hits on + strand\n if prev_strand == '+' and (prev_model[1] - hit_model[0] >= overlap):\n return False\n # check for sequential hits on - strand\n if prev_strand == '-' and (hit_model[1] - prev_model[0] >= overlap):\n return False\n else:\n return True", "def check_sort(self):\n if self.list == []:\n return True\n seg_iter = iter(self.list)\n last = next(seg_iter)\n for segment in seg_iter:\n if last > segment:\n raise Exception('non trié')\n last = segment\n return True", "def test_insert_if_node_value_exist(balanced_3_nodes):\n with pytest.raises(ValueError):\n balanced_3_nodes.insert(10)", "def insertion_sort(arr):\n for j in range(len(arr)):\n for i in range(j, 0, -1):\n if arr[i] >= arr[i-1]:\n continue\n arr[i], arr[i-1] = arr[i-1], arr[i]", "def insertion_optimized(array):\n for i,val in enumerate(array):\n while i>0 and val<array[i-1]:\n array[i] = array[i-1]\n i -= 1\n array[i] = val", "def test_adjacent_bomb_count(self):\n index = 0\n adj_list = utils.adjacent_bomb_count(index)\n adj_list_2 = [\n index + x\n for x in utils.LEFT_ADJ_LIST\n if 0 <= index + x <= (utils.TILE_COUNT - 1)\n ]\n self.assertEqual(adj_list, adj_list_2)", "def InsertSort(num_list):\n for i in range(1,len(num_list)):\n for j in range (i,0,-1):\n if num_list[j]<num_list[j-1]:\n num_list[j],num_list[j-1] = num_list[j-1],num_list[j]\n return num_list", "def test_adjacent_bomb_count_3(self):\n index = 17\n adj_list = utils.adjacent_bomb_count(index)\n adj_list_2 = [\n index + x\n for x in utils.ADJ_LIST\n if 0 <= index + x <= (utils.TILE_COUNT - 1)\n ]\n self.assertEqual(adj_list, adj_list_2)", "def gap_insertion_sort(num_list, start, gap):\n\n # Creates sublists for the sublist gap\n for i in range(start + gap, len(num_list), gap):\n\n # New item to be inserted into the sublist gap\n current_value = num_list[i]\n position = i\n\n while position >= gap and num_list[position - gap] > current_value:\n # Shift item to current position\n num_list[position] = num_list[position - gap]\n position -= gap\n\n # Sets new position to current value\n num_list[position] = current_value", "def test_phonebook_with_numbers_that_prefix_one_another_is_inconsistent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"123\")\n self.assertFalse(self.phonebook.is_consistent())", "def test_insert_must_be_a_number(bst_empty):\n with pytest.raises(TypeError):\n bst_empty.insert(\"dfsdfadgasdg\")", "def test_phonebook_with_normal_entries_is_consistent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"012345\")\n self.assertTrue(self.phonebook.is_consistent())", "def insertion(array):\n for i in range(1,len(array)):\n while i>0 and array[i]<array[i-1]:\n array[i], array[i-1] = array[i-1], array[i]\n i -= 1", "def 
test_tree_4_nodes_right_unbalanced_return_1(balanced_3_nodes):\n balanced_3_nodes.insert(13)\n assert balanced_3_nodes.balance() == -1", "def assert_mapping_consistency(layout):\n values = sorted(layout.values())\n keys = list(layout)\n ref_keys = [\"q\" + str(i) for i in range(len(keys))]\n if keys != ref_keys:\n raise PlacementError(\"Some physical qubits in the layout may be missing or duplicated.\")\n if values != list(range(len(values))):\n raise PlacementError(\"Some logical qubits in the layout may be missing or duplicated.\")", "def insertion_sort(arr: List) -> None:\n for i in range(len(arr) - 1):\n if arr[i] > arr[i + 1]:\n for k in range(i, -1, -1):\n if arr[i+1] >= arr[k]:\n arr.insert(k + 1, arr.pop(i+1))\n break\n else:\n arr.insert(0, arr.pop(i + 1))", "def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())", "def test_nearest_location_odd():\n assert nearest_location([(3, 6), (9, 13)], 7) == 0\n assert nearest_location([(3, 6), (9, 13)], 7, 1) == 1", "def reasonble_order(index_a, index_b):\n if (index_b - index_a) % 22 != 1:\n if index_b != 1:\n return False\n return True", "def testInsert(self):\n\n for i in xrange(randint(50,150)):\n self.s.insert(i, None)", "def can_combine(self, first, second):\n # Need to check out of order issues as\n # blocks are sorted by where they start in a\n mismatch_ab = (first.a_end <= second.a\n and second.b_end <= first.b)\n mismatch_ba = (second.a_end <= first.a\n and first.b_end <= second.b)\n out_of_order = mismatch_ab or mismatch_ba\n return not out_of_order and self.jump_gap(second)", "def _insert_into_clean(self, entry):\n i = entry.hash\n new_entry = self.table[i]\n while new_entry.key is not None:\n i += self.second_hash(new_entry.key)\n new_entry = self.table[i]\n new_entry.key = entry.key\n new_entry.value = entry.value\n new_entry.hash = entry.hash\n self.used += 1\n self.filled += 1", "def verify_vertex_values(self):\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] not in range(1, 10) and self.grid[line][row] is not None:\n raise VertexValueError()", "def coord_valid(s, e):\n s = coord_fixer(s) # 0-index\n e = coord_fixer(e) # 1-index\n e = 1 if e == 0 else e # \n s, e = [s, e] if (int(s) < int(e)) else [e, s]\n return [str(s), str(e)]", "def test_adjacent_bomb_count_2(self):\n index = 9\n adj_list = utils.adjacent_bomb_count(index)\n adj_list_2 = [\n index + x\n for x in utils.RIGHT_ADJ_LIST\n if 0 <= index + x <= (utils.TILE_COUNT - 1)\n ]\n self.assertEqual(adj_list, adj_list_2)", "def test_order(self):\n\n # issue a valid query\n # Assure proper execution, and get results from quilt_history\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'out_of_order']))\n\n o = self.check_query_and_get_results3(o)\n\n # Check results\n # assure that results are in order\n l = []\n for i in xrange(1, 6):\n searchStr = \"{'timestamp': \" + str(i) + '}'\n index = o.find(searchStr)\n logging.debug(\"looking for string: \" + searchStr)\n self.assertTrue(index != -1)\n l.append(index)\n\n isSorted = all(l[i] <= l[i + 1] for i in xrange(len(l) - 1))\n self.assertTrue(isSorted)", "def InsertUniquePoint(self, , p_int):\n ...", "def InsertUniquePoint(self, , p_int):\n ...", "def test_stable_ordering(self):\n with Graph('g') as graph:\n a = ParrotNode(['a'])\n p = a | pike.merge()\n b = ParrotNode(['b'])\n graph.source | b | p\n # Make 
sure that b runs before a\n if graph.nodes.index(b) > graph.nodes.index(a):\n graph.nodes.remove(b)\n graph.nodes.insert(graph.nodes.index(a), b)\n ret = graph.run()\n self.assertEqual(list(ret['default']), ['a', 'b'])", "def listOrdering(self):\r\n index = 0\r\n while( index < len(self.sortedList)-1):\r\n if(self.sortedList[index][2] > self.sortedList[index+1][2]): # positions in wrong order\r\n self.sortedList[index], self.sortedList[index+1] = self.sortedList[index+1], self.sortedList[index] # switch\r\n if(self.sortedList[index][2] == self.sortedList[index+1][2]): # Position conflict\r\n if(self.sortedList[index][1] <= self.sortedList[index+1][1]): # Already ordered by id\r\n self.sortedList[index+1][2] += 1 # position altered for second rule\r\n else:\r\n self.sortedList[index][2] += 1\r\n self.sortedList[index], self.sortedList[index+1] = self.sortedList[index+1], self.sortedList[index] # switch\r\n index += 1", "def test_four_nodes_needs_left_rotation(three_del):\n three_del.insert(40)\n three_del.delete(10)\n assert tuple(three_del.in_order()) == (20, 30, 40)\n assert tuple(three_del.breadth_first()) == (30, 20, 40)", "def assert_placement(circuit: Circuit, layout: dict) -> bool:\n assert_mapping_consistency(layout)\n if circuit.nqubits > len(layout):\n raise PlacementError(\"Layout can't be used on circuit. The circuit requires more qubits.\")\n if circuit.nqubits < len(layout):\n raise PlacementError(\"Layout can't be used on circuit. Ancillary extra qubits need to be added to the circuit.\")", "def test_get_ordered_coordinates(self):\r\n pc_lines = [\"Eigvals\\t4\",\r\n \"191.54\\t169.99\\t30.45\\t19.19\",\r\n \"\",\r\n \"Proportion explained\\t4\",\r\n \"18.13\\t16.09\\t2.88\\t1.66\",\r\n \"\",\r\n \"Species\\t0\\t0\",\r\n \"\",\r\n \"Site\\t5\\t4\",\r\n \"s1\\t-0.049\\t0.245\\t0.146\\t-0.036\",\r\n \"s5\\t-0.267\\t-0.228\\t-0.024\\t-0.095\",\r\n \"s3\\t-0.285\\t-0.260\\t-0.017\\t-0.070\",\r\n \"s2\\t-0.002\\t0.216\\t-0.052\\t-0.085\",\r\n \"s4\\t-0.328\\t-0.299\\t-0.025\\t0.051\",\r\n \"\",\r\n \"Biplot\\t0\\t0\",\r\n \"\",\r\n \"Site constraints\\t0\\t0\",\r\n \"\"]\r\n\r\n pc = parse_coords(pc_lines)\r\n expected_coords = [[-0.049, 0.245, 0.146, -0.036],\r\n [-0.002, 0.216, -0.052, -0.085],\r\n [-0.285, -0.260, -0.017, -0.070],\r\n [-0.328, -0.299, -0.025, 0.051],\r\n [-0.267, -0.228, -0.024, -0.095]]\r\n expected_sids = ['s1', 's2', 's3', 's4', 's5']\r\n actual_coords, actual_sids = get_ordered_coordinates(\r\n pc[0], pc[1], ['s1', 's2', 's3', 's4', 's5'])\r\n assert_almost_equal(actual_coords, expected_coords)\r\n self.assertEqual(actual_sids, expected_sids)\r\n\r\n pc = parse_coords(pc_lines)\r\n expected_coords = [[-0.049, 0.245, 0.146, -0.036],\r\n [-0.267, -0.228, -0.024, -0.095]]\r\n expected_sids = ['s1', 's5']\r\n actual_coords, actual_sids = get_ordered_coordinates(\r\n pc[0], pc[1], ['s1', 's5'])\r\n assert_almost_equal(actual_coords, expected_coords)\r\n self.assertEqual(actual_sids, expected_sids)\r\n\r\n pc = parse_coords(pc_lines)\r\n expected_coords = [[-0.049, 0.245, 0.146, -0.036],\r\n [-0.267, -0.228, -0.024, -0.095]]\r\n expected_sids = ['s1', 's5']\r\n actual_coords, actual_sids = get_ordered_coordinates(\r\n pc[0], pc[1], ['s1', 's6', 's5'])\r\n assert_almost_equal(actual_coords, expected_coords)\r\n self.assertEqual(actual_sids, expected_sids)\r\n\r\n pc = parse_coords(pc_lines)\r\n expected_coords = [[-0.049, 0.245, 0.146, -0.036],\r\n [-0.267, -0.228, -0.024, -0.095]]\r\n expected_sids = ['s1', 's5']\r\n self.assertRaises(ValueError, 
get_ordered_coordinates,\r\n pc[0], pc[1], ['s1', 's6', 's5'], strict=True)", "def test_overFill(self):\r\n high = 15\r\n for _ in range(high):\r\n self.nb.add(_)\r\n\r\n self.assertFalse(self.nb.isEmpty())\r\n self.assertTrue(self.nb.isFull())\r\n self.assertEqual(5, len(self.nb))\r\n\r\n # check all are still present\r\n for _ in range(high-1, high - SIZE-1, -1):\r\n self.assertTrue(_ in self.nb)", "def test_index_12(self):\n self.insert()\n data = self.tbl[2:6]\n assert self.check(self.idata[:2], data)", "def test_tree_2_nodes_right_unbalanced(one_t):\n one_t.insert(11)\n assert one_t.balance() == -1", "def test_input_order_irrelevant(self):\n sorted_strings = ['1532-44349', '1534-44658', '1536-44582', '1536-44935', '1538-44874', '1538-44920']\n mutable_copy = list(sorted_strings)\n for i in range(10000):\n random.shuffle(mutable_copy)\n assert natsort(mutable_copy) == sorted_strings", "def insert(self, val):\n if val in self.map:\n return False\n \n self.nums.append(val)\n self.map[val] = len(self.nums) - 1\n \n return True", "def check_rows(self):\r\n for i in range(0, len(self.grid),3):\r\n if self.grid[i][-1] != ' ' and self.grid[i][-1] == self.grid[i+1][-1] and self.grid[i+1][-1] == self.grid[i+2][-1]:\r\n return (i, (self.grid[i], self.grid[i+2]))\r\n return (-1, None)", "def test_tree_2_nodes_left_unbalanced(one_t):\n one_t.insert(9)\n assert one_t.balance() == 1", "def gap_insertion_sort(alist, start, gap):\n for i in range(start+gap, len(alist), gap):\n current_val = alist[i]\n position = i\n\n while position >= gap and alist[position-gap] > current_val:\n alist[position] = alist[position-gap]\n position = position - gap\n\n alist[position] = current_val", "def _update_chunks(self, chunks):\n #TODO: consider replacing this with a heap\n sorted_chunks = sorted(chunks.values(), key=lambda x:-x.badness)\n got_improvement = False\n for chunk in sorted_chunks:\n if chunk.time < self.expected_time:\n f = self._maybe_add\n else:\n f = self._maybe_remove\n\n if chunk.index == 0:\n order = [\"next\"]\n elif chunk.index == self.total_chunks - 1:\n order = [\"prev\"]\n else:\n if chunk.time < self.expected_time:\n # First try to add a test from the neighboring chunk with the\n # greatest total time\n if chunks[chunk.index + 1].time > chunks[chunk.index - 1].time:\n order = [\"next\", \"prev\"]\n else:\n order = [\"prev\", \"next\"]\n else:\n # First try to remove a test and add to the neighboring chunk with the\n # lowest total time\n if chunks[chunk.index + 1].time > chunks[chunk.index - 1].time:\n order = [\"prev\", \"next\"]\n else:\n order = [\"next\", \"prev\"]\n\n for direction in order:\n if f(chunks, chunk.index, direction):\n got_improvement = True\n break\n\n if got_improvement:\n break\n\n return got_improvement", "def test_case_07_side_too_small(self):\n self.__assert_equals_test_case([(-2, 2, 3), (0, 2, 3)], 'InvalidInput')", "def testSortOrder(self):\n timestamp = time.time()\n comment_id1 = Comment.ConstructCommentId(timestamp, 0, 0)\n comment_id2 = Comment.ConstructCommentId(timestamp + 1, 0, 0)\n self.assertGreater(comment_id2, comment_id1)", "def make_consistent(self):\r\n\r\n for key in self.get_keys():\r\n self.eliminate_key(key)\r\n\r\n for i_temp in self.indexes(): #i will be a note index\r\n for j_temp in self.get_keys_from_note(i_temp):\r\n if self.key_dict_contains(j_temp):\r\n self.add_key(j_temp,Index(i_temp))\r\n## self.key_dict[j_temp].add(str(Index(i_temp)))\r\n else:\r\n self.initiate_new_key(j_temp,Index(i_temp))", "def test_index_lt_3(self):\n 
self.insert()\n data = self.tbl[:6]\n assert self.check(self.idata[:2], data)", "def insert(self, e): \n if not e in self.vals:\n self.vals.append(e)", "def insert(self, e): \n if not e in self.vals:\n self.vals.append(e)", "def insertSort(arr):\n for i in range(1, len(arr)):\n value = arr[i]\n position = i - 1\n while position >= 0 and arr[position] < value:\n arr[position + 1] = arr[position]\n position -= 1\n arr[position + 1] = value\n return", "def packet_in_correct_order(self, a, b) -> bool:\n res = self.elements_in_order(a, b, depth=0, debug=False)\n return -1 if res else 1", "def insertionSortGap(ls, start, gap): \n n = len(ls)\n \n # passes \n # iterate sublist in increments = gap (stop n-gap since getting start + gap)\n for nPass in range(start,n-gap,gap):\n \n # iterate until comparisons done in sublist or found insertion location\n i = nPass\n new = ls[nPass + gap]\n \n while i >= start and new < ls[i]:\n \n # shift to the right\n ls[i+gap] = ls[i]\n \n i-=gap\n \n ls[i+gap] = new", "def gap_insertion_sort(a_list, start, gap):\n\n for i in range(start + gap, len(a_list), gap):\n current_value = a_list[i]\n position = i\n while position >= gap and a_list[position - gap] > current_value:\n a_list[position] = a_list[position - gap]\n position = position - gap\n\n a_list[position] = current_value", "def test_place_multiple_orders(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(len(self.orders_list.orders_list), 3)\n self.assertEqual(self.orders_list.orders_list[2].order_id, 2)", "def insertion_sort(a_list):\n \n for index in range(1, len(a_list)):\n value = a_list[index]\n position = binary_search(a_list, 0, index, value)\n\n for subIndex in range(index, position, -1):\n temp = a_list[subIndex]\n a_list[subIndex] = a_list[subIndex - 1]\n a_list[subIndex - 1] = temp", "def check_place(self, positions):\n return self.size == len(set(positions[i] + i for i in range(self.size))) == len(\n set(positions[i] - i for i in range(self.size)))", "def test_unorder_tuple() -> None:\n assert UnorderedTuple((1, 2, 3)) == (3, 2, 1)\n assert UnorderedTuple((1, 3)) != (3,)\n assert UnorderedTuple((1, 2, 3)) == (1, 2, 3)\n assert UnorderedTuple((1, 2, 3)) == (2, 3, 1)\n assert not UnorderedTuple((7, 2, 3)).__eq__((1, 2, 5))", "def test_insertSort2(self):\n\t\tsortObj=insertSort()\n\t\tself.assertNotEqual(sortObj.run_sort(self.test_2[0]),self.test_2[1])", "def _check_devices_sequ_order(df):\n dev_list = df['device'].unique()\n no_errors = True\n for dev in dev_list:\n df_d = df[df['device'] == dev]\n for i in range(1, len(df_d)):\n st_j = df_d.iloc[i-1].start_time\n et_j = df_d.iloc[i-1].end_time\n st_i = df_d.iloc[i].start_time\n # et_i = df_d.iloc[i].end_time\n # if the sequential order is violated return false\n if not (st_j < et_j) or not (et_j < st_i):\n print('~'*50)\n if st_j >= et_j:\n #raise ValueError('{}; st: {} >= et: {} |\\n {} '.format(i-1, st_j, et_j, df_d.iloc[i-1]))\n print('{}; st: {} >= et: {} |\\n {} '.format(i-1, st_j, et_j, df_d.iloc[i-1]))\n if et_j >= st_i:\n #raise ValueError('{},{}; et: {} >= st: {} |\\n{}\\n\\n{}'.format(i-1,i, et_j, st_i, df_d.iloc[i-1], df_d.iloc[i]))\n print('{},{}; et: {} >= st: {} |\\n{}\\n\\n{}'.format(i-1,i, et_j, st_i, df_d.iloc[i-1], df_d.iloc[i]))\n no_errors = False\n return no_errors", "def insert(self, e): \r\n if not e in self.vals:\r\n self.vals.append(e)", "def apply_insert_point_rules(self, coords):\n return True", "def 
test_nearest_location_even():\n assert nearest_location([(3, 6), (8, 13)], 6, 0) == 0\n assert nearest_location([(3, 6), (8, 13)], 6, 1) == 0\n assert nearest_location([(3, 6), (8, 13)], 7, 0) == 1\n assert nearest_location([(3, 6), (8, 13)], 7, 1) == 1", "def _valid_placement(self, i_row, i_col):\n if not self._empty_cell(i_row, i_col):\n return (False, [])\n adj_opp_cells = []\n\n if (i_row, i_col) == self._tl_cell:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ls_corners, \"tl\")\n elif (i_row, i_col) == self._tr_cell:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_rs_corners, \"tr\")\n elif (i_row, i_col) == self._bl_cell:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ls_corners, \"bl\")\n elif (i_row, i_col) == self._br_cell:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_rs_corners, \"br\")\n elif (i_row, i_col) in self._ls_cells:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ls_and_rs, \"ls\")\n elif (i_row, i_col) in self._ts_cells:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ts_and_bs, \"ts\")\n elif (i_row, i_col) in self._rs_cells:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ls_and_rs, \"rs\")\n elif (i_row, i_col) in self._bs_cells:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ts_and_bs, \"bs\")\n else:\n self._check_inner_dirs(i_row, i_col, adj_opp_cells)\n\n #print(\"\\nFOR TESTING. adj_opp_cells: \", adj_opp_cells)\n\n if adj_opp_cells == []:\n return (False, [])\n else:\n can_place, flip_lst = self._flip_dirs(adj_opp_cells)\n return (can_place, flip_lst)", "def test_four_nodes_needs_left_right_rotation(three_del):\n three_del.insert(15)\n three_del.delete(30)\n assert tuple(three_del.in_order()) == (10, 15, 20)\n assert tuple(three_del.breadth_first()) == (15, 10, 20)", "def test_insertion_for_each_element_input_list(empty_list):\n a = [5, 6, 7, 8]\n empty_list.insert(a)\n assert len(empty_list) == len(a)", "def _insert(self, node, root):\n if not root:\n root = node\n elif node.key < root.key:\n root.left = self._insert(node, root.left)\n if root.right and (root.left.height - root.right.height == 2):\n # Inserted node on the left side, check if left side is larger by 2\n # this is not allowed\n # at most 1 difference\n if node.key < root.left.key:\n root = self.rotate_with_left_child(root)\n else:\n root = self.double_with_left_child(root)\n # It's in wrong position, put it on the right\n elif node.key > root.key:\n root.right = self._insert(node, root.right)\n if root.left and (root.right.height - root.left.height == 2):\n # Inserted node on the right side, check if right side larger by 2\n # not allowed\n # max 1 difference\n if node.key > root.right.key:\n root = self.rotate_with_right_child(root)\n else:\n root = self.double_with_right_child(root)\n # It's in wrong position, put it on the left\n\n root.height = max(root.left.height if root.left else -1, root.right.height if root.right else -1) + 1\n # get root height, left or right subtree height + 1, depending which is greater\n return root", "def insertion_sort_single_alpha(arr:Sequence[AlphaList]) -> AlphaList:\n lsi = 1\n while lsi <= len(arr)-1:\n insert_into = 0\n compare = arr[lsi]\n for idx in range(lsi,-1,-1):\n if ord(compare) > ord(arr[idx]):break\n insert_into = idx\n del arr[lsi]\n arr.insert(insert_into,compare)\n lsi += 1\n return arr", "def insertion_sort(items):\n # Repeat until all items are in sorted order\n # Take first unsorted item\n # Insert it in sorted order 
in front of items\n sorted_index = 1\n while not is_sorted(items):\n num = items.pop(sorted_index)\n \n back_index = sorted_index - 1\n for back_num in items[sorted_index-1::-1]:\n if num > back_num:\n items.insert(back_index + 1, num)\n break\n\n back_index -= 1\n else:\n items.insert(0, num)\n \n sorted_index += 1", "def insertion_sort(new_list):\n for _i in range(1, len(new_list)): \n _point = new_list[_i]\n _j = _i-1\n while _j >=0 and _point < new_list[_j] : \n new_list[_j+1] = new_list[_j] \n _j -= 1\n new_list[_j+1] = _point", "def test_RNA_position_fail(self):\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 10 10\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(100, 150),\n (25,50),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (None, None))\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 175 175\".split())\n \n self.assertEqual(RNA_position(tool, location_dict), (None, None))", "def test_tree_4_nodes_left_unbalanced_return_1(balanced_3_nodes):\n balanced_3_nodes.insert(8)\n assert balanced_3_nodes.balance() == 1", "def test_chromosome_invalid_coordinates(self):\n for i in (-10, -1, 0, 1, 6, 10, 11, 16, 100):\n self.assertIsNone(self.t.chromosome_coordinate_to_transcript(i))\n for i in (-10, -1, 0, 1, 2, 3, 6, 10, 11, 15, 100):\n self.assertIsNone(self.t.chromosome_coordinate_to_cds(i))", "def test_insert_WithDuplicates(self):\n\n self.bst.insert(10,1)\n self.bst.insert(10,2)\n \n self.bst.insert(5,2)\n \n self.bst.insert(20,3)\n self.bst.insert(20,4)\n \n self.bst.insert(3,4)\n self.bst.insert(7,5)\n self.bst.insert(15,6)\n self.bst.insert(14,7)\n self.bst.insert(25,8)\n\n self.bst.insert(5,123)\n self.bst.insert(14,456)\n\n self.assertEqual(self.bst.root.key, 10)\n self.assertEqual(self.bst.root.value, [1,2])\n\n # left subtree\n self.assertEqual(self.bst.root.left.key, 5)\n self.assertEqual(self.bst.root.left.value, [2,123])\n\n self.assertEqual(self.bst.root.left.left.key, 3)\n self.assertEqual(self.bst.root.left.left.value, [4])\n\n self.assertEqual(self.bst.root.left.right.key, 7)\n self.assertEqual(self.bst.root.left.right.value, [5])\n\n # right subtree\n self.assertEqual(self.bst.root.right.key, 20)\n self.assertEqual(self.bst.root.right.value, [3,4])\n\n self.assertEqual(self.bst.root.right.left.key, 15)\n self.assertEqual(self.bst.root.right.left.value, [6])\n\n self.assertEqual(self.bst.root.right.left.left.key, 14)\n self.assertEqual(self.bst.root.right.left.left.value, [7,456])\n\n self.assertEqual(self.bst.root.right.right.key, 25)\n self.assertEqual(self.bst.root.right.right.value, [8])", "def check_new_pos(self, next_pos):\n pos_x, pos_y = next_pos\n if pos_x < 0.0:\n pos_x = 0.0\n elif pos_x > self.allowed_area[0]:\n pos_x = self.allowed_area[0]\n\n if pos_y < 0.0:\n pos_y = 0.0\n elif pos_y > self.allowed_area[1]:\n pos_y = self.allowed_area[1]\n\n return (pos_x, pos_y)", "def test_chromosome_invalid_coordinates(self):\n for i in (-10, -1, 0, 1, 6, 10, 11, 15, 16, 100):\n self.assertIsNone(self.t.chromosome_coordinate_to_transcript(i))\n for i in (-10, -1, 0, 1, 2, 3, 6, 10, 11, 13, 14, 15, 100):\n self.assertIsNone(self.t.chromosome_coordinate_to_cds(i))", "def test_reorder_coords(self):\r\n m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\r\n in_sids = ['A', 'B', 'C']\r\n order = ['A', 'B', 'C']\r\n expected = m\r\n assert_almost_equal(reorder_coords(m, in_sids, order), expected)\r\n\r\n in_sids = ['C', 'B', 'A']\r\n expected 
= [[7, 8, 9], [4, 5, 6], [1, 2, 3]]\r\n assert_almost_equal(reorder_coords(m, in_sids, order), expected)\r\n\r\n in_sids = ['C', 'B', 'A']\r\n expected = [[7, 8, 9], [4, 5, 6], [1, 2, 3]]\r\n assert_almost_equal(reorder_coords(m, in_sids, order), expected)\r\n\r\n # order leaves some samples out\r\n m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\r\n in_sids = ['A', 'B', 'C']\r\n order = ['A', 'B']\r\n expected = [[1, 2, 3], [4, 5, 6]]\r\n assert_almost_equal(reorder_coords(m, in_sids, order), expected)", "def Insertion_sort(arr):\n\n for outer in range(1, len(arr)):\n current_value = arr[outer]\n current_position = outer\n while current_position > 0 and arr[current_position-1] > current_value:\n arr[current_position] = arr[current_position-1]\n current_position = current_position - 1\n arr[current_position] = current_value", "def test_nearest_location_adjacent():\n locations = [(1, 3), (3, 5)]\n\n assert nearest_location(locations, 2) == 0\n assert nearest_location(locations, 3) == 1", "def test_token_order(self):\n tokens = [Token(1), Token(2), Token(3), Token(4)]\n tokens_equal = [Token(1), Token(1)]\n self._check_sequence_consistency(tokens)\n self._check_sequence_consistency(tokens_equal, equal=True)", "def insertion_sort(a):\n\n\n for i in range(1,len(a)):\n j = i\n while j:\n if a[j] < a[j-1]:\n a[j],a[j-1] = a[j-1],a[j]\n j-=1", "def factible_route_insertion(customer, position, route, customers):\n r = copy.deepcopy(route)\n r.insert(position, [customer], customers)\n return not r.violate_windows(customers)", "def add_missing(linelist, insertion_dict):\n # Get the order of indices\n # add missing information\n # starting from largest to\n # smallest, if we insert \n # missing values in this \n # order we do not need to\n # calculate the offset of \n # new indices\n tmp_list = linelist\n indices = sorted(list(insertion_dict.keys()), reverse=True)\n for i in indices:\n # Check if multiple values\n # need to be inserted at a \n # given index\n if isinstance(insertion_dict[i], list):\n for v in reversed(insertion_dict[i]):\n tmp_list.insert(i, v)\n else:\n tmp_list.insert(i, insertion_dict[i])\n return tmp_list", "def test_RNA_position_placement(self):\n \n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 + 60 60\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"+\", \"regions\" : [(0,100),\n ] \n }\n }\n \n self.assertEqual(RNA_position(tool, location_dict), (.60, .60))\n \n tool = pybedtools.create_interval_from_list(\"chr1 50 60 ENSMUSG1_1_83;ENSMUSG1_6_83 0 - 60 60\".split())\n location_dict = {\"ENSMUSG1\" : {\"strand\" : \"-\", \"regions\" : [(0,100),\n ] \n }\n }\n \n #individual_fraction, total_fraction\n self.assertEqual(RNA_position(tool, location_dict), (.4, .4))" ]
[ "0.6282855", "0.6281357", "0.6248567", "0.59403217", "0.59152997", "0.5827504", "0.5776358", "0.5738565", "0.5712663", "0.5696795", "0.5676538", "0.5648015", "0.562953", "0.561613", "0.56155276", "0.55678797", "0.5497624", "0.54904354", "0.54486006", "0.54314625", "0.5423515", "0.54223675", "0.54113", "0.5411271", "0.53719854", "0.53622663", "0.535984", "0.5324028", "0.5299927", "0.5293153", "0.5290765", "0.5287647", "0.52757347", "0.5258956", "0.5251056", "0.52459323", "0.5244385", "0.522757", "0.52196205", "0.5217357", "0.5214493", "0.5212261", "0.52077025", "0.52029496", "0.51973665", "0.5196491", "0.5196491", "0.51873755", "0.5172473", "0.5170248", "0.51598996", "0.5149622", "0.5147696", "0.5144397", "0.5143376", "0.5141507", "0.51395416", "0.51372737", "0.51355606", "0.51344", "0.5130934", "0.51307166", "0.51286614", "0.5120221", "0.511824", "0.51171154", "0.51171154", "0.51142013", "0.5109653", "0.5091393", "0.50873345", "0.508703", "0.50869185", "0.50859183", "0.50832504", "0.5080313", "0.5078234", "0.5076535", "0.5068651", "0.5063953", "0.5063618", "0.50605655", "0.5056465", "0.5047512", "0.5045468", "0.5045167", "0.5041795", "0.50400686", "0.50397015", "0.50357366", "0.50348514", "0.5034747", "0.503347", "0.5032881", "0.503284", "0.502888", "0.5027721", "0.50182986", "0.50181973", "0.50166523", "0.5011902" ]
0.0
-1
Gets the number of announcements on the server
def get(self):
    return {'status': 'success', 'count': Announcement.query.count()}, 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetCount(self):\n return self._server.get_count()", "def count(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'count')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def count_urls(self):\n return self.request(\"count:Message_Url\", [ None ])", "def _parse_release_count(self, resp: Dict[str, Any]) -> str:\n return f\"{len(resp.get('releases', []))}\"", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def count(self):\n return self.get_count()", "def getAppCount(self):\n logger.debug('Getting the number of apps discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='totalAppCount']\"))", "def count(self, index):\n if isinstance(index, list):\n index = ','.join(index)\n req = requests.get(\n urljoin(self.base_url, '{0}/_count'.format(index)),\n verify=self.verify_certs)\n return req.json()['count']", "def get_count(self):\r\n return self.count", "def get_item_count(self):\n resp = self.app.get('/items')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def getCount(self):\n return self.count", "def number_of_subscribers(subreddit):\n\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n headers = {\"User-Agent\": \"my-integration/1.2.3\"}\n\n response = get(url=url, headers=headers)\n\n if response.status_code == 200:\n # print(response.json())\n\n response_json = response.json()\n data = response_json.get('data')\n subscribers = data.get(\"subscribers\")\n\n return subscribers\n\n return 0", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def get_count(self):\n\n\t\treturn self.__count", "def count(self) -> int:\n return self._adapter.count()", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def getCount(self):\n return self.base.get(\"count\", [])", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def get_count(self):\n return self._count", "def num_articles(self):\n\t\treturn len(index)", "def get_num_displayed_responses(self):\r\n return len(self._find_within(\".discussion-response\"))", "def get_article_count(cls):\n return int(cls.db.get(\"article_count\"))", "def get_count(self):\n connection = self.__get_database_connection()\n response = self.__make_request(connection, '/%s' % (self.database_name))\n return response.get('doc_count', 0)", "def number_of_subscribers(subreddit):\n h = {'user-agent': 'GEEK1050'}\n link = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n req = requests.get(link, headers=h)\n\n req_data = req.json().get(\"data\").get(\"subscribers\")\n for element in req_data['children']:\n print(element['children']['title'])", "def getSessionCount(self):\n logger.debug('Getting the number of sessions discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='sessionsdiscovered']\"))", "def number_of_subscribers(subreddit):\n header = {\"User-agent\": \"darth\"}\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n response = (requests.get(url, headers=header))\n if response.status_code != 200:\n return 0\n return response.json().get('data').get('subscribers')", "def count(cls, client) :\n try :\n obj = nshttpprofile()\n option_ = options()\n option_.count = True\n response = obj.get_resources(client, option_)\n if 
response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e :\n raise e", "def count(self):\n return self.properties.get('count')", "def count(request):\r\n n = request.user.profile.unread_message_count()\r\n data = {\r\n 'count': n,\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')", "def DownloadedPacketCount(self):\n if self.force_auto_sync:\n self.get('DownloadedPacketCount')\n return self._DownloadedPacketCount", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def count(self):\n return self._count", "def notification_count(request):\n\n # Check if logged in\n user = request.user\n if not user.is_authenticated():\n return {}\n\n return {\n 'notification_count': len(user.profile.notifications.filter(read=False))\n }", "def GetCount(self):\n return(self.count)", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def hives_count(self) -> int:\n return self.hives.count()", "def count(self):\n return self.connection.llen(self.key)", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def service_count(self) -> str:\n return pulumi.get(self, \"service_count\")", "def count(self):\n \n return self._count", "def message_count(self) -> int:\n return len(self._leased_messages)", "def count(self):\n return self.connection._llen(self.key)", "def number_of_subscribers(subreddit):\n url = \"https://www.reddit.com/r/{}/about.json\"\n headers = {\n 'User-Agent': 'My User Agent 1.0',\n 'From': '[email protected]'\n }\n r_subs = requests.get(url.format(subreddit), headers=headers)\n if r_subs.status_code == 200:\n data = r_subs.json()['data']\n subscribers = data.get('subscribers')\n if subscribers is not None:\n return subscribers\n return 0", "def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))", "def message_count(self):\n pass", "def number_of_subscribers(subreddit):\n url = requests.get(\"https://www.reddit.com/r/{}/about.json\"\n .format(subreddit), headers={\"User-Agent\": \"kalkidan\"})\n if url.status_code == 200:\n return url.json().get(\"data\").get(\"subscribers\")\n else:\n return 0", "def count(self) -> float:\n return pulumi.get(self, \"count\")", "def number_of_subscribers(subreddit):\n\n url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)\n\n headers = {'User-Agent': 'My User Agent 1.0'}\n\n request = requests.get(url, headers=headers)\n req = request.json()\n\n if request.status_code == 404:\n return 0\n\n subs = req.get('data').get('subscribers')\n return subs", "def LnsCount(self):\n if self.force_auto_sync:\n self.get('LnsCount')\n return self._LnsCount", "def get(self):\n return {'number_of_msgs': number_of_msgs()}", "def __len__(self):\n\n num_pages = self.get_num_pages()\n\n self.logger.info(f\"Num pages: {num_pages}\")\n\n params = {\"page\": num_pages}\n url = add_query_params(self.url, params)\n\n # get the amount of data on last page\n data, _, result = self.retrieve_data(url)\n\n if result == GithubApiResult.SUCCESS: \n return (100 * (num_pages -1)) + len(data)\n\n 
self.logger.debug(\"Unable to retrieve data length from api\")\n return 0", "def number_of_subscribers(subreddit):\n url = \"https://api.reddit.com/r/{}/about\".format(subreddit)\n header = {'User-Agent': 'CustomClient/1.0'}\n request = requests.get(url, headers=header, allow_redirects=False)\n\n if request.status_code != 200:\n return 0\n jreq = request.json()\n\n if 'data' in jreq:\n return jreq.get(\"data\").get(\"subscribers\")\n else:\n return 0", "def number_of_subscribers(subreddit):\n\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n headers = {'user-agent': 'request'}\n response = requests.get(url, headers=headers, allow_redirects=False)\n if str(response) != '<Response [200]>':\n return 0\n response_json = response.json()\n subs = response_json.get('data').get('subscribers')\n return subs", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def Count(self):\r\n\t\treturn self._get_attribute('count')", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def _get_hit_count(self, database, enquire):\n return self._get_enquire_mset(\n database, enquire, 0, database.get_doccount()\n ).size()", "def number_of_subscribers(subreddit):\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n header = {\"Content-Type\": \"application/json\",\n \"User-Agent\": \"Mozilla/5.0\"}\n request = requests.get(\n url,\n headers=header,\n allow_redirects=False)\n if request.status_code >= 300:\n return 0\n return json.loads(request.content.decode(\"utf-8\"))[\"data\"][\"subscribers\"]", "def get_inventory_count(self):\n resp = self.app.get('/inventories')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def count_subscribers(self):\n return self.request(\"count:Contact\", [ None ])", "def response_count(self):\n return self.responses.count()", "def unread_count(self) -> dict[str, int]:\n return self.subreddit._reddit.get(API_PATH[\"modmail_unread_count\"])", "def all_client_number():\n\n url = CMX_URL + '/api/location/v2/clients/count'\n header = {'content-type': 'application/json', 'accept': 'application/json'}\n response = requests.get(url, headers=header, auth=CMX_AUTH, verify=False)\n response_json = response.json()\n clients_number = response_json['count']\n return clients_number", "def num_updates(self):\r\n return len(self.q(css='section.updates section article').results)", "def client_count(request):\n return request.param", "def number_of_subscribers(subreddit):\n\n import requests\n\n resInf = requests.get(\"https://www.reddit.com/r/{}/about.json\"\n .format(subreddit),\n headers={\"User-Agent\": \"My-User-Agent\"},\n allow_redirects=False)\n if resInf.status_code >= 300:\n return 0\n\n return resInf.json().get(\"data\").get(\"subscribers\")", "def count(self):\n return self._lift(\"count\")", "def number_of_subscribers(subreddit):\n url = 'https://www.reddit.com/r/{}/about.json'\n headers = {'user-agent': 'X-Modhash'}\n url_format = requests.get(url.format(subreddit), headers=headers).json()\n try:\n name = url_format['data']['subscribers']\n return name\n except:\n return 0", "def getNrEntries(self):\n return len(self.data)", "def count(self):\n return len(self.__links)", "def count(cls, client) :\n\t\ttry :\n\t\t\tobj = bfdsession()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn 
response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e", "def count(self):\n return self.size()", "def number_of_subscribers(subreddit):\n response = requests.get('https://www.reddit.com/r/{}/about.json'\n .format(subreddit),\n headers={'User-Agent': 'Camilo@holberton'},\n allow_redirects=False)\n if response.status_code == 200:\n response = response.json()\n data = response.get('data')\n subs_count = data.get('subscribers')\n if data and subs_count:\n return subs_count\n return 0", "def _listen_count(hass: HomeAssistant) -> int:\n return sum(hass.bus.async_listeners().values())", "def get_subscriber_count(self, response):\n return response.css('.yt-subscriber-count')\\\n .extract_first(default='')", "def host_count(self) -> list:\n return self.__host_count", "def get_num_episodes(self) -> int:\n return len(self.episodes)", "def downloads(self):\n return self.proto.details.appDetails.numDownloads", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def count(self):\n return len(self.wallpapers)", "def number_of_subscribers(subreddit):\n header = {'User-Agent': 'Chrome/90.0.4430.212 Safari/537.36'}\n req = requests.get('https://www.reddit.com/r/{}/about.json'\n .format(subreddit), allow_redirects=False,\n headers=header)\n if req.status_code == 200:\n subscribers = req.json().get('data').get('subscribers')\n return subscribers\n else:\n return 0", "def get_count(self, entry):\n return entry.count", "def Count(self):\n return self._get_attribute('count')", "def number_of_subscribers(subreddit):\n link = 'http://www.reddit.com/r/{}/about.json'.format(subreddit)\n red = requests.get(link, headers={'User-Agent': 'tope628'}).json()\n try:\n subs = red.get('data').get('subscribers')\n except:\n return 0\n if red is None:\n return 0\n return subs", "def number_of_subscribers(subreddit):\n URL = 'https://api.reddit.com/r/{}/about'.format(subreddit)\n header = {'User-Agent': 'Custom-User'}\n\n resp = requests.get(URL, headers=header).json()\n try:\n return resp['data']['subscribers']\n except Exception:\n return 0", "def Count(self) -> int:", "def Count(self) -> int:" ]
[ "0.71485007", "0.6948312", "0.67301506", "0.6616612", "0.66085494", "0.6601228", "0.6522103", "0.6497675", "0.64732087", "0.64507085", "0.6435281", "0.6422948", "0.6408664", "0.64079416", "0.64079416", "0.6383736", "0.6378424", "0.6348932", "0.6346476", "0.63463545", "0.6344192", "0.633256", "0.6327298", "0.6321813", "0.6302732", "0.6301328", "0.6299842", "0.626915", "0.62689275", "0.6259281", "0.62586087", "0.6254517", "0.62454045", "0.62454045", "0.62454045", "0.62454045", "0.62454045", "0.62454045", "0.62454045", "0.62454045", "0.62454045", "0.62454045", "0.62305874", "0.62272346", "0.6218682", "0.62155014", "0.62113714", "0.62111723", "0.62111723", "0.6210123", "0.6209256", "0.62078446", "0.62040055", "0.61756855", "0.61753577", "0.6172193", "0.61697304", "0.61556077", "0.61525595", "0.614718", "0.61454016", "0.6144045", "0.6135915", "0.6135058", "0.6116133", "0.6116133", "0.6115127", "0.6115127", "0.6115127", "0.6113484", "0.61116415", "0.6111351", "0.6107313", "0.61057943", "0.6103706", "0.6098096", "0.6096294", "0.6092778", "0.6091503", "0.6091081", "0.6087497", "0.6084583", "0.6084426", "0.60823256", "0.6078431", "0.6076792", "0.6066133", "0.6054524", "0.6052834", "0.6046561", "0.60420907", "0.604083", "0.6035977", "0.6031488", "0.60272723", "0.60261506", "0.6024344", "0.6022849", "0.6021428", "0.6021428" ]
0.6914508
2
Gets all announcements on the server
def get(self):
    announcements = Announcement.query.all()
    announcements = announcements_schema.dump(announcements)
    if not announcements:
        return {'status': 'success', 'announcements': announcements}, 206  # Partial Content Served
    return {'status': 'success', 'announcements': announcements}, 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def getAnnouncements(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ContentValidator.getAnnouncements()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getAnnouncements\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getAnnouncements\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/content/v1.0/announcements\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)", "def get_announcements(self, factory: 'AnnouncementFactory') -> 'AnnouncementCollection':\n collection = factory.get_announcement_collection(self.get_announcement_data_list())\n return collection", "def announce(self):\n m = rtorrent9.rpc.Multicall(self)\n self.multicall_add(m, \"d.tracker_announce\")\n\n return m.call()[-1]", "def get(self):\n return {'status': 'success', 'count': Announcement.query.count()}, 200", "def fetch_list(self):\n\t\treturn self.fetch(self.list_url % ART_SERVER_HOST)", "def list(self):\n return JSONResponse(self.request).data(items=self._get_agenda_items()).dump()", "def pull_articles(self, *args, **kwargs):\n tasks.pull_articles()\n return Response({})", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def get(self, request):\n announcement_id = request.GET.get(\"id\")\n if announcement_id:\n try:\n announcement = Announcement.objects.get(id=announcement_id)\n return self.success(AnnouncementSerializer(announcement).data)\n except Announcement.DoesNotExist:\n return self.error(\"Announcement does not exist\")\n announcement = Announcement.objects.all().order_by(\"-create_time\")\n if request.GET.get(\"visible\") == \"true\":\n announcement = announcement.filter(visible=True)\n return self.success(self.paginate_data(request, announcement, AnnouncementSerializer))", "def getall():\n elements = Advertisements().get_all_elements()\n data = jsonify(elements)\n data.statut_code = 200\n return data", "def issuelinks_all(request, format=None):\n if request.method == 'GET':\n issuelinks = IssueLink.objects.all()\n serializer = IssueLinkSerializer(issuelinks, many=True)\n return Response(serializer.data)", "def list_all_agencies():\n return JsonResponse.create(StatusCode.OK, get_all_agencies())", "def all(self) -> list[dict[str, Any]]:\n return self.client.get(self._url())", "def api_all():\n all_mail = mail_dao.get_all()\n return _create_response(all_mail)", "def get_all(self):\n return self.__fetcher.get_fetched()", "def get_all(self):\n\n servers = 
self._scoped_servers()\n servers = [{u'id': x.id, u'name': x.name} for x in servers]\n return self.format_collection(servers)", "def get_articles(self, publish_status):\n query_str = (\n \"SELECT Id,KnowledgeArticleId,Title,UrlName FROM {} \"\n \"WHERE PublishStatus='{}' AND language='en_US'\"\n ).format(\n settings.SALESFORCE_ARTICLE_TYPE,\n publish_status,\n )\n result = self.api.query(query_str)\n return result['records']", "def all_entries(cls):\n info = Diary.entries\n response = jsonify({\"data\": info})\n response.status_code = 200\n return response", "def list():\r\n articles = []\r\n if request.method == 'GET':\r\n # Get all articles\r\n response = table.scan()\r\n articles = response.get('Items')\r\n\r\n return render_template('article/articles.html', articles=articles, title='List Articles')", "async def get_all(request):\n pass", "def list(self):\n return self.connection.get(self.service)", "def get_all_podcasts():\r\n return [Podcast.podcast_json(podcast) for podcast in Podcast.query.all()]", "def list(limit, export):\n GetArticles.get_all_articles(limit, export)", "def all(self):\n return self.client.request_with_method(Methods.LIST % self.name)['items']", "def fetch(self):\n\n entries = []\n for activity in self.activities[\"entries\"]:\n entries.append(\n [\n element\n for element in [activity[\"title\"], activity[\"content\"][0][\"value\"]]\n ]\n )\n\n return entries[0 : self.max_entries]", "async def getofficialnews(self, appID: int = None) -> typing.List:\n appID = appID if appID is not None else self.appID\n\n news = await SteamNewsPost.asyncgetnewsforapp(\n appID=appID, count=15, maxlength=600\n )\n logging.info(f\"{len(news)} {self._parsername} post(s) returned by Steam's API\")\n officialnews = [\n item for item in news if self.RLnewsfilter(item, self.psyonixstaff)\n ]\n\n logging.info(f\"Found {len(officialnews)} official {self._parsername} post(s)\")\n return officialnews", "def get_articles(db:Session):\n return db.query(ArticleModel).all()", "async def get_all_investigators(request):\n client_key = general.get_request_key_header(request)\n investigator_list = await security_messaging.get_investigators(request.app.config.VAL_CONN, client_key)\n\n investigator_list_json = []\n for address, dp in investigator_list.items():\n investigator_list_json.append({\n 'public_key': dp.public_key,\n 'name': dp.name\n })\n return response.json(body={'data': investigator_list_json},\n headers=general.get_response_headers())", "def get_all_content(self):\n return self._get_all_content()", "def get():\n all_finished_anime = AnimeViewed.query.all()\n list_anime_viewed = []\n\n for anime_viewed in all_finished_anime:\n list_anime_viewed.append(anime_viewed.to_dict())\n\n return make_response(jsonify(list_anime_viewed), 200)", "async def news(self):\n url = f\"https://newsapi.org/v2/top-headlines?country=nz&apiKey={self.bot.news_api_key}\"\n async with ClientSession() as session:\n async with session.get(url) as response:\n r = await response.json()\n firstArticle = r[\"articles\"][0]\n nSource = firstArticle[\"source\"][\"name\"]\n nTitle = firstArticle[\"title\"]\n nTimestamp = firstArticle[\"publishedAt\"]\n embed = discord.Embed(\n title=f\"News Title: {nTitle}\", description=f\"News Source: {nSource}\"\n )\n embed.add_field(name=\"News Content\", value=firstArticle[\"description\"])\n embed.set_image(url=firstArticle[\"urlToImage\"])\n embed.set_footer(text=f\"News Timestamp: {nTimestamp}\")\n\n channel = self.bot.get_channel(self.bot.main_channel_id)\n await 
channel.send(embed=embed)", "def _announceContainers(self):\n distance = float(self.config.container_manager.announce_distance)\n targetpeers = self.config.owner.getByDistance(self.session, distance)\n\n containers = self.announcequeue\n self.announcequeue = []\n msg = 'announcing %d %ss to %d peers'\n self.logger.log(msg % (len(containers), self.cname, len(targetpeers)))\n\n self.session.commit() # release the session lock for following long operations\n msg = '%s url: %s, owner: %s, name: %s'\n owner, name = (self.config.owner, self.config.owner.name)\n for container in containers:\n self.logger.log(msg % (self.cname, container.url, owner, name))\n for peer in targetpeers:\n try:\n peer.transport.containerOffer(self.config.owner.name, container.url,\n self.cname)\n except Exception, e:\n self.logger.log('Exception while sending to peer %r: %r' %\n (peer, e))", "def articles(self):\n articles = Post.objects.live().descendant_of(self)\n articles = articles.order_by('-date')\n\n return articles", "def articleList():\n articles = get_news(\n 5, since=news.YESTERDAY.strftime(\"%yyyy-%mm-%dd\"), query=\"covid\"\n )\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(\n ARTICLE,\n {\n \"title\": title_list,\n \"desc\": desc_list,\n \"url\": url_list,\n \"img\": image_list,\n \"sources\": source_list,\n },\n )\n return True", "def articles(self, audience_filter=None):\n articles = ArticlePage.objects.live().descendant_of(self)\n if audience_filter is not None:\n articles = articles.filter(audience__name=audience_filter)\n articles = articles.order_by('-date')\n return articles", "def get_announcement(self, request):\n return StringMessage(\n data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or \"\")", "def cmd_notification_all(client, args):\n notifications_all = client.get_notifications(args.new)\n notifications_all['messages'] = [message.__dict__ for message in\n notifications_all['messages']]\n formatted_replies = []\n for reply in notifications_all['replies']:\n formatted_reply = reply.__dict__\n formatted_reply['content'] = format_comment_tree(formatted_reply['content'])\n formatted_replies.append(formatted_reply)\n notifications_all['replies'] = formatted_replies\n generate_output({'notifications_all': notifications_all}, args.output_file)", "def main():\n output_queue = Queue()\n\n out_list = list()\n\n logging.info('Retrieving news...')\n download = DownloadNewsWorker(output_queue)\n download.retrieve_news()\n\n while not output_queue.empty():\n item = output_queue.get()\n out_list.append(item)\n\n return out_list", "def get_publishers(self):", "def get_news(company_name: str) -> list[dict]:\n news_params = {\n \"q\": company_name,\n \"apiKey\": config.NEWS_API_KEY\n }\n response = requests.get(\"https://newsapi.org/v2/everything\", params=news_params)\n response.raise_for_status()\n news_data = response.json()\n return news_data[\"articles\"][:3]", "def fetchFullText(self, arnumbers):\n if type(arnumbers) != list:\n arnumbers = [arnumbers]\n browser = webdriver.Chrome()\n articles = []\n for num in arnumbers:\n browser.get(\"http://ieeexplore.ieee.org/xpls/icp.jsp?arnumber=\" + str(num))\n # TODO: Ensure the page load is ready\n page = pq(browser.page_source)\n articles.append(page('div#article'))\n browser.quit()\n return articles", "def get_data(self):\n 
has_next_page = True\n page = 1\n while has_next_page:\n print(f'Getting page {page}')\n response = self.get_articles(\n page=page,\n size=200,\n order_by='extracted_at',\n order_type='asc'\n )\n pagination = response.get('pagination')\n has_next_page = pagination.get('has_next')\n self.save_articles(response.get('articles'))\n page += 1\n time.sleep(2.5)", "def list(self, request):\n queryset = Article.objects.all()\n serializer_context = {'request': request}\n page = self.paginate_queryset(queryset)\n serializer = self.serializer_class(\n page,\n context=serializer_context,\n many=True\n )\n output = self.get_paginated_response(serializer.data)\n return output", "def parse_announcement_data(self) -> 'Scraper':\n logger.info('Parsing extracted html partial')\n for tag in self.html_partial: # there are 63 tags\n if tag.name == 'h4':\n announcement_data = self.get_data_from_tag(tag)\n self.announcement_data_list.append(announcement_data)\n logger.info('Compiled announcement data list from html web page partial')\n return self", "def GET(self, *args):\n all_news= self.get_all_news()\n all_news.sort( key=lambda n : n['date'], reverse=True)\n if len(args):\n n_last=int(args[0])\n all_news = all_news[:n_last]\n\n return json.dumps(all_news)", "def get_agencies():\n\n xml_query_string = 'http://webservices.nextbus.com/service/publicXMLFeed?command=agencyList'\n xml_request = requests.get(xml_query_string)\n agencies = {}\n root = ET.fromstring(xml_request.text)\n\n for child in root:\n agencies[child.attrib['tag']] = child.attrib['title']\n return agencies", "def get_articles():\n _, articles = base_query(db_session)\n return jsonify([p.serialize for p in articles])", "def list(self, request):\n encounters = Encounter.objects.all()\n serializer = EncounterListSerializer(encounters, many=True)\n return Response(serializer.data)", "def fetch_news(n):\n\n # This is the list we will use the pass back the news information.\n data = []\n\n # Get news stories from the MEN RSS feed.\n response = feedparser.parse('https://www.manchestereveningnews.co.uk/?service=rss')\n\n # Loop through the news items, and the pull out the data we need.\n for news in response.entries[:n]:\n data.append({\n 'headline': news.title,\n 'content': news.description,\n })\n\n return data", "def _get_dates():\n remote = os.path.join(BASE_URL, RSS_FEED)\n local = os.path.join(TMP, RSS_FEED)\n u..(remote, local)\n\n with open(local) as f:\n return PUB_DATE.findall(f.read())", "def articles(self):\n return self.get_queryset().filter(content_type__model='article').order_by('-articles__published_at')", "def pull_articles(ls):\n # pull articles\n doi = self.search_articles(file)\n els_key = self.els_key\n\n for i in doi:\n els_url = 'https://api.elsevier.com/content/article/doi/' + doi + '?APIKey=' + els_key\n r = requests.get(els_url)\n for num in range(len(ls)):\n with open(folder + f'/write_test_els_paper{num}.xml', 'wb') as file:\n file.write(r.content)", "def get_all_songs() -> Generator[dict, None, None]:\n\n logging.debug(\"Fetching from server\")\n\n api = _get_api()\n\n for song_page in api.get_all_songs(incremental=True):\n for song in song_page:\n yield song", "def get(self):\r\n return get_all()", "def get_remote_news_items(self):\n items = []\n params = {\n \"base_url\": self.osha_json_url,\n \"lang\": api.portal.get_tool(\"portal_languages\").getPreferredLanguage(),\n \"query_tags\": self.remote_news_query_tags,\n }\n qurl = \"{base_url}/{lang}/services/hw/news/{query_tags}\".format(**params)\n result = urlopen(qurl)\n 
if result.code == 200:\n json = load(result)\n for node in json.get(\"nodes\"):\n item = node.get(\"node\")\n pd = item.get('publication_date', '')\n items.append({\n 'remote_item': True,\n 'Title': item['title'],\n 'Date': (\n pd and DateTime(pd, datefmt=\"international\").strftime(\n \"%Y/%m/%d %H:%M\") or \"\"),\n 'getURL': item.get('path'),\n 'path': item.get('path'),\n 'Description': item.get('summary', '') or item.get('body', ''),\n 'text': item.get('summary', '') and item.get('body', '') or '',\n 'remote_image': item.get('image', ''),\n 'node_id': item.get('nid'),\n })\n return items", "def fetch_all(self):\n emails = []\n res, messages = self._mailconn.search(None, 'ALL')\n if res == 'OK':\n for msg in messages[0].split():\n try:\n res, data = self._mailconn.fetch(msg.decode('utf-8'), '(RFC822)')\n except Exception as error:\n self.close_mail_connection()\n print('No email to read: '+error)\n exit()\n \n msg = email.message_from_string((data[0][1]).decode('utf-8'))\n if not isinstance(msg, str):\n if self.is_sender_in_whitelist(msg['From']):\n emails.append(msg)\n\n return emails", "def articles(self):\n return articles.Articles(self)", "def getAnnouncement(self, request):\n announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or \"\"\n return StringMessage(data=announcement)", "async def list(self, ctx):\n\n cursor = await db.execute(\"Select MessageID, TimeEnding, Members, ChannelID from Giveaway \"\n \"where GuildID = ? and Ended = ?\", (ctx.guild.id, False))\n result = await cursor.fetchall()\n\n for i, tup in enumerate(result):\n try:\n msg = await ctx.guild.get_channel(tup[3]).fetch_message(tup[0])\n tup = list(tup)\n tup[0] = msg\n result[i] = tup\n except:\n result.remove(tup)\n await db.execute(\"Delete from Giveaway where MessageID = ?\", (tup[0],))\n await db.commit()\n\n if not result:\n return await send_embed(ctx, \"No active giveaways on this server.\", negative=True)\n\n embeds = []\n fields = []\n\n for i, tup in enumerate(result, start=1):\n fields.append((str(tup[0].id),\n f\"Prize: {tup[0].embeds[0].author.name}\\n\"\n f\"{tup[2]} possible winners\\n\"\n f\"Ends at {datetime.utcfromtimestamp(tup[1]).strftime('%Y-%m-%d %H:%M:%S')}\"))\n\n if i % 10 == 0 or i == len(result):\n embed = discord.Embed(\n colour=discord.Colour.blue(),\n title=\"Active Giveaways\"\n )\n\n for field in fields:\n embed.add_field(name=field[0], value=field[1], inline=False)\n\n embeds.append(embed)\n fields = []\n\n await self.bot.paginate(ctx, embeds)", "def list_articles():\n\n return template(\"index\", articles=get_articles())", "def cache_announcement():\n confs = Conference.query(ndb.AND(\n Conference.seatsAvailable <= 5, Conference.seatsAvailable > 0\n )).fetch(projection=[Conference.name])\n\n if confs:\n # If there are almost sold out conferences,\n # format announcement and set it in memcache\n announcement = ANNOUNCEMENT_TPL % (\n ', '.join(conf.name for conf in confs))\n memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)\n else:\n # If there are no sold out conferences,\n # delete the memcache announcements entry\n announcement = \"\"\n memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)\n\n return announcement", "def get_advisories(self):\n\n advisories = []\n\n for i in range(len(self.__data['advisories'])):\n data = requests.get(self.__data['advisories'][i]['links']['self']['href'], headers=getHeaders()).json()\n this = {}\n this['id'] = data['id']\n this['name'] = data['name']\n advisories.append(this)\n\n return advisories", "def get(self):\n return GlobalNews.retrieve()", 
"def articles(self):\r\n return articles.Articles(self)", "def all_hosts(self):\n ...", "def get(self, request):\n activities = (\n activitystreams.streams[\"local\"]\n .get_activity_stream(request.user)\n .filter(\n Q(comment__isnull=False)\n | Q(review__isnull=False)\n | Q(quotation__isnull=False)\n | Q(mention_books__isnull=False)\n )\n )\n\n large_activities = Paginator(\n activities.filter(mention_books__isnull=True)\n .exclude(content=None, quotation__quote=None)\n .exclude(content=\"\"),\n 6,\n )\n small_activities = Paginator(\n activities.filter(\n Q(mention_books__isnull=False) | Q(content=None) | Q(content=\"\")\n ),\n 4,\n )\n\n page = request.GET.get(\"page\")\n data = {\n \"large_activities\": large_activities.get_page(page),\n \"small_activities\": small_activities.get_page(page),\n }\n return TemplateResponse(request, \"discover/discover.html\", data)", "def news(self) -> List[News]:\n return self._news", "def fetch_feed_list(self, **args):\n return self.fetch(\"/feedlist\", **args)", "async def list(self, ctx):\n\n query = {\"resolved\": False, \"user_id\": ctx.author.id}\n count = await self.bot.mongo.db.reminder.count_documents(query)\n\n async def get_reminders():\n async for x in self.bot.mongo.db.reminder.find(query).sort(\"expires_at\", 1):\n yield Reminder.build_from_mongo(self.bot, x)\n\n def format_item(i, x):\n name = f\"{x._id}. {discord.utils.format_dt(x.expires_at, 'R')}\"\n return {\"name\": name, \"value\": textwrap.shorten(x.event, 512), \"inline\": False}\n\n pages = ViewMenuPages(\n source=AsyncEmbedFieldsPageSource(\n get_reminders(),\n title=\"Reminders\",\n format_item=format_item,\n count=count,\n )\n )\n\n try:\n await pages.start(ctx)\n except IndexError:\n await ctx.send(\"No reminders found.\")", "def fetch(api_key, query='', page=1, from_date=False, to_date=False):\n fetch_articles(api_key, query, page, from_date, to_date)", "def get_amp_events(query_params=\"\",\n host=env.AMP.get(\"host\"),\n client_id=env.AMP_CLIENT_ID,\n api_key=env.AMP_API_KEY,\n):\n print(\"\\n==> Getting events from AMP\")\n url = f\"https://{client_id}:{api_key}@{host}/v1/events\"\n response = requests.get(url, params=query_params, verify=False)\n if debug:#PATRICK\n print(cyan(env.get_line(),bold=True))\n print(cyan(response.json())) \n # Consider any status other than 2xx an error\n response.raise_for_status() \n events_list = response.json()[\"data\"]\n return events_list", "def get_all_incidents(self):\n sql = f\"SELECT * FROM incidences\"\n curr = Db().cur\n curr.execute(sql)\n output = curr.fetchall()\n return output", "async def get_news(q: str = None):\n\treturn aggregate_news(q)", "def get_feed_entries_task():\n get_feed_entries()\n logger.info(\"Entries for Feed\")", "def get_all_authors():\n try:\n authors = g.projects.distinct('authors')\n all_authors = sorted(authors, key=lambda k: str(k).lower()) if authors else []\n return jsonify(all_authors)\n except Exception as err:\n raise ApiException(str(err), 500)", "def list_servables(self):\n if not self._server_started() is True:\n print('Server not started at host %s, port %d' % (self.host, self.port))\n sys.exit(0)\n else:\n headers = {'Content-Type': 'application/json'}\n url = 'http://'+self.host+':'+str(self.port)+'/servables'\n res = requests.get(url=url, headers=headers)\n res.content.decode(\"utf-8\")\n res_body = res.json()\n\n if res.status_code != requests.codes.ok:\n print(\"Request error! 
Status code: \", res.status_code)\n sys.exit(0)\n elif res_body['status'] != 0:\n print(res_body['err_msg'])\n sys.exit(0)\n else:\n return res_body['servables']", "def author_articles(self):\n return ArticlePage.objects.live().filter(author=self).order_by('-date')", "def get_ing_from_all_ns():\n ing_list = get_all_ingress()\n ing_list_json = get_json_list(ing_list)\n return Response(response=ing_list_json, status=200, mimetype='application/json')", "def get_all(self):\n url = self._dbname + '/_all'\n return self._connection.get(url).json()", "async def fetch_and_parse(self, timeout=10):\n\n headers = {}\n if self.username and self.password:\n creds = f'{self.username}:{self.password}'.encode('utf-8')\n headers['Authorization'] = f'Basic {base64.urlsafe_b64encode(creds)}'\n\n async with aiohttp.ClientSession(headers=headers) as session:\n rsp = await self._fetch(session, timeout)\n\n feed_entries = []\n if rsp:\n data = feedparser.parse(rsp)\n feed_entries = data.entries\n if data.bozo:\n self.log.error(f\"No valid RSS data from feed {self.url}: {data.bozo_exception}\")\n return feed_entries", "def fetch_all(): \n client, index_name = connection_es()\n res = client.search(index = index_name+\"*\")\n return res", "def get(self):\n return get_all_posts()", "def domain_list_all(self):\n page = 1\n on_page = 100\n ret = []\n while True:\n r = self.domain_list(page=page, on_page=on_page)\n ret += r['domains']\n if len(ret) >= r['total']:\n break\n page += 1\n return ret", "def get_posts(url):\r\n feed = feedparser.parse(url)\r\n return feed.entries", "def nasaCalendar(self):\n return requests.get(self.nasaURL).text", "async def get_article_links(self):\n urls = []\n for page in range(self._start, self._end+1):\n urls.append(self._searchURL + str(page))\n result_list = await self._connect(urls)\n\n self._urls = []\n hares_links = []\n for result in result_list:\n soup = result[1]\n search_links = soup.find_all(class_='search-title')\n article_links = re.findall(r'url=(.*?)\\\"', str(search_links))\n for l in article_links:\n l = unquote(l)\n if 'hare48.pixnet.net' in l:\n hares_links.append(l)\n else:\n self._urls.append(l)\n self._urls.extend(await self._transform_hares(hares_links))", "def fetch_article_list(self, url):\n print(url)\n\n r = requests.get(url, headers=headers, timeout=10)\n html = r.text\n time.sleep(1)\n\n if r.status_code is not 200:\n print('Server dinied. Status:[%s].'%r.status_code)\n return\n\n # local data test\n #with open('./dataset/sina-blog-list.html', 'r') as f:\n # html = f.read()\n\n #print(html)\n\n soup = BeautifulSoup(html, 'html5lib')\n tags = soup.select('div[class=articleList] > div[class~=articleCell] > p > span[class=atc_title] > a')\n\n for t in tags:\n print('Appened: '+t['href'])\n self.article_urls.append(t['href'])\n\n # Get the url of next blog-list page\n nxpage = soup.select('div[class=SG_page] > ul > li[class=SG_pgnext] > a')\n if len(nxpage) > 0:\n #print ('Next list page: '+nxpage[0]['href'])\n self.fetch_article_list(nxpage[0]['href'])\n else:\n print('Have reached to the botom of blog lists.')\n\n\n # backup lists to local file\n with open(self.path+'/blog-lists.txt', 'w') as f:\n f.write('\\n'.join(self.article_urls))", "def list_envelopes():\n\n #\n # Step 1. Prepare the options object\n #\n from_date = datetime.min.isoformat()\n #\n # Step 2. 
Get and display the results\n #\n api_client = ApiClient()\n api_client.host = base_path\n api_client.set_default_header(\"Authorization\", \"Bearer \" + access_token)\n\n envelope_api = EnvelopesApi(api_client)\n results = envelope_api.list_status_changes(account_id, from_date=from_date)\n return results", "def get_all_data():\n return jsonify(service.get_all_data())", "def get_all_servers(self) -> List[Server]:\n pass", "def collect():\n\n stats = {}\n for feed in Feed.objects:\n try:\n logger.info('Fetching from {0}...'.format(feed.ext_url))\n new_articles = fetch(feed)\n stats[feed.ext_url] = len(new_articles)\n\n except SAXException as e:\n if feed.errors is None:\n feed.errors = 0\n\n # Error with the feed, make a note.\n logger.info('Error fetching from {0}.'.format(feed.ext_url))\n feed.errors += 1\n feed.save()\n pretty_stats = json.dumps(stats, sort_keys=True, indent=4)\n notify('Corpora collection complete.', 'Total article count: {0}\\n\\nResults for this pass:\\n{1}'.format(len(Article.objects), pretty_stats))", "def get(self) -> List[Conversation]:\n return get_all_conversations(), 200", "def _get_live_entries(self):\n from article.models import Entry\n return self.entry_set.filter(status__exact=Entry.LIVE_STATUS)", "def list(self):\n return self.request(\"GET\")", "def contentAll(groupId, channelId):\n group = db.Group.find_one_or_404({\"_id\": ObjectId(groupId)})\n assignments = [\n db.Assignment.find({\"_id\": ObjectId(assignmentId)})\n for assignmentId in group[\"assignmentIds\"]\n ]\n data = [\n {\n \"assignmentId\": assignment[\"_id\"],\n \"name\": assignment[\"name\"],\n \"dis\": assignment[\"dis\"],\n \"maxGrade\": assignment[\"maxGrade\"],\n \"startDate\": assignment[\"startDate\"],\n \"dueDate\": assignment[\"dueDate\"],\n \"type\": assignment[\"type\"],\n \"url\": assignment[\"url\"],\n }\n for assignment in assignments\n ]\n return dumps(data), 200", "async def _view_all_notes(self, ctx: Context):\n\n author = ctx.author\n\n note_infos = []\n\n embed_links = ctx.channel.permissions_for(ctx.guild.me).embed_links\n\n author_str = f\"{author.name}'\"\n\n if author.name[-1].lower() != \"s\":\n author_str += \"s\"\n\n async with self.config.member(author).notes() as notes:\n total = len(notes)\n for page_num, note in enumerate(notes, start=1):\n msg_info = \"\"\n if note[\"author\"]:\n msg_info += _(\"**Author:** {}\").format(note[\"author\"])\n if note[\"channel\"]:\n msg_info += _(\"\\n**Channel:** {}\").format(note[\"channel\"])\n if note[\"jump_url\"]:\n if embed_links:\n msg_info += _(\n \"\\n[Click here to jump to message]({})\"\n ).format(note[\"jump_url\"])\n else:\n msg_info += _(\n \"\\n**Jump To Message:** {}\"\n ).format(note[\"jump_url\"])\n\n note_info = _(\n \"{}\\n\\n**Note:**\\n```{}```\\n**Reason:**\\n```{}```\"\n ).format(\n msg_info,\n note[\"note\"],\n note[\"reason\"]\n ).strip()\n\n if embed_links:\n page = discord.Embed(\n colour=0xff0000,\n description=note_info,\n title=_(\"{} TvM Notes\").format(author_str),\n timestamp=ctx.message.created_at\n )\n\n page.set_footer(\n text=_(\"Page {page_num}/{leng}\").format(\n page_num=page_num, leng=total\n )\n )\n else:\n page = _(\n \"**{author} TvM Notes**\"\n \"\\n\\n{note}\"\n \"\\n{footer}\"\n ).format(\n author=author_str,\n note=note_info,\n footer=_(\"*Page {page_num}/{leng}*\").format(\n page_num=page_num, leng=total\n )\n )\n\n note_infos.append(page)\n\n await menu(ctx, note_infos, DEFAULT_CONTROLS)", "def get_all(self, endpoint, params=None):\n merged_json = []\n\n # Continue fetching pages 
until we reach an empty one. GitHub doesn't return a count of the total number of\n # pages, so there's no alternative.\n page = 1\n get_next_page = True\n while get_next_page:\n json = self.get(endpoint, page, params)\n merged_json += json\n if not len(json) > 0:\n get_next_page = False\n page += 1\n\n return merged_json", "def announce(self, request_announce):\n return self.client.call('POST',\n self.name + 'announce',\n payload=request_announce)", "def get_news(url, n_pages=1):\n news = []\n while n_pages:\n print(\"Collecting data from page: {}\".format(url))\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n news_list = extract_news(soup)\n print(news_list)\n next_page = extract_next_page(soup)\n url = 'https://news.ycombinator.com/' + next_page\n news.extend(news_list)\n n_pages -= 1\n return news", "def all(cls):\n api = BuslineAPI()\n try:\n objects = api.all()\n except ApiException:\n objects = cls.objects.all()\n return objects" ]
[ "0.67929053", "0.6228621", "0.61567163", "0.6098976", "0.5930889", "0.59122926", "0.58568746", "0.5851314", "0.58407116", "0.5822206", "0.5804673", "0.5770579", "0.57031876", "0.56998605", "0.5559147", "0.5553164", "0.5529305", "0.55254424", "0.55103004", "0.5502346", "0.5491109", "0.54636586", "0.54524744", "0.5426534", "0.5419024", "0.5413854", "0.5413613", "0.53921217", "0.5376735", "0.5372632", "0.5365086", "0.5358035", "0.5355823", "0.53466743", "0.53457797", "0.5342036", "0.53215134", "0.5308503", "0.53049874", "0.53033197", "0.52899224", "0.52886283", "0.5288171", "0.52653676", "0.5262282", "0.5257949", "0.5253736", "0.5252559", "0.5243107", "0.5240721", "0.5231705", "0.52264273", "0.52202976", "0.5218399", "0.5216552", "0.5214751", "0.5212639", "0.52106243", "0.5209017", "0.52036065", "0.520022", "0.51923585", "0.5191369", "0.51913154", "0.51896745", "0.5175046", "0.5174689", "0.51740247", "0.5167345", "0.51587343", "0.5152173", "0.5144662", "0.51442784", "0.5138055", "0.5137677", "0.5131056", "0.5117573", "0.51158994", "0.5114704", "0.5112412", "0.5111812", "0.51066905", "0.51064587", "0.51045614", "0.50964665", "0.5095457", "0.50944376", "0.5093201", "0.50916016", "0.5077286", "0.50766504", "0.5074742", "0.50616074", "0.50603443", "0.5060274", "0.5058891", "0.5054259", "0.50527066", "0.50524807", "0.50499046" ]
0.6474763
1
delete an announcement by ID
def delete(self, announcementID): announcement = Announcement.query.filter_by(announcementID=announcementID) if not announcement.first(): return {'status': 'fail', 'message': 'No announcement with ID ' + str(announcementID) + ' exists'}, 404 announcement.delete() db.session.commit() return {'status': 'sucess', 'message': 'Announcement Deleted'}, 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, _id):", "def delete(self, id):\n raise NotImplementedError", "def delete(self,id):\r\n return delete(id=id)", "def delete(self, id):\n\n ns.abort(404, 'This API is not supported yet.')", "def delete(self, id):\n return delete_msg(id)", "def delete(self, id):\n delete_entry(id)\n return None, 204", "def delete(self, id):\n return self.app.post('/delete/' + str(id), data=dict(id=id),\n follow_redirects=True)", "def delete_incident(self, id):\n sql = f\"DELETE FROM incidences WHERE incidences.id ={id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def delete_by_id(self, subject_id: str) -> any:\n pass", "def delete(self, cls, id):\n pass", "def delete_object(self, id):\n self.request(id, post_args={\"method\": \"delete\"})", "def delete(self, id=None):\n\n if not id:\n return {'msg':'Missing achievement id.'}, 400\n\n try:\n ach = AcademicAchievement.query.get(id)\n\n if not ach:\n return {'msg':'Academic achievement not found'}, 404\n\n ach.remove()\n return {'msg':'Academic achievement deleted.'}, 200\n\n except Exception as e:\n print(e)\n return {'msg':'Could not delete academic achievement.'}, 500", "def delete(id):\n elementFromDB = Advertisements().get_one_element(id)\n if elementFromDB is None:\n return abort(500, \"L'élément n'existe pas.\")\n else:\n try:\n elements = Advertisements().delete_element(id)\n result = jsonify(elements)\n result.statut_code = 200\n return result\n except Exception as identifier:\n return abort(500, identifier)", "def delete(cls, id):\n raise Exception('Not Implemented Yet')", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete(self, id):\n self.not_supported()", "def delete_entry(self, id, **args):\n args.update(id=id)\n return self.fetch(\"/entry/delete\", post_args=args)", "def delete():", "def delete_object(self, id):\n return self.request(\n \"{0}/{1}\".format(self.version, id), method=\"DELETE\"\n )", "def delete(self, id):\n r = validate_get(id)\n tareaID = r.tarea.id\n r.destroySelf()\n flash(_(u'El %s fue eliminado permanentemente.') % name)\n raise redirect('../list/%d' % tareaID)", "def delete(id_patient: str):\n database = get_connection()\n col = database.patients\n query = {\"patient_data.id\": id_patient}\n col.delete_one(query)", "def delete(self, id):\n try:\n deleted_id = self.borrow_repo.remove_one_by_id(id)\n if deleted_id:\n self.write({'id': deleted_id})\n else:\n self.write_not_found(\n 'A request with id {} was not found'.format(id)\n )\n except BumerangError as e:\n self.set_status(500)\n self.finish({'error': str(e)})", "def delete(self, id):\n try:\n self.gridfs.delete(ObjectId(id))\n except Exception, e:\n print e\n raise e", "def delete(id):\n get_autor(id)\n try:\n db.insert_bd('DELETE FROM autor WHERE id = %d' % id)\n return redirect(url_for('autor.index'))\n except:\n return render_template('404.html')", "def deleteOne(id):\n print(inspect.stack()[1][3])\n query = Followup.delete().where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to find the given client'}\n return {'status': \"Delete Succesful\"}", "def delete_comment(self, id, **args): \n args.update(id=id)\n return self.fetch(\"/comment/delete\", post_args=args)", "def delete(article_id):\r\n response = table.get_item(\r\n Key={'article_id': article_id}\r\n )\r\n data = 
response.get('Item')\r\n if data is None:\r\n flash('Unable to get Article')\r\n return redirect(url_for('article.list')) \r\n\r\n # Delete article for a particular id\r\n response = table.delete_item(\r\n Key={'article_id':article_id}\r\n )\r\n\r\n if response:\r\n flash('Article is successfully deleted')\r\n\r\n return redirect(url_for('article.list'))", "def del_accomment(request, pk):\n\n comment = get_object_or_404(ActorComment, pk=pk)\n comment.delete()\n actor = comment.actor\n url = '../../' + str(comment.actor.pk)\n return redirect(url)", "def delete(self, id):\r\n try:\r\n self.valid_args()\r\n inst = db.session.query(self.__class__).get(id)\r\n if inst is None:\r\n raise NotFound\r\n getattr(require, self.__class__.__name__.lower()).delete(inst)\r\n db.session.delete(inst)\r\n db.session.commit()\r\n self._refresh_cache(inst)\r\n return '', 204\r\n except Exception as e:\r\n return error.format_exception(\r\n e,\r\n target=self.__class__.__name__.lower(),\r\n action='DELETE')", "def delete(self, request, id, format=None):\n posts = self.get_object(id)\n posts.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def delete(id):\n db = core.connect()\n # FIXME: What happens to orphaned comments? - David 7/6/09\n del db[id]", "def delete(self, id):\n url = self._format_url(self.url + \"/{id}\", {\"id\": id})\n\n return self._make_request('delete', url)", "def delete(thing, id_):\n pass", "def delete(self, id):\n if delete_task(get_db(), id):\n return \"\", 204\n api.abort(404, f\"Invalid task with id: {id}\")", "def delete(id_=None):\n\n logger.debug('Catch DELETE request by URL /api/departments/%i.', id_)\n ds.delete(id_)\n return '', 204", "def delete(anime_viewed_id):\n if isinstance(anime_viewed_id, int):\n anime_viewed = AnimeViewed.query.filter_by(id=anime_viewed_id).first()\n\n if not anime_viewed:\n abort(Response(f'The anime viewed with the ID {anime_viewed_id} was not found.', 404))\n\n anime_viewed.delete()\n\n return make_response(jsonify({}), 200)\n else:\n abort(Response(f'The specified anime viewed ID is invalid. 
Is not a number.', 400))", "def delete_item(id):\n return '', 201", "def delete_article(id):\n # get and check article author against current_user\n the_article = models.Article.get_by_id(id)\n if current_user.id == the_article.author.id or current_user.role == 'admin':\n # delete article\n query = models.Article.delete().where(models.Article.id==id)\n query.execute()\n return jsonify(\n data='Article successfully deleted',\n status={\"code\": 200, \"message\": \"Article deleted successfully\"}\n ), 200\n else:\n return jsonify(data={}, status={\"code\": 403, \"message\": \"Not authorized\"})", "def delete(self, id):\n empleadoeliminar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoeliminar:\n db.session.delete(empleadoeliminar)\n db.session.commit()\n return 201\n api.abort(404)", "def api_delete_by_id(id):\n mail_dao.delete_by_id(int(id))\n return api_all()", "def delete_by_id(cls, id):\n\t\tauthor = Author.query.get(id)\n\t\tauthor.saved = False\n\t\tdb.session.commit()", "def delete_podcast(_id):\r\n Podcast.query.filter_by(id=_id).delete()\r\n # filter podcast by id and delete\r\n db.session.commit() # commiting the new change to our database\r", "def delete(self, id):\n return self._post(\n request=ApiActions.DELETE.value,\n uri=ApiUri.ACTIONS.value,\n params={'id': id}\n )", "def delete(self, id):\n return self._post(\n request=ApiActions.DELETE.value,\n uri=ApiUri.ACTIONS.value,\n params={'id': id}\n )", "def delete(id):\n result = delete_post(id)\n flash(result)\n return redirect(url_for(\"show\"))", "def test_delete_alert_by_id(self):\n pass", "async def delete(self, ctx, ID: int):\n\n cursor = await db.execute(\"Select count(*) from Todo where MemberID = ?\", (ctx.author.id,))\n result = await cursor.fetchone()\n\n if not result:\n return await send_embed(ctx, \"You do not have any to-do's to delete.\", negative=True)\n\n if ID < 1 or ID > result[0]:\n return await send_embed(ctx, \"Invalid ID to delete.\", negative=True)\n\n await db.execute(\"Delete from Todo where MemberID = ? 
and ID = ?\", (ctx.author.id, ID))\n await db.commit()\n\n await send_embed(ctx, \"Note deleted.\")", "def delete(self, _id):\n self._db[_id].delete()", "def delete(self, id):\t\t\n\t\ttry:\n\t\t\tpost_service.delete(id)\n\t\texcept AssertionError as e:\n\t\t\tpost_space.abort(400, e.args[0], status = \"Could not delete post\", statusCode = \"400\")\n\t\texcept Exception as e:\n\t\t\tpost_space.abort(500, e.args[0], status = \"Could not delete post\", statusCode = \"500\")", "def _http_delete(self, id: int):\n self._http_request(\"pl_delete&id=%i\" % id)\n self.get_playlist()", "def delete(self, id: int):\n self._select_interface(self._rc_delete, self._http_delete, id)", "def delete(self, id: str) -> Any:\n\n return self.client.delete(self._url(id))", "def delete_song_petition(request, id):\n # instance gets the id from the Song Petition selected\n instance = get_object_or_404(SongPetition, id=id)\n # delete method deletes the instance from the database\n instance.delete()\n # Feedbacj message telling that the petition was deleted \n messages.success(request, \"Petition succesfully deleted\")\n return redirect(\"petition:list\")", "def delete_answer(request, answer_id):\n raise NotImplementedError", "def delete(self, request, id, format=None):\n comments = self.get_object(id)\n comments.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def delete_presentation(self, talk_id):\r\n QtSql.QSqlQuery('''DELETE FROM presentations WHERE Id=\"%s\"''' % talk_id)\r\n log.info(\"Talk %s deleted.\" % talk_id)", "def delete_record(self, id_: str) -> None:\n instance = self._get(id_)\n self._delete_from_db(instance)", "def delete_item_by_id(self, id):\n response = self.table_connector.delete_item(Key={self.primary_key: id})\n print(response)", "def del_awcomment(request, pk):\n comment = get_object_or_404(AwardComment, pk=pk)\n comment.delete()\n award = comment.award\n url = '../../' + str(comment.award.pk)\n return redirect(url)", "def delete_appointment(request, appointment_id):\n appointment_id = appointment_id\n\n if not appointment_id:\n return HttpResponse(\"Please provide an appointment Id\"), 406\n \n try:\n appointment = Appointment.objects.get(id=int(appointment_id))\n except:\n return HttpResponse(\"No appointment with that ID exist\"), 404\n \n appointment.delete()\n return HttpResponse(\"Successfully Deleted\")", "def delete(self, id):\n\n # Fetch Post\n post = Post.query.filter_by(id=id).first()\n if post is None:\n return { 'message': 'Post does not exist'}, 404\n\n # Check User permission\n current_user = flask_praetorian.current_user()\n if post.user_id != current_user.id:\n return { 'message': 'Unauthorized to delete Post'}, 401\n \n try:\n db.session.delete(post)\n db.session.commit()\n except Exception:\n return { 'message': 'Unable to delete Post'}, 500\n \n return { 'message': 'Post deleted successfully' }", "def delete(\n id: int = typer.Argument(\n ...,\n help=\"ID of the log entry\"\n )\n):\n manager = LogBookManager()\n deleted, message = manager.delete(id)\n\n if deleted:\n typer.echo(\n typer.style(message, fg=typer.colors.GREEN, bold=True)\n )\n else:\n typer.echo(\n typer.style(message, fg=typer.colors.RED, bold=True)\n )", "def delete_amenity_with_id(amenity_id):\n\n data = storage.get(Amenity, amenity_id)\n if data is None:\n abort(404)\n storage.delete(data)\n storage.save()\n return jsonify({}), 200", "def delete(id):\r\n get_post(id)\r\n db = get_db()\r\n db.cursor().execute('DELETE FROM novel.post WHERE id = %s', id)\r\n db.commit()\r\n return 
redirect(url_for('novel.index'))", "def delete_item(id: str):\n db.delete(id, kind=endpoint_model)\n return {\"result\": \"ok\"}", "def delete(self, expense_id):\n url = base_url + expense_id\n resp = zoho_http_client.delete(url, self.details, self.headers)\n return parser.get_message(resp)", "def delete_note(entry_id):\n\n entry = Entry.query.get(entry_id)\n\n entry.description = None\n\n db.session.commit()\n\n return redirect(f\"/update-entry/{entry.entry_id}\")", "def delete(self, request, pk, pk_reminder, format=None):\n reminder = self.get_reminder(pk=pk_reminder)\n reminder.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def delete(self, request, pk, pk_reminder, format=None):\n reminder = self.get_reminder(pk=pk_reminder)\n reminder.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def delete(self, request, pk, pk_reminder, format=None):\n reminder = self.get_reminder(pk=pk_reminder)\n reminder.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def cli(ctx,id):\n if not id:\n id = click.prompt(\n click.style('You didn\"t provide the id of the note to delete. Please provide one',fg=\"white\",bg=\"red\"), type=int)\n db = ctx.database()\n cursor = db.cursor()\n query = \"SELECT * from `notes` where id = {}\".format(id)\n cursor.execute(query)\n notes = cursor.fetchall()\n\n if notes:\n if click.confirm(click.style('Are you sure?',fg=\"magenta\")):\n query = \"DELETE from `notes` where id = {}\".format(id)\n cursor.execute(query)\n db.commit()\n click.secho(\"Note with id {} has been deleted\".format(id),fg=\"white\",bg=\"green\")\n else:\n click.secho(\"Nothing deleted. Delete action aborted.\",fg=\"white\",bg=\"green\")\n return\n click.secho(\"No note found with id {}. Delete action aborted.\".format(id),fg=\"white\",bg=\"red\")", "def __Delete(self, url, id = None):\n\n conn = self.__GetConnection()\n if (id != None):\n url += \"/\" + str(id)\n conn.request(\"DELETE\", url, \"\", self.__MakeHeaders(True))\n response = conn.getresponse()\n self.__CheckResponse(response)", "def delete_entry(id):\n if not session.get('logged_in'):\n abort(401)\n\n db = get_db()\n cur = db.execute('select id, title from entries where id = ?',\n [id.strip()])\n entries = cur.fetchall()\n title = entries[0]['title']\n db = get_db()\n db.execute('delete from entries where id = ?', [id.strip()])\n db.commit()\n flash('Recipe, ' + escape(title) + ', has been deleted', 'success')\n return redirect(url_for('show_entries'))", "def delete(self, id):\n self.cursor.execute(\"DELETE FROM Book WHERE Id = ?\", (id,))\n self.connection.commit()", "def delete(self, request , pk=None): \n return Response({'message':'DELETE'})", "def amenity_delete_by_id(amenity_id):\n\n fetched_obj = storage.get(\"Amenity\", str(amenity_id))\n\n if fetched_obj is None:\n abort(404)\n\n storage.delete(fetched_obj)\n storage.save()\n\n return jsonify({})", "def delete_entry(self, scenario_id):\n sql = self.delete(\"id\")\n self.cur.execute(sql, (scenario_id,))", "def deleteNote(self, authenticationToken, guid):\r\n pass", "def delreply(request, post_id):\n if not request.user.is_authenticated():\n return redirect('/login/?next=%s' % request.path)\n else:\n\n reply = Reply.objects.get(id = post_id)\n \n reply.delete() \n return redirect('/home/')", "def delete_event(id):\n event = Event.query.get(id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n db.session.delete(event)\n db.session.commit()\n return 
jsonify({\"message\": \"Your event has been successfully deleted.\"})", "def delete(self, id):\n result = self._collection.remove({'_id': ObjectId(str(id))})\n # result is {u'n': 1, u'ok': 1} if deleted\n # TODO (cc) use constants for return codes and messages\n if result['ok'] == 1 and result['n'] == 1:\n return {'result': 'SUCCESS', 'msg': \"Delete was successful\", 'id': id}\n else:\n # TODO(cc) handle object not found error\n return {'result': 'FAILED', 'msg': 'Record not found in DB', 'id': id}", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete(id):\n r = requests.delete(API_ROUTE + '/' + str(id), headers={'Auth': _auth()})\n if r.status_code != requests.codes.no_content:\n return r.text, r.status_code\n return redirect(url_for('index'), code=278)", "def delete(self, id):\n Ticket.query.filter_by(id=id).delete()\n db.session.commit()\n return None", "def api_delete_dish(request, id):\n\n close_old_connections()\n \n # Not marking it as served if it isn't even ready yet.\n if not request.user.is_authenticated:\n return HttpResponseForbidden(\"You're not authenticated.\")\n \n # Delete the dish.\n Dish.objects.get(id=id).delete()\n\n close_old_connections()\n \n return HttpResponse('Deleted.')", "def del_msg_by_id(self, value):\n self.database.delete(self.tname, self.primary_key, value)", "def delete_entry(self, scenario_info):\n sql = self.delete(\"id\")\n self.cur.execute(sql, (scenario_info[\"id\"],))", "def delete(self, id):\n transacao = Transacoes.get_transacao(id)\n if not transacao:\n api.abort(404, 'Transacao not found')\n\n Transacoes.delete_transacao(transacao)\n return {\"msg\": \"Transacao deleted.\"}, 200", "def delete_view(self, request, object_id):\r\n obj = self.get_object(unquote(object_id))\r\n obj.delete()\r\n return HttpResponse(\"Deleted\")", "def delete(self):\n ...", "def test_announcments_delete_model(self):\n db.session.delete(Announcement.query.filter_by(title = \"Title\", description = \"Description\", date = datetime(2015, 6, 5, 8, 10, 10, 10)).first())\n db.session.commit()\n self.assertEqual(Announcement.query.filter_by(title = \"Title\", description = \"Description\", date = datetime(2015, 6, 5, 8, 10, 10, 10)).count(), 0)", "def delete_event(id):\n oEvent, error = Event.get_by_id(id)\n if error:\n return make_response(jsonify({\"error\": error}), 400)\n oEvent.delete()\n return make_response(jsonify({\"success\": \"Event Deleted\"}))", "def delete_addition_by_id(self,id):\r\n \r\n session = self.persistence.get_session() \r\n affected_rows = session.query(TopicAddition).filter(TopicAddition.id==id).delete()\r\n session.commit()\r\n\r\n if (affected_rows < 1): \r\n raise NoAffectedRows", "def delete(self, id):\n\n query = \"DELETE FROM {} WHERE id = {}\".format(self.table, id)\n\n self.remove(query)\n return True", "def deleteAgenda():\n data = request.json\n if \"agenda_id\" in data:\n connectMongo.deleteAgenda(data.get(\"agenda_id\"))\n return jsonify(response=200, msg=\"Agenda has been deleted\")\n else:\n return jsonify(response=400, msg=\"you didn't sent all the necessary information\")", "def delete_specific_incident(self, incident_id):\n self.cursor.execute(\"\"\"DELETE FROM incidents WHERE incident_id ='%s' AND status='draft'\n \"\"\" %(incident_id))\n self.commiting()\n return incident_id" ]
[ "0.7832141", "0.7315442", "0.718373", "0.71590054", "0.715206", "0.7093981", "0.7008346", "0.6998064", "0.6946827", "0.6931493", "0.6922772", "0.68966997", "0.6869891", "0.6861729", "0.6804666", "0.6804666", "0.6804666", "0.6804666", "0.6804666", "0.6717501", "0.6696411", "0.6673575", "0.66603714", "0.66594744", "0.66467136", "0.66382396", "0.66345125", "0.6628265", "0.6592035", "0.6582318", "0.6560286", "0.65587026", "0.6546335", "0.65402", "0.65378785", "0.65352356", "0.65278465", "0.6520866", "0.651513", "0.6510721", "0.65071815", "0.6506678", "0.65035796", "0.64896804", "0.64790756", "0.64746606", "0.64746606", "0.64588886", "0.6457124", "0.6451936", "0.64488983", "0.640982", "0.6398811", "0.6396728", "0.63888806", "0.63797474", "0.6375208", "0.63710463", "0.63665706", "0.63664854", "0.63630927", "0.6362971", "0.63611", "0.63560694", "0.6355228", "0.6354714", "0.6352703", "0.6348122", "0.63448215", "0.6335536", "0.6324916", "0.6324916", "0.6324916", "0.63206273", "0.63176614", "0.6309418", "0.6304065", "0.6295914", "0.62832296", "0.62823176", "0.6278421", "0.6272373", "0.62700355", "0.6266346", "0.62510395", "0.62510395", "0.6247183", "0.62464976", "0.62398124", "0.6235671", "0.62327623", "0.6229891", "0.62272334", "0.62260056", "0.6225562", "0.621615", "0.62019897", "0.620153", "0.6200851", "0.6195918" ]
0.79569536
0
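Illustrative note, not part of the dataset rows: the positive document above is a Resource-style delete() handler keyed on announcementID. The sketch below shows how such a handler behaves end to end, assuming Flask-RESTful wiring; the AnnouncementResource class name, the /announcements/<int:announcementID> route, and the in-memory ANNOUNCEMENTS dict are stand-ins for the record's Announcement model and db.session, not part of the original record.

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

# Stand-in for the Announcement table queried in the dataset record.
ANNOUNCEMENTS = {1: "maintenance window tonight"}

class AnnouncementResource(Resource):
    def delete(self, announcementID):
        # Mirrors the record's logic: 404 when the ID is absent,
        # otherwise delete and report success.
        if announcementID not in ANNOUNCEMENTS:
            return {"status": "fail",
                    "message": "No announcement with ID " + str(announcementID) + " exists"}, 404
        del ANNOUNCEMENTS[announcementID]
        return {"status": "success", "message": "Announcement Deleted"}, 200

api.add_resource(AnnouncementResource, "/announcements/<int:announcementID>")

if __name__ == "__main__":
    with app.test_client() as client:
        print(client.delete("/announcements/1").status_code)  # 200 on the first delete
        print(client.delete("/announcements/1").status_code)  # 404 once it is gone

The two test-client calls make the handler's contract visible without touching a real database.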
Function that converts a category name to a Python module name, e.g. rw-generic to RwGenericYang
def get_module_name_from_log_category(log_category): words = log_category.split('-') words.append('yang') return ''.join(word.capitalize() for word in words)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize_module_name(layer_name):\n modules = layer_name.split('.')\n try:\n idx = modules.index('module')\n except ValueError:\n return layer_name\n del modules[idx]\n return '.'.join(modules)", "def module_name(self):\n return \"py{0:s}\".format(self.library_name[3:])", "def generate_module_name_dir(name, train_type):\n if name == ComponentName.BINNING:\n if train_type == FederatedLearningType.VERTICAL:\n return ComponentName.VERT_FEATURE_BINNING.lower()\n else:\n return ComponentName.HORZ_FEATURE_BINNING.lower()\n elif name == ComponentName.FEATURE_CALCULATION:\n if train_type == FederatedLearningType.VERTICAL:\n return ComponentName.VERT_FEATURE_CALCULATION.lower()\n else:\n raise ValueError(\"The HorzFeatureCalculation Does't Support Yet.\")\n else:\n return name.lower()", "def get_module_short_name(klass):\n return klass.__module__.rsplit('.', 1)[-1]", "def makename(package, module):\n # Both package and module can be None/empty.\n if package:\n name = package\n if module:\n name += '.' + module\n else:\n name = module\n return name", "def getMangledName(self, name, module=None):\n if module is os.path:\n return \"os.path\"\n if isinstance(name, str) and (name.startswith(self.start) or name == self.package):\n return self.prefix + name\n return name", "def get_nuts_category(year):\n if year >= 2016:\n return f\"nuts2_2016\"\n elif year >= 2013:\n return f\"nuts2_2013\"\n elif year >= 2010:\n return f\"nuts2_2010\"\n elif year >= 2006:\n return f\"nuts2_2006\"\n else:\n return f\"nuts2_2003\"\n\n # for t in [2016,2013,2010,2006,2003]:\n # if year >=t:\n # return(f'nuts2_{str(t)}')", "def sanitize_module_name(module_name):\n module_name = module_name.replace('-', '_').replace('.', '_')\n if module_name[0] not in string.ascii_letters:\n module_name = \"a\" + module_name\n return module_name", "def normalize_package_name(_s: str) -> str:\n return _s.replace('_', '-').lower()", "def _compression_module_type_to_attr_name(compression_module_type: CompressionModuleType):\n if compression_module_type == CompressionModuleType.FUNCTION_QUANTIZER:\n return \"function_quantizers\"\n if compression_module_type == CompressionModuleType.ACTIVATION_QUANTIZER:\n return \"activation_quantizers\"\n raise RuntimeError(\"Unknown extra module type\")", "def parse_category_label(label: str) -> str:\n return number_first_regex.sub(\n '_',\n space_regex.sub(\n '_',\n label.strip().lower().replace('*', '').replace('(', '').replace(\n ')', '').replace('.', '')))", "def modulename():\n from inspect import getmodulename,getfile\n return getmodulename(getfile(lambda x:x))", "def get_ConTextItem_category_string(ci):\n return \"_\".join(ci.category)", "def _make_class_name(name):\n return name[0].upper() + name[1:] + \"Ufunc\"", "def create_importable_name(charm_name):\n return charm_name.replace(\"-\", \"_\")", "def _get_module_name(filename: str) -> str:\n return \".\".join(_get_relative(filename).split(os.path.sep)[2:]).replace(\".pyi\", \"\").replace(\".__init__\", \"\")", "def __create_classname(self, fullname):\n return PACKAGE_NAME + \".\" + fullname", "def category_reducer(category):\n if not \"--\" in category:\n if category in BAD_CATEGORIES:\n return \"Unknown\"\n return category\n\n main, sub = category.split(\"--\")\n\n main = main.strip()\n if main in [\"Science\"]:\n return sub.strip()\n else:\n return main", "def _project_name_to_package_name(project_name):\n return project_name.lower().replace('-', '')", "def create_charm_name_from_importable(charm_name):\n # _ is invalid in charm names, so 
we know it's intended to be '-'\n return charm_name.replace(\"_\", \"-\")", "def denormalize_module_name(parallel_model, normalized_name):\n fully_qualified_name = [mod_name for mod_name, _ in parallel_model.named_modules() if\n normalize_module_name(mod_name) == normalized_name]\n if len(fully_qualified_name) > 0:\n return fully_qualified_name[-1]\n else:\n return normalized_name # Did not find a module with the name <normalized_name>", "def format_category_name(category):\n\n category_words = category.name.rstrip().replace(',', '').replace(\"'\", '').split(\" \")\n return \"-\".join(category_words)", "def to_py_name(cpp_name, entry_type):\r\n if entry_type == 'function':\r\n return cpp_name\r\n first_underscore = cpp_name.find('_')\r\n assert(first_underscore != -1)\r\n return cpp_name[first_underscore + 1:]", "def process_ci_name(name):\n if name == \"Cinder_Jenkins\":\n return 'Jenkins'\n elif name:\n return name.replace('_', ' ')", "def category_part(self) -> str:\n if not self.is_old_style:\n raise ValueError('New identifiers have no category semantics')\n return self.split('/')[0]", "def _type_name(cls, manual_name):\r\n cf_name = ''\r\n if manual_name:\r\n cf_name = manual_name.lower()\r\n else:\r\n camelcase = re.compile(r'([a-z])([A-Z])')\r\n ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s)\r\n \r\n cf_name += ccase(cls.__name__)\r\n cf_name = cf_name.lower()\r\n if cls.__use_module_name__:\r\n cf_name = cls.__module__ + '_{}'.format(cf_name)\r\n return cf_name", "def get_full_module_name(o, lower=False):\n if not isinstance(o, type):\n o = o.__class__\n module = o.__module__\n if module is None or module == str.__class__.__module__:\n return o.__name__\n name = module + '.' + o.__name__\n if lower:\n return name.lower()\n else:\n return name", "def get_module_dict_key_from_name(name: str, feature_name_suffix: str = FEATURE_NAME_SUFFIX) -> str:\n key = name.replace(\".\", \"__ludwig_punct_period__\")\n return key + feature_name_suffix", "def _label_for(self, app_mod):\n return app_mod.__name__.rsplit('.',1)[0]", "def get_module_dict_key_from_name(name: str, feature_name_suffix: str=FEATURE_NAME_SUFFIX) ->str:\n key = name.replace('.', '__ludwig_punct_period__')\n return key + feature_name_suffix", "def slug(self) -> str:\n return self.__class__.__module__.rsplit(\".\", maxsplit=1)[-1]", "def to_full_name(typ: type) -> str:\n return f\"{typ.__module__}.{typ.__qualname__}\"", "def get_kmodule(mod, dep_map):\n\n\t_mod = mod\n\tif mod in dep_map: return mod\n\n\tmod = mod.replace('_', '-')\n\tif mod in dep_map: return mod\n\n\traise ValueError, 'unable to get module name \"%s\"' % _mod", "def convert_class_name(name: str) -> str:\n name_tokens = re_class_name.findall(name)\n return \"_\".join(i.lower() for i in name_tokens if i)", "def _normalize_class_name(self, name):\n class_name = ''.join(\n word.capitalize()\n for word in re.sub('[^A-Za-z0-9]+', ' ', name).split()\n )\n\n if not class_name.endswith('Extension'):\n class_name += 'Extension'\n\n return class_name", "def get_package_name(cls) -> str:\n return '.'.join(cls.__module__.split('.')[:-1])", "def getmodulename(path):\r\n info = getmoduleinfo(path)\r\n if info: return info[0]", "def category_name(self):\n try:\n category = self.proto.category.parent\n return f'{category.name} - {self.proto.category.name}'\n except AttributeError:\n return self.proto.category.name", "def libraryName(self):\n ret=\"\"\n if self.kind == \"lib\":\n ret = self.name + \"Engine\"\n elif self.kind == 
\"exe\":\n ret = self.name + \"Exelib\"\n else:\n raise Invalid(\"Invalid kind of component: %s. Supported kinds are 'lib' and 'exe'\" % self.name)\n return ret", "def set_category(self, category_name):\n try:\n module_name = get_module_name_from_log_category(category_name)\n log_yang_module = importlib.import_module('gi.repository.' + module_name)\n if not log_yang_module:\n logger.error(\"Module %s is not found to be added as log category for %s\", module_name, category_name)\n print(\"Module %s is not found to be added as log category for %s\", module_name, category_name)\n return \n for level in RwLogger.level_event_cls_map.values():\n if not hasattr(log_yang_module, level):\n logger.error(\"Module %s does not have required log notification for %s\", module_name, level)\n print(\"Module %s does not have required log notification for %s\", module_name, level) \n return\n self._log_yang_module = log_yang_module \n self._log_category_name = category_name\n\n except Exception as e:\n logger.exception(\"Caught error %s when trying to set log category (%s)\",repr(e), category_name)", "def make_python_name(self, name):\n # FIXME see cindex.SpellingCache\n for k, v in [('<', '_'), ('>', '_'), ('::', '__'), (',', ''), (' ', ''),\n (\"$\", \"DOLLAR\"), (\".\", \"DOT\"), (\"@\", \"_\"), (\":\", \"_\"),\n ('-', '_')]:\n if k in name: # template\n name = name.replace(k, v)\n # FIXME: test case ? I want this func to be neutral on C valid\n # names.\n if name.startswith(\"__\"):\n return \"_X\" + name\n if len(name) == 0:\n pass\n elif name[0] in \"01234567879\":\n return \"_\" + name\n return name", "def module_name(cls):\n return __name__.split(\".\")[0]", "def classify_file(filename, categories):\n extension = filename.split('.')[-1]\n for category, extensions in categories.items():\n if extension in extensions:\n return category\n return 'other'", "def getFileCategory(fileName):\n extension = __getExt(fileName)\n if extension in (_7z, \"7zip\", ace, air, apk, \"appxbundle\", \"arc\", arj, \"asec\", \"bar\", bz2, \"bzip\", cab, \"cso\", deb, \"dlc\", dmg, gz, \"gzip\", hqx, \"inv\", \"ipa\", iso, \"isz\", jar, \"msu\", \"nbh\", pak, rar, rpm, sis, sisx, sit, \"sitd\", sitx, tar, targz, tgz, \"webarchive\", xap, z, zip):\n return FileCategory.Archive\n\n if extension in (_3ga, aac, aiff, amr, ape, \"arf\", asf, asx, \"cda\", \"dvf\", flac, \"gp4\", \"gp5\", gpx, \"logic\", m4a, m4b, \"m4p\", midi, mp3, ogg, \"pcm\", \"rec\", snd, \"sng\", \"uax\", wav, wma, wpl, \"zab\"):\n return FileCategory.Audio\n\n if extension in (_as, asm, asp, \"aspx\", bat, c, \"cp\", cpp, cs, css, \"gradle\", htm, \"inc\", jad, java, js, json, \"jsp\", \"lib\", m, \"matlab\", ml, o, perl, php, pl, \"ps1\", py, rb, \"rc\", rss, \"scpt\", sh, sql, src, \"swift\", \"vb\", \"vbs\", \"ws\", \"xaml\", \"xcodeproj\", xml, xsd, xsl, xslt, yml):\n return FileCategory.Code\n\n if extension in (abw, \"aww\", azw, \"azw3\", \"azw4\", cbr, cbz, chm, \"cnt\", \"dbx\", djvu, doc, docm, docx, dot, dotm, dotx, epub, fb2, \"iba\", \"ibooks\", \"ind\", \"indd\", \"lit\", mht, mobi, mpp, odf, odt, ott, \"pages\", pmd, \"prn\", \"prproj\", ps, pub, \"pwi\", rep, rtf, sdd, sdw, \"shs\", \"snp\", sxw, tpl, vsd, \"wlmp\", wpd, wps, wri):\n return FileCategory.Document\n\n if extension in (bmp, cpt, dds, dib, dng, \"dt2\", emf, gif, ico, \"icon\", icns, jpeg, jpg, pcx, pic, png, psd, \"psdx\", raw, tga, \"thm\", tif, tiff, wbmp, \"wdp\", webp):\n return FileCategory.Image\n\n if extension in (oxps, pdf, xps):\n return FileCategory.PDF\n\n if 
extension in (key, \"keynote\", pot, potx, pps, ppsx, ppt, pptm, pptx):\n return FileCategory.Presentation\n\n if extension in (ods, \"numbers\", sdc, xls, xlsx, xlsb):\n return FileCategory.Spreadsheet\n\n if extension in (\"alx\", application, csv, \"eng\", html, log, \"lrc\", \"lst\", nfo, opml, \"plist\", reg, srt, sub, \"tbl\", text, txt):\n return FileCategory.Text\n\n if extension in (\"264\", _3g2, _3gp, avi, \"bik\", \"dash\", \"dat\", \"dvr\", flv, h264, m2t, m2ts, m4v, mkv, mod, mov, mp4, mpeg, mpg, \"mswmm\", mts, ogv, rmvb, swf, \"tod\", \"tp\", ts, vob, webm, wmv):\n return FileCategory.Video\n\n return FileCategory.Binary", "def __call__(self, pickler, name, module):\n if module is os.path:\n if self.returnStr:\n return \"os.path\"\n return _sPickle.SPickleTools.reducer(str, (\"os.path\",))\n if isinstance(name, str) and (name.startswith(self.start) or name == self.package):\n prefix = self.prefix\n if self.returnStr:\n return prefix + name\n return _sPickle.SPickleTools.reducer(operator.add, (prefix, name))\n return name", "def get_class_name_from_pkg_name(opts):\n pkg_name = opts[\"package\"]\n return \"\".join(map(str.capitalize, pkg_name.split(\"_\")))", "def simplifyOutName(name):\n return \"HLTNav_\" + name.replace(\"HLTNav_\", \"\").replace(\"Trig\", \"\").replace(\"Alg\", \"\")", "def normalize_python_library_name(library_name: str) -> str:\n # Remove the special support package designation (e.g [grpc]) in the\n # brackets when parsing the requirements file to resolve confusion 2 in the\n # docstring.\n # NOTE: This does not cause ambiguities because there is no viable scenario\n # where both the library and a variant of the library exist in the\n # directory. Both the default version and the variant are imported in the\n # same way (e.g import google.api.core) and if pip allowed a scenario where\n # both versions were installed, then there would be ambiguities in the\n # imports. For this reason, it is safe to disambiguate the names by removing\n # the suffix. 
We have also implemented the backend tests,\n # test_uniqueness_of_lib_names_in_requirements_file and\n # test_uniqueness_of_lib_names_in_compiled_requirements_file, in\n # scripts/install_python_prod_dependencies_test.py to ensure that all\n # library names in the requirements files are distinct when normalized.\n library_name = re.sub(r'\\[[^\\[^\\]]+\\]', '', library_name)\n return library_name.lower()", "def package_name(string):\n return 'USymbol' + convert_name(string, False)", "def category2url(cat):\n return remove_diacritics(cat).replace(\" \", \"_\")", "def make_fullname(basename, _type=None):\n return '{}.{}'.format(basename, extensions.get(_type, None))", "def pkgTypeInfo(self, pkg_typ):\n\n if pkg_typ is PY_SOURCE:\n return \"PYTHON SOURCE FILE MODULES\"\n elif pkg_typ is PY_COMPILED:\n return \"PYTHON COMPILED CODE OBJECT MODULES \"\n elif pkg_typ is C_EXTENSION:\n return \"DYNAMICALLY LOADABLE SHARED LIBRARY (C-EXTENSION) MODULES\"\n elif pkg_typ is PY_RESOURCE:\n return \"MACINTOSH RESOURCE MODULES\"\n elif pkg_typ is PKG_DIRECTORY:\n return \"PYTHON PACKAGE DIRECTORY MODULES\"\n elif pkg_typ is C_BUILTIN:\n return \"BUILT-IN MODULES\"\n elif pkg_typ is PY_FROZEN:\n return \"FROZEN PYTHON MODULES\"\n else:\n return \"UNKNOWN MODULES\"", "def decamelize(name):\n pat = re.compile(r'([A-Z]*[^A-Z]*)(.*)')\n bits = []\n while True:\n head, tail = re.match(pat, name).groups()\n bits.append(head)\n if tail:\n name = tail\n else:\n break\n return '_'.join([bit.lower() for bit in bits])", "def get_label(name):\n lower = name.lower()\n vals = lower.split('_')\n if 'ho' in vals:\n name = 'Independent Estimate'\n elif 'alldata' in vals:\n name = 'Extra-Data Estimate'\n elif 'ris' in vals[0]:\n name = 'RIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n elif 'is' in vals[0]:\n name = 'OIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n if 'dr' in vals:\n name += ' DR'\n if 'wdr' in vals:\n name += ' WDR'\n return name", "def name_to_type(self, name):\n return self.CUSTOM_PREFIX + name", "def getCategory():", "def _convert_category(category_field):\n\n return category_field # TODO", "def category_name(self):\n return self.category.name", "def process_path(module_path):\n\n if module_path == 'numpy.ndarray':\n return 'StorageNumpy', 'hecuba.hnumpy'\n if module_path == 'StorageDict':\n return 'StorageDict', 'hecuba.hdict'\n last = 0\n for key, i in enumerate(module_path):\n if i == '.' and key > last:\n last = key\n module = module_path[:last]\n class_name = module_path[last + 1:]\n return class_name, module", "def convert_packname_for_depsolver(packname):\n return packname.replace('-', '_')", "def getCategoryByName(layerTypeName):\n # !!! 
hard coded list of activation layers could cause problems when more layers are added in caffe\n if layerTypeName in [\"ReLU\", \"PReLU\", \"ELU\", \"Sigmoid\", \"TanH\", \"AbsVal\", \"Power\",\n \"Exp\", \"Log\", \"BNLL\", \"Threshold\", \"Bias\", \"Scale\"]:\n return LayerType.CATEGORY_ACTIVATION\n elif \"Loss\" in layerTypeName:\n return LayerType.CATEGORY_LOSS\n elif \"Data\" in layerTypeName:\n return LayerType.CATEGORY_DATA\n else:\n return LayerType.CATEGORY_NONE", "def get_python_classname(raw_classname):\n class_name = raw_classname.replace(\" \",\"\")\n class_name = class_name.replace(\"-\",\"\")\n return class_name", "def typeToName(type: int) -> unicode:\n ...", "def get_topicname ( base_name, object_type, condition ) :\n return base_name + '-' + object_type.upper( ) + '-' + condition.upper( )", "def get_module_name(module_path):\n return ntpath.split(module_path)[1].split(\".\")[0]", "def get_module_name(module_path):\n return ntpath.split(module_path)[1].split(\".\")[0]", "def get_package_name(name):\n name = _strip_package_name(name)\n return name", "def MakeModuleElement (s):\n return _UnderscoreSubstitute_re.sub('_', _XMLIdentifierToPython(s))", "def _get_category_label(category_obj, level):\n result = ''\n for i in range(0, level + 1):\n if i < len(category_obj):\n level_label = category_obj[i]\n if not level_label:\n level_label = '$'\n else:\n # place holder\n level_label = '$'\n if not result:\n result = level_label\n else:\n result = result + '###' + level_label\n return result", "def process_path(module_path):\n if module_path == 'numpy.ndarray':\n return 'StorageNumpy', 'hecuba.hnumpy'\n last = 0\n for key, i in enumerate(module_path):\n if i == '.' and key > last:\n last = key\n module = module_path[:last]\n class_name = module_path[last + 1:]\n return class_name, module", "def modelName2Module(model_name):\n module_name = 'RAiDER.models.' 
+ model_name.lower().replace('-', '')\n model_module = importlib.import_module(module_name)\n wmObject = getattr(model_module, model_name.upper().replace('-', ''))\n return module_name, wmObject", "def name(self) -> str:\n return str(self.category.value)", "def ifdef_name(filename):\n return filename.replace(\"/\", \"_\").replace(\".\", \"_\").upper() + \"_\"", "def get_category(self) -> str:\n return self.category", "def get_class_name(self, name):\n name_list = name.split('_')\n file_name = ''\n for item in name_list:\n file_name += item.capitalize()\n return file_name", "def target_naming(ty,target):\n de = ty.description(target)\n de = de[0].upper() + de[1:] + \".\"\n return de", "def mangle_name(name):\n import re\n try:\n return re.sub('_+','_',re.sub('[^\\w_]','_',name).lower()).rstrip('_')\n except TypeError:\n raise TypeError(\n 'Trying to mangle name with invalid type of: ' + str(type(name)))", "def get_category(self, obj):\n cat_lst = []\n for k, v in obj.items():\n cat_lst = cat_lst + list(v.keys())\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str):\n cat_lst = cat_lst + list(in_v.keys())\n in_k, in_v = list(in_v.items())[-1]\n simpl_lst = [i for n, i in enumerate(cat_lst) if i not in cat_lst[:n]]\n res = []\n for cat in simpl_lst:\n if cat not in self._loop_name:\n re_outer = re.compile(r'([^A-Z ])([A-Z])')\n re_inner = re.compile(r'(?<!^)([A-Z])([^A-Z])')\n res.append(re_outer.sub(r'\\1 \\2', re_inner.sub(r' \\1\\2', cat)))\n self._category = res", "def _modname(cls, full=False):\n module = getattr(cls, '__module__', None)\n if module is None or module == str.__class__.__module__:\n return cls.__name__\n if full and module == \"__main__\":\n import inspect\n the_module = inspect.getmodule(cls)\n spec = getattr(the_module, '__spec__', None)\n if spec is None:\n if the_module.__name__ == '__main__':\n module = '.'.join([the_module.__package__,\n os.path.basename(the_module.__file__.split('.')[0])])\n else:\n module = getattr(the_module, '__package__', None)\n else:\n module = spec.name if spec else module\n return module\n return module + '.' 
+ cls.__class__.__name__", "def to_type_name(self, text) -> str:\n return util.to_snake_case(self.split_to_body_and_ext(text)[0]).capitalize()", "def MAKE_NAME(name):\n name = name.replace('$', 'DOLLAR')\n name = name.replace('.', 'DOT')\n if name.startswith('__'):\n return '_X' + name\n elif name[0] in '01234567879':\n return '_' + name\n return name", "def edit_url(\n modulename: str, is_package: bool, mapping: Mapping[str, str]\n) -> str | None:\n for m, prefix in mapping.items():\n if m == modulename or modulename.startswith(f\"{m}.\"):\n filename = modulename[len(m) + 1 :].replace(\".\", \"/\")\n if is_package:\n filename = f\"{filename}/__init__.py\".lstrip(\"/\")\n else:\n filename += \".py\"\n return f\"{prefix}{filename}\"\n return None", "def category(self) -> str:\n return pulumi.get(self, \"category\")", "def uidfy(self, cls):\n return \"{0}.{1}\".format(cls.__module__, cls.__name__)", "def uidfy(self, cls):\n return \"{0}.{1}\".format(cls.__module__, cls.__name__)", "def category_slug_to_name(self, slug):\n name = ''\n for k, v in Post.CATEGORY_CHOICES:\n if k == slug:\n name = v\n return name", "def get_converter(category, name, disable_logging=False):\n return PluginLoader._import(\"convert.{}\".format(category), name, disable_logging)", "def camelize(name):\n return ''.join([bit.capitalize() for bit in name.split('_')])", "def category_name(self):\r\n return conf.lib.clang_getDiagnosticCategoryName(self.category_number)", "def get_class_name_from_data_name(name):\n upper = True\n name_ = ''\n for char in name:\n if char == '_':\n upper = True\n continue\n name_ += char if not upper else char.upper()\n upper = False\n return 'Data' + name_", "def __init__(self, category):\n self.category = category\n self.name = \"Filters.document.category('{}')\".format(self.category)", "def fullname(cls):\n module = cls.__module__\n if module is None or module == str.__class__.__module__:\n return cls.__class__.__name__\n return module + '.' + cls.__class__.__name__", "def elenaNamesToOlafNames(name):\n layer, region, num, position = locateTTHalfModule(name)\n reg = {'A': 'R3', 'B': 'R2', 'C':'R1'}\n return layer+'Layer'+reg[region]+'Module'+str(num+1)+position.capitalize()", "def _strip_package_name(name):\n name = _strip(name)\n if name.find('.') != -1:\n name = name.split('.')[0]\n return name", "def _parse_module_name(program_param):\n if program_param and program_param.endswith(\".py\"):\n return program_param[:-3]\n return program_param", "def package_name(self):", "def encode(category_main : ):", "def get_package_name(x):\n return re.search(r\"^(\\w|-)*\", x).group()", "def _get_path_category(self, path: Path) -> str:\n if str(path).startswith(\"/\"):\n return \"global\"\n elif str(path).startswith(\"~\"):\n return \"local\"\n\n return \"custom\"", "def get_mod_name():\n return sys.argv[0].split(\"/\")[-1].split(\".py\")[0]" ]
[ "0.6348545", "0.62357664", "0.6161529", "0.6005554", "0.59594476", "0.5909393", "0.58015877", "0.576444", "0.5749584", "0.5743478", "0.57170683", "0.57096314", "0.5701094", "0.56758934", "0.5675464", "0.5667408", "0.56373435", "0.5630453", "0.55773044", "0.5576993", "0.55697715", "0.5569567", "0.55563915", "0.5553497", "0.5547277", "0.5545855", "0.55446887", "0.55227077", "0.5520681", "0.5513283", "0.5505583", "0.54980767", "0.5470531", "0.54668045", "0.5462252", "0.5456542", "0.545223", "0.5445622", "0.54393154", "0.5438888", "0.54355097", "0.5428177", "0.54208994", "0.54208577", "0.5414338", "0.540822", "0.5402593", "0.5398408", "0.53954506", "0.53714126", "0.5367868", "0.535996", "0.5337245", "0.5334869", "0.5321865", "0.5321347", "0.53174204", "0.53151345", "0.53037274", "0.5302989", "0.5294884", "0.528223", "0.5281131", "0.52588636", "0.525355", "0.525355", "0.5239127", "0.5229486", "0.5225893", "0.5209114", "0.5206366", "0.5203421", "0.5203288", "0.52019143", "0.5197209", "0.51938313", "0.51803094", "0.517785", "0.5176043", "0.5175558", "0.5172683", "0.51642627", "0.515652", "0.515594", "0.515594", "0.51517093", "0.51508677", "0.51502097", "0.5144393", "0.5141582", "0.51364607", "0.5134558", "0.5123811", "0.5115597", "0.51139784", "0.5104829", "0.5102415", "0.5102032", "0.51004606", "0.50976664" ]
0.7726592
0
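Illustrative note, not part of the dataset rows: the positive document above builds the module name by splitting the log category on '-', appending 'yang', and capitalizing each piece. A minimal standalone sketch with the expected outputs (the second category string is a hypothetical example):

def get_module_name_from_log_category(log_category):
    # Same conversion as the dataset's positive document:
    # 'rw-generic' -> ['rw', 'generic', 'yang'] -> 'RwGenericYang'
    words = log_category.split('-')
    words.append('yang')
    return ''.join(word.capitalize() for word in words)

if __name__ == "__main__":
    print(get_module_name_from_log_category("rw-generic"))  # RwGenericYang
    print(get_module_name_from_log_category("rw-appmgr"))   # RwAppmgrYang (hypothetical category)

Note that the hyphenated category form is what yields the camel-cased module name used by the RwLogger record that follows, whose constructor calls set_category('rw-generic').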
Create an instance of RwLogger
def __init__(self, category=None, log_hdl=None, file_name=None): logging.Handler.__init__(self) """ Set the default formatter to include a rwlog marker so we know the message are being sent to rwlog.""" self.setFormatter("(rwlog)" + logging.BASIC_FORMAT) if file_name is None: frame = get_frame() file_name = get_caller_filename(frame) if category is not None: if not isinstance(category, six.string_types): raise TypeError("Category should be a string") self.category = category # GBoxed types don't accept constructors will arguments # RwLog.Ctx(file_name) will throw an error, so call # new directly if not log_hdl: log_hdl = RwLog.Ctx.new(file_name) self._log_hdl = log_hdl self.set_category('rw-generic') self._group_id = None self._rwlogd_inited = False shm_filename = self._log_hdl.get_shm_filter_name() self._shm_filename = os.path.join('/dev/shm',shm_filename) try: self._shm_fd = open(self._shm_filename,'rb') self._shm_data=mmap.mmap(self._shm_fd.fileno(),length=0,flags=mmap.MAP_SHARED,prot=mmap.PROT_READ) except Exception as e: logger.error("Failed to open shm file: %s with exception %s",self._shm_filename,repr(e)) print("Failed to open shm file: %s with exception %s",self._shm_filename,repr(e)) self._log_serial_no = 0 self._log_severity = 7 # Default sev is debug
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_logger() -> logging.Logger:\n pass # TODO: Replace with implementation!", "def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(\n evidence_path, test_case, log_extension\n )\n else:\n log_path = None\n self.log = Log(log_path)\n self.log = self.log.get_logger()\n return self.log", "def _init_logger(self):\n self.logger = logging.getLogger('WSClientAPILogger')\n self.logger.setLevel(logging.DEBUG)\n self.logger_handler = logging.FileHandler(self.__class__.__name__ + '.log')\n self.logger_handler.setLevel(logging.DEBUG)\n self.logger_formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%d-%m %H:%M:%S')\n self.logger_handler.setFormatter(self.logger_formatter)\n self.logger.addHandler(self.logger_handler)", "def create_logger():\r\n global logger\r\n logger = logging.getLogger(logger_name)\r\n\r\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')\r\n \r\n handler = logging.StreamHandler()\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n \r\n return logger", "def __init__(self):\n self.logger = logger()", "def _instanciate_logger(self):\n\t\tself._logger = logging.getLogger('main')\n\t\tself._logger.setLevel(logging.DEBUG)\n\t\tself._logger.addHandler(logging.StreamHandler())", "def build_logger(self):\n pass", "def _create_logger(self, log_dir: str) -> logging.Logger:\n self.log_dir = log_dir\n self.log_file = os.path.join(log_dir, self.name)\n os.makedirs(self.log_dir, exist_ok=True)\n logger = logging.getLogger(self.log_file)\n logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler(self.log_file)\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(message)s\",\n datefmt=\"%Y-%m-%d-%H:%M:%S\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def _create_logger(title, log_msg_id=\"\", log_file_suffix=\".log\"):\n\n logging.setLoggerClass(SkidlLogger)\n logger = logging.getLogger(title)\n\n # Errors & warnings always appear on the terminal.\n handler = logging.StreamHandler(sys.stderr)\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Errors and warnings are stored in a log file with the top-level script's name.\n handler = SkidlLogFileHandler(get_script_name() + log_file_suffix, mode=\"w\")\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Set logger to trigger on info, warning, and error messages.\n logger.setLevel(logging.INFO)\n\n # Augment the logger's functions to count the number of errors and warnings.\n logger.error = CountCalls(logger.error)\n logger.warning = CountCalls(logger.warning)\n\n return logger", "def logger():\n return RPLogger('pytest_reportportal.test')", "def setup_logger(level):\n\n logger = logging.getLogger('splunk.appserver.insteon.controllers.WoeidLookup')\n logger.propagate = False # Prevent the log messages from being duplicated in the python.log file\n logger.setLevel(level)\n\n file_handler = logging.handlers.RotatingFileHandler(make_splunkhome_path(['var', 'log', 'splunk', 'insteon_woeid_controller.log']), maxBytes=25000000, backupCount=5)\n\n formatter = logging.Formatter('%(asctime)s %(levelname)s 
%(message)s')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n return logger", "def create_logger():\n global logger\n\n formatter = logging.Formatter('%(asctime)s|%(levelname)s|%(message)s')\n handler = TimedRotatingFileHandler(log_file, when=\"midnight\", interval=1)\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n handler.suffix = \"%Y-%m-%d\"\n logger = logging.getLogger(\"sacplus\")\n logger.setLevel(log_level)\n logger.addHandler(handler)", "def logger():\n logger = logging.getLogger(\"Automation_Dispatcher\")\n logger.setLevel(settings.LOGLEVEL)\n handler = logging.StreamHandler()\n logger.addFilter(_Commmon_filter())\n handler.setFormatter(logging.Formatter('%(asctime)s [%(component)s]'\n ' [%(levelname)s] %(message)s', \"%Y-%m-%d %H:%M:%S\"))\n logger.addHandler(handler)\n return logger", "def __init__(self):\n\n self._logger = logging.getLogger(__name__)", "def setup_logger():\n LOG_DIR = unicode( os.environ.get(u'usep_gh__LOG_DIR') )\n LOG_LEVEL = unicode( os.environ.get(u'usep_gh__LOG_LEVEL') )\n filename = u'%s/usep_gh_handler.log' % LOG_DIR\n formatter = logging.Formatter( u'[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s' )\n logger = logging.getLogger( __name__ )\n # logger = logging.getLogger( u'usep_gh_handler' )\n level_dict = { u'debug': logging.DEBUG, u'info':logging.INFO }\n logger.setLevel( level_dict[LOG_LEVEL] )\n file_handler = logging.FileHandler( filename )\n file_handler.setFormatter( formatter )\n logger.addHandler( file_handler )\n logger.debug( u'in utils.log_helper.setup_logger(); log initialized at %s' % unicode(datetime.datetime.now()) )\n return logger", "def setup_logger():\n logger = logging.getLogger(\"extract_brass_bedpe\")\n LoggerFormat = '[%(levelname)s] [%(asctime)s] [%(name)s] - %(message)s'\n logger.setLevel(level=logging.INFO)\n handler = logging.StreamHandler(sys.stderr)\n formatter = logging.Formatter(LoggerFormat, datefmt='%Y%m%d %H:%M:%S')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def setup_logger(level, name, use_rotating_handler=True):\r\n \r\n logger = logging.getLogger(name)\r\n logger.propagate = False # Prevent the log messages from being duplicated in the python.log file\r\n logger.setLevel(level)\r\n \r\n log_file_path = os.path.join( os.environ['SPLUNK_HOME'], 'var', 'log', 'splunk', 'radius_auth_rest_handler.log' )\r\n \r\n if use_rotating_handler:\r\n file_handler = logging.handlers.RotatingFileHandler(log_file_path, maxBytes=25000000, backupCount=5)\r\n else:\r\n file_handler = logging.FileHandler(log_file_path)\r\n \r\n formatter = logging.Formatter('%(asctime)s %(levelname)s ' + name + ' - %(message)s')\r\n file_handler.setFormatter(formatter)\r\n \r\n logger.addHandler(file_handler)\r\n \r\n return logger", "def logger(self) -> Logger:\n logger = getLogger(\"WatchTheDoor\")\n logger.setLevel(INFO)\n return logger", "def make_logger(model_dir: str, log_file: str = \"train.log\") -> Logger:\n logger = logging.getLogger(__name__)\n if not logger.handlers:\n logger.setLevel(level=logging.DEBUG)\n fh = logging.FileHandler(\"{}/{}\".format(model_dir, log_file))\n fh.setLevel(level=logging.DEBUG)\n logger.addHandler(fh)\n formatter = logging.Formatter(\"%(asctime)s %(message)s\")\n fh.setFormatter(formatter)\n if platform == \"linux\":\n sh = logging.StreamHandler()\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(sh)\n logger.info(\"Hello! 
This is Joey-NMT.\")\n return logger", "def __init__(self):\n\n self.log = logger.getLogger(name=\"directord\")", "def __init__(self):\r\n self.file_object = './ExecutionLogs/PredictFromModel.log'\r\n\r\n \"\"\" Initialize logger class for log writing \"\"\"\r\n self.log_writer = logger.logger(self.file_object)", "def create_logger():\n logging.basicConfig(level = logging.INFO, filename='logging', filemode='w')\n logger = logging.getLogger(\" \")\n admin_handler = logging.FileHandler('logging')\n admin_handler.setLevel(logging.INFO)\n logger.addHandler(admin_handler)\n logger.warning(f'{admin_handler} created a new logger')\n return logger", "def __init_board_logger(self):\n \n # Create a Logger object and set log level\n board_logger = logging.getLogger(__name__)\n board_logger.setLevel(logging.DEBUG)\n\n # Create a handler to console and set level\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n\n # Create a formatter and add it to the handler\n console_format = logging.Formatter(\"%(asctime)s - %(threadName)s - \"\n + \"%(module)s - %(levelname)s : %(message)s\")\n console_handler.setFormatter(console_format)\n\n # Create a handler for syslog and set level\n syslog_handler = SysLogHandler(\"/dev/log\")\n syslog_handler.setLevel(logging.INFO)\n\n # Create a formatter and add it to handler\n syslog_format = logging.Formatter(\"%(asctime)s - %(threadName)s - \"\n + \"%(module)s - %(levelname)s : %(message)s\")\n syslog_handler.setFormatter(syslog_format)\n\n # Add handlers to logger\n board_logger.addHandler(console_handler)\n board_logger.addHandler(syslog_handler)\n\n # Return logger\n return board_logger", "def create_logger():\n logger = logging.getLogger(\"punctuation_logger\")\n logger.setLevel(logging.INFO)\n #logger.setLevel(logging.NOTSET) # Set Logger's level to NOTSET, default is WARNING\n\n # create the logging file handler\n if options.log_file is not None:\n fh = logging.FileHandler(options.log_file)\n \n fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(fmt)\n fh.setFormatter(formatter)\n fh.setLevel(logging.NOTSET)\n # add handler to logger object\n logger.addHandler(fh)\n return logger", "def _get_logger(self):\n return Logger(\"SLOTH\")", "def construct_logger(in_logger_file_path):\n logger_configfile_path = in_logger_file_path + \"/log.properties\"\n # print logger_configfile_path\n logging.config.fileConfig(logger_configfile_path)\n logger = logging.getLogger(\"ITR2\")\n return logger", "def __init__(self):\n self.logger = logging.getLogger(FeatureEngineeringLogger.__name__)", "def setup_logger():\n formatter = ColoredFormatter(\n (\n '%(log_color)s%(levelname)-5s%(reset)s '\n '%(yellow)s[%(asctime)s]%(reset)s'\n '%(green)s %(name)s %(purple)s %(filename)s %(purple)s %(funcName)s %(purple)s:%(lineno)d%(reset)s '\n '%(bold_blue)s%(message)s%(reset)s'\n ),\n datefmt='%y-%m-%d %H;%M:%S',\n log_colors={\n 'DEBUG': 'blue',\n 'INFO': 'yellow',\n 'WARNING': 'red',\n 'ERROR': 'blue,bg_bold_red',\n 'CRITICAL': 'red,bg_white',\n }\n )\n\n logger = logging.getLogger('shen-yue-is-beautiful')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n return logger", "def create_logger():\n log = logging.getLogger() # root logger\n log.setLevel(logging.DEBUG)\n format_str = '%(asctime)s - %(levelname)-8s - %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n if os.isatty(2):\n cformat = '%(log_color)s' + format_str\n colors = 
{'DEBUG': 'reset',\n 'INFO': 'reset',\n 'WARNING': 'bold_yellow',\n 'ERROR': 'bold_red',\n 'CRITICAL': 'bold_red'}\n formatter = colorlog.ColoredFormatter(cformat, date_format,\n log_colors=colors)\n else:\n formatter = logging.Formatter(format_str, date_format)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n log.addHandler(stream_handler)\n return logging.getLogger(__name__)", "def default_logger_creator(config):\n return UnifiedLogger(config, logdir, loggers=None)", "def __init__(self):\n self.log = logging.getLogger()", "def get_instance()->'ErmineLogger':\n if ErmineLogger.instance is None:\n ErmineLogger.instance = ErmineLogger()\n return ErmineLogger.instance", "def _createPredictionLogger(self):\n\n class DummyLogger:\n def writeRecord(self, record): pass\n def writeRecords(self, records, progressCB): pass\n def close(self): pass\n\n self._predictionLogger = DummyLogger()", "def create_logger(log_dir=None):\n if log_dir and not os.path.exists(log_dir):\n os.makedirs(log_dir)\n log_format = '%(asctime)s %(process)d [%(levelname)s] %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_format)\n logger = logging.getLogger('es_on_gke')\n if log_dir:\n log_file = os.path.join(log_dir, 'log.txt')\n file_hdl = logging.FileHandler(log_file)\n formatter = logging.Formatter(fmt=log_format)\n file_hdl.setFormatter(formatter)\n logger.addHandler(file_hdl)\n return logger", "def setup_logger(level):\n logger = loguru.logger\n logger.remove()\n\n # Hearth logger\n logger.add(\n sys.stdout,\n level=level,\n filter=lambda record: record[\"extra\"].get(\"service\") == LoggerServices.Hearth,\n format=LoggerFormats.Hearth\n )\n\n # Stethoscope logger\n logger.add(\n sys.stdout,\n level=level,\n filter=lambda record: record[\"extra\"].get(\"service\") == LoggerServices.Stethoscope,\n format=LoggerFormats.Stethoscope\n )\n\n return logger", "def whLogger(name):\n return logging.getLogger('wh.'+name)", "def __init__(self):\n\n self.__logger = logging.getLogger()\n\n formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: ' +\n '%(message)s')\n\n file_handler = RotatingFileHandler('.log', 'a', 1000000, 1)\n\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(formatter)\n self.__logger.addHandler(file_handler)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.INFO)\n self.__logger.addHandler(stream_handler)", "def get_instance():\n if Logger._logger_instance is None:\n Logger()\n return Logger._logger_instance", "def setupLogger(logger=None, log_format=\"%(asctime)s %(levelname)s [\"+APP_NAME+\"] %(message)s\", level=logging.INFO, log_name=APP_NAME+\".log\", logger_name=APP_NAME):\r\n\tif logger is None:\r\n\t\tlogger = logging.getLogger(logger_name)\r\n\t\r\n\tlogger.propagate = False # Prevent the log messages from being duplicated in the python.log file\r\n\tlogger.setLevel(level)\r\n\t\r\n\tfile_handler = logging.handlers.RotatingFileHandler(make_splunkhome_path([\"var\", \"log\", \"splunk\", log_name]), maxBytes=2500000, backupCount=5)\r\n\tformatter = logging.Formatter(log_format)\r\n\tfile_handler.setFormatter(formatter)\r\n\t\r\n\tlogger.handlers = []\r\n\tlogger.addHandler(file_handler)\r\n\t\r\n\treturn logger", "def setup_logger():\n\n global _logger\n global _has_logbook\n\n if _has_logbook:\n _logger = Logger('UoM_WIFI')\n try:\n log_path = join(sys.argv[1], '%s.log' % USERNAME)\n except IndexError:\n log_path = join(split(abspath(__file__))[0], '%s.log' % USERNAME)\n\n # because the log file 
is owned by root, if this program is ran by a\n # regular user, we need to prevent it from crashing by writing to a file\n # owned by root\n try:\n # create the handler\n log_handler = RotatingFileHandler(log_path)\n\n # push the context object to the application stack\n log_handler.push_application()\n except IOError:\n _has_logbook = False", "def create_instance(data, logging_file_name):\n Utils.logging(data, logging_file_name)", "def logger(self) -> logging.Logger:\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s - %(name)-15s - [%(levelname)-10s] %(message)s\"\n )\n return logging.getLogger(os.path.basename(__file__))", "def construct_logger(name, save_dir):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n file_no_ext = out_file_core()\n\n fh = logging.FileHandler(os.path.join(save_dir, file_no_ext + \".txt\"), encoding=\"utf-8\")\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s %(name)s %(levelname)s: %(message)s\")\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n gitdiff_patch = os.path.join(save_dir, file_no_ext + \".gitdiff.patch\")\n os.system(f\"git diff HEAD > {gitdiff_patch}\")\n\n return logger", "def _logger(self) -> logging.Logger:\n return logging.getLogger(\n type(self).__name__\n )", "def _createlog(self):\n\t\tif self.toemail and self.fromemail and self.smtphost:\n\t\t\t# Use the email logger as the first logger, so that when sending the email (in :meth:`EmailLogger.close`) fails, it will still be logged to the log file/stdout/stderr\n\t\t\tself._loggers.append(EmailLogger(self))\n\t\tif self.log2stderr:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stderr, self._formatlogline))\n\t\tif self.log2stdout:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stdout, self._formatlogline))\n\t\tif self.log2file:\n\t\t\t# Create the log file\n\t\t\tlogfilename = ul4c.Template(self.logfilename, \"logfilename\").renders(job=self)\n\t\t\tlogfilename = url.File(logfilename).abs()\n\t\t\tself.logfileurl = str(url.Ssh(misc.sysinfo.user_name, misc.sysinfo.host_fqdn or misc.sysinfo.host_name, logfilename.local()))\n\t\t\tskipurls = [logfilename]\n\t\t\tlogfile = logfilename.open(mode=\"w\", encoding=self.encoding, errors=self.errors)\n\t\t\tif self.loglinkname is not None:\n\t\t\t\t# Create the log link\n\t\t\t\tloglinkname = ul4c.Template(self.loglinkname, \"loglinkname\").renders(job=self)\n\t\t\t\tloglinkname = url.File(loglinkname).abs()\n\t\t\t\tskipurls.append(loglinkname)\n\t\t\t\tlogfilename = logfilename.relative(loglinkname)\n\t\t\t\ttry:\n\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\texcept OSError as exc:\n\t\t\t\t\tif exc.errno == errno.EEXIST:\n\t\t\t\t\t\tloglinkname.remove()\n\t\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise\n\t\t\tself._loggers.append(URLResourceLogger(self, logfile, skipurls, self._formatlogline))", "def _get_logger(self):", "def get_logger(self, logname, logfile, loglevel, propagate):\n # TODO: simplify\n logger = logging.getLogger(logname)\n logger_handler = WatchedFileHandler(logfile, mode='w')\n # removed \\t%(name)-6s\n log_fmt = '%(asctime)s\\t%(levelname)-8s\\t%(message)s'\n logger_handler.setFormatter(\n logging.Formatter(log_fmt, '%b %d %H:%M:%S'))\n logger.addHandler(logger_handler)\n logger.propagate = propagate\n logger.setLevel(loglevel)\n return logger", "def logger(self):\n pass", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.log = logging.getLogger(logger_name(__name__))", "def 
get_logger(name: str) -> logging.Logger:\n try:\n p = Path(name)\n if p.exists():\n name = str(p.absolute().relative_to(Path.cwd()).as_posix())\n except:\n pass\n logger = logging.getLogger(name)\n # logger.addHandler(TqdmLoggingHandler())\n return logger", "def create_logger(logging, tool_name, level):\n logger = logging.getLogger(tool_name)\n\n # Create handlers\n handler = logging.StreamHandler()\n handler.setLevel(level)\n\n # Create formatters and add it to handlers\n logformat = logging.Formatter(\n '[%(name)s - %(asctime)s] %(levelname)s: %(message)s')\n handler.setFormatter(logformat)\n\n # Add handlers to the logger\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def create_logger(level=logging.DEBUG, record_format=None):\n if record_format is None:\n record_format = \"[%(asctime)s][%(thread)d][%(filename)s][line: %(lineno)d][%(levelname)s] ## %(message)s\"\n\n logger = logging.getLogger(\"mylogger\")\n logger.setLevel(level)\n # 修改\n fh.setLevel(level)\n ch.setLevel(level)\n formatter = logging.Formatter(record_format)\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n return logger", "def logger_setup(self, logger_name):\n logger = logging.getLogger(logger_name)\n logger_path = \"/tmp/\" + logger.name\n logger_format = '%(asctime)s %(name)s %(levelname)s %(lineno)d %(message)s'\n\n # set up logging to file\n logging.basicConfig(\n level=logging.INFO,\n format=logger_format,\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=logger_path,\n filemode='w'\n )\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which for console use\n formatter = logging.Formatter(logger_format)\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)\n return logger", "def __init__(self):\r\n self.logger = dict()", "def create_logger(app_name: str) -> logging.Logger:\n if not os.path.exists(os.path.join(os.getcwd(), 'logs')):\n os.mkdir(os.path.join(os.getcwd(), 'logs'))\n\n app_logfile = os.path.join(os.getcwd(), 'logs', f'{app_name}.log')\n\n logger = logging.getLogger(f\"{app_name}-logger\")\n logger.setLevel(logging.DEBUG)\n\n handler = logging.handlers.RotatingFileHandler(filename=app_logfile, mode='a', maxBytes=20000, backupCount=10)\n handler.setLevel(logging.DEBUG)\n\n # Set the formatter\n formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n handler.setFormatter(formatter)\n\n logger.addHandler(handler)\n\n # Set it as the base handler\n logger.base_handler = handler\n\n # Also add a newline handler to switch to later\n newline_handler = logging.FileHandler(filename=app_logfile, mode='a')\n newline_handler.setLevel(logging.DEBUG)\n newline_handler.setFormatter(logging.Formatter(fmt='')) # Must be an empty format\n \n logger.newline_handler = newline_handler\n\n # Also add the provision for a newline handler using a custom method attribute\n logger.newline = types.MethodType(add_newlines, logger)\n\n # Also add a StreamHandler for printing to stderr\n console_handler = logging.StreamHandler()\n formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n console_handler.setFormatter(formatter)\n \n logger.addHandler(console_handler)\n\n return logger", "def setup_logger(logger_name, logfile='crawler.log'):\n _logger = logging.getLogger(logger_name)\n _logger.setLevel(logging.INFO)\n h = 
logging.handlers.RotatingFileHandler(filename=logfile,\n maxBytes=10e6, backupCount=1)\n f = logging.Formatter(\n '%(asctime)s %(processName)-10s %(levelname)-8s %(message)s')\n h.setFormatter(f)\n _logger.addHandler(h)\n return _logger", "def init_logger(level=logging.DEBUG, when=\"D\", backup=7,\n _format=\"%(levelname)s: %(asctime)s: %(filename)s:%(lineno)d * %(thread)d %(message)s\",\n datefmt=\"%m-%d %H:%M:%S\"):\n formatter = logging.Formatter(_format, datefmt)\n logger = logging.getLogger()\n logger.setLevel(level)\n\n log_path = ops.join(os.getcwd(), 'logs/shadownet.log')\n _dir = os.path.dirname(log_path)\n if not os.path.isdir(_dir):\n os.makedirs(_dir)\n\n handler = handlers.TimedRotatingFileHandler(log_path, when=when, backupCount=backup)\n handler.setLevel(level)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n handler = handlers.TimedRotatingFileHandler(log_path + \".log.wf\", when=when, backupCount=backup)\n handler.setLevel(logging.WARNING)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n handler = logging.StreamHandler()\n handler.setLevel(level)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def init_logger(level=logging.DEBUG, when=\"D\", backup=7,\r\n _format=\"%(levelname)s: %(asctime)s: %(filename)s:%(lineno)d * %(thread)d %(message)s\",\r\n datefmt=\"%m-%d %H:%M:%S\"):\r\n formatter = logging.Formatter(_format, datefmt)\r\n logger = logging.getLogger()\r\n logger.setLevel(level)\r\n\r\n log_path = ops.join(os.getcwd(), 'logs/shadownet.log')\r\n _dir = os.path.dirname(log_path)\r\n if not os.path.isdir(_dir):\r\n os.makedirs(_dir)\r\n\r\n handler = handlers.TimedRotatingFileHandler(log_path, when=when, backupCount=backup)\r\n handler.setLevel(level)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n handler = handlers.TimedRotatingFileHandler(log_path + \".log.wf\", when=when, backupCount=backup)\r\n handler.setLevel(logging.WARNING)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n handler = logging.StreamHandler()\r\n handler.setLevel(level)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n return logger", "def get_logger():\n # Prepare log directory.\n try:\n os.mkdir('logs')\n except FileExistsError:\n pass\n\n # Create logger and formatter.\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s %(message)s')\n\n # Create and attach stream handler.\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n # Create and attach file handler.\n file_handler = logging.handlers.TimedRotatingFileHandler(\n 'logs/log.txt', when='d', encoding='utf-8')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n return logger", "def logger(self):\n return logging", "def __init__(self):\n # Hardware initialization\n gpio.init()\n # Logging\n self._logger = logging.getLogger(' '.join([__name__, __version__]))\n self._logger.debug(\n 'Instance of %s created: %s',\n self.__class__.__name__,\n str(self)\n )", "def _init_logger(self, handler: Rfc5424SysLogHandler | SyslogHandlerTLS) -> Logger:\n syslog_logger = getLogger('SysLogLogger')\n syslog_logger.setLevel(self.logging_level)\n syslog_logger.addHandler(handler)\n return syslog_logger", "def getLogger(self):\n logger = logging.getLogger(self.name)\n logger.setLevel(self.level)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s 
- %(message)s')\n # add a rotating handler\n if not logger.handlers:\n handler = RotatingFileHandler(self.path, self.maxBytes, self.backupCount)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n # Log to stream for debugging\n streamHandler = logging.StreamHandler(sys.stdout)\n streamHandler.setFormatter(formatter)\n logger.addHandler(streamHandler)\n\n return logger", "def setup_custom_logger(name):\n formatter = logging.Formatter(fmt=FORMAT, datefmt=DATEFMT)\n\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(LEVEL)\n logger.addHandler(handler)\n\n return logger", "def initLogger(base, name):\n\n quickLogger = logging.getLogger(name)\n quickLogger.setLevel(logging.DEBUG)\n #Prevents duuplicate log entries after reinitialization. \n if(not quickLogger.handlers):\n fh = logging.FileHandler(base+'_'+name+'.log')\n fh.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n quickLogger.addHandler(fh)\n quickLogger.addHandler(ch)\n\n return quickLogger", "def __create_logger(who, level):\n global loggers\n global toconsole\n global LEVELS\n global console\n global logfile\n loggers[who] = logging.getLogger(who)\n loggers[who].setLevel(level)\n format = logging.Formatter(\"%(asctime)s - %(name)s - \"\\\n \"%(levelname)s - %(message)s\")\n if (toconsole):\n if (console == None):\n console = logging.StreamHandler()\n console.setFormatter(format)\n loggers[who].addHandler(console)\n else:\n if (logfile == None):\n logfile = logging.handlers.RotatingFileHandler('/var/log/yapc.log',\n maxBytes=10485760,\n backupCount=10)\n logfile.setFormatter(format)\n loggers[who].addHandler(logfile)\n loggers[GENERIC_LOG_NAME].log(LEVELS[\"VDBG\"],\n \"Add logger for \"+who+\" at level \"+str(level))", "def _init():\n global logger\n logger = logging.getLogger(\"Log\")", "def __init__(self):\n\n self.logger = logging.getLogger('sound-count')\n\n self.logger.setLevel(logging.DEBUG)\n\n self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')\n\n self.stdout_handler = logging.StreamHandler()\n self.stdout_handler.setFormatter(self.formatter)\n\n self.file_handler = logging.FileHandler(config['LOG_PATH'])\n self.file_handler.setFormatter(self.formatter)\n\n self.logger.addHandler(self.stdout_handler)\n self.logger.addHandler(self.file_handler)", "def get_logger(name):\n logger = logging.getLogger(name)\n # clear handlers if they were created in other runs\n if (logger.hasHandlers()):\n logger.handlers.clear()\n logger.setLevel(logging.DEBUG)\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n # create console handler add add to logger\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n # create file handler add add to logger when name is not None\n if name is not None:\n fh = logging.FileHandler(f'GNN-{name}.log')\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n return logger", "def __init__(self, logger_class=None, logger_name=None):\n\t\tself._logger_class = logger_class\n\t\tself._logger_name = logger_name", "def log_setup(self):\n # Logger initialisation\n logger = logging.getLogger(self.app_name)\n logger.setLevel(logging.DEBUG)\n\n # Creating console handler and set level to debug\n ch = 
logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # Creating formatter\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n\n # Adding formatter to ch\n ch.setFormatter(formatter)\n\n # Adding ch to logger\n logger.addHandler(ch)\n\n # Setting the Logger Level (INFO)\n logger.setLevel(logging.INFO)\n\n return logger", "def __init__(self, name):\n self.mylog = logging.getLogger(name)\n self.handler = logging.StreamHandler()\n self.formatter = MyFormatter('%(levelname)s: %(message)s')\n self.handler.setFormatter(self.formatter)\n self.mylog.addHandler(self.handler)\n self.mylog.setLevel(logging.INFO)\n self.handler.setLevel(logging.INFO)\n self.debug_level = 0\n self.verbosity = False", "def _logger(self):\n logger = logging.getLogger(self.NAME)\n logger.setLevel(self.LOG_LEVEL)\n shandler = logging.StreamHandler(sys.stdout)\n fmt = '\\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'\n fmt += '%(lineno)d %(asctime)s\\033[0m| %(message)s'\n shandler.setFormatter(logging.Formatter(fmt))\n logger.addHandler(shandler)\n return logger", "def init_logger():\n LOG_LEVEL = logging.INFO\n LOGFORMAT = \"%(log_color)s%(levelname)-1s: %(log_color)s%(message)s\"\n logging.root.setLevel(LOG_LEVEL)\n formatter = ColoredFormatter(LOGFORMAT)\n stream = logging.StreamHandler()\n stream.setLevel(LOG_LEVEL)\n stream.setFormatter(formatter)\n log = logging.getLogger('pythonConfig')\n log.setLevel(LOG_LEVEL)\n log.addHandler(stream)\n return log", "def make_logger(name=str(os.getpid())):\n if not sys.platform.startswith(\"win\") and sys.stderr.isatty():\n def add_color_emit_ansi(fn):\n \"\"\"Add methods we need to the class.\"\"\"\n def new(*args):\n \"\"\"Method overload.\"\"\"\n if len(args) == 2:\n new_args = (args[0], copy(args[1]))\n else:\n new_args = (args[0], copy(args[1]), args[2:])\n if hasattr(args[0], 'baseFilename'):\n return fn(*args)\n levelno = new_args[1].levelno\n if levelno >= 50:\n color = '\\x1b[31;5;7m\\n ' # blinking red with black\n elif levelno >= 40:\n color = '\\x1b[31m' # red\n elif levelno >= 30:\n color = '\\x1b[33m' # yellow\n elif levelno >= 20:\n color = '\\x1b[32m' # green\n elif levelno >= 10:\n color = '\\x1b[35m' # pink\n else:\n color = '\\x1b[0m' # normal\n try:\n new_args[1].msg = color + str(new_args[1].msg) + ' \\x1b[0m'\n except Exception as reason:\n print(reason) # Do not use log here.\n return fn(*new_args)\n return new\n log.StreamHandler.emit = add_color_emit_ansi(log.StreamHandler.emit)\n log_file = os.path.join(gettempdir(), str(name).lower().strip() + \".log\")\n log.basicConfig(level=-1, filemode=\"w\", filename=log_file)\n log.getLogger().addHandler(log.StreamHandler(sys.stderr))\n adrs = \"/dev/log\" if sys.platform.startswith(\"lin\") else \"/var/run/syslog\"\n try:\n handler = log.handlers.SysLogHandler(address=adrs)\n except:\n log.debug(\"Unix SysLog Server not found, ignored Logging to SysLog.\")\n else:\n log.getLogger().addHandler(handler)\n log.debug(\"Logger created with Log file at: {0}.\".format(log_file))\n return log", "def setup_logger():\n logger = logging.getLogger('tracking_log')\n logger.setLevel(logging.INFO)\n #Where to Store needs to be identified?\n f_handler = logging.FileHandler(PROCESSED_LOGFILE, mode='a', encoding = None, delay = False)\n f_handler.setLevel(logging.INFO)\n f_format = logging.Formatter('%(asctime)s\\t%(message)s\\t%(dataset_id)s\\t%(status)s')\n f_handler.setFormatter(f_format)\n logger.addHandler(f_handler)\n return logger", "def initialize(context, level):\n if 
not Log.initialized:\n Log.logger = logging.getLogger(context)\n Log.initialized = True\n logging.basicConfig(\n filename=CONST.APP_LOG_FILENAME,\n format=CONST.APP_LOG_FORMAT,\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n Log.logger.setLevel(level)\n Log.logger.log(50, 'Logging initialised, level={}'.format(level))\n return Log.logger", "def initialize_logger(self, exp_dir):\n env = EnvSing.get_instance()\n # configure logger\n self.log_file = exp_dir + \"/pruner.log\"\n\n if not env.exists(self.log_file):\n env.dump(\"\", self.log_file)\n self.fd = env.open_file(self.log_file, flags=\"w\")\n self._log(\"Initialized Pruner Logger\")", "def init_logger(self, logger_path,\n logger_name='Experiment') -> logging.Logger:\n self.logger = logging.getLogger(logger_name)\n\n self.logger.setLevel(logging.INFO)\n\n file_handler = logging.FileHandler(logger_path) # TOD bug here\n formatter = logging.Formatter('%(asctime)s||%(message)s')\n file_handler.setFormatter(formatter)\n self.logger.addHandler(file_handler)", "def setup_logger(log_file_path =\"\"):\n formatter = ColoredFormatter(\n \"%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'purple',\n }\n )\n logging.basicConfig(handlers=[logging.FileHandler(log_file_path, 'w', 'utf-8')],\n format=\"%(message)s\"\n )\n logger = logging.getLogger('')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n return logger", "def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(__lvl__)\n ch = logging.StreamHandler()\n ch.setLevel(__lvl__)\n preformat = f'[{logger.name}]'\n # [%(threadName)s/%(levelname)s] = [MainThread/INFO]\n ch.setFormatter(logging.Formatter(fmt=preformat + ' %(levelname)s [%(asctime)s] %(message)s',\n datefmt='%H:%M:%S'))\n logger.addHandler(ch)\n return logger", "def create_logger_service(program_id, processor_id):\n logger = logging.getLogger(__name__)\n logger = logging.LoggerAdapter(logger,\n extra={'program_id': program_id,\n 'processor_id': processor_id})\n return logger", "def __init__(self, log):\n self.log = log\n self.logger = logging.getLogger(self.__class__.__name__)", "def create_logger(\n project_name: str,\n level: str = \"INFO\",\n log_dir: str = \"/tmp/logs\",\n file_name: Optional[str] = None,\n do_print: bool = True,\n simple_logging: bool = False,\n log_to_file: bool = False,\n rich_logging: bool = False,\n time_zone: Optional[str] = None,\n):\n import __main__\n\n if file_name is None:\n try:\n file_name = ntpath.basename(__main__.__file__).split(\".\")[0]\n except:\n file_name = \"logs\"\n\n logger = logging.getLogger(file_name)\n logger.handlers.clear()\n logger.setLevel(getattr(logging, level))\n\n if time_zone:\n from pytz import timezone, utc\n def time_formatter(*args):\n # TODO: Doesnt work with rich formatter\n utc_dt = utc.localize(datetime.datetime.utcnow())\n my_tz = timezone(time_zone)\n converted = utc_dt.astimezone(my_tz)\n return converted.timetuple()\n\n logging.Formatter.converter = time_formatter\n\n if rich_logging:\n from rich.logging import RichHandler\n stream_format = f\"{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = RichHandler(omit_repeated_times=False)\n else:\n stream_format = f\"%(asctime)s:%(levelname)s:{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = logging.StreamHandler()\n\n 
file_formatter = stream_formatter = logging.Formatter(\n stream_format, \"%Y-%m-%d %H:%M:%S\"\n )\n\n if simple_logging:\n file_formatter = logging.Formatter(\"%(message)s\")\n stream_formatter = logging.Formatter(\"%(message)s\")\n\n if log_to_file:\n date = datetime.date.today()\n date = \"%s-%s-%s\" % (date.day, date.month, date.year)\n log_file_path = os.path.join(log_dir, \"%s-%s.log\" % (file_name, date))\n\n create_folder(log_dir)\n file_handler = logging.FileHandler(log_file_path)\n file_handler.setFormatter(file_formatter)\n logger.addHandler(file_handler)\n\n if do_print:\n stream_handler.setFormatter(stream_formatter)\n logger.addHandler(stream_handler)\n\n logger.propagate = False\n\n return logger", "def get_logger(name: str, level: str = LOG_LEVEL) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.propagate = False\n logger.setLevel(level)\n coloredlogs.install(\n level=level, logger=logger, fmt='%(asctime)s %(name)s: %(lineno)s %(levelname)s: %(message)s', field_styles=FIELD_STYLES\n )\n return logger", "def get_logger(level=None, name=None, filename=None, log_dir=None):\n if isinstance(log_dir, str):\n log_dir = Path(log_dir)\n if level is None:\n level = settings.log_level\n if name is None:\n name = settings.log_name\n if filename is None:\n filename = settings.log_filename\n\n logger = lg.getLogger(name)\n\n # if a logger with this name is not already set up\n if len(logger.handlers) == 0:\n\n # get today's date and construct a log filename\n todays_date = dt.datetime.today().strftime(\"%Y_%m_%d\")\n\n if not log_dir:\n log_dir = settings.logs_folder\n\n log_filename = log_dir / \"{}_{}.log\".format(filename, todays_date)\n\n # if the logs folder does not already exist, create it\n if not log_dir.exists():\n log_dir.makedirs_p()\n # create file handler and log formatter and set them up\n formatter = lg.Formatter(\n \"%(asctime)s [%(process)d] %(levelname)s - %(name)s - %(\" \"message)s\"\n )\n if settings.log_file:\n handler = lg.FileHandler(log_filename, encoding=\"utf-8\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n if settings.log_console:\n handler = lg.StreamHandler(sys.stdout)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def logger(self) -> logging.Logger:\n cls = type(self)\n return logging.getLogger(cls.__module__ + \".\" + cls.__name__)", "def get_logger(args):\n logger_kind = 'tensorboard' if 'logger' not in args.__dict__ else args.logger\n if logger_kind == 'tensorboard':\n logger = pl.loggers.tensorboard.TensorBoardLogger(\n save_dir=os.path.join(os.getcwd(), 'tmp'),\n name=args.dataset,\n )\n\n elif logger_kind == 'wandb':\n logger = pl.loggers.WandbLogger(\n save_dir=os.path.join(os.getcwd(), 'tmp'),\n name=args.backbone,\n )\n\n else:\n raise Exception(f'Error. 
Logger \"{lokker_kind}\" is not supported.')\n return logger", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def init_tensorboard_logger(self, **kwargs):\n self._tensorboard_logger = SummaryWriter(logdir=self.tensorboard_path, **kwargs)\n self.log.info(\"Tensorboard Logger initialized in: \" + str(self.tensorboard_path))\n return self._tensorboard_logger", "def setup_logger(name, log_file, level=logging.INFO):\n if name in ( \"\", None ):\n raise \"No name\"\n return\n\n if log_file in ( \"\", None ):\n raise \"No log_file\"\n return\n\n formatter = logging.Formatter(\n fmt = '%(asctime)s.%(msecs)03d %(levelname)s File: \"%(pathname)s\", line %(lineno)d, in %(module)s - %(funcName)s: %(message)s',\n datefmt= '%Y-%m-%d %H:%M:%S'\n )\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def init_logger(name, path=None):\n import logging.handlers\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.propagate = 0\n _nf = ['[%(asctime)s]',\n '[%(name)s]',\n '[%(filename)20s:%(funcName)15s:%(lineno)5d]',\n '[%(levelname)s]',\n ' %(message)s']\n _cf = ['$GREEN[%(asctime)s]$RESET',\n '[%(name)s]',\n '$BLUE[%(filename)20s:%(funcName)15s:%(lineno)5d]$RESET',\n '[%(levelname)s]',\n ' $CYAN%(message)s$RESET']\n nformatter = logging.Formatter('-'.join(_nf))\n cformatter = ColoredFormatter('-'.join(_cf))\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(cformatter)\n\n if path:\n path += '/' + name + '.log'\n else:\n path = get_path('log') + '/' + name + '.log'\n rf = logging.handlers.RotatingFileHandler(path, maxBytes=5 * 1024 * 1024, backupCount=5)\n rf.setLevel(logging.DEBUG)\n rf.setFormatter(nformatter)\n\n logger.addHandler(ch)\n logger.addHandler(rf)\n return logger", "def get_logger(name, log_dir, config_dir):\n config_dict = json.load(open(config_dir + 'log_config.json'))\n config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')\n logging.config.dictConfig(config_dict)\n logger = logging.getLogger(name)\n\n std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(logging.Formatter(std_out_format))\n logger.addHandler(consoleHandler)\n\n return logger", "def setup_logger() -> None:\n LOGGER.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s \\t|%(asctime)s \\t| %(name)s \\t| %(message)s')\n\n if not check_if_dir_exists(FILENAMES.LOG_DIR):\n os.mkdir(to_abs_file_path(FILENAMES.LOG_DIR))\n\n file_handler: logging.FileHandler = logging.FileHandler(to_abs_file_path(FILENAMES.LOG), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n console_handler: logging.StreamHandler = logging.StreamHandler()\n console_handler.setLevel(logging.WARNING)\n\n LOGGER.addHandler(file_handler)\n LOGGER.addHandler(console_handler)\n LOGGER.info('Filehandler and Console_Handler were born, let\\'s start logging')", "def create_logger(log_dir):\n logger = logging.getLogger(__file__)\n logger.setLevel(logging.INFO)\n\n # file logger\n log_filename = \"probabilist_connectogram_%s.log\" % time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n if log_dir:\n log_path = os.path.join(log_dir, log_filename)\n else:\n log_path = log_filename\n 
file_handler = logging.FileHandler(log_path)\n formatter = logging.Formatter('%(asctime)s :: %(message)s')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.DEBUG)\n logger.addHandler(file_handler)\n\n # console logger\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n logger.info(\"Log path: %s\" % log_path)\n\n return logger", "def setup_logger(name):\n #Get PC host name\n hostname = socket.gethostname()\n\n #Log variables\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(name)\n\n #Create a file handler\n handler = logging.FileHandler('\\\\\\\\fs01\\\\share\\\\IT\\\\Shane\\\\log\\\\ProdFloorTool.log')\n handler.setLevel(logging.INFO)\n\n #Create a logging format\n formatter = logging.Formatter(hostname + ' - %(asctime)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n\n #Add the handlers to the logger\n logger.addHandler(handler)\n\n return logger", "def clone(self):\n # TODO: ensure that io is thread save\n return Logger(prepend=str(self._prepend), verbosity=self._verbosity, io=self._io, pretty=self._pretty)", "def get_logger(self, name=\"amulet-logger\", level=logging.DEBUG):\n log = logging\n logger = log.getLogger(name)\n fmt = log.Formatter(\"%(asctime)s %(funcName)s \"\n \"%(levelname)s: %(message)s\")\n\n handler = log.StreamHandler(stream=sys.stdout)\n handler.setLevel(level)\n handler.setFormatter(fmt)\n\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def setup(log_level, log_name):\n\n # Log format string for flake8 compliance\n log_fmt = ('%(levelname)-8s %(asctime)s%(filename)s:%(lineno)-4s '\n '%(message)s')\n\n # Configure logging\n config = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': log_fmt,\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n },\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'default',\n },\n },\n 'loggers': {\n 'createtransfers': {\n 'level': log_level,\n 'handlers': ['console'],\n },\n },\n }\n\n logger = logging.getLogger(log_name)\n logging.config.dictConfig(config)\n return logger", "def set_up_logger(name,\n logfilename,\n log_file_level='NOTICE',\n log_stderr_level='NOTICE',\n logo=False):\n logger = logbook.Logger(name)\n if logo:\n fmt_str = '{record.message:^120}'\n logger.handlers.append(logbook.StderrHandler(level='WARNING',\n format_string=fmt_str))\n logofile = os.path.join(os.path.dirname(__file__), 'logo.txt')\n with open(logofile, 'r') as f:\n for line in f:\n logger.warn(line.strip('\\n'))\n logger.handlers = []\n\n fmt_str = ('[{record.time:%Y-%m-%d %H:%M:%S}][{record.level_name:*^11}] :'\n ' {record.message:~^45}'\n ' line {record.lineno:<3} in '\n '{record.module:<}.{record.func_name:<} ')\n\n logfilename = os.path.join(os.getenv('PANLOG', '/var/huntsman/logs'), logfilename)\n logger.handlers.append(TRFH(logfilename,\n level=log_file_level,\n mode='a+',\n date_format='%Y-%m-%d',\n bubble=True,\n backup_count=100,\n format_string=fmt_str))\n\n logger.handlers.append(StdH(level=log_stderr_level,\n bubble=True,\n format_string=fmt_str))\n return logger", "def __init__(self, file_path: str = \"..\\\\Resource\\\\log.txt\", name: str = 'TangoServerLogger'):\r\n self.logger = logging.getLogger(name)\r\n self.logger.setLevel(logging.DEBUG)\r\n\r\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n\r\n logfile = 
logging.FileHandler(file_path)\r\n logfile.setLevel(logging.DEBUG)\r\n logfile.setFormatter(formatter)\r\n\r\n self.logger.addHandler(logfile)" ]
[ "0.68505394", "0.6639985", "0.66274846", "0.65573317", "0.65444726", "0.65337306", "0.6491986", "0.648222", "0.64741373", "0.64380926", "0.64128065", "0.6381014", "0.6374408", "0.6352451", "0.6310848", "0.630257", "0.62926865", "0.6291359", "0.6255781", "0.62482107", "0.62347984", "0.62326074", "0.6221126", "0.6218974", "0.6210656", "0.6209099", "0.6197003", "0.61830235", "0.6164583", "0.6162523", "0.6151774", "0.61168545", "0.6112994", "0.6112776", "0.6083973", "0.60790586", "0.60775983", "0.6073959", "0.60666364", "0.60563976", "0.6047497", "0.6042815", "0.6029889", "0.6029099", "0.60216063", "0.60155004", "0.599305", "0.59911406", "0.5981234", "0.59787863", "0.59639776", "0.59628487", "0.5959046", "0.5956651", "0.5953479", "0.5933138", "0.59301907", "0.5930062", "0.59282196", "0.59204984", "0.5913111", "0.5894758", "0.58873487", "0.5886831", "0.588659", "0.5873493", "0.586381", "0.58574516", "0.58548063", "0.5850064", "0.58478713", "0.58469874", "0.58453876", "0.5837596", "0.5831682", "0.58236593", "0.5822605", "0.5802668", "0.5794079", "0.5792225", "0.57809144", "0.57776237", "0.5774225", "0.5773499", "0.57726246", "0.57699984", "0.5767189", "0.57651347", "0.5763014", "0.57575", "0.5754701", "0.5752725", "0.57508713", "0.57496583", "0.5747662", "0.57469475", "0.5738433", "0.5737683", "0.5734757", "0.57326823", "0.5732485" ]
0.0
-1
Set Log category name to be used.
def set_category(self, category_name):
    try:
        module_name = get_module_name_from_log_category(category_name)
        log_yang_module = importlib.import_module('gi.repository.' + module_name)
        if not log_yang_module:
            logger.error("Module %s is not found to be added as log category for %s", module_name, category_name)
            print("Module %s is not found to be added as log category for %s", module_name, category_name)
            return
        for level in RwLogger.level_event_cls_map.values():
            if not hasattr(log_yang_module, level):
                logger.error("Module %s does not have required log notification for %s", module_name, level)
                print("Module %s does not have required log notification for %s", module_name, level)
                return
        self._log_yang_module = log_yang_module
        self._log_category_name = category_name
    except Exception as e:
        logger.exception("Caught error %s when trying to set log category (%s)", repr(e), category_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rider_category_name(self, rider_category_name):\n\n self._rider_category_name = rider_category_name", "def category(self, category: str):\n\n self._category = category", "def set_scribe_category(category):\r\n LogOptions._SCRIBE_CATEGORY = category", "def set_category(self, category):\n\n\t\tif category is not None and not isinstance(category, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: category EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__category = category\n\t\tself.__key_modified['category'] = 1", "def category_name(self):\r\n return conf.lib.clang_getDiagnosticCategoryName(self.category_number)", "def category_name(self):\n return self.category.name", "def set_file_name(self):\n name = 'LogImage'\n name_log_date = time.strftime(\"%Y%m%d\")\n self.name_log = name + name_log_date + '.log'", "def set_category(self, frontmatter):\n gcates = self._global_categories\n cate_name = ''\n segments = self.path.split(os.path.sep)\n if len(segments) > 2:\n cate_name = segments[1].lower()\n else:\n cate_name = 'uncategorized'\n if cate_name not in gcates:\n gcates[cate_name] = Category(name=cate_name, config=self._config)\n this_cate = gcates[cate_name]\n this_cate.notes.append(self)\n this_cate.count += 1\n self.category = this_cate\n\n # for key in frontmatter:\n # if key.strip().lower().startswith('cate'):\n # # public\n # self.category = frontmatter[key]\n # return\n # self.category = 'general'", "def category_names(self, category_names):\n\n self._category_names = category_names", "def _change_category(cls, category):\n time_now = cls.__stop_category()\n with GlobalProvenance() as db:\n cls._category_id = db.insert_category(category, cls._machine_on)\n cls._category = category\n cls._category_time = time_now", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def rename(self, name):\n self._name = name\n self._logger = logging.getLogger(name)\n self._logger.setLevel(self._level)", "def category_name(self):\n try:\n category = self.proto.category.parent\n return f'{category.name} - {self.proto.category.name}'\n except AttributeError:\n return self.proto.category.name", "def get_category_name(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])", "def category(self, category):\n allowed_values = [\"CALLBACK\", \"CALL_RESTRICTION\", \"CALL_RULE\", \"CAMPAIGN\", \"CAMPAIGN_RULE\", \"CONTACT\", \"CONTACT_LIST_FILTER\", \"DNC_LIST\", \"ENTITY_LIMIT\", \"IMPORT_ERROR\", \"MESSAGING_CAMPAIGN\", \"ORGANIZATION_CONFIGURATION\", \"SCHEDULE\"]\n if category.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for category -> \" + category)\n self._category = \"outdated_sdk_version\"\n else:\n self._category = category", "def category(self, category):\n allowed_values = [\"Trace\", \"Verbose\", \"Info\", \"Wait\", \"Highlight\", \"Gap\", \"Alert\", \"Warning\", \"Error\", \"Fatal\", \"Planned\", \"Updated\", \"Finished\", \"Abandoned\"] # noqa: E501\n if category not in allowed_values:\n raise ValueError(\n \"Invalid value for `category` ({0}), must be one of {1}\" # noqa: E501\n .format(category, allowed_values)\n )\n\n self._category = category", "def name(self) -> str:\n return str(self.category.value)", "def local_category(self, local_category: str):\n\n self._local_category = 
local_category", "def scribe_category():\r\n if LogOptions._SCRIBE_CATEGORY is None:\r\n LogOptions._SCRIBE_CATEGORY = app.get_options().twitter_common_log_scribe_category\r\n return LogOptions._SCRIBE_CATEGORY", "def category(self) -> str:\n return pulumi.get(self, \"category\")", "def get_name(self):\n return self.category_name", "def setLogFileName(self, _strLogFileName):\n self.edLogging.setLogFileName(_strLogFileName)", "def get_module_name_from_log_category(log_category):\n words = log_category.split('-')\n words.append('yang')\n return ''.join(word.capitalize() for word in words)", "def __init__(self, category):\n self.category = category\n self.name = \"Filters.document.category('{}')\".format(self.category)", "def rename_cats(self, **mapping):\n if self.is_categorised:\n self.cats = self.cats.rename(columns=mapping)\n else:\n raise NotCategorisedError", "def category(self, category: Category):\n\n self._category = category", "def category_axis(self, category_axis):\n\n self.container['category_axis'] = category_axis", "def category(self) -> str:\n return self._category", "def category(self) -> str:\n return self._category", "def log_group_name(self):\n return self._get_param(CW_LOGS_CFN_PARAM_NAME)", "def category(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])", "def log_group_name(self) -> str:\n ...", "def set_label(self, value: str = \"nowhere\"):\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"label\"))\r\n self._name = value", "def __str__(self):\n return self.category_name", "def byCategory(self, name):\n\t\timport revitron\n\t\ttry:\n\t\t\tself.collector = self.collector.OfCategory(\n\t\t\t revitron.BuiltInCategory(name).get()\n\t\t\t)\n\t\texcept:\n\t\t\tpass\n\t\treturn self", "def category(self) -> str:\n return self._search_in_properties(ATTR_CATEGORY)", "def set_name(self, name):\n self.name = name\n self.labels.change_name(name)", "async def set_category(self, category: str, data: dict) -> None:\n await super(MemoryKVCache, self).set_category(category, data)\n\n if self.in_transaction:\n self.dirty_categories.add(category)", "def category_id(self, category_id):\n\n self._category_id = category_id", "def log_group_name(self) -> str:\n return jsii.get(self, \"logGroupName\")", "def log_group_name(self) -> str:\n return jsii.get(self, \"logGroupName\")", "def category(self, category: str):\n if category is None:\n raise ValueError(\"Invalid value for `category`, must not be `None`\") # noqa: E501\n\n self._category = category", "def config_log_category_cb(data, option):\n change_log_level(G.CONFIG.debug_category, logging.ERROR)\n G.CONFIG.debug_category = G.CONFIG.network.debug_category\n change_log_level(\n G.CONFIG.network.debug_category, G.CONFIG.network.debug_level\n )\n return 1", "def change_log_level(category, level):\n\n if category == \"encryption\":\n category = \"crypto\"\n\n if category == \"all\":\n nio_logger.setLevel(level)\n else:\n nio_logger.getChild(category).setLevel(level)", "def _load_name_with_category(self, names, name, category, snake_case=True):\n if snake_case:\n name = xform_name(name)\n\n if name in names:\n logger.debug(f'Renaming {self.name} {category} {name}')\n self._renamed[(category, name)] = name + '_' + category\n name += '_' + category\n\n if name in names:\n # This isn't good, let's raise instead of trying to keep\n # renaming this value.\n raise ValueError(\n 'Problem renaming {} {} to {}!'.format(\n self.name, category, name\n )\n )\n\n names.add(name)", "def set_name(self, name):\n 
self.class_name = name", "def get_category(self) -> str:\n return self.category", "def reset_category(self, category, metric_slugs):\n key = self._category_key(category)\n if len(metric_slugs) == 0:\n # If there are no metrics, just remove the category\n self.delete_category(category)\n else:\n # Save all the slugs in the category, and save the category name\n self.r.sadd(key, *metric_slugs)\n self.r.sadd(self._categories_key, category)", "def categories_level(self, categories_level):\n\n self._categories_level = categories_level", "def category_title(self):\n categories = {c[0]:c[1] for c in self.CATEGORY_CHOICES}\n if self.category in categories:\n return categories[self.category]", "def set_name(self,name):\r\n self._name = __name", "def category(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"category\")", "def category(self) -> Optional[str]:\n return pulumi.get(self, \"category\")", "def set_index_name(self, name, axis=0):\n self.get_axis(axis).name = name", "def log_name(self) -> Optional[str]:\n return self._log_name", "def __set_name__(self, cls, name):\n pass", "def gaming_category(self):\n \n self.table.loc[self.table[\"category\"].str.contains(\"GAME\"), \"category\"] = \"GAMING\"", "def setName(self, name):\n self.name = str(name)", "def set_name(self, name):\n\t\tself.name_ = name", "def set_name(self, newname=\"\"):\n self.name = newname", "def set_name(self, name):\n self.settings[\"name\"] = name", "def add_category(self, category):\n if category not in self.categories and category.strip() != \"\":\n self.categories.append(category.strip())", "def local_category(self) -> str:\n return self._local_category", "def setName(self, *args):\n return _libsbml.Group_setName(self, *args)", "def create_category(self): # , conf_dir, title):\n category_file_path = self.event_dir / 'category.json'\n category_data = {\n 'title': self.title,\n }\n category_data_text = json.dumps(category_data, **\n JSON_FORMAT_KWARGS) + '\\n'\n save_file(category_file_path, category_data_text)\n logger.debug('File {} created', category_file_path)", "def set_name(self, _name):\n self.name = _name", "def increment_cat(self, category):\r\n self.category_count.setdefault(category, 0)\r\n self.category_count[category] += 1", "def __str__(self):\n \n return \"Category ID: %s %s\" % (self.category_id, self.name)", "def format_category_name(category):\n\n category_words = category.name.rstrip().replace(',', '').replace(\"'\", '').split(\" \")\n return \"-\".join(category_words)", "def set_name(self, name):\r\n self.__name = name", "def categories(self, categories):\n self._categories = categories", "def SetCaseName(self, case_name):\n if case_name:\n self._index_name = case_name.lower()\n else:\n self._index_name = uuid.uuid4().hex", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_cname(self, cname):\n self.__cname = cname", "def cluster_name(self, cluster_name):\n\n self._cluster_name = cluster_name", "def facility_name(self, facility_name):\n\n self._facility_name = facility_name", "def set_thread_name(self, thread_name: str):\n self.thread_name = thread_name", "def add_category(self, name, user_id):\r\n category_id, message = self._db_manager.add_category(name, user_id)\r\n flash(message)\r\n return category_id", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = 
name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def GetCategory(self) -> str:\n return self._category", "def setName(self, name): \n\n self._name = name", "def initialize_category(self):\n if self.category not in self.categories.keys():\n self.docCount[self.category] = 0\n self.wordCount[self.category] = 0\n self.wordFrequencyCount[self.category] = {}\n self.categories[self.category] = True", "def setname(self, name):\n self.__name = name", "def create_category(self, name):\n logger.info('CategoryOfProduct category create initiated')\n newname = name\n try:\n with Transaction().start(DBNAME, 1) as transaction:\n categories = self.Category.search([('name', '=', newname), ('parent', '=', 'Ingredients')])\n parent = self.Category.search(['name', '=', 'Ingredients'])\n if categories:\n return False\n category = self.Category()\n if parent:\n category.parent = parent[-1]\n category.name = newname\n category.save()\n transaction.cursor.commit()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def setName(self, name):\n self._name = name", "def setName(self, name):\n self._name = name", "def set_name(self, name):\n self.__name = name", "def set_name(self, name):\n self.__name = name", "def set_group_name(self, name):\n self.groupname = name", "async def log_channel(self, ctx, channel: discord.TextChannel = None):\n if not channel:\n channel = ctx.channel\n await self.config.logChannel.set(channel.id)\n await ctx.send(f\"Set {channel.mention} as the log channel.\")", "def category(self):\r\n return lambda cls : self.__named(cls, CategoryContext)" ]
[ "0.71033996", "0.656124", "0.6451363", "0.64011025", "0.63785875", "0.6319959", "0.6303118", "0.6279708", "0.6278118", "0.61796457", "0.608429", "0.608429", "0.608429", "0.608429", "0.608429", "0.60362625", "0.6006342", "0.59686434", "0.5840249", "0.57967573", "0.57849914", "0.5779494", "0.5769242", "0.57586586", "0.57558995", "0.57198656", "0.56977445", "0.5682481", "0.56563765", "0.56287086", "0.561542", "0.5587003", "0.5587003", "0.55740976", "0.5556669", "0.5545823", "0.5513134", "0.5493322", "0.54842603", "0.54472846", "0.54234093", "0.5412769", "0.54069716", "0.54018086", "0.54018086", "0.538428", "0.5381492", "0.5373735", "0.53695464", "0.536118", "0.5356725", "0.5278704", "0.52765954", "0.5269553", "0.52610725", "0.5259812", "0.52543265", "0.52377456", "0.5236784", "0.52357376", "0.52331746", "0.5228698", "0.522281", "0.5208827", "0.5201656", "0.5199516", "0.51964873", "0.5186871", "0.51829255", "0.51689464", "0.5168873", "0.5166332", "0.5161005", "0.5160784", "0.5153124", "0.5152339", "0.51493347", "0.51493347", "0.51476246", "0.51443547", "0.514065", "0.5133739", "0.512212", "0.5121688", "0.5121688", "0.5121688", "0.5121688", "0.5121688", "0.5118835", "0.51123226", "0.51103836", "0.5102934", "0.5102865", "0.5102372", "0.5102372", "0.5101003", "0.5101003", "0.509927", "0.50889874", "0.5088318" ]
0.7935372
0
Tests whether ``TextInputStyle`` instance names are all strings.
def test__TextInputStyle__name():
    for instance in TextInputStyle.INSTANCES.values():
        vampytest.assert_instance(instance.name, str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isStringStyle(self, style):\n return style in [QsciLexerJava.DoubleQuotedString,\n QsciLexerJava.SingleQuotedString,\n QsciLexerJava.UnclosedString,\n QsciLexerJava.VerbatimString]", "def isStringStyle(self, style):\n return style in [QsciLexerCSS.DoubleQuotedString,\n QsciLexerCSS.SingleQuotedString]", "def IsString(self, pos):\n style = self.GetStyleAt(pos)\n return self.FindTagById(style) in ('string_style', 'char_style')", "def test_check_only_one_fontName(self):\n fonts = []\n result = False\n for css_class in self.pisa_doc.css[0].values():\n for font in css_class.values():\n fonts.append(font)\n for font in fonts:\n if not isinstance(font, list):\n result = True\n else:\n result = False\n break\n #here we are checking if all objects in fonts list are str, the result have to be True\n self.assertTrue(result)", "def is_text(self):\n return self.value_type in (str, unicode)", "def test__TextInputStyle__value():\n for instance in TextInputStyle.INSTANCES.values():\n vampytest.assert_instance(instance.value, TextInputStyle.VALUE_TYPE)", "def validate_names(name):\n return isinstance(name, str) and not re.search(r'[\\s]', name)", "def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def isTextWidget(self, w: Wrapper) -> bool:\n if Qsci:\n return isinstance(w, (Qsci.QsciScintilla, QtWidgets.QTextEdit))\n return isinstance(w, QtWidgets.QTextEdit)", "def is_text( self ):\n return self.get_main_type() == 'text'", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def test_check_opentype_stylename(self):\n stylename_mapping = {\n 'Regular': ['Thin', 'Light', 'Extra Light', 'Regular',\n 'Medium', 'SemiBold', 'Extra Bold', 'Black'],\n 'Italic': ['Thin Italic', 'Extra Light Italic', 'Italic',\n 'Medium Italic', 'SemiBold Italic', 'Extra Bold Italic',\n 'Black Italic'],\n 'Bold': ['Bold'],\n 'Bold Italic': ['Bold Italic']\n }\n\n font = Font.get_ttfont(self.operator.path)\n self.assertIn(font.stylename, stylename_mapping)\n self.assertIn(font.ot_style_name, stylename_mapping[font.stylename])", "def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def _is_str(item):\n return isinstance(item, str)", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def has_input_names(self) -> bool:\n return self.inputs and self.inputs[0].name is not None", "def _is_name_type(self, type_id):\n 
return type_id == self.name_type", "def test_check_normal_style_matches_names(self):\n contents = self.read_metadata_contents()\n family = Metadata.get_family_metadata(contents)\n\n for font_metadata in family.fonts:\n if font_metadata.style != 'normal':\n continue\n\n font = Font.get_ttfont_from_metadata(self.operator.path, font_metadata)\n\n if bool(font.macStyle & 0b10):\n self.fail(('Metadata style has been set to normal'\n ' but font second bit (italic) in macStyle has'\n ' been set'))\n\n style = font.familyname.split('-')[-1]\n if style.endswith('Italic'):\n self.fail(('macStyle second bit is not set but postScriptName \"%s\"'\n ' is ended with \"Italic\"') % font.familyname)\n\n style = font.fullname.split('-')[-1]\n if style.endswith('Italic'):\n self.fail(('macStyle second bit is not set but fullName \"%s\"'\n ' is ended with \"Italic\"') % font.fullname)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def does_signature_contain_str(signature=None):\n\n # if we did not receive a signature we assume the model could require\n # a string in it's input\n if signature is None:\n return True\n\n return any(v.dtype == dtypes.string.as_datatype_enum\n for v in signature.inputs.values())", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def is_string(value):\n return isinstance(value, basestring)", "def is_string(value):\n return isinstance(value, string_types)", "def is_string(obj):\n return isinstance(obj, str)", "def w_is_typed(tokens):\n return (\n 'type' in tokens or\n 'answerblock' in tokens or\n 'drawbox' in tokens or\n 'answerfigure' in tokens\n )", "def _is_valid_target_str(self, target):\n if isinstance(target, str):\n return True\n else:\n return False", "def is_string(value):\n return isinstance(value, (str, bytes))", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def HasTextColour(self):\r\n \r\n return self._colText != wx.NullColour", "def is_string(obj):\n return isinstance(obj, basestring)", "def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)", "def applyStringTypes(self):\n ok = False\n try:\n for ii, atName in enumerate(self.getAttributeList()):\n _, isMandatory = self.__getAttributeInfo(atName)\n dataType = \"string\"\n for row in self.data:\n if row[ii] is None or row[ii] in [\".\", \"?\"]:\n row[ii] = \".\" if isMandatory else \"?\"\n else:\n row[ii] = self.__castD[dataType](row[ii])\n #\n self.__attributeTypeD[atName] = dataType\n ok = True\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n if self._raiseExceptions:\n raise e\n return ok", "def is_action_str(string: str) -> bool:", "def check_options(self, options):\n return not any(not isinstance(element, str) for element in options)", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def is_string_type(self):\n raise exceptions.NotImplementedError()", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def 
is_const_input(self, name: str) -> bool:\n return self.const_inputs is not None and name in self.const_inputs", "def is_str(value):\n return isinstance(value, str)", "def _isstrvar(self, index):\n return self._typlist[index] <= 244", "def is_name_mostly_numeric(self) -> bool:\n app_no_punc = self.app_name_no_punc()\n\n try:\n int(app_no_punc)\n return True\n except ValueError:\n pass\n\n alphabetic_chars = 0\n for char in app_no_punc:\n if not char.isnumeric():\n alphabetic_chars += 1\n\n return alphabetic_chars / len(app_no_punc) < .75", "def validate_format(self):\n return all(\n [\n self.validate_header_keyword(),\n self.validate_type_keyword(),\n self.validate_type_annotations(),\n self.validate_unique_header(),\n self.validate_against_header_count(),\n ]\n )", "def Check_is_valid(self, String):\r\n\r\n if self.Special_Names.__contains__(String):\r\n return False\r\n elif self.Special_Names_no_Operands.__contains__(String):\r\n return False\r\n elif self.Special_Names_one_Operands.__contains__(String):\r\n return False\r\n elif self.Special_Names_two_Operands.__contains__(String):\r\n return False\r\n elif self.Data_types.__contains__(String):\r\n return False\r\n elif self.Registers.__contains__(String):\r\n return False\r\n elif self.Irvine32_functions.__contains__(String):\r\n return False\r\n elif String.__contains__('\"'):\r\n return False\r\n elif String.__contains__('\\''):\r\n return False\r\n elif String.__contains__('.'):\r\n return False\r\n elif String[0].isdecimal():\r\n return False\r\n if len(self.Data_variables) > 0:\r\n if self.Data_variables.__contains__(String):\r\n return False\r\n if len(self.Functions_names) > 0:\r\n if self.Functions_names.__contains__(String):\r\n return False\r\n if len(self.Labels_names) > 0:\r\n if self.Labels_names.__contains__(String):\r\n return False\r\n return True", "def _valid_typable_object_with_name(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return (ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys() and\n _valid_object_with_name(ui_object))\n else:\n assert False, 'Wrong Platform'", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def check_inputs(self, inputs):\n if self.debug:\n print(\"Checking inputs\")\n result = True\n for _input in inputs:\n if \"word_\" in _input and inputs[_input] == \"\":\n result = False\n elif \"idiom_\" in _input and inputs[_input] == \"\":\n if \"list\" not in _input:\n result = False\n return result", "def isValid(text):\n return bool(re.search(r'\\blight|lights\\b', text, re.IGNORECASE))", "def exclude_from_prefixing(self, inp):\n # Automatically return False if this is not of type \"str\"\n if type(inp) is not str:\n return False\n # Only return True if the string matches the name of a common material\n return True if inp in self.shared_materials or inp in self.shared_textures else False", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def _is_str_matching_builtin_type(str_value: str) -> bool:\n builtin_types = [\n getattr(builtins, d)\n for d in dir(builtins)\n if isinstance(getattr(builtins, d), type)\n ]\n return f\"<class '{str_value}'>\" in [str(bt) for bt in builtin_types]", "def is_string(document):\r\n return isinstance(document, str)", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) 
and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def is_selector_str(cls, s):\n\n # assert type(s) is str\n assert isinstance(s, basestring)\n try:\n cls.parse(s)\n except:\n return False\n else:\n return True", "def validate_style(style):\n valid = {}\n for k, v in style.items():\n if (v.startswith('#') and all([d in hexdigits for d in v[1:]])):\n valid[k] = v\n return valid", "def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))", "def __isValidColor(self, name):\n try:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return QColor.isValidColor(name)\n except AttributeError:\n if name.startswith(\"#\"):\n if len(name) not in [4, 7, 10, 13]:\n return False\n hexCheckStr = name[1:]\n return self.__isHexString(hexCheckStr)\n else:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return name in QColor.colorNames()", "def is_text(value, allow_empty=True):\n if isinstance(value, _types.StringTypes):\n return allow_empty or value != _const.EMPTY_STR\n return False", "def test_valid_text_str(self):\n f = lws.valid_text\n assert f('string', r'[a-z]*') is True\n assert f('string', r'string') is True\n assert f('string', r'[0-9]*') is False\n assert f('', r'.*') is False\n assert f('abcde', lambda x: 'e' in x) is True\n assert f('abcde', lambda x: 'f' in x) is False", "def str_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (str, collections.UserString, collections.abc.Sequence)):\n name = type(var).__name__\n raise StringError(\n 'Function {} expected str, {} got instead.'.format(func, name))", "def has_input(self, name: str) -> bool:\n return self.get_input_type(name) != IN_INVALID", "def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)", "def validate_typeID(self, type_ID):\n if type(type_ID) == str:\n for letter in type_ID:\n if not letter.isalpha() and not letter.isdigit():\n return False\n return True\n return False", "def _check_name(self, symbol):\n if symbol.type == self.scanner.NAME:\n return True\n else:\n return False", "def test_check_canonical_styles(self):\n contents = self.read_metadata_contents()\n fm = Metadata.get_family_metadata(contents)\n for font_metadata in fm.fonts:\n self.assertIn(font_metadata.style, self.CANONICAL_STYLE_VALUES)\n if self.is_italic(font_metadata):\n if font_metadata.style != 'italic':\n _ = \"%s: The font style is %s but it should be italic\"\n self.fail(_ % (font_metadata.filename, font_metadata.style))\n else:\n if font_metadata.style != 'normal':\n _ = \"%s: The font style is %s but it should be normal\"\n self.fail(_ % (font_metadata.filename, font_metadata.style))", "def _iscolorstring(self, color):\n try:\n rgb = self.cv.winfo_rgb(color)\n ok = True\n except TK.TclError:\n ok = False\n return ok", "def _valid_input_type(self, input_type):\n # pylint: disable=W0613, R0201\n return True", "def ISNONTEXT(value):\n return not ISTEXT(value)", "def check_dog_name(dog):\n if not isinstance(dog.name, str):\n raise NotStringError(\"Dog name entered is not a string\")", "def isvalid(self):\n validName = not StringExtension.is_none_or_white_space(self._name)\n validValue = not StringExtension.is_none_or_white_space(self._value)\n if validName and 
validValue:\n return True\n return False", "def check_series(s: pd.Series) -> bool:\n\n error_string = (\n \"The input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroSeries) for more information.\"\n )\n\n if not isinstance(s.iloc[0], str) or s.index.nlevels != 1:\n raise TypeError(error_string)", "def typeValidator(self, food_type):\n if type(food_type) != str:\n API.abort(400, error_messages[16]['int_type'])\n\n # check if the contents of title have characters between a-z and A-Z\n elif not re.match(r\"(^[a-zA-Z_ ]+$)\", food_type) or food_type.isspace():\n API.abort(\n 400, error_messages[17]['wrong_format_ty'])\n\n return True", "def _is_input_or_output_type(io: type, type_str: str):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False", "def _is_valid_varname(self, name):\n if name in RESERVED or re.match(r'^str[0-9]+$', name): return False\n return True if VALID_NAME_RE.match(name) else False", "def is_valid_license_type(self):\n clean = self.license_type.lower().replace('-', ' ')\n return clean not in INVALID_LICENSE_TYPE", "def stringable(self):\n return True", "def _isstrvar(self, index):\n return self._typlist[index] <= 32768", "def is_cstring_type(self, objtype):\n return issubclass(objtype, self.CString) or issubclass(objtype, self.CWString)", "def _is_same_color(p1: str, p2: str):\n return p1.islower() == p2.islower()", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def validate_model(self, model):\n if type(model) == str:\n for letter in model:\n if not letter.isalpha() and not letter.isdigit():\n return False\n return True\n return False", "def validate_type_keyword(self):\n valid = False\n if self.annot_types[0].upper() == \"TYPE\":\n valid = True\n if self.annot_types[0] != \"TYPE\":\n msg = f'File keyword \"TYPE\" provided as {self.annot_types[0]}'\n self.store_validation_issue(\"warn\", msg, \"format:cap:type\")\n else:\n msg = \"Malformed TYPE row, missing TYPE. 
(Case Sensitive)\"\n self.store_validation_issue(\"error\", msg, \"format:cap:type\")\n return valid", "def _is_input_or_output_type(io: type, type_str: Literal[\"Input\", \"Output\", \"Meta\"]):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False", "def is_string_action(func: CLIActionType) -> bool:\n return check_function_type(func, [HammerDriver, Callable[[str], None]], Optional[str]) is None", "def isValid(text):\n return bool(re.search(r'\\blight\\b', text, re.IGNORECASE))", "def discard_name(self) -> bool:\n\n if not self.is_name_length_valid():\n return True\n\n if self.app_name_no_punc().lower() in self.discard:\n return True\n\n if self.is_substring_unwanted():\n return True\n\n if self.unwanted_regex_match():\n return True\n\n return self.is_name_mostly_numeric()", "def _is_text_tag(tag):\n return tag.name not in ['script', 'style']", "def check_statement(self, statement):\n return isinstance(statement, str)", "def is_supported_type(self) -> bool:\n t = self.type.strip()\n return t in self.SUPPORTED_LABELS or t.lower() in self.SUPPORTED_LABELS", "def is_input(self):\n # https://html.spec.whatwg.org/multipage/forms.html#category-submit\n if self.style['appearance'] == 'auto' and self.element is not None:\n if self.element.tag in ('button', 'input', 'select', 'textarea'):\n return not isinstance(self, (LineBox, TextBox))\n return False", "def _match_entry_type_string(code_entry, string_list):\n entry_type = re.match(r\"<(AST.*):.*\", code_entry.get('type')).group(1)\n return bool(entry_type in string_list)", "def isText(self):\n return _libsbml.XMLToken_isText(self)" ]
[ "0.7289942", "0.72890097", "0.6223571", "0.6214723", "0.6137764", "0.5913751", "0.584948", "0.5819439", "0.5805448", "0.5728817", "0.5721202", "0.5700193", "0.5695489", "0.5691739", "0.5658013", "0.56440264", "0.56350374", "0.5633974", "0.55882585", "0.5547582", "0.5522903", "0.55197644", "0.5512341", "0.54763573", "0.5475964", "0.546512", "0.54597527", "0.5455931", "0.5454803", "0.54442304", "0.5388804", "0.5367636", "0.53668344", "0.5358757", "0.5352915", "0.53505826", "0.5340415", "0.53192616", "0.53128874", "0.52975994", "0.5280678", "0.52715397", "0.5269931", "0.5259622", "0.525163", "0.52511436", "0.5249828", "0.52414984", "0.5235946", "0.5210532", "0.52048415", "0.51939744", "0.51931757", "0.5188026", "0.51870626", "0.517522", "0.51627207", "0.51619834", "0.5161258", "0.5160525", "0.5159299", "0.51526576", "0.51510805", "0.51412606", "0.5130997", "0.51254934", "0.5125131", "0.5120145", "0.5075289", "0.5074756", "0.50676787", "0.5063489", "0.506176", "0.505853", "0.505123", "0.50506103", "0.5050541", "0.50499856", "0.50463265", "0.5043189", "0.50384986", "0.50367534", "0.503615", "0.5033287", "0.5032888", "0.50259763", "0.5024286", "0.5023556", "0.50155926", "0.5012068", "0.50011677", "0.50007576", "0.49848422", "0.4982455", "0.49760532", "0.4968837", "0.49613488", "0.4956058", "0.49536884", "0.49482372" ]
0.68636096
2
Tests whether ``TextInputStyle`` instance values are all the expected value type.
def test__TextInputStyle__value():
    for instance in TextInputStyle.INSTANCES.values():
        vampytest.assert_instance(instance.value, TextInputStyle.VALUE_TYPE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test__TextInputStyle__name():\n for instance in TextInputStyle.INSTANCES.values():\n vampytest.assert_instance(instance.name, str)", "def _check_value_type(self, value):\n if value is not None and self.value_type is not None:\n valid = isinstance(value, self.value_type)\n if not valid:\n return False\n return True", "def _check_helper(self, value, raise_exceptions=True) -> bool:\n if not isinstance(value, self.value_type):\n if raise_exceptions:\n raise InvalidParameterException(\n '%s: invalid type given: %s (required %s)' % (\n self.name, type(value),\n ', '.join([str(x) for x in self.value_type])\n )\n )\n return False\n\n return True", "def is_text(self):\n return self.value_type in (str, unicode)", "def checkType(self, value):\n pass", "def _check_value(self):\n value = str(self._value_field.toPlainText())\n if value=='': return True\n ACCEPTABLES_CHARS = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0',\n '.', ',', ';', ' ', '\\n', '-')\n\n for char in value:\n if not char in ACCEPTABLES_CHARS:\n return False\n if Variable.is_acceptable_arg(value):\n rows, columns = np.matrix(value).shape\n return 1 <= rows <= 4 and 1 <= columns <= 4\n else:\n return False", "def isStringStyle(self, style):\n return style in [QsciLexerJava.DoubleQuotedString,\n QsciLexerJava.SingleQuotedString,\n QsciLexerJava.UnclosedString,\n QsciLexerJava.VerbatimString]", "def isStringStyle(self, style):\n return style in [QsciLexerCSS.DoubleQuotedString,\n QsciLexerCSS.SingleQuotedString]", "def _valid_input_type(self, input_type):\n # pylint: disable=W0613, R0201\n return True", "def is_valid_color(value):\n if is_str(value):\n return is_hex_string(value)\n elif is_tuple_or_list(value):\n return (is_tuple_or_list(value)\n and is_three_channeled(value)\n and has_valid_channel_values(value))\n else:\n return is_str_or_coll(value)", "def _validate_value_type(value: Any, expected: Sequence[Type]) -> bool:\n\n for entry in expected:\n if get_origin(entry) is None:\n if type(value) == entry: # pylint: disable=unidiomatic-typecheck\n return True\n continue\n if _validate_value_type(value, get_args(entry)):\n return True\n return False", "def is_of_type(cls, value) -> bool:\n # UTF8 = 'utf-8'\n # UTF16 = 'utf-16'\n # UTF32 = 'utf-32'\n # ASCII = 'ascii'\n # BINARY = 'binary'\n # OCTAL = 'octal'\n # HEXADECIMAL = 'hexadecimal'\n # CP1252 = 'cp1252'\n # WINDOWS1252 = 'windows-1252'\n # UNICODEESCAPE = 'unicode-escape'\n\n v = None\n if cls == cls.UTF8 or cls == cls.UTF16 or cls == cls.UTF32 or cls == cls.UNICODEESCAPE:\n try:\n v = bytes(value)\n except:\n return False\n\n if cls == cls.ASCII:\n try:\n v = ascii(value)\n except:\n return False\n\n if cls == cls.BINARY:\n try:\n v = bin(value)\n except:\n return False\n\n if cls == cls.OCTAL:\n try:\n v = oct(value)\n except:\n return False\n\n if cls == cls.HEXADECIMAL:\n try:\n v = hex(value)\n except:\n return False\n\n if cls == cls.WINDOWS1252 or cls == cls.CP1252:\n try:\n v = str(value)\n except:\n return False\n return True", "def validate(self,value):\r\n return type(value) is self.datatype", "def validate_format(self):\n return all(\n [\n self.validate_header_keyword(),\n self.validate_type_keyword(),\n self.validate_type_annotations(),\n self.validate_unique_header(),\n self.validate_against_header_count(),\n ]\n )", "def validate(self, value):\n if super().validate(value):\n return (value is None) or (isinstance(value, str) and self._validate_length(value))\n else:\n return False", "def accepts(cls, value: Any) -> bool:\n try:\n cls.convert(value)\n 
return True\n except ValueError:\n return False", "def validGameSettings(self):\n if not isinstance(self.view, GView):\n return False\n if not isinstance(self.input, GInput):\n return False\n validStates = [STATE_INACTIVE, STATE_NEWWAVE, STATE_ACTIVE,\n STATE_PAUSED, STATE_CONTINUE, STATE_COMPLETE]\n if not self.getState() in validStates:\n return False\n if not self.getWave() is None or isinstance(self.getWave(), Wave):\n return False\n if not self.getText() is None or isinstance(self.getText(), GLabel):\n return False\n return True", "def is_input(self):\n # https://html.spec.whatwg.org/multipage/forms.html#category-submit\n if self.style['appearance'] == 'auto' and self.element is not None:\n if self.element.tag in ('button', 'input', 'select', 'textarea'):\n return not isinstance(self, (LineBox, TextBox))\n return False", "def validateInput(self):\n palette = QPalette()\n validInput = self.sender().hasAcceptableInput()\n if validInput:\n palette.setColor(QPalette.Text, Qt.black)\n else:\n palette.setColor(QPalette.Text, Qt.blue)\n self.sender().setPalette(palette)\n self.hasValidInput.emit(validInput)", "def is_valid(self, value) -> 'True|str':\n if self.base_type is not None and not isinstance(value, self.base_type):\n return f'Value {value} is not type of {self.base_type}.'\n return True", "def _check_dtype(self):\n\n # assert valid dtype\n if self.dtype not in PRIMITIVE_TYPES:\n raise ValueError(\"Type '{}' is invalid. Following types are \"\n \"allowed: {}\"\n .format(self.dtype, PRIMITIVE_TYPES.keys()))\n\n # assert valid dtypes for values\n allowed_types = PRIMITIVE_TYPES[self.dtype]\n\n for value in self.values:\n if not isinstance(value, allowed_types):\n raise TypeError(\"Column '{}' has invalud value '{}' with \"\n \"invalid type '{}'. 
Allowed types are: {}.\"\n .format(self.name,\n value,\n type(value),\n allowed_types))", "def test_incompatible_option_type(key, value):\n wrong_types = {int, str, list, bool} - {type(value)}\n for wrong_type in wrong_types:\n test_value = wrong_type()\n with pytest.raises(InputError):\n _check_input_config({key: test_value})", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def CheckType(self, *args, **kwargs):\n pass", "def is_schema_types_valid(self):\n valid_types = {\"string\", \"int\", \"float\", \"datetime\", \"boolean\"}\n invalid_types = []\n if self.schema_content:\n for dataset in self.schema_content:\n attributes = self.schema_content.get(dataset)\n for attr in attributes.values():\n type_to_validate = attr.get(\"type\")\n if type_to_validate not in valid_types:\n invalid_types.append(type_to_validate)\n\n if invalid_types:\n error_message, error_code = Errors.modeling_rule_schema_types_invalid(\n invalid_types\n )\n if self.handle_error(\n error_message, error_code, file_path=self.file_path\n ):\n self._is_valid = False\n return False\n return True", "def _has_numeric_or_bool(self) -> bool:\n dtypes: Set[str] = set(self._data.keys())\n return 'i' in dtypes or 'f' in dtypes or 'b' in dtypes", "def clean_values(cls, cleaned_input, attribute):\n values_input = cleaned_input.get(cls.ATTRIBUTE_VALUES_FIELD)\n attribute_input_type = cleaned_input.get(\"input_type\") or attribute.input_type\n\n if values_input is None:\n return\n\n if (\n values_input\n and attribute_input_type not in AttributeInputType.TYPES_WITH_CHOICES\n ):\n raise ValidationError(\n {\n cls.ATTRIBUTE_VALUES_FIELD: ValidationError(\n \"Values cannot be used with \"\n f\"input type {attribute_input_type}.\",\n code=AttributeErrorCode.INVALID.value,\n )\n }\n )\n\n is_swatch_attr = attribute_input_type == AttributeInputType.SWATCH\n for value_data in values_input:\n cls._validate_value(attribute, value_data, is_swatch_attr)\n\n cls.check_values_are_unique(values_input, attribute)", "def validate(self):\n self._check_type()", "def validate_value(self, value: valueType) -> bool:\n if value is None:\n raise Exception\n return True", "def test_not_blank_validator_valid_value_should_return_true(self):\n for item in self.stdtype_fixtures:\n self.assertTrue(NotBlankValidator(TypeHint(item.get('type')), item.get('valid')))", "def validate_style(style):\n valid = {}\n for k, v in style.items():\n if (v.startswith('#') and all([d in hexdigits for d in v[1:]])):\n valid[k] = v\n return valid", "def _check_type(self):\n check_type = DESCRIPTOR_VALUE.get(self.descriptor.type)\n if check_type is None:\n raise ValueError(\"Unknown metric descriptor type\")\n for ts in self.time_series:\n if not ts.check_points_type(check_type):\n raise ValueError(\"Invalid point value type\")", "def validate_type_annotations(self):\n valid = False\n invalid_types = []\n # skipping the TYPE keyword, iterate through the types\n # collecting invalid type annotations in list annots\n for t in self.annot_types[1:]:\n if t.lower() not in (\"group\", \"numeric\"):\n # if the value is a blank space, store a higher visibility\n # string for error reporting\n if \"Unnamed\" in t:\n invalid_types.append(\"<empty value>\")\n # Duplicated metadata header name causes type annotation issue.\n # Side effect of Pandas adding a suffix to uniquefy the header.\n # These invalid annotations should not be included in invalid\n # type annotation count. 
This exception may cause miscount of\n # type annot errors if user-supplied annotation has period.\n elif \".\" in t:\n pass\n else:\n invalid_types.append(t)\n if invalid_types:\n msg = 'TYPE row annotations should be \"group\" or \"numeric\"'\n self.store_validation_issue(\n \"error\",\n msg,\n \"format:cap:group-or-numeric\",\n associated_info=invalid_types,\n )\n else:\n valid = True\n return valid", "def _check_inputvalues(self):\n # Check x, y and z are int or float dtypes\n # ie do not contain any unusable values like strings\n if not (self.x.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n if not (self.y.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")\n\n # Performs checks on z if not empty\n if self.z is not None:\n for v in self.z.values():\n if not (v.dtype in [np.int, np.float]):\n raise TypeError(\"Route input 'x' must be either int or float dtypes\")", "def IsValidInputType(self, list_of_matches):\n for entry in list_of_matches:\n if not entry:\n return False\n\n return True", "def _can_cast_to(self, value, cast_type):\n try:\n _ = cast_type(value)\n return True\n except ValueError:\n return False", "def validate(self, value):\n if super().validate(value):\n return (value is None) or (isinstance(value, int) and self._validate_value(value))\n else:\n return False", "def test_check_canonical_styles(self):\n contents = self.read_metadata_contents()\n fm = Metadata.get_family_metadata(contents)\n for font_metadata in fm.fonts:\n self.assertIn(font_metadata.style, self.CANONICAL_STYLE_VALUES)\n if self.is_italic(font_metadata):\n if font_metadata.style != 'italic':\n _ = \"%s: The font style is %s but it should be italic\"\n self.fail(_ % (font_metadata.filename, font_metadata.style))\n else:\n if font_metadata.style != 'normal':\n _ = \"%s: The font style is %s but it should be normal\"\n self.fail(_ % (font_metadata.filename, font_metadata.style))", "def check_if_input_is_int(self):\n try:\n int(self.input)\n except ValueError:\n return False\n else:\n return True", "def IsBlank(self):\n\n if self.value is None:\n return True\n elif type(self.value) == int or type(self.value) == long:\n return DCGM_INT64_IS_BLANK(self.value)\n elif type(self.value) == float:\n return DCGM_FP64_IS_BLANK(self.value)\n elif type(self.value) == str:\n return DCGM_STR_IS_BLANK(self.value)\n else:\n raise Exception(\"Unknown type: %s\") % str(type(self.value))", "def isTextWidget(self, w: Wrapper) -> bool:\n if Qsci:\n return isinstance(w, (Qsci.QsciScintilla, QtWidgets.QTextEdit))\n return isinstance(w, QtWidgets.QTextEdit)", "def _values_of_same_type(self, val1, val2):\n if self.f_supports(val1) != self.f_supports(val2):\n return False\n\n if not self.f_supports(val1) and not self.f_supports(val2):\n raise TypeError(\n \"I do not support the types of both inputs (`%s` and `%s`),\"\n \" therefore I cannot judge whether the two are of same type.\"\n % str(type(val1)),\n str(type(val2)),\n )\n\n return type(val1) is type(val2)", "def __datatype_check(self, record_attribute, attribute_schema):\n if 'INT' in attribute_schema[TYPE_KEY].upper():\n if record_attribute.isdigit():\n return True\n elif attribute_schema[TYPE_KEY].upper() in DECIMAL_TYPES:\n if record_attribute.isdecimal():\n return True\n elif 'CHAR' in attribute_schema[TYPE_KEY].upper() \\\n or 'TEXT' in attribute_schema[TYPE_KEY].upper():\n if type(record_attribute) is str:\n return True\n else:\n IS_VALID_FILE = False\n return 
False", "def validate_value_flag(self):\n if not self.app.args.value is None or self.app.args.value == '':\n return True\n else:\n return False", "def _validate_value(self, val):\r\n if type(val) in (int, long, float, str, unicode, ):\r\n return val\r\n if isinstance(val, tuple) or isinstance(val, frozenset):\r\n for i in val:\r\n self._validate_value(i)\r\n return val\r\n raise TypeError(\r\n \"Only number/strings and tuples/frozensets allowed here.\",\r\n )", "def is_valid_value(self, value: Any) -> bool:\n return self.type_registry.is_valid_nested(value)", "def w_is_typed(tokens):\n return (\n 'type' in tokens or\n 'answerblock' in tokens or\n 'drawbox' in tokens or\n 'answerfigure' in tokens\n )", "def is_valid(value: str) -> bool:\n if value is None:\n return not is_required\n return value in get_all_class_attr_values(constant_cls)", "def __check_args_type(self):\n if not isinstance(self.__min_range, (float, int)):\n error_msg = \"min_range must of type int or float, but given: \"\n error_msg += str(type(self.__min_range))\n raise TypeError(error_msg)\n elif not isinstance(self.__max_range, (float, int)):\n error_msg = \"max_range must of type int or float, but given: \"\n error_msg += str(type(self.__max_range))\n raise TypeError(error_msg)\n\n if isinstance(self.__min_range, bool):\n error_msg = \"min_range must of type int or float, but given: \"\n error_msg += str(type(self.__min_range))\n raise TypeError(error_msg)\n elif isinstance(self.__max_range, bool):\n error_msg = \"max_range must of type int or float, but given: \"\n error_msg += str(type(self.__max_range))\n raise TypeError(error_msg)", "def has_value(cls, value):\n return bool(isinstance(value, numbers.Number) or isinstance(value, time) or \\\n isinstance(value, datetime) or value)", "def _is_valid(self, value):\n\n # Entities have an istypeof method that can perform more sophisticated\n # type checking.\n if hasattr(self._type, \"istypeof\"):\n return self._type.istypeof(value)\n else:\n return isinstance(value, self._type)", "def check_validity(self):\n try:\n if self.type == ConstraintTypes.EQUAL:\n enforce(\n isinstance(self.value, (int, float, str, bool)),\n f\"Expected one of type in (int, float, str, bool), got {self.value}\",\n )\n elif self.type == ConstraintTypes.NOT_EQUAL:\n enforce(\n isinstance(self.value, (int, float, str, bool)),\n f\"Expected one of type in (int, float, str, bool), got {self.value}\",\n )\n elif self.type == ConstraintTypes.LESS_THAN:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.LESS_THAN_EQ:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.GREATER_THAN:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.GREATER_THAN_EQ:\n enforce(\n isinstance(self.value, (int, float, str)),\n f\"Expected one of type in (int, float, str), got {self.value}\",\n )\n elif self.type == ConstraintTypes.WITHIN:\n enforce(\n isinstance(self.value, (list, tuple)),\n f\"Expected one of type in (list, tuple), got {self.value}\",\n )\n enforce(\n len(self.value) == 2, f\"Expected length=2, got {len(self.value)}\"\n )\n enforce(\n isinstance(self.value[0], type(self.value[1])), \"Invalid types.\"\n )\n enforce(\n isinstance(self.value[1], type(self.value[0])), \"Invalid 
types.\"\n )\n elif self.type == ConstraintTypes.IN:\n enforce(\n isinstance(self.value, (list, tuple, set)),\n f\"Expected one of type in (list, tuple, set), got {self.value}\",\n )\n if len(self.value) > 0:\n _type = type(next(iter(self.value)))\n enforce(\n all(isinstance(obj, _type) for obj in self.value),\n \"Invalid types.\",\n )\n elif self.type == ConstraintTypes.NOT_IN:\n enforce(\n isinstance(self.value, (list, tuple, set)),\n f\"Expected one of type in (list, tuple, set), got {self.value}\",\n )\n if len(self.value) > 0:\n _type = type(next(iter(self.value)))\n enforce(\n all(isinstance(obj, _type) for obj in self.value),\n \"Invalid types.\",\n )\n elif self.type == ConstraintTypes.DISTANCE:\n enforce(\n isinstance(self.value, (list, tuple)),\n f\"Expected one of type in (list, tuple), got {self.value}\",\n )\n enforce(\n len(self.value) == 2, f\"Expected length=2, got {len(self.value)}\"\n )\n enforce(\n isinstance(self.value[0], Location),\n \"Invalid type, expected Location.\",\n )\n enforce(\n isinstance(self.value[1], float), \"Invalid type, expected Location.\"\n )\n else: # pragma: nocover\n raise ValueError(\"Type not recognized.\")\n except ValueError:\n return False # pragma: nocover\n\n return True", "def validate(self):\n return (self.check_input_digits_count()\n and self.check_if_input_is_int()\n and self.check_if_input_digits_are_unique())", "def input_validator(self, a_dict):\n for k, v in a_dict.items():\n if not isinstance(v, int):\n raise TypeError(\"{} must be an integer\".format(k))\n elif k == \"width\" and v < 1:\n raise ValueError(\"{} must be > 0\".format(k))\n elif k is \"height\" and v < 1:\n raise ValueError(\"{} must be > 0\".format(k))\n elif k is \"x\" and v < 0:\n raise ValueError(\"{} must be >= 0\".format(k))\n elif k is \"y\" and v < 0:\n raise ValueError(\"{} must be >= 0\".format(k))", "def has_acceptable_type(self, value):\n if not value:\n return False\n if super().has_acceptable_type(value):\n return True\n # Hmmm ok maybe we're running under IPython:\n try:\n import IPython\n return isinstance(value, IPython.kernel.zmq.iostream.OutStream)\n except ImportError:\n return False", "def test_check_only_one_fontName(self):\n fonts = []\n result = False\n for css_class in self.pisa_doc.css[0].values():\n for font in css_class.values():\n fonts.append(font)\n for font in fonts:\n if not isinstance(font, list):\n result = True\n else:\n result = False\n break\n #here we are checking if all objects in fonts list are str, the result have to be True\n self.assertTrue(result)", "def validate(cls, tab_dict, raise_error=True):\r\n return key_checker(['type'])(tab_dict, raise_error)", "def _check_validdtypeinput(self, symbol):\n if symbol.type == self.scanner.KEYWORD and \\\n symbol.id in self.validdtypeinputs:\n return True\n else:\n return False", "def are_all_datetimes(values: List[Union[str, int, float]]):\n for value in values:\n if not is_datetime(value):\n return False\n return True", "def check_input_type(var, type_name):\n\n type_options = [\"int\", \"float\", \"Date\", \"Region\"]\n if type_name == type_options[0]:\n if int(var):\n return True\n else:\n return False\n elif type_name == type_options[1]:\n if float(var):\n return True\n else:\n return False\n elif type_name == type_options[2]:\n if datetime.date.fromisoformat(var):\n return True\n else:\n return False\n elif type_name == type_options[3]:\n valid_regions = [\"NW\", \"SW\", \"MN\", \"MS\", \"NE\", \"SE\"]\n is_valid = False\n for region in valid_regions:\n if var == region:\n 
is_valid = True\n return is_valid\n else:\n Exception(\"This type doesn't exist in the checker!\")", "def applies(cls, obj):\n return type(obj) in cls.types", "def validate_shape_and_dtype(self):\n if self.rgb.dtype != tf.float32:\n raise ValueError(\"Expected float32 rgb!\")\n if len(self.rgb.shape) != 4:\n raise ValueError(f\"Expected (B, H, W, C), got {self.rgb.shape}\")\n _, _, _, channels = self.rgb.shape.as_list()\n if channels != 3:\n raise ValueError(f\"Expected 3 rgb channels, got shape {self.rgb.shape}\")", "def styles_formatting(styles):\n for index, style in enumerate(styles):\n try:\n parse_style(style)\n except EmptyLineError:\n continue\n except NotAValidCssClassError:\n raise Invalid(\"Style %i does not have a valid CSS class: %s\" % (index + 1, style))\n except:\n raise Invalid(\"Style %i is not correctly formatted: %s\" % (index + 1, style))\n return True", "def _typecheck(name, value, *types):\n if not types:\n raise ValueError('expected one or more types, maybe use _textcheck?')\n if not isinstance(value, types):\n raise TypeError(\"expected %s for %s, got %r\"\n % (\" or \".join([t.__name__ for t in types]),\n name, value))\n return value", "def test_badsizevaluebool(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(True, 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')", "def test_is(self):\n invalid = self.TDTT()\n self.check_invalid_is(invalid)\n\n valid = self.TDTT(when=self.txt_when)\n self.check_valid_is(valid)", "def _valid_typable_object(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys()\n else:\n assert False, 'Wrong Platform'", "def is_int(self,):\n validator = self.__class__.get_setting_validator(self.key, **self.get_kwargs())\n\n return self.__class__.validator_is_int(validator)", "def test_check_type_1():\r\n hl = hotlist.HotList()\r\n hl._validate_value(1)\r\n hl._validate_value(1L)\r\n hl._validate_value(1.5)\r\n hl._validate_value(\"abc\")\r\n hl._validate_value(u\"abc\")\r\n hl._validate_value((1, 2, 3,))\r\n hl._validate_value((1, \"AAA\", 3,))\r\n hl._validate_value((1, (\"AAA\", 2, 3,) , 3,))\r\n hl._validate_value((1, frozenset([\"AAA\", 2, 3,]) , 3,))\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value([ 1, 2, 3,])\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value(( 1, 2, [ 3, 4, 5,],))\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value({})\r\n\r\n with pytest.raises(TypeError):\r\n hl._validate_value(hotlist.HotList())", "def validatePredefinedType(self, type: int) -> bool:\n ...", "def verify_configuration_types(config):\n if not isinstance(config[\"count\"], int):\n return False\n return True", "def test_match_types(self):\n f = lws.match_types\n # assert f(str, u'test') is True\n assert f(str, 'test') is True\n assert f(int, 123) is True\n assert f(int, 123.00) is False\n assert f(bool, [1, 2, 3]) is False", "def available(self):\n return self.value_type in self._values", "def _check_styles(self, dataset):\n # list of column ids to exclude from plotting\n exclude_cols = [\"sample_name\", \"patient_id\", \"title\"]\n\n # check column styles\n if dataset['styles']['columns']['color'] == []:\n if dataset['metadata']['columns'] != '':\n # load metadata\n mdat = load_data(dataset['metadata']['columns'])\n\n # exclude known uninformative columns\n cols_to_drop = [x for x in exclude_cols if x in mdat.columns]\n\n if len(cols_to_drop) > 0:\n mdat = mdat.drop(cols_to_drop, 
axis=1)\n\n # set default columns to use for plotting\n dataset['styles']['columns']['color'] = mdat.columns[mdat.nunique() > 1].tolist()\n\n # check row styles\n if dataset['styles']['rows']['color'] == []:\n if dataset['metadata']['rows'] != '':\n mdat = load_data(dataset['metadata']['rows'])\n dataset['styles']['rows']['color'] = mdat.columns[mdat.nunique() > 1].tolist()", "def check(self, value: Any) -> None:\n if not isinstance(value, self.oktype):\n raise TypeError(value)", "def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"", "def check_kwargs(cls, kwargs, data_type):\n err_str = data_type.__name__ + ': %s keyword argument not allowed or \"%s\" value invalid'\n for k, v in kwargs.items():\n # kwargs allowed for all data_types\n if k in cls.allowed_kwargs:\n if not cls.allowed_kwargs[k](v): # run kwarg validator\n raise TypeError(err_str % (k, v))\n # type specific kwargs\n elif data_type in cls.extra_kwargs and k in cls.extra_kwargs[data_type]:\n if not cls.extra_kwargs[data_type][k](v): # run kwarg validator\n raise TypeError(err_str % (k, v))\n else:\n raise TypeError(err_str % (k, v))", "def validate(self, test_data):\n if type(test_data) != bool:\n raise ValidationError('Invalid type/value.', 'bool',\n type(test_data))", "def isInputValid(self, input):\r\n pass", "def data_type_properties_validator(field, presentation, context):\n\n field.default_validate(presentation, context)\n\n values = getattr(presentation, field.name)\n if values is not None:\n if presentation._get_primitive_ancestor(context) is not None:\n context.validation.report(\n u'data type \"{0}\" defines properties even though it has a primitive ancestor'\n .format(presentation._fullname),\n locator=presentation._get_child_locator(field.name), level=Issue.BETWEEN_TYPES)", "def check_validity_input_formats(input_formats):\n from invenio.search_engine import get_available_output_formats\n valid_formats = get_available_output_formats()\n\n # let's to extract the values of the available formats\n format_values = []\n for aformat in valid_formats:\n format_values.append(aformat['value'])\n\n invalid_format = ''\n for aformat in input_formats:\n if aformat.lower() not in format_values:\n invalid_format = aformat.lower()\n break\n return invalid_format", "def assert_style_data_correct(self) -> bool:\n style_chars = Path(os.environ[\"DATA_PATH\"]) / \"character_styles\"\n style_frags = Path(os.environ[\"DATA_PATH\"]) / \"fragment_styles\"\n if style_chars.exists() and style_frags.exists():\n return True\n return False", "def check_type(self):\n return True", "def check_type(content):\n return (isinstance(content, Elem) or type(content) == Text or\n (type(content) == list and all([type(elem) == Text or\n isinstance(elem, Elem)\n for elem in content])))", "def validateInputType(self, inputType):\n raise NotImplementedError()", "def is_valid(self, value):\r\n pass", "def test(types, _):\n return 'Date' in types and 'Postal Code' in types", "def _check_data_type(self, key: str, value: Any):\n allowedDataType = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n 
).get(\"allowedDataType\", None)\n if allowedDataType is not None and not isinstance(value, allowedDataType):\n raise Exception(\n f\"Value '{value}' is not of the correct type. The allowed data type is: {allowedDataType.__name__}\"\n )", "def test_type_of_attrs(self):\n self.assertEqual(type(self.review.place_id), str)\n self.assertEqual(type(self.review.user_id), str)\n self.assertEqual(type(self.review.text), str)", "def are_all_numbers(values: List[Union[str, int, float]]):\n for value in values:\n if not is_number(value):\n return False\n return True", "def test_type_validation(self):\r\n with self.assertRaises(ValidationError):\r\n TestListModel.create(int_list=['string', True], text_list=[1, 3.0])", "def validate_input_values(self, source, **kwargs):\n return self._validate_values(\"input_values\", source, **kwargs)", "def _check_kwargs(self):\n valid_kw = {\n 'hf_type': 'str',\n 'hierarchy': 'bool',\n 'smooth': 'bool',\n 'water_level': 'float',\n # Object modifier kw\n 'no_shadow': 'bool',\n 'no_image': 'bool',\n 'no_reflection': 'bool',\n 'inverse': 'bool',\n 'double_illuminate': 'bool',\n 'hollow': 'bool'\n }\n\n self._validate_kwargs(valid_kw)\n\n valid_types = [\n 'gif', 'tga', 'pot', 'png', 'pgm',\n 'ppm', 'jpeg', 'tiff', 'sys', 'function'\n ]\n self._checkKwargValue('hf_type', valid_types)", "def test_check_normal_style_matches_names(self):\n contents = self.read_metadata_contents()\n family = Metadata.get_family_metadata(contents)\n\n for font_metadata in family.fonts:\n if font_metadata.style != 'normal':\n continue\n\n font = Font.get_ttfont_from_metadata(self.operator.path, font_metadata)\n\n if bool(font.macStyle & 0b10):\n self.fail(('Metadata style has been set to normal'\n ' but font second bit (italic) in macStyle has'\n ' been set'))\n\n style = font.familyname.split('-')[-1]\n if style.endswith('Italic'):\n self.fail(('macStyle second bit is not set but postScriptName \"%s\"'\n ' is ended with \"Italic\"') % font.familyname)\n\n style = font.fullname.split('-')[-1]\n if style.endswith('Italic'):\n self.fail(('macStyle second bit is not set but fullName \"%s\"'\n ' is ended with \"Italic\"') % font.fullname)", "def test_Input_String(self):\n height = StringVar(self.root, 2)\n width = StringVar(self.root, 'two')\n mines = StringVar(self.root, 3)\n with self.assertRaises(Exception) as context:\n self.menu.createGameWindow('Custom', height, width, mines)\n self.assertTrue('Invalid data type' in str(context.exception))", "def _check_annotations(value):\n if isinstance(value, dict):\n for k, v in value.items():\n _check_annotations(v)\n elif isinstance(value, list):\n for element in value:\n _check_annotations(element)\n elif isinstance(value, numpy.ndarray):\n if value.dtype not in (numpy.integer, numpy.floating, numpy.complex) \\\n and value.dtype.type != numpy.string_:\n raise ValueError(\"Invalid annotation. NumPy arrays with dtype %s are not allowed\" % value.dtype)\n elif not isinstance(value, ALLOWED_ANNOTATION_TYPES):\n raise ValueError(\"Invalid annotation. 
Annotations of type %s are not allowed\" % type(value))", "def _check_value(self,val):\n if self.allow_None and val is None:\n return\n\n if not _is_number(val):\n raise ValueError(\"Parameter '%s' only takes numeric values\"%(self._attrib_name))\n \n self._checkBounds(val)", "def test_non_required_validation(self):\r\n Text().validate('')\r\n Text().validate(None)", "def validate_values(self):\n if self.avp_def.has_defined_values():\n defined_values = dict(self.avp_def.attr_defined_values)\n if self.avp_value not in defined_values.values():\n raise ValueError(\n f\"{self.avp_def.attr_name} - value {self.avp_value} \"\n \"is not allowed\")\n\n return True", "def test_is_valid_color(self):\n self.assertTrue(is_valid_color('black'))\n self.assertTrue(is_valid_color('#aabb11'))\n self.assertTrue(is_valid_color('rgba(23,45,67, .5)'))\n self.assertFalse(is_valid_color('bl(ack'))" ]
[ "0.61332947", "0.59603673", "0.5806856", "0.5770944", "0.57419115", "0.5711008", "0.5696463", "0.567288", "0.5661682", "0.5579446", "0.55630475", "0.5551327", "0.55341244", "0.5487145", "0.54515827", "0.5438341", "0.54083705", "0.54033923", "0.5351363", "0.5323914", "0.53112197", "0.5295399", "0.52837473", "0.52836823", "0.5283626", "0.52661467", "0.5265321", "0.5258055", "0.5244037", "0.5240988", "0.5238474", "0.5237914", "0.52242005", "0.5198061", "0.516486", "0.5162071", "0.51578933", "0.515695", "0.51535934", "0.51515996", "0.51474977", "0.5132104", "0.5131047", "0.51113284", "0.51098627", "0.5096178", "0.5091279", "0.5090316", "0.5088026", "0.5080884", "0.5075851", "0.50745827", "0.5065595", "0.506027", "0.5049609", "0.5043837", "0.50430465", "0.5037015", "0.5023747", "0.50225055", "0.50213045", "0.50200856", "0.50087976", "0.49924687", "0.49916828", "0.49882895", "0.49877807", "0.49778473", "0.49776718", "0.4976627", "0.49735695", "0.4972986", "0.49664512", "0.49516708", "0.49460635", "0.49454105", "0.49441576", "0.4935516", "0.49264142", "0.49258515", "0.49157563", "0.49102947", "0.49100938", "0.49032825", "0.48951945", "0.48919898", "0.48918563", "0.48880225", "0.48878497", "0.4884309", "0.4881816", "0.48810124", "0.48754752", "0.4873511", "0.48675048", "0.48661533", "0.48659703", "0.48615465", "0.48613164", "0.48599458" ]
0.741814
0
No real implementation necessary. Only for heapq.
def __lt__(self, other):
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heapify(x):\n pass", "def test_pop_decreases_size(sample_priorityq):\n for i in range(5):\n sample_priorityq.insert([i, i + 3])\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 4\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 3\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 2", "def heappop(heap):\n pass", "def test_insert_increases_size(sample_priorityq):\n assert len(sample_priorityq.heap_list) == 0\n sample_priorityq.insert([5, 1])\n assert len(sample_priorityq.heap_list) == 1\n sample_priorityq.insert([6, 2])\n assert len(sample_priorityq.heap_list) == 2", "def example_seven():\n a = []\n heapq.heappush(a, 5)\n heapq.heappush(a, 3)\n heapq.heappush(a, 7)\n heapq.heappush(a, 4)\n\n print(heapq.heappop(a), heapq.heappop(a), heapq.heappop(a), heapq.heappop(a))", "def min_heap(self): \n \n for pos in range(self.size//2, 0, -1): \n self.min_heapify(pos)", "def _heapify(self):\n start = self._parent(len(self) - 1)\n for i in range(start, -1, -1):\n self._down_heap(i)", "def example_eight():\n a = []\n heapq.heappush(a, 5)\n heapq.heappush(a, 3)\n heapq.heappush(a, 7)\n heapq.heappush(a, 4)\n\n assert a[0] == heapq.nsmallest(1, a)[0] == 3\n\n print('Before:', a)\n a.sort()\n print('After: ', a)", "def test_native(self):\n import heapq\n X = []\n heapq.heappush(X, Item('A', 5))\n heapq.heappush(X, Item('B', 5))\n heapq.heappush(X, Item('C', 5))\n heapq.heappush(X, Item('D', 5))\n heapq.heappush(X, Item('E', 5))\n heapq.heappush(X, Item('F', 5))\n item_ordered = []\n while X:\n item_ordered.append(heapq.heappop(X).val)\n self.assertEqual(['A', 'C', 'F', 'E', 'B', 'D'], item_ordered)\n\n X = []\n heapq.heappush(X, TimeSpecifiedItem('A', 5))\n heapq.heappush(X, TimeSpecifiedItem('B', 5))\n heapq.heappush(X, TimeSpecifiedItem('C', 5))\n heapq.heappush(X, TimeSpecifiedItem('D', 5))\n heapq.heappush(X, TimeSpecifiedItem('E', 5))\n heapq.heappush(X, TimeSpecifiedItem('F', 5))\n ordered = []\n while X:\n ordered.append(heapq.heappop(X).val)\n self.assertEqual(['A', 'B', 'C', 'D', 'E', 'F'], ordered)", "def __init__(self):\n\n self.container2 = []\n heapq.heapify(self.container2)", "def _heapify(self):\n for _ in range(len(self.elements)):\n for i in range(len(self.elements)-1, 0, -1):\n parentPosition = (i-1)/2 # defaults to int i.e. 7/2=3, and 6/2=3\n if parentPosition < 0:\n parentPosition = 0\n \n # change this condition to '>' if coding for max-heap. 
This is for min-heap.\n if self.elements[i] < self.elements[parentPosition]:\n self.elements[i], self.elements[parentPosition] = self.elements[parentPosition], self.elements[i]", "def __init__(self):\n self.heap1 = []\n self.heap2 = []\n self.size = 0", "def manage_heap(heap, coordinates, distance):\n\tif distance > SUN_DISTANCE:\n\t\tif len(heap) < k:\n\t\t\theap.append((distance, coordinates))\n\t\t\tif len(heap) == k:\n\t\t\t\theapq._heapify_max(heap)\n\t\telif distance < heap[0][0]:\n\t\t\theapq._heappushpop_max(heap, (distance, coordinates))", "def testOneSize(self):\n hd = HeapDict(size=1)\n hd.push('a', 2)\n hd.push('a', 1)\n hd.push('b', 3)\n hd.push('b', 4)\n self.assertEqual(hd.get_result(), {'a': [2], 'b': [4]})", "def build_heap(arr):\n for i in range(len(arr)-1, -1, -1):\n down_heapify(arr, len(arr), i)", "def _heapify_after_remove(self,ele):\r\n \r\n if self._chk_left(ele):\r\n left = self._left(ele)\r\n find_small_child = left\r\n # below to find which child has small integer\r\n if self._chk_right(ele):\r\n right = self._right(ele)\r\n if self._data[left] > self._data[right]:\r\n find_small_child = right\r\n \r\n if self._data[find_small_child] < self._data[ele]:\r\n self.swap(ele, find_small_child)\r\n self._heapify_after_remove(find_small_child)", "def __init__(self):\n self.heap = []", "def __init__(self):\n self.heap = []", "def __init__(self):\n self.heap = []", "def test_priority_que_pop_and_push(priority_queue_full):\n priority_queue_full.pop()\n priority_queue_full.insert(11, 1)\n assert priority_queue_full._heap[0].priority == 1\n priority_queue_full.pop()\n priority_queue_full.pop()\n priority_queue_full.insert(10, 1)\n assert priority_queue_full.peek() == 10", "def __init__(self):\n self.lower_q = [] # max heap\n self.higher_q = [] # min heap", "def testSorting(self):\n hd = HeapDict(size=2)\n hd.push('a', 1)\n hd.push('a', 2)\n hd.push('b', 3)\n hd.push('b', 2)\n self.assertEqual(hd.get_result(), {'a': [2, 1], 'b': [3, 2]})", "def main():\n heap = MinHeap()\n for i in range(10):\n heap.add(i)\n print(heap.peek())\n for i in range(4):\n heap.poll()\n print(heap.peek())", "def build_heap(data):\n size = len(data)\n for i in range(size//2, -1,-1):\n shiftDown(data, i)", "def __init__(self):\n self.stream_data_left = []\n heapq.heapify(self.stream_data_left)\n self.stream_data_right = []\n heapq.heapify(self.stream_data_right)", "def testArbitraryItems(self):\n hd = HeapDict(size=2)\n item1 = self.PriorityItem(1.0, [None, 'Arbitrary item'])\n item2 = self.PriorityItem(2.0, {'Another item'})\n item3 = self.PriorityItem(3.0, (1, 'Third item'))\n item4 = self.PriorityItem(4.0, 0)\n hd.push(1, item1)\n hd.push(1, item3)\n hd.push(1, item2)\n hd.push(1, item4)\n self.assertEqual(hd.get_result(), {1: [item4, item3]})", "def insert(self, k): \r\n self.heap_array.append(k)\r\n\r\n current_index = len(self.heap_array) - 1\r\n while (current_index > 0):\r\n parent_index = ((current_index-1)//2)\r\n\r\n if int(self.heap_array[current_index]) > int(self.heap_array[parent_index]): # if no vialation of the min heap property \r\n return\r\n else: # if heap property is broken then swap the parent and child that are breaking the prop \r\n self.heap_array[parent_index], self.heap_array[current_index] = self.heap_array[current_index], self.heap_array[parent_index]\r\n current_index = parent_index", "def __len__(self):\n\t\treturn len(self.heap)", "def __init__(self):\n self.heap = [None]", "def __init__(self):\n self.heapList = [0]\n self.currentSize = 0", "def heapify(self, l):\n if 
not l:\n return\n self.h = [None]\n for i in xrange(0, len(l)):\n self.push(l[i])", "def heapify(self, l):\n if not l:\n return\n self.h = [None]\n for i in xrange(0, len(l)):\n self.push(l[i])", "def heapify(self):\n heapify(self._heap)", "def __init__(self):\n self.max_heap = [] # to contain left smaller half, or + 1\n self.min_heap = [] # to contain right bigger half", "def heappush(heap, item):\n pass", "def __init__(self, x):\n self.elements = x\n self._heapify()", "def priority_queue_test(a):\r\n pq = Priority_Queue()\r\n dummy = []\r\n if pq.is_empty() == True:\r\n print('pq is empty.')\r\n \r\n array_to_pq(pq, a)\r\n print('Converting a into a pq...')\r\n \r\n if pq.is_empty() == False:\r\n print('a has been transferred into pq!')\r\n \r\n print('\\nRemoving pq...')\r\n while pq.is_empty() == False:\r\n temp = pq.remove()\r\n print(temp)\r\n dummy.append(temp)\r\n \r\n print('\\pq is empty. Inserting values back into queue...')\r\n while dummy != []:\r\n temp = dummy.pop()\r\n print(temp)\r\n pq.insert(temp)\r\n \r\n print('\\nPushing complete! Peeking...')\r\n print(pq.peek())\r\n \r\n print('\\npq is {} objects long!'.format(len(pq)))\r\n\r\n return", "def testMaxSize(self):\n hd = HeapDict(size=2)\n hd.push('a', 1)\n hd.push('a', 2)\n hd.push('a', 3)\n hd.push('b', 3)\n hd.push('b', 2)\n hd.push('b', 1)\n # The order is always descending.\n self.assertEqual(hd.get_result(), {'a': [3, 2], 'b': [3, 2]})", "def test_priority_que_success_priority(priority_queue):\n priority_queue.insert(10)\n priority_queue.insert(5)\n priority_queue.insert(100, 1)\n priority_queue.insert(10, 1)\n assert priority_queue._heap[0].value == 100", "def build_heap(arr):\n for i in range((len(arr)//2), -1, -1):\n heapify(arr,index=i, size=len(arr)-1)", "def heap_contents(self):\r\n return self.items[1:self.size+1]", "def heapify(self):\r\n if self._size:\r\n start = self._parent(len(self._data)-1) # who'se the last parent?\r\n for index in range(start, -1, -1): # for all parents\r\n self.down_heap(index) # fix your heap\r", "def __init__(self):\n # max_heap stores smaller half\n # min_heap stores larger half\n self.heaps = ([], [])", "def test_priority_que_success_priority_multiple(priority_queue):\n priority_queue.insert(20)\n priority_queue.insert(5)\n priority_queue.insert(100, 5)\n priority_queue.insert(10, 2)\n priority_queue.insert(50, 1)\n assert priority_queue._heap[0].value == 50", "def _heapify_after_add(self,ele):\r\n parent = self._parent(ele)\r\n if ele > 0 and self._data[ele] < self._data[parent]:\r\n self.swap(ele, parent)\r\n self._heapify_after_add(parent)", "def testRepeatedGetResult(self):\n hd = HeapDict(size=2)\n hd.push(1, (1, 10))\n hd.push(1, (1, 20))\n self.assertEqual(hd.get_result(), {1: [(1, 20), (1, 10)]})\n self.assertEqual(hd.get_result(), {1: [(1, 20), (1, 10)]})", "def test_priority_que_success_multiple(priority_queue_full):\n # import pdb; pdb.set_trace()\n assert (priority_queue_full._heap[0].value,\n priority_queue_full._heap[-1].value) == (11, 3)", "def build_heap(self, alist):\r\n if len(alist) > self.capacity:\r\n return False\r\n else:\r\n i = len(alist) // 2\r\n self.size = len(alist)\r\n self.items = [0] + alist[:] + [None]*(self.capacity+1-len(alist))\r\n while (i > 0):\r\n self.perc_down(i)\r\n i = i - 1\r\n return True", "def testIntegerKeys(self):\n hd = HeapDict(size=1)\n hd.push(1, 2)\n self.assertEqual(hd.get_result(), {1: [2]})", "def __delete(self, index):\n heap = self.heap\n if index < len(heap) - 1:\n heap[index] = heap[len(heap) - 1]\n heap.pop()\n 
else:\n heap.pop()\n return\n\n value = heap[index][self.VALUE]\n while True:\n left_child_index = 2 * index + 1 # L child of k is 2k+1\n right_child_index = 2 * index + 2 # R child of k is 2k+2\n child_index = None\n if right_child_index < len(heap):\n right_child_value = heap[right_child_index][self.VALUE]\n left_child_value = heap[left_child_index][self.VALUE]\n child_index = right_child_index if (right_child_value < left_child_value) else left_child_index\n if child_index is None and left_child_index < len(heap):\n child_index = left_child_index\n\n if child_index is None or value <= heap[child_index][self.VALUE]:\n return\n else:\n self.__swap(child_index, index)\n index = child_index", "def testZeroSize(self):\n hd = HeapDict(size=0)\n hd.push('a', 1)\n hd.push('b', 1)\n self.assertEqual(hd.get_result(), {'a': [], 'b': []})", "def __init__(self):\n # max heap\n self.small = []\n # min heap\n self.large = []", "def __init__(self, heap=[]):\n\n # logger_cagada.debug(\"pero si el orig heap %s\" % heap)\n heapq.heapify(heap)\n # logger_cagada.debug(\"a cihnga el heap %s\" % heap)\n self.heap = heap\n self.entry_finder = dict({i[-1]: i for i in heap})\n # logger_cagada.debug(\"el finder es %s\" % self.entry_finder)\n self.REMOVED = sys.maxsize", "def test_priority_que_success_min_no_priority(priority_queue):\n priority_queue.insert(10)\n priority_queue.insert(5)\n priority_queue.insert(100)\n assert priority_queue._heap[0].value == 10", "def extract_min(self):\r\n if self.is_empty():\r\n return None\r\n min_elem = self.heap_array[0]\r\n aux_elem = self.heap_array.pop()\r\n\r\n if self.is_empty() == False:\r\n self.heap_array[0] = aux_elem\r\n\r\n current_index = 0\r\n left_child_index = (2 * current_index) + 1\r\n current_value = self.heap_array[current_index]\r\n\r\n while left_child_index < len(self.heap_array): # loop that will repeat until no violation of the minheap properties exist\r\n current_min = current_value\r\n\r\n for i in range(2): # this loop is in place so that both children are compared and the smaller of the two is chosen \r\n if (left_child_index + i) > len(self.heap_array)-1: # condition to avoid out of bounds\r\n continue\r\n else:\r\n if int(self.heap_array[left_child_index + i]) < int(current_min): # if child is smaller than parent\r\n current_min = self.heap_array[left_child_index + i ] # set current minimum value\r\n current_min_index = left_child_index + i # and cureent minimim index( index where current minimum value is found )\r\n if current_min == current_value: # if no property is broken (in this case, the parent is actually less than its' children)\r\n break\r\n else: # if propert is broken\r\n self.heap_array[current_index], self.heap_array[current_min_index] = self.heap_array[current_min_index], self.heap_array[current_index] # swap the elements \r\n current_index = current_min_index\r\n left_child_index = int((2 * current_index) + 1)\r\n return min_elem", "def delete(pq):\n\tif not pq.empty():\n\t\tn = len(pq.heap.items)\n\t\tindex = r.randint(1, n-1)\n\t\tdeleted = pq.delete(index)\n\t\tlogging.info(\"delete %s, got %s\", index, deleted)", "def test_insert(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n\n h.insert(7)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')\n\n h.insert(10)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')\n\n h.insert(5)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')", "def __len__(self):\n return len(self.__heap)", "def heapdown(h, k):\n\n #put this value in 
the correct place\n v = h[k]\n\n while 2 * k < len(h):\n\n #assign j to be the left child\n j = 2 * k\n\n #is there a child to the right\n if j + 1 < len(h):\n\n #is the left child smaller than the right child\n if h[j] < h[j+1]:\n j = j + 1\n\n #if v is greater than its larger child\n if v >= h[j]:\n break\n else:\n h[k] = h[j]\n k = j\n\n h[k] = v", "def __init__(self, value = None):\n if value == None:\n self.ar = []\n else:\n self.ar = list(value)\n self.n = (len(self.ar))\n\n start = self.n//2 - 1\n for i in range(start, -1, -1):\n self.heapify(i)", "def heap_sort(list):\n pass", "def buildHeap(A):\n n = len(A)\n for i in range(n/2-1, -1, -1):\n heapify(A, i, n)", "def __init__(self):\n # min heap for right part, max heap for left part\n self.minHeap_right = []\n self.maxHeap_left = []\n self.tot_num = 0", "def __init__(self, iterable=None):\n self.heap = []\n if iterable is not None:\n for item in iterable:\n self.push(item)", "def flotar(heap, indice):\n padre = (indice-1)//2\n while(indice > 0) and (heap.vector[padre][0] > heap.vector[indice][0]):\n heap.vector[padre], heap.vector[indice] = heap.vector[indice], heap.vector[padre]\n indice = padre\n padre = (padre-1)//2", "def test_remove(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n h.remove(2)\n\n self.assertTrue(Heap.is_heap(data), 'should preserve heap property')\n self.assertNotIn(8, h.data, 'the value corresponding to the index was removed')", "def testTuples(self):\n hd = HeapDict(size=3)\n hd.push(1, (1, 10))\n hd.push(1, (0, 10))\n hd.push(1, (1, 100))\n hd.push(1, (-1, 1000))\n self.assertEqual(hd.get_result(), {1: [(1, 100), (1, 10), (0, 10)]})", "def build_heap(self, arr):\n i = len(arr) // 2\n self.size = len(arr)\n self.heap_list = [-1] + arr[:]\n while i > 0:\n self.percolate_down(i)\n i = i - 1", "def constraint(self):\n with self.mutating:\n self.queue = heapq.nsmallest(self.max_size, self.queue)\n heapq.heapify(self.queue)", "def __len__(self):\n return len(self._heap)", "def __len__(self):\n return len(self._heap)", "def priority_queue_stress_test(self, pq, max_length=None):\n from resources.english import english_words\n words = english_words()\n if max_length:\n words = words[:max_length]\n for w in words:\n pq.enqueue(w, len(w))\n\n # First word out is longest... 
/ Last one out is smallest\n first = pq.dequeue()\n while pq:\n last = pq.dequeue()\n\n # Should be drained\n with self.assertRaises(RuntimeError):\n pq.dequeue()\n\n return (first, last)", "def __init__(self):\n self.min_heap = []\n self.max_heap = []\n self.size_max, self.size_min = 0, 0", "def priority_queue_full():\n from src.priorityq import PriorityQueue\n priority_queue = PriorityQueue()\n priority_queue.insert(15, 5)\n priority_queue.insert(12, 3)\n priority_queue.insert(11, 1)\n priority_queue.insert(6, 2)\n priority_queue.insert(17)\n priority_queue.insert(3)\n return priority_queue", "def flotar(heap, indice):\n padre = (indice-1)//2\n while (padre >= 0) and (heap.vector[padre] > heap.vector[indice]):\n heap.vector[padre], heap.vector[indice] = heap.vector[indice], heap.vector[padre]\n indice = padre\n padre = (padre - 1) // 2", "def get(self):\n size = self.size()\n if size < 0:\n return None\n res = self.heap[0]\n self.heap[0], self.heap[size - 1] = self.heap[size - 1], self.heap[0]\n self.heap.pop()\n self.sift_down(0)\n # self.sift_down_recursion(0)\n return res", "def heap_select(self, arr, k):\n h = []\n for i in range(len(arr)):\n if i < k:\n heapq.heappush(h, arr[i])\n else:\n heapq.heappush(h, arr[i])\n heapq.heappop(h)\n return h[0]", "def __init__(self):\n self.min_heap = []\n self.max_heap = []", "def __init__(self):\n self.minheap = []\n self.maxheap = []\n self.len_min = self.len_max = 0", "def test_pop(self):\n self.assertRaises(EmptyHeapException, self.minheap.pop)\n self.minheap.heap = [0, 1, 4, 7, 9]\n assert self.minheap.pop() == 1\n assert self.minheap.heap == [0, 4, 9, 7]", "def heapreplace(self, key, value):\n if self.is_empty():\n raise IndexError('Priority queue is empty')\n small = self.min()\n self._data[0]._key = key\n self._data[0]._value = value\n self._down_heap(0)\n return small", "def heap_up(self, index):\n # how can we do this recursively?\n parent_node_index = (index - 1)//2\n while self.store[index].key < self.store[parent_node_index].key and index > 0:\n self.swap(index, parent_node_index)\n index = parent_node_index\n parent_node_index = (index - 1)//2\n else:\n return self.store", "def heappop(heap):\n lastelt = heap.pop() # raises appropriate IndexError if heap is empty\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n Heap.siftup(heap, 0)\n return returnitem\n return lastelt", "def restructureHeap(self):\n\n self.i = 1\n # Storing the elements that already exist in a temporary list\n tempList = []\n for heapElement in self.heap:\n if heapElement != \"NaN\" :\n tempList.append( heapElement )\n\n # Initializing new heap\n self.heap = [\"NaN\"] * self.noOfElements\n\n # Storing all the elements in the temporary list in a continuous fashion in the new heap\n for element in tempList:\n self.insertElement(element, self.i)", "def __init__(self):\n self.__max_heap = []\n self.__min_heap = []", "def test_priority_que_success_multiple_empty(priority_queue):\n priority_queue.insert(15)\n priority_queue.insert(13, 1)\n assert (priority_queue._heap[0].value,\n priority_queue._heap[0].priority,\n priority_queue._heap[1].value) == (13, 1, 15)", "def heapsort(iterable):\n queue = []\n\n [heapq.heappush(queue, item) for item in iterable]\n\n return [heapq.heappop(queue) for i in range(len(queue))]", "def test_validate_factorial_heap_pq(self):\n from ch04.factorial_heap import PQ, validate\n\n end = 10000\n pq = PQ(end)\n for i in range(end):\n pq.enqueue(i, i)\n validate(pq)\n\n last = end-1\n while pq:\n self.assertEqual(last, pq.dequeue())\n last 
-= 1\n validate(pq)", "def poll(self):\n assert len(self.heap) > 0, \"ERROR: Heap is empty.\"\n item = self.heap[0]\n self.heap[0] = self.heap.pop()\n self.heapify_down()\n return item", "def test_priority_que_pop(priority_queue_full):\n # import pdb; pdb.set_trace()\n assert (priority_queue_full.pop(),\n priority_queue_full.pop(),\n priority_queue_full.pop(),\n priority_queue_full.pop()) == (11, 6, 12, 15)", "def heapify(seq):\n minheap = [0] + seq\n for i in range(len(seq)//2, 0, -1): #len(seq)//2 -= 1 to index 1\n minHeapify(minheap, i, seq)\n seq[:] = minheap[1:]\n return seq", "def build_heap(self):\n n = int((len(self.array) / 2) - 1)\n\n while n >= 0:\n self.heapify_top_down(n)\n n -= 1", "def test_insert(self):\n self.minheap.heap = [0, 1, 4, 6, 9]\n self.minheap.insert(2)\n assert self.minheap.heap == [0, 1, 2, 6, 9, 4]", "def construct_heap(self, elems):\n for e in elems:\n self.n += 1\n self.A.append(e)\n self.pos[e[0]] = self.n\n for i in range(self.n // 2, 0, -1):\n self.combine(i)", "def __build(self) -> None:\n parent_idx = 0\n left_idx = 1\n right_idx = 2\n length = len(self._array)\n\n # While the bottom/end of the min heap has not been reached\n while left_idx < length or right_idx < length:\n\n # initialize the child_idx to the child with the smaller value\n if right_idx < length:\n child_idx = right_idx if self._array[left_idx] > self._array[right_idx] else left_idx\n else:\n child_idx = left_idx\n\n # Swap the parent and child if the child's value is smaller than the parent's value\n if self._array[child_idx] < self._array[parent_idx]:\n self._swap(parent_idx, child_idx)\n parent_idx = child_idx\n right_idx = (2 * child_idx) + 2\n left_idx = (2 * child_idx) + 1\n # Otherwise, break out of the while loop\n else:\n break", "def heapify_down(self):\n index = 0\n while self.has_left_child(index):\n smaller_child_index = self.get_left_child_index(index)\n if self.has_right_child(index) and self.get_right_child(index) < self.get_left_child(index):\n smaller_child_index = self.get_right_child_index(index)\n if self.heap[index] < self.heap[smaller_child_index]:\n break\n else:\n self.swap_values(index, smaller_child_index)\n index = smaller_child_index", "def heap_push_max(heap, item):\n heap.append(item)\n heapq._siftdown_max(heap, 0, len(heap)-1)", "def heap_push(self, value):\n if self.find(value) is None:\n self.table.append(value)\n self.percolate_up(self.get_size() - 1)", "def build_heap(self, A: list):\n self.size = len(A)\n med = (self.size // 2) - 1 #Mid point of array\n for i in range(0, med + 1): #Reverse iteration\n self.heapify(A, med - i) #Reverse iteration", "def heap_sort(self, A):\n pass", "def heap_sort(self, A):\n pass" ]
[ "0.73582697", "0.7054749", "0.6949152", "0.6781712", "0.67741895", "0.6711578", "0.6673364", "0.6665171", "0.66525424", "0.66385454", "0.65639687", "0.6450138", "0.64492726", "0.6448973", "0.6430282", "0.6409417", "0.639338", "0.639338", "0.639338", "0.6392037", "0.63643914", "0.6341557", "0.63273746", "0.63237405", "0.6288639", "0.6270352", "0.6269258", "0.6249829", "0.62279725", "0.62221545", "0.6209808", "0.6209808", "0.6207377", "0.61952156", "0.6192008", "0.61887443", "0.61885923", "0.6186162", "0.61825985", "0.6169612", "0.61603355", "0.61601555", "0.6151227", "0.6140522", "0.61000586", "0.60976714", "0.60972244", "0.6087279", "0.608726", "0.6082392", "0.6075852", "0.6062153", "0.60534525", "0.6043071", "0.60413", "0.60388815", "0.60343325", "0.60340047", "0.60312253", "0.60208046", "0.6013437", "0.6003773", "0.60007024", "0.5999589", "0.5997047", "0.599499", "0.5994804", "0.59944296", "0.59801596", "0.59798294", "0.59798294", "0.59749603", "0.5974365", "0.59713966", "0.5942901", "0.59113485", "0.5910807", "0.5909388", "0.5906986", "0.59059936", "0.5904197", "0.5902201", "0.58913755", "0.5889977", "0.58775496", "0.587656", "0.58716464", "0.5868598", "0.58588374", "0.5856999", "0.5854318", "0.58516186", "0.5841642", "0.5838732", "0.5837089", "0.5834085", "0.58288854", "0.5825903", "0.5823865", "0.5818126", "0.5818126" ]
0.0
-1
Tests that example.com was in the dashboard.
def test_link_list(self): response = self.client.get('/tests/dashboard/') self.assertEqual(response.status_code, 200) self.assertContains(response, "example.com")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dashboard_page(self):\r\n\r\n result = self.client.get(\"/dashboard\", follow_redirects = True)\r\n self.assertNotIn(b\"Family Ties - Dashboard\", result.data)", "def test_dashboard_is_up(dashboard_address):\n response = requests.get(f\"{dashboard_address}/health\")\n assert response.status_code == 200\n assert response.text == \"ok\"", "def test_landing_page(self):\n # Create a test client\n client = server.app.test_client()\n\n # Use the test client to make requests\n result = client.get('/', follow_redirects=True)\n\n # Compare result.data with assert method\n self.assertIn(b'<p class=\"navbar-text\">Already have an account?</p>', \n result.data)", "def test_show_on_homepage(self) -> None:\n self.assert_show_on_homepage(apps.wakeup.main.Controller)", "def test_dashboard_page_status(self):\n response = self.client.get('/')\n self.assertEqual(response.status_code, 200)", "def test_dashboards_v2_show(self):\n pass", "def test_analytics_id(self):\n response = self.client.get(reverse('home'))\n self.assertContains(response, 'MyAwesomeAnalyticsCode')", "def test_visit(self, client, site, landing_page):\n response = client.get(landing_page.relative_url(site))\n assert response.status_code == 200", "def test_important_page(self):\n\n result = self.client.get(\"/\", follow_redirects=True)\n self.assertIn(\"Email\", result.data)", "def test_showing_dietitian_homepage(self):\n\n result = self.client.get(\"/dietitian/1\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Dietitian Dashboard\", result.data)\n\n result = self.client.get(\"/dietitian/2\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)", "def you_should_see_the_dashboard(driver):\n assert wait_on_element(driver, 10, xpaths.dashboard.title)\n assert wait_on_element(driver, 10, xpaths.dashboard.system_Info_Card_Title)", "def test_homepage_anon(self):\r\n\r\n with self.client:\r\n response = self.client.get('/')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'United States News', response.data)", "def test_dashboards_v2_link(self):\n pass", "def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data", "def test_functionality(self):\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n \n self.get_DashboardPage(\"Server Utilization\")\n \n self.get_DashboardPage(\"Total Server Utilization\")\n \n self.logout()", "def test_dashboard_view(self):\n target_url = url_for('dashboard.dashboard_panel')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def test_dashboard_has_dashboard_in_title(self):\n self.browser.get(self.warno_url)\n self.browser.find_element_by_link_text(\"Dashboard\").click()\n self.assertTrue('Dashboard' in self.browser.title, 'Dashboard did not have \"Dashboard\" in title')", "def test_visit(self, client, site, content_page):\n response = client.get(content_page.relative_url(site))\n assert response.status_code == 200", "def test_health_check(self):\n self.url = reverse(\"health-check\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_dashboards_v2_request_access(self):\n pass", "def test_homepage_redirect_patient(self):\n\n result = self.client.get(\"/\", 
follow_redirects=True)\n\n self.assertIn(b\"Dietitian Dashboard\", result.data)", "def test_get_ok(test_case, page):\n with test_case.app.test_client() as c:\n test_case.assertEqual(200, c.get('dashboard/{}'.format(page)).status_code)", "def test_home(self):\n self.selenium.get('{}/'.format(self.live_server_url))", "def test_dashboard_loads_properly(self):\n response = self.client.get('your_server_ip:8000/auth/login/expense')\n self.assertEqual(response.status_code, 404)", "def test_tenant_external_domain_should_be_accessible(self):\n response = self.client.get(self.home_url, HTTP_HOST=self.domain.domain)\n self.assertEqual(response.status_code, 200)", "def test_link_registered(self):\n response = self.client.get(reverse('misago:admin:users:accounts:index'))\n\n response = self.client.get(response['location'])\n self.assertContains(response, reverse('misago:admin:users:bans:index'))", "def test_dashboard_bad_urls(self):\n url = reverse('shipping.views.dashboard')\n # Fail\n response = self.client.get(url, dict(av=\"junk\"))\n eq_(response.status_code, 404)\n response = self.client.get(url, dict(ms=\"junk\"))\n eq_(response.status_code, 404)\n\n # to succeed we need sample fixtures\n appver, milestone = self._create_appver_milestone()\n\n # Succeed\n response = self.client.get(url, dict(ms=milestone.code))\n eq_(response.status_code, 200)\n response = self.client.get(url, dict(av=appver.code))\n eq_(response.status_code, 200)", "def test_dashboard(self):\n self.signup('Bo', 'Theo', '[email protected]', 'Bo1995', 'Bo1995')\n rv = self.login('[email protected]', 'Bo1995')\n self.assertIn(b'Create a Recipe Category', rv.data)", "def test_redirection(self):\n dashboard_url = reverse('dashboard')\n self.assertRedirects(self.response, dashboard_url)", "def test_login_required_dashboard(self):\r\n response = self.client.get(reverse('dashboard'))\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['Location'], 'http://testserver/accounts/login?next=/dashboard')", "def test_if_an_user_c_access_administration_panel(client):\n response = client.get(\"/admin/\")\n assert response.status_code != 200", "def test_visit(self, client, site, homepage):\n response = client.get(homepage.relative_url(site))\n assert response.status_code == 200", "def test_dashboard_not_signed(self):\n views_url = ('/dashboard/',\n '/accounts/picture/')\n #create a get request\n for view in views_url:\n response = self.client.get(view)\n #the user was not logged in, the user should be redirected\n self.assertEqual(response.status_code, 302,\n msg=str(response.request))", "def test_showing_patient_homepage(self):\n\n result = self.client.get(\"/patient/1\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Patient Dashboard\", result.data)\n\n result = self.client.get(\"/patient/2\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)", "def test_global_staff_access(self):\r\n self.login(self.global_staff_user)\r\n\r\n # and now should be able to load both\r\n urls = [reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}),\r\n reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})]\r\n\r\n for url in urls:\r\n check_for_get_code(self, 200, url)", "def i_am_on_the_zoo_website():\n driver.get(\"http://www.thetestroom.com/webapp/\")", "def test_index_non_authorized(browser, host):\n # Evo goes to home page\n browser.get(host)\n\n # He see the blackdog 
app title\n assert 'dafipost' in browser.title.lower()\n \n # He see 2 buttons to login and create new account\n assert browser.find_element_by_xpath(\"//*[contains(text(), 'Sign up')]\")", "def test_can_access_admin(self):\n\n #Homepage\n self.browser.get(self.live_server_url + '/admin/')\n\n body = self.browser.find_element_by_tag_name('body')\n\n self.assertIn('Django administration',body.text,\"Cannot get to /admin/\")", "def test_dashboard(admin_client):\n url = reverse(\"admin:index\")\n\n response = admin_client.get(url)\n templates_used = [t.name for t in response.templates]\n\n assert response.status_code == 200\n assert templates_used == [\n \"admin/index.html\",\n \"admin/base_site.html\",\n \"admin/base.html\",\n \"jazzmin/includes/ui_builder_panel.html\",\n ]", "def test_homepage_redirect_patient(self):\n\n result = self.client.get(\"/\", follow_redirects=True)\n\n self.assertIn(b\"Patient Dashboard\", result.data)", "def test_tenant_domain_should_be_accessible(self):\n response = self.client.get(self.home_url, HTTP_HOST=self.site.domain)\n self.assertEqual(response.status_code, 200)", "def test_homepage(self):\r\n\r\n result = self.client.get(\"/\")\r\n self.assertIn(b\"Welcome!\", result.data)", "def test_get_dealer_landing_page(self):\n pass", "def test_main_page(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Improving the FOIA request experience' in content)", "def test_no_redirect(self):\n self.create_user_and_login(\n agreed_to_terms_of_service=True,\n filled_out=True\n )\n\n resp = self.client.get(DASHBOARD_URL)\n assert resp.status_code == 200", "def test_home(self):\n response = self.client.get('/')\n self.assertContains(response, 'Home Page', 1, 200)", "def test_dashboard_no_courses(self):\r\n self.auth_page.visit()\r\n self.dashboard_page.visit()", "def test_homepage(self):\n \n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"What type of user are you?\", result.data)", "def test_view_url_exists_api_alerts(self):\n response = self.client.get('/api/alerts/')\n self.assertEqual(response.status_code, 200)", "def test_notification_schedule(self):\n\n response = self.client.get(self.dashboard_url)\n self.assertEqual(response.status_code, 200)", "def test_notification_schedule(self):\n\n response = self.client.get(self.dashboard_url)\n self.assertEqual(response.status_code, 200)", "def test_view_url_exists_at_desired_location(self):\n response = self.client.get('')\n self.assertEqual(response.status_code, 200)", "def test_microsite_anonymous_homepage_content(self):\r\n\r\n resp = self.client.get('/', HTTP_HOST=MICROSITE_TEST_HOSTNAME)\r\n self.assertEqual(resp.status_code, 200)\r\n\r\n # assert various branding definitions on this Microsite\r\n # as per the configuration and Microsite overrides\r\n\r\n self.assertContains(resp, 'This is a Test Microsite Overlay') # Overlay test message\r\n self.assertContains(resp, 'test_microsite/images/header-logo.png') # logo swap\r\n self.assertContains(resp, 'test_microsite/css/test_microsite') # css override\r\n self.assertContains(resp, 'Test Microsite') # page title\r\n\r\n # assert that test course display name is visible\r\n self.assertContains(resp, 'Robot_Super_Course')\r\n\r\n # assert that test course that is outside microsite is not visible\r\n self.assertNotContains(resp, 'Robot_Course_Outside_Microsite')\r\n\r\n # assert that footer template has been properly 
overriden on homepage\r\n self.assertContains(resp, 'This is a Test Microsite footer')\r\n\r\n # assert that the edX partners section is not in the HTML\r\n self.assertNotContains(resp, '<section class=\"university-partners university-partners2x6\">')\r\n\r\n # assert that the edX partners tag line is not in the HTML\r\n self.assertNotContains(resp, 'Explore free courses from')", "def test_dashboards_v2_list(self):\n pass", "def test_home(self):\n\n with self.client:\n result = self.client.get('/users')\n self.assertEqual(result.status_code, 200)\n self.assertIn(b'<h1 class=\"col-2\">Users</h1>', result.data)", "def test_not_microsite_anonymous_homepage_content(self):\r\n\r\n resp = self.client.get('/')\r\n self.assertEqual(resp.status_code, 200)\r\n\r\n # assert various branding definitions on this Microsite ARE NOT VISIBLE\r\n\r\n self.assertNotContains(resp, 'This is a Test Microsite Overlay') # Overlay test message\r\n self.assertNotContains(resp, 'test_microsite/images/header-logo.png') # logo swap\r\n self.assertNotContains(resp, 'test_microsite/css/test_microsite') # css override\r\n self.assertNotContains(resp, '<title>Test Microsite</title>') # page title\r\n\r\n # assert that test course display name IS NOT VISIBLE, since that is a Microsite only course\r\n self.assertNotContains(resp, 'Robot_Super_Course')\r\n\r\n # assert that test course that is outside microsite IS VISIBLE\r\n self.assertContains(resp, 'Robot_Course_Outside_Microsite')\r\n\r\n # assert that footer template has been properly overriden on homepage\r\n self.assertNotContains(resp, 'This is a Test Microsite footer')", "def test_home(self):\n\t\tresponse = self.client.get('/')\n\t\tself.assertContains(response, 'Home Page', 1, 200)", "def test_view_login(self):\n url = reverse('login:login')\n response = self.client.get(url, follow=True)\n self.assertContains(response, \"Sign In With Google\")", "def test_frontpage(self):\n response = self.client.get('/')\n self.assertEqual(response.status_code, 200)", "def test_admin_index(self):\n response = self.client.get('/admin/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Djrill\")", "def test_google_exists(self):\n\t\tself.driver.get(\"http://www.google.com\")\n\t\tself.assertIn(\"Google\", self.driver.title)", "def test_static_tab(self):\r\n # From the course info page, navigate to the static tab\r\n self.course_info_page.visit()\r\n self.tab_nav.go_to_tab('Test Static Tab')\r\n self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))", "def testConstructDashboardURLSmoke(self):\n stage = self.ConstructStage()\n\n exp_url = ('https://uberchromegw.corp.google.com/i/chromeos/builders/'\n 'x86-generic-paladin/builds/%s' % DEFAULT_BUILD_NUMBER)\n self.assertEqual(stage.ConstructDashboardURL(), exp_url)\n\n stage_name = 'Archive'\n exp_url = '%s/steps/%s/logs/stdio' % (exp_url, stage_name)\n self.assertEqual(stage.ConstructDashboardURL(stage=stage_name), exp_url)", "def test_tenant_marketing_domain_should_be_accessible(self):\n response = self.client.get(\n self.home_url, HTTP_HOST=self.marketing_page.domain)\n self.assertEqual(response.status_code, 200)", "def test_admin_list(self):\n response = self.client.get('/tests/dashboard/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '<a href=\"/admin/auth/group/\">Group</a>', html=True)\n self.assertContains(response, '<a href=\"/admin/auth/user/\">User</a>', html=True)", "def test_dashboards_v2_share(self):\n pass", "def test_home_page(self):\r\n url = 
reverse('home')\r\n response = self.client.get(url)\r\n\r\n self.assertEqual(response.status_code, 200)", "def test_homepage(self):\n rc = self.app.get('/')\n assert b'Welcome to Code TA' in rc.data\n assert b'Logout' not in rc.data", "def test_homepage(self):\n\n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Homepage\", result.data)", "def test_redirect_if_not_logged_in(self):\n response = self.client.get(self.account_overview_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, self.login_url)", "def test_homepage_redirect(self):\n with self.client as client:\n resp = client.get(\"/\", follow_redirects=True)\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn('Davis Test', html)", "def test_home(self):\n\n response = self.client.get(reverse('home'))\n\n assert response.status_code == 200", "def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)", "def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)", "def test_data_management_page(self):\n self.login(self.data_admin.user.username)\n self._go_to_data_management_page()\n self.wait_until_tabs_open()\n tabs = self.selenium.window_handles\n self.selenium.switch_to_window(tabs[1])\n self.wait_until_element_appear('site-name', 10)\n self.check_page_title(self.data_management_config.get('PAGE_TITLE'))", "def test_org_staff_access(self):\r\n self.login(self.org_staff_user)\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n check_for_get_code(self, 200, url)\r\n\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})\r\n check_for_get_code(self, 200, url)\r\n\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.other_org_course.id.to_deprecated_string()})\r\n check_for_get_code(self, 404, url)", "def test_if_home_is_successful(client):\n\n url = reverse(\"home\")\n response = client.get(url)\n assert response.status_code == 200", "def test_user_list(self):\n response = self.client.get('/tests/dashboard/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'testuser', html=True)\n self.assertContains(response, '[email protected]', html=True)", "def test_get_page(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_get_page(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_helpful_page_view(self):\n target_url = url_for('dashboard.helpful_pages')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def test_login_can_see_usagelist(self):\n self.client.login(username='testuser', password='q2w3E$R%')\n response = self.client.get(reverse('api_v1:usage-list'), follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '1234')\n self.assertContains(response, '2018')\n self.assertContains(response, 'meter_url')\n self.assertContains(response, 'http://testserver/api/v1/meter/1')", "def test_admin_can_login_to_web_portal(admin):", "def test_view_url_accessible_by_name(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)", "def 
test_showing_dietitian_account(self):\n\n result = self.client.get(\"/dietitian/1/account\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Account Details\", result.data)\n\n result = self.client.get(\"/dietitian/2/account\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)", "def test_shoplists_dashboard_without_login_redirects(self):\n tester = app.test_client(self)\n response = tester.get('/show_shoplists', follow_redirects=True)\n self.assertEqual(response.status_code, 200)", "def test_homepage_with_location(self):\r\n\r\n with self.client:\r\n response = self.client.get('/?location=US-CA')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'California News', response.data)\r\n\r\n response = self.client.get('/?location=US-FAKE')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'No data found for that region.', response.data)", "def you_should_see_the_dashboard(driver):\n rsc.Verify_The_Dashboard(driver)\n if wait_on_element(driver, 2, '//h1[contains(.,\"End User License Agreement - TrueNAS\")]'):\n try:\n assert wait_on_element(driver, 2, '//button[@ix-auto=\"button__I AGREE\"]', 'clickable')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__I AGREE\"]').click()\n if wait_on_element(driver, 2, xpaths.button.close, 'clickable'):\n driver.find_element_by_xpath(xpaths.button.close).click()\n except ElementClickInterceptedException:\n assert wait_on_element(driver, 2, xpaths.button.close, 'clickable')\n driver.find_element_by_xpath(xpaths.button.close).click()\n assert wait_on_element(driver, 2, '//button[@ix-auto=\"button__I AGREE\"]', 'clickable')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__I AGREE\"]').click()", "def test_show_login_page(self):\n with self.client as c:\n\n res = c.get(\"/login\")\n html = res.get_data(as_text=True)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"Don't have an account?\", html)", "def test_org_instructor_access(self):\r\n self.login(self.org_instructor_user)\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n check_for_get_code(self, 200, url)\r\n\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})\r\n check_for_get_code(self, 200, url)\r\n\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.other_org_course.id.to_deprecated_string()})\r\n check_for_get_code(self, 404, url)", "def test_url_existence(self):\n self.assertEquals(self.response.status_code, 200)", "def test_health(self):\n self.assert_request('get', '/_health')", "def test_data_admin_page(self):\n self.login(self.data_admin.user.username)\n self._go_to_data_admin_page()\n self.check_page_title(self.data_admin_config.get('PAGE_TITLE'))\n self.check_page_contains_ids(self.data_admin_config.get('ADMIN_LINKS'))", "def test_view_url_exists_at_desired_location(self):\n response = self.client.get('/movies/')\n self.assertEqual(response.status_code, 200)", "def about_page_test(self):\r\n # default for ENABLE_MKTG_SITE is False.\r\n self.assertEquals(self.get_about_page_link(), \"//localhost:8000/courses/mitX/101/test/about\")", "def test_scan(self):\n self.assertIn(\"index.html\", self.site.items)\n self.assertIn(\"test/test.html\", self.site.items)\n self.assertIn(\"test/test/test.html\", self.site.items)", "def test_show_register_page(self):\n with self.client as c:\n\n res = c.get(\"/register\")\n html = 
res.get_data(as_text=True)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"Already have an account?\", html)\n self.assertNotIn('<nav class=\"navbar navbar-custom border-bottom border-light navbar-expand-md navbar-dark sticky-top\">', html)", "def test_dns_dashboard(self):\n wait_for_pod_state(\"\", \"kube-system\", \"running\", label=\"k8s-app=kube-dns\")\n validate_dns_dashboard()", "def test_agencies_page(self):\n response = self.client.get(reverse('agencies'))\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Department of Homeland Security' in content)", "def test_get_tab(self):\n actions.login(ADMIN_EMAIL, is_admin=True)\n response = self.get(self.TAB_URL)\n self.assertEqual(response.status_code, 200)" ]
[ "0.7439744", "0.71425265", "0.6988231", "0.69753", "0.6952687", "0.69291663", "0.69152224", "0.69117343", "0.68734396", "0.68194866", "0.67331254", "0.673261", "0.6697194", "0.66839606", "0.66659987", "0.66591775", "0.6643598", "0.66313016", "0.6605587", "0.65915424", "0.65859234", "0.6574944", "0.65737844", "0.6572512", "0.6559295", "0.6555671", "0.65385664", "0.64996845", "0.64979285", "0.64962834", "0.6484998", "0.6466764", "0.6464762", "0.6463675", "0.6435708", "0.6420004", "0.6417578", "0.6411867", "0.6410266", "0.6398294", "0.6382813", "0.6382581", "0.63825643", "0.63642937", "0.6333519", "0.63228124", "0.6320125", "0.6307601", "0.6307389", "0.6305484", "0.6305484", "0.6299938", "0.6267638", "0.6267505", "0.62450886", "0.6240496", "0.62395436", "0.6237318", "0.62270373", "0.62250763", "0.62232816", "0.62190837", "0.62010956", "0.62001634", "0.6198842", "0.61896104", "0.6180069", "0.61691296", "0.61631984", "0.61596954", "0.6157679", "0.61521167", "0.61435443", "0.61435443", "0.6140768", "0.61337185", "0.61200213", "0.6113951", "0.6110741", "0.6110741", "0.61099", "0.6106055", "0.6105747", "0.6101124", "0.6100539", "0.60848886", "0.6078913", "0.60734093", "0.60673565", "0.6066235", "0.6066081", "0.60652316", "0.6063445", "0.60569626", "0.6047161", "0.6032001", "0.6028922", "0.6024133", "0.6023157", "0.60153383" ]
0.7536914
0
Tests that the admin list found the User and Group admins
def test_admin_list(self): response = self.client.get('/tests/dashboard/') self.assertEqual(response.status_code, 200) self.assertContains(response, '<a href="/admin/auth/group/">Group</a>', html=True) self.assertContains(response, '<a href="/admin/auth/user/">User</a>', html=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_admin_calendar_user_admin_list(self):\n response = self.client.get(\"/admin/auth/calendaruser/\")\n self.assertEqual(response.status_code, 200)", "def test_cannot_remove_all_admins(self):\n r = self.app.get('/admin/groups/')\n admin_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[1]\n admin_id = admin_holder['data-group']\n users = admin_holder.find('ul', {'class': 'users'}).findAll(\n 'li', {'class': 'deleter'})\n assert len(users) == 1\n r = self.app.post('/admin/groups/remove_user', params={\n 'role_id': admin_id,\n 'username': 'admin1'})\n assert r.json[\n 'error'] == 'You must have at least one user with the Admin role.'\n r = self.app.get('/admin/groups/')\n admin_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[1]\n users = admin_holder.find('ul', {'class': 'users'}).findAll(\n 'li', {'class': 'deleter'})\n assert len(users) == 1", "def test_admin_user_list_all_users(self):\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, self.users.data)", "def test_09_admin_users_as_admin(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data", "def test_01_admin_index(self):\r\n self.register()\r\n res = self.app.get(\"/admin\", follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"There should be an index page for admin users and apps\"\r\n assert \"Settings\" in res.data, err_msg\r\n divs = ['featured-apps', 'users', 'categories', 'users-list']\r\n for div in divs:\r\n err_msg = \"There should be a button for managing %s\" % div\r\n assert dom.find(id=div) is not None, err_msg", "def test_admin_user(self):\n user = self.template_users['staff_user']\n self.client.login(email=user['email'], password=user['password'])\n\n # Admins can see everything\n response = self.client.get(reverse('api:log-list'))\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], self.object_count)\n\n # Deletion should be possible\n response = self.client.post(reverse('api:log-erase'), {\n 'before': str(timezone.now()),\n 'max_severity': LogEntry.ERROR,\n })\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['deleted'], self.object_count)\n self.assertEqual(LogEntry.objects.count(), 0)", "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def test_admin(self):\n assert(admin)", "def test_users_listed(self):\n\n # Get the admin url and send a GET request\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n # Assertions\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_number_of_group_admins(self):\n\n group0 = self.test_save(name='group1')\n group1 = self.test_save(name='group2')\n user0 = self.user\n user1 = self.user1\n \n group0.user_set.add(user0)\n group0.user_set.add(user1)\n user0.grant(\"admin\", group0)\n group1.user_set.add(user0)\n group1.user_set.add(user1)\n\n self.assertEqual(number_group_admins(group0), 1)\n self.assertEqual(number_group_admins(group1), 0)\n user1.grant(\"admin\", group1)\n 
self.assertEqual(number_group_admins(group1), 1)\n user1.grant(\"admin\", group0)\n self.assertEqual(number_group_admins(group0), 2)", "def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)", "def test_admin_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.admin_header)\n self.assertEqual(response.status_code, 200)", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_users_listed(self):\n # the url is defined in django admin documentation\n # it generate the url for the list of user page\n # it is good using that instead of the url in case it changes\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def user_is_admin(user):\n return user in admins", "def test_is_admin_user(self):\n admin = User.objects.get(email='[email protected]')\n self.assertEqual(admin.is_staff, True)", "def get_admins(self):\n admins = User.objects.filter(Q(groups__name=self.admin_group_name()) | Q(is_superuser=True)).distinct()\n return admins", "def test_new_admin_subscriptions(self):\n r = self.app.get('/admin/groups/')\n admin_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[1]\n admin_id = admin_holder['data-group']\n with audits('add user test-user to Admin'):\n self.app.post('/admin/groups/add_user', params={\n 'role_id': admin_id,\n 'username': 'test-user'})\n p_nbhd = M.Neighborhood.query.get(name='Projects')\n p = M.Project.query.get(shortname='test', neighborhood_id=p_nbhd._id)\n uid = M.User.by_username('test-user')._id\n for ac in p.app_configs:\n sub = M.Mailbox.subscribed(\n user_id=uid, project_id=p._id, app_config_id=ac._id)\n assert sub, 'New admin not subscribed to app %s' % ac\n\n \"\"\"\n When user is removed from admins group then user must be unsubscribed\n from all the tools in the project\n \"\"\"\n self.app.post('/admin/groups/remove_user', params={\n 'role_id': admin_id,\n 'username': 'test-user'})\n for ac in p.app_configs:\n sub = M.Mailbox.subscribed(\n user_id=uid, project_id=p._id, app_config_id=ac._id)\n assert not sub, 'New admin not unsubscribed to app %s' % ac", "def test_get_all_user(self):\n response = self.client().get(AuthTestCase.admin)\n # assert the response code\n self.assertEqual(response.status_code, 200)", "def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False", "def test_subroles(self):\n def check_roles(r):\n dev_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[2]\n mem_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[3]\n assert 'All users in Admin group' in dev_holder.text\n assert 'All users in Developer group' in mem_holder.text\n\n r = self.app.get('/admin/groups/')\n\n admin_holder = r.html.find(\n 'table', {'id': 'usergroup_admin'}).findAll('tr')[1]\n admin_id = admin_holder['data-group']\n # test that subroles are intact after user added\n with audits('add user test-user to Admin'):\n r = self.app.post('/admin/groups/add_user', params={\n 'role_id': admin_id,\n 'username': 'test-user'})\n r = self.app.get('/admin/groups/')\n check_roles(r)\n # test that subroles are intact after user deleted\n with audits('remove user test-user from Admin'):\n r = self.app.post('/admin/groups/remove_user', params={\n 'role_id': admin_id,\n 
'username': 'test-user'})\n r = self.app.get('/admin/groups/')\n check_roles(r)", "def admin_list(message):\n load_users(message._client.users)\n names = list_to_names(user_list.admin_list)\n message.reply('My admins are: {}'.format(\", \".join(names)))", "def test_permissions(self):\n self.assert_('admin' in get_model_perms(Group))", "def test_admin_user_list_all_users_permission_denied(self):\n self.client.logout()\n self.client.login(\n username=self.invalid_user.username,\n password=self.invalid_user.password\n )\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_11_admin_user_not_listed_in_search(self):\r\n self.register()\r\n data = {'user': 'john'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def get_admins(self):\n return self.admins_group.user_set.all()", "def test_add_admin_to_org(self):\n pass", "def get_admin_users() -> User:\n return User.objects.filter(group__name__contains=\"admin\")", "def validate_admin(self, request):\n\n self.validate_login(request)\n\n if request.session['id'] not in self.admins:\n handler.logHelper.log_it_visit(request, __name__ + '.validate_admin', authorized=False)\n raise PermissionDenied('You need to be an admin to access this page.')", "def is_admin(self, user):\n return user.name in self.admins", "def test_admin(self):\r\n from django.contrib import admin\r\n admin.autodiscover()\r\n\r\n from adrest.models import AccessKey\r\n self.assertTrue(AccessKey in admin.site._registry)\r\n\r\n from adrest.models import Access\r\n self.assertTrue(Access in admin.site._registry)", "def test_can_access_admin(self):\n\n #Homepage\n self.browser.get(self.live_server_url + '/admin/')\n\n body = self.browser.find_element_by_tag_name('body')\n\n self.assertIn('Django administration',body.text,\"Cannot get to /admin/\")", "def admins(message):\n hf.query_users(message, hf.get_users(), \"admin\")", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n nt.assert_true(self.view.test_func())", "def test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def 
test_admin_get(self, *args, **kwargs):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = True\n res = self.view.get(self.request, *args, **kwargs)\n nt.assert_equal(res.status_code, 200)", "def check_admin(self, user: TelegramController.User = None, id: str = None):\n\n if id == None:\n id = user.id\n\n return md5((str(id) + \"admin\").encode()).hexdigest() in self.__admins", "def test_user_isnt_admin():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n for page in ['pages', 'teams', 'scoreboard', 'chals', 'statistics', 'config']:\n r = client.get('/admin/{}'.format(page))\n assert r.location.startswith(\"http://localhost/login?next=\")\n assert r.status_code == 302\n destroy_ctfd(app)", "def test_view_list(self):\n user = self.user\n group = self.test_save()\n group0 = self.test_save(name='group1')\n group1 = self.test_save(name='group2')\n group2 = self.test_save(name='group3')\n c = Client()\n url = '/groups/'\n \n # anonymous user\n response = c.get(url, follow=True)\n self.assertEqual(200, response.status_code)\n self.assertTemplateUsed(response, 'registration/login.html')\n \n # unauthorized user (user with admin on no groups)\n self.assert_(c.login(username=user.username, password='secret'))\n response = c.get(url)\n self.assertEqual(403, response.status_code)\n \n # authorized (permission)\n user.grant('admin', group)\n user.grant('admin', group1)\n response = c.get(url)\n self.assertEqual(200, response.status_code)\n self.assertEquals('text/html; charset=utf-8', response['content-type'])\n self.assertTemplateUsed(response, 'object_permissions/group/list.html')\n groups = response.context['groups']\n self.assert_(group in groups)\n self.assert_(group1 in groups)\n self.assertEqual(2, len(groups))\n \n # authorized (superuser)\n user.revoke('admin', group0)\n user.revoke('admin', group1)\n user.is_superuser = True\n user.save()\n response = c.get(url)\n self.assertEqual(200, response.status_code)\n self.assertEquals('text/html; charset=utf-8', response['content-type'])\n self.assertTemplateUsed(response, 'object_permissions/group/list.html')\n groups = response.context['groups']\n self.assert_(group in groups)\n self.assert_(group0 in groups)\n self.assert_(group1 in groups)\n self.assert_(group2 in groups)\n self.assertEqual(4, len(groups))", "def admin_edit_admins():\n return user_management_handler(\"show_admin_edit_admins\", \"new_admins\", True)", "async def _ad_list(self, ctx):\n admin_list = self.database.get_admins(ctx.guild.id)\n if len(admin_list) > 0:\n out = \"```\"\n for admin in admin_list:\n admin_name = self.bot.get_user(admin.user_id)\n admin_name = str(admin_name) if admin_name is not None else admin.user_id\n out += f\"{admin_name}\\n\"\n out += \"```\"\n await ctx.send(out)\n else:\n await ctx.send(\"This guild currently has no administrators.\")", "def test_data_admin_page(self):\n self.login(self.data_admin.user.username)\n self._go_to_data_admin_page()\n self.check_page_title(self.data_admin_config.get('PAGE_TITLE'))\n self.check_page_contains_ids(self.data_admin_config.get('ADMIN_LINKS'))", "def test_user_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n #assert are django checks on http request is 200\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def admin_can_view_all_user_accounts(self):\n resp = self.admin_create_user()\n 
reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertIn('love', str(reply['users'][1]['username']))\n self.assertIn('walker', str(reply['users'][2]['username']))\n self.assertEqual(resp.status_code, 200)", "def administrators(self):\n store = self['__store']\n admin_group = store.get(self.get('admin_group_id', None))\n if admin_group:\n return admin_group.name\n return 'nothing'", "def test_admin_event_admin_list(self):\n response = self.client.get(\"/admin/appointment/event/\")\n self.assertEqual(response.status_code, 200)", "def test_get_users_admin(client: FlaskClient, db_session) -> None:\n username = create_random_username()\n populate_database_with_users(db_session)\n # Admin users are allowed to make the request\n auth_token = create_auth_token(username, admin=True)\n response = get_users(client, auth_token.signed)\n assert response.status_code == HTTPStatus.OK\n # List of users matches all the users in the database\n json_data: t.Optional[dict] = response.get_json()\n assert json_data is not None\n assert \"users\" in json_data\n assert isinstance(json_data[\"users\"], list)\n resp_users: t.List[dict] = json_data[\"users\"]\n db_users = GifSyncUser.get_all()\n assert len(db_users) == len(resp_users)\n for db_user in db_users:\n match_resp_user_list = [\n resp_user for resp_user in resp_users if resp_user.get(\"id\") == db_user.id\n ]\n assert len(match_resp_user_list) == 1\n match_resp_user = match_resp_user_list[0]\n assert match_resp_user.get(\"username\") == db_user.username\n assert isinstance(match_resp_user.get(\"gifs\"), list)\n assert len(match_resp_user[\"gifs\"]) == len(db_user.gifs)", "def test_admin_can_login_to_web_portal(admin):", "def test_user_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )", "def test_admin_user_login(self):\n self.login(\"admin\", \"admin\")\n self.should_see(\"This is your profile, admin.\")", "def test_user_group_controller_list(self):\n pass", "def test_admin_index(self):\n response = self.client.get('/admin/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Djrill\")", "def test_an_admin_view(admin_client):\n response = admin_client.get('/admin/')\n assert status(response) == 'ok'", "def test_get_post_list_admin(self):\n url = reverse('post-list')\n self.client.force_authenticate(user=self.superuser)\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def is_admin(user):\n return user.groups.filter(name='Profesores').exists()", "async def test_auth_admin_is_admin(app):\n # Admin user defined in MockPAMAuthenticator.\n name = 'admin'\n user = add_user(app.db, app, name=name, admin=False)\n assert user.admin is False\n cookies = await app.login_user(name)\n assert user.admin is True", "def test_first_user_is_admin(self):\n user = User.objects.create(username='username', email='[email protected]')\n self.assertTrue(user.is_staff)\n self.assertTrue(user.is_superuser)\n user = User.objects.create(username='username2', email='[email protected]')\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)", "def 
test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.plan)", "def test_update_membership_works_for_group_admins(self):\n get_response = lambda: self.client.put(self.url, self.update_payload)\n\n self.assert_authentication_required(get_response)\n self.assert_membership_required(get_response)\n\n # alice is a regular group member, admin rights are required:\n self.login_as(\"alice\")\n with self.assertNumQueries(3):\n self.assert_not_authorized(get_response())\n\n self.assertTrue(Membership.objects.get(\n community_id=self.GROUP_ID, user_id=self.USER_ID).is_admin)\n\n # bob is group admin, he can update the data:\n self.login_as(\"bob\")\n with self.assertNumQueries(6): # (3) is admin check (4) get mem (5) update mem (6) get com (?)\n response = get_response()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(list(response.data.keys()), self.expected_keys)\n self.assertEqual(response.data[\"is_admin\"], False)\n\n self.assertFalse(Membership.objects.get(\n community_id=self.GROUP_ID, user_id=self.USER_ID).is_admin)", "def test_user_can_change_admin(self):\n self.assertTrue(self.story.user_can_change(self.admin_user))", "def test_admin_settings(self):\n\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_ADMIN_EMAIL': ''\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertFalse(settings_vars.get('ADMINS', False))\n\n test_admin_email = '[email protected]'\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n 'MICROMASTERS_ADMIN_EMAIL': test_admin_email,\n }, clear=True):\n settings_vars = self.reload_settings()\n self.assertEqual(\n (('Admins', test_admin_email),),\n settings_vars['ADMINS']\n )\n # Manually set ADMIN to our test setting and verify e-mail\n # goes where we expect\n settings.ADMINS = (('Admins', test_admin_email),)\n mail.mail_admins('Test', 'message')\n self.assertIn(test_admin_email, mail.outbox[0].to)", "def test_func(self, user):\n return self.get_object().admin == user", "def test_12_admin_user_search(self):\r\n # Create two users\r\n self.register()\r\n self.signout()\r\n self.register(fullname=\"Juan Jose\", name=\"juan\",\r\n email=\"[email protected]\", password=\"juan\")\r\n self.signout()\r\n # Signin with admin user\r\n self.signin()\r\n data = {'user': 'juan'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Juan Jose\" in res.data, \"username should be searchable\"\r\n # Check with uppercase\r\n data = {'user': 'JUAN'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n err_msg = \"username search should be case insensitive\"\r\n assert \"Juan Jose\" in res.data, err_msg\r\n # Search fullname\r\n data = {'user': 'Jose'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Juan Jose\" in res.data, \"fullname should be searchable\"\r\n # Check with uppercase\r\n data = {'user': 'JOsE'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n err_msg = \"fullname search should be case insensitive\"\r\n assert \"Juan Jose\" in res.data, err_msg\r\n # Warning should be issued for non-found users\r\n data = {'user': 'nothingExists'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n warning = (\"We didn't find a user matching your query: <strong>%s</strong>\" %\r\n data['user'])\r\n err_msg = \"A flash message 
should be returned for non-found users\"\r\n assert warning in res.data, err_msg", "def get_admins():\n users = get_users()\n admins = []\n for user in users:\n if user[\"approval_level\"] == \"admin\":\n admins.append(user)\n\n return admins", "def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False", "def is_admin(user):\n return get_organisations_as_admin(user).count() > 0", "def test_13_admin_user_add_del(self):\r\n self.register()\r\n self.signout()\r\n self.register(fullname=\"Juan Jose\", name=\"juan\",\r\n email=\"[email protected]\", password=\"juan\")\r\n self.signout()\r\n # Signin with admin user\r\n self.signin()\r\n # Add user.id=1000 (it does not exist)\r\n res = self.app.get(\"/admin/users/add/1000\", follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 404, res.status_code\r\n assert err['error'] == \"User not found\", err\r\n assert err['status_code'] == 404, err\r\n\r\n\r\n # Add user.id=2 to admin group\r\n res = self.app.get(\"/admin/users/add/2\", follow_redirects=True)\r\n assert \"Current Users with Admin privileges\" in res.data\r\n err_msg = \"User.id=2 should be listed as an admin\"\r\n assert \"Juan Jose\" in res.data, err_msg\r\n # Remove user.id=2 from admin group\r\n res = self.app.get(\"/admin/users/del/2\", follow_redirects=True)\r\n assert \"Current Users with Admin privileges\" not in res.data\r\n err_msg = \"User.id=2 should be listed as an admin\"\r\n assert \"Juan Jose\" not in res.data, err_msg\r\n # Delete a non existant user should return an error\r\n res = self.app.get(\"/admin/users/del/5000\", follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 404, res.status_code\r\n assert err['error'] == \"User.id not found\", err\r\n assert err['status_code'] == 404, err", "def test_00_first_user_is_admin(self):\r\n self.register()\r\n user = db.session.query(User).get(1)\r\n assert user.admin == 1, \"User ID:1 should be admin, but it is not\"", "def test_01_admin_index_authenticated(self):\r\n self.register()\r\n self.signout()\r\n self.register(name=\"tester2\", email=\"[email protected]\",\r\n password=\"tester\")\r\n res = self.app.get(\"/admin\", follow_redirects=True)\r\n err_msg = (\"The user should not be able to access this page\"\r\n \" but the returned status is %s\" % res.status)\r\n assert \"403 FORBIDDEN\" in res.status, err_msg", "def get_drink_admins(self):\n admins = self.group('drink')\n return admins", "async def assert_requester_is_admin(auth: Auth, request: SynapseRequest) -> None:\n requester = await auth.get_user_by_req(request)\n await assert_user_is_admin(auth, requester)", "def is_loot_admin(self, id):\n users=self.execute(TABELLE['id_users']['select']['from_id'],(id,))\n if not users: return False\n if not isinstance(users, list): users=[users]\n\n for elem in users:\n if elem['id']==id:\n if elem['admin'] or elem['loot_admin']: return True\n else: break\n\n return False", "def get_list_of_admins() -> List[User]:\n return DBDiscussionSession.query(User).filter(User.group == Group.ADMIN).all()", "def test_fac_admin_page(self):\n self.login(self.fac_admin.user.username)\n self._got_to_fac_admin_page()\n self.check_page_title(self.admin_config.get('FAC_ADMIN').get('PAGE_TITLE'))\n self.check_page_contains_ids(self.admin_config.get('FAC_ADMIN').get('ADMIN_LINKS'))", "def has_admins(cls, server, bucket=None):\n\t\tres = cls.find_on({'type': 'user', 'admin': True}, server, bucket)\n\t\treturn True if res and len(res) > 0 else False", "def 
test_users_groups_get(self):\n pass", "def show_admin_edit_admins():\n return render_admin_page(\"admin-ea.html\")", "def test_22_admin_list_categories(self):\r\n self.create()\r\n # Anonymous user\r\n url = '/admin/categories'\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Anonymous users should be redirected to sign in\"\r\n assert dom.find(id='signin') is not None, err_msg\r\n\r\n # Authenticated user but not admin\r\n self.signin(email=self.email_addr2, password=self.password)\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Non-Admin users should get 403\"\r\n assert res.status_code == 403, err_msg\r\n self.signout()\r\n\r\n # Admin user\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Admin users should be get a list of Categories\"\r\n assert dom.find(id='categories') is not None, err_msg", "def test_list_user(self):\n pass", "def test_admin_calendar_setting_admin_list(self):\n response = self.client.get(\"/admin/appointment/calendarsetting/\")\n self.assertEqual(response.status_code, 200)", "async def test_rendering_template_admin(\n hass: HomeAssistant, mock_api_client: TestClient, hass_admin_user: MockUser\n) -> None:\n hass_admin_user.groups = []\n resp = await mock_api_client.post(const.URL_API_TEMPLATE)\n assert resp.status == HTTPStatus.UNAUTHORIZED", "def get_admins(self):\n from Employee import Employee\n admins = list()\n cursorRoles = self.dbconnect.get_cursor()\n cursorRoles.execute('select * from employeeRoles where role=\\'admin\\'')\n for row in cursorRoles:\n admins.append(self.get_employee(row[0]))\n return admins", "def test_page_list_admin(self):\n user = self.get_superuser()\n title_1 = 'page'\n title_2 = 'inner'\n title_3 = 'page 3'\n page = create_page(title_1, 'page.html', 'en', published=True)\n page_2 = create_page(title_2, 'page.html', 'en', published=True, parent=page)\n page_3 = create_page(title_3, 'page.html', 'en', published=False)\n\n with self.login_user_context(user):\n url = reverse('api:page-list')\n response = self.client.get(url, format='json')\n self.assertEqual(len(response.data), 3)\n for page in response.data:\n self.assertIn(page.get('title'), {title_1, title_2, title_3})", "def check_admin() -> bool:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1", "def is_group_admin_group(self):\n groups = self['__store']\n return self.group_id in groups.get_group_admin_group_ids()", "def test_groups_group_users_get(self):\n pass", "def test_groups_group_users_get(self):\n pass", "def test_if_an_user_c_access_administration_panel(client):\n response = client.get(\"/admin/\")\n assert response.status_code != 200", "def test_admin_alarm_admin_list(self):\n response = self.client.get(\"/admin/appointment/alarm/\")\n self.assertEqual(response.status_code, 200)", "def is_admin():\n # TODO(felipemonteiro): Make this more robust via a context is admin\n # lookup.\n return CONF.patrole.rbac_test_role == CONF.identity.admin_role", "def return_admin_list(request):\n del request\n return return_user_list(Administrador)", "def test_listing_supplies_admin(self):\n request = self.factory.get(\n '/api/supplies')\n force_authenticate(request, user=self.testadmin)\n response = SupplyListView.as_view()(request)\n # admin can browse the data\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_admin_user_login(browser):\n login_page = LoginPage(browser)\n 
login_page.start()\n\n login_page.login(email=\"[email protected]\",\n password=\"admin1234\")\n admin_page = AdminPage(browser)\n admin_page.start()\n\n # Check that admin has sufficient right to access all functionality \n assert admin_page.get_side_bar_menu_item(AdminPage.REPORTS_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.REWARDS_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.CAMPAIGNS_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.LOYALTIES_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.TRANSACTION_RULES_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.MERCHANTS_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.CUSTOMER_MANAGEMENT_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.SETTINGS_MENU_ITEM).size != 0\n assert admin_page.get_side_bar_menu_item(AdminPage.BUSINESS_INTELLIGENCE_MENU_ITEM).size != 0", "def test_admin_calendar_admin_list(self):\n response = self.client.get(\"/admin/appointment/calendar/\")\n self.assertEqual(response.status_code, 200)", "def admin():\n aaa.require(role='admin', fail_redirect='/sorry_page')\n return dict(\n current_user=aaa.current_user,\n users=aaa.list_users(),\n roles=aaa.list_roles()\n )" ]
[ "0.7459246", "0.7412557", "0.74001384", "0.7367604", "0.7157066", "0.70924866", "0.70496404", "0.70242584", "0.70158803", "0.7010552", "0.69874895", "0.696997", "0.69406426", "0.6914392", "0.6912082", "0.6910139", "0.69032836", "0.68800515", "0.68568987", "0.6850447", "0.6785174", "0.67598534", "0.6746246", "0.67421323", "0.67271703", "0.6719937", "0.6703436", "0.6695416", "0.6657125", "0.66529626", "0.6643797", "0.66429865", "0.66306996", "0.66269577", "0.66261125", "0.66261125", "0.66261125", "0.66261125", "0.6622833", "0.6622833", "0.66173565", "0.66172385", "0.6605403", "0.65941435", "0.6585494", "0.6577901", "0.65727097", "0.65635234", "0.6527152", "0.65152746", "0.6515259", "0.65133065", "0.65130764", "0.6509623", "0.6492783", "0.6480932", "0.64762014", "0.64690286", "0.6464319", "0.6447266", "0.64449114", "0.64298713", "0.6426913", "0.6419436", "0.6411495", "0.64083487", "0.64002776", "0.63803947", "0.63759667", "0.63702047", "0.6344561", "0.63434684", "0.634159", "0.6335698", "0.63342494", "0.63323957", "0.63291824", "0.6324913", "0.631526", "0.63044435", "0.6299182", "0.62989575", "0.6297778", "0.62955624", "0.6288127", "0.62829167", "0.6275611", "0.6272906", "0.626028", "0.625219", "0.6245874", "0.6245874", "0.62439007", "0.6241904", "0.62155455", "0.6215221", "0.6214901", "0.62029725", "0.6191766", "0.6187469" ]
0.79272
0
Tests that the testuser was found.
def test_user_list(self):
    response = self.client.get('/tests/dashboard/')
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, 'testuser', html=True)
    self.assertContains(response, '[email protected]', html=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user(self):\n return True", "def test_contains_user(self):\n print('(' + self.test_contains_user.__name__+')',\n self.test_contains_user.__doc__)\n self.assertTrue(self.connection.contains_user(PATIENT_USERNAME))\n self.assertTrue(self.connection.contains_user(DOCTOR_USERNAME))", "def test_find_user_by_username(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n user_exists = User.user_exist(\"test\")\n self.assertTrue(user_exists)", "def test_01_verify_if_given_user_is_present(self):\n print(\"Verify if given user is present\")\n response = self.session.typicode_get_api(self.get_users_api)\n\n # Validates if response code is 200\n self.assertEqual(response.status_code, 200, \"Received status code \" + str(response.status_code) +\n \" instead of 200\")\n\n # Validates if response is not empty\n self.assertNotEqual(len(response.json()), 0, \"User response is empty\")\n\n # Validates if the username key is present in response\n username_response = get_response_for_given_path(response.json(), self.search_username_key)\n self.assertNotEquals(username_response, False, \"Searched string is not present\")\n\n # Validates if the given username is present in response\n user_present = check_given_value_present_in_list(username_response, self.search_username)\n self.assertTrue(user_present, \"Username \" + self.search_username + \"is not present in the response\")", "def test_check_user(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n test_user.check_user(\"test\", \"walIas15\")", "def test_get_user(self):\n print('(' + self.test_get_user.__name__+')', self.test_get_user.__doc__)\n # test for patient\n self.assertDictContainsSubset(\n self.connection.get_user(PATIENT_USERNAME), PATIENT)\n # test for doctor\n self.assertDictContainsSubset(\n self.connection.get_user(DOCTOR_USERNAME), DOCTOR)", "def test_user(self):\n u = self.d.user('example')\n self.assertEqual(u.username, 'example')\n self.assertEqual(u.name, 'Example Sampleman')", "def test_get_user_exists(self):\n # First make the user\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now get the user data and verify it is correct\n resp = self.app.get('/users/{}'.format(self.test_user1_userid))\n assert resp.status_code == 200\n data = json.loads(resp.data)\n for key in ['first_name', 'last_name', 'userid', 'groups']:\n assert key in data\n assert data['first_name'] == self.test_user1_first\n assert data['last_name'] == self.test_user1_last\n assert data['userid'] == self.test_user1_userid\n for groupid in self.test_user1_groups:\n assert groupid in data['groups']", "def test_read_user(self):\n pass", "def test_verify_valid_user(self, rp_logger):\n\n test_name = sys._getframe().f_code.co_name\n\n rp_logger.info(\"###### TEST EXECUTION STARTED :: \" +\n test_name + \" ######\")\n\n first_name = data_reader.get_data(test_name, 'FirstName')\n last_name = data_reader.get_data(test_name, 'LastName')\n email = data_reader.get_data(test_name, 'Email')\n\n with allure.step(\"Verify whether user exists\"):\n result = base_api.verify_valid_user(\n email, first_name, last_name)\n exe_status.mark_final(test_step=test_name, result=result)", "def test_search_user(self):\n self.maya.save_profile()\n user = Profile.search_users(self.maya.username)\n self.assertTrue(user.username==\"Maya\")", "def test_existence(self):\n 
self.assertTrue(User.objects.filter(username='rcm').exists())", "def test_get_user_by_username(self):\n\t\tusername_in_db = server.get_user_by_username('Natasha')\n\t\tself.assertTrue(username_in_db, 'Query did not fetch user object.')\n\t\tusername_not_in_db = server.get_user_by_username('xyz')\n\t\tself.assertFalse(username_not_in_db, 'Query fetched user that did not exist (xyz).')", "def assert_user_exists(self, user_id):\n result = self.con.execute(\n 'SELECT id FROM registered_user WHERE id = ? AND active = 1',\n (user_id,)\n ).fetchone()\n if result is None:\n raise err.UnknownUserError(user_id)", "def test_get_user(self):\n user = User(self.client, \"test-user\", {})\n\n self.assertEqual(user.username, \"test-user\")\n self.assertEqual(user.email, \"[email protected]\")\n self.assertTrue(user.restricted)\n self.assertTrue(user.tfa_enabled)\n self.assertIsNotNone(user.ssh_keys)", "def test_users_username_get(self):\n pass", "def test_user_by_username(self):\n username = make_user(self.client)['username']\n resp = self.client.get('/user/'+username,\n headers=api_headers())\n json_resp = json.loads(resp.data.decode('utf-8'))\n self.assertEqual(json_resp['status'], 'user found')\n self.assertEqual(json_resp['user']['username'], username)", "def testPersonIsUser(self):\n member = self.portal.portal_membership.getMemberById('abc123')\n self.failUnless(member,\"%s\" % member)", "def test_get_user_if_exists(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details, user=user)\n self.assertDictEqual(actual, {'is_new': False})", "def test_api_user_get(self):\n pass", "def test_fetch_user(self):\n\n self.register_user()\n\n self.assertEqual(self.fetch_user_details().status_code, 200)\n\n self.assertTrue(self.fetch_user_details(\n ).json[\"data\"][0][\"username\"] == 'Bjorn')", "def test_empty_user_name_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Fill in the username field please\")", "def test_user(self):\n self.assertEqual(self.settings.USER, getpass.getuser())", "def test_existing_user(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {'is_new': False, 'user': user})", "def test_user_id_get(self):\n pass", "def test_correct_user(self):\n\n self.create_user('Henrik', 1)\n self.create_user('Henrik2', 2)\n self.create_user('Test', 3)\n\n resp = self.client.get('/api/users/Henrik/')\n self.assertEquals(resp.json['total-users'], 2)", "def test_username(self):\n self.assertEqual(self.user.username, \"daron\")", "def testGetUser(self):\n UserAPI().create([(u'user', u'secret', u'User', u'[email protected]')])\n user = getUser(u'user')\n self.store.commit()\n with login(u'fluiddb', self.admin.objectID, self.transact) as session:\n result = yield self.facade.getUser(session, u'user')\n self.assertEqual(u'user', result.username)\n self.assertEqual(str(user.objectID), result.objectId)\n self.assertEqual(u'User', result.name)\n self.assertEqual(u'USER', result.role)", "def test_user(self):\n\n user = User.query.filter(User.user_fname == \"Smokey\").first()\n self.assertEqual(user.user_fname, \"Smokey\")", "def test_check_user(self):\n self.new_users = User('Dennis', 'Kiplangat', 'kiplangat18')\n self.new_users.save_user()\n user2 = User('Enock', 'kip', 'mankip')\n user2.save_user()\n\n for user in User.users_list:\n if user.first_name == 
user2.first_name and user.password == user2.password:\n current_user = user.first_name\n return current_user", "def inner_test(param: models.User):\n self.assertEqual(param, user)", "def test_users(self):\n users = (\"root\", \"matlab\")\n for user in users:\n with self.subTest(username=user):\n self.assertTrue(self.host.user(user).exists)", "def test_serve_user(self):\n pass", "def test_good_user(self):\n user = self.datautils.create_user()\n self.request.user = user\n self.request.matchdict = {'user_id': int(user.id)}\n result = user_id_get_view(self.request)['d']\n expected = {\n 'id': user.id,\n 'username': user.username,\n 'created': user.created,\n 'email': user.email,\n }\n self.assertEqual(result, expected)", "def test_010_query_users(self):\n\n testflow.step(\"Querying for users\")\n assert self.query_cli.run(what='user')[0], \"Failed to search for users\"", "def test_get_single_user(self):\n user = add_user(\"neilb\", \"[email protected]\")\n with self.client:\n response = self.client.get(f'/users/{user.id}')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue('created_at' in data['data'])\n self.assertIn('neilb', data['data']['username'])\n self.assertIn('[email protected]', data['data']['email'])\n self.assertIn('success', data['status'])", "def test_retrive_user(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data['email'], self.user.email)\n self.assertEqual(res.data['name'], self.user.name)\n self.assertNotIn('password', res.data)", "def test_search_users(self):\n users = Profile.search_user(\"hey\")\n self.assertTrue(len(users) == 1)", "def test_verify_invalid_user(self, rp_logger):\n\n test_name = sys._getframe().f_code.co_name\n\n rp_logger.info(\"###### TEST EXECUTION STARTED :: \" +\n test_name + \" ######\")\n\n first_name = data_reader.get_data(test_name, 'FirstName')\n last_name = data_reader.get_data(test_name, 'LastName')\n email = data_reader.get_data(test_name, 'Email')\n\n with allure.step(\"Verify whether user exists\"):\n result = base_api.verify_valid_user(\n email, first_name, last_name)\n exe_status.mark_final(test_step=test_name, result=result)", "def test_show(self):\n\n with self.client as c:\n response = c.get(f\"/users/{self.testuser.id}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"@alice\", str(response.data))", "def test_detail(self, client, users):\n user = users[0]\n url = reverse('users:detail', args=(user.pk,))\n response = client.get(url)\n assert response.status_code == 200\n assert user.username in str(response.content)", "def test_func(self, user, **kwargs):\n return self.get_object().authorized_user(user)", "def test_user_profile(self):\n\n with self.client:\n result = self.client.get('/users/whisky-test')\n self.assertEqual(result.status_code, 200)\n self.assertIn(b'<h1 class=\"display-4\">\\n whisky-test\\n </h1>', result.data)", "def test_get_bad_user(self):\r\n user = UserMgr.get(username=u'noexist')\r\n\r\n self.assertEqual(\r\n user,\r\n None,\r\n \"Should not find a non-existant user: \" + str(user))", "def test_list_user(self):\n pass", "def is_user(self, user='') -> int:\n try:\n if user in self.users:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_user({user}) -> {error}\")", "def test_users_get(self):\n pass", "def test_users_get(self):\n pass", "def test_get_user_by_name(self):\n\n # Select on empty set\n selected = 
self.user_api.get_user_by_name(MAGEN_USER['username'])\n self.assertFalse(selected.success)\n self.assertIsNone(selected.documents)\n\n # Insert user\n inserted = self.user_api.insert_user(MAGEN_USER)\n self.assertTrue(inserted.success)\n\n # Select user by username\n selected = self.user_api.get_user_by_name(MAGEN_USER['username'])\n self.assertTrue(selected.success)\n self.assertEqual(selected.documents, MAGEN_USER)", "def testValidUserResult(self):\r\n \r\n result = self._ldapPrincipalSearcher.searchPrincipal(_VALID_USER_QUERY, constants.SEARCH_MODE_USER_ONLY)\r\n self.assertEquals(len(result), 1)\r\n self.assertEquals(result[0], _MAPPED_USER)", "def test_user_auth(self):\n self.new_user.save_login()\n test_user=User(\"trinity\",\"[email protected]\",\"123\")\n test_user.save_login()\n self.assertTrue(self.new_user.users_auth(\"trinity\",\"123\"))", "def test_get_single_user_is_missing(self):\n add_user(\"neilb\", \"[email protected]\")\n with self.client:\n response = self.client.get('/users/999')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertIn('User does not exist', data['message'])\n self.assertIn('fail', data['status'])", "def test_resource_user_resource_find_users_get(self):\n pass", "def test_12_admin_user_search(self):\r\n # Create two users\r\n self.register()\r\n self.signout()\r\n self.register(fullname=\"Juan Jose\", name=\"juan\",\r\n email=\"[email protected]\", password=\"juan\")\r\n self.signout()\r\n # Signin with admin user\r\n self.signin()\r\n data = {'user': 'juan'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Juan Jose\" in res.data, \"username should be searchable\"\r\n # Check with uppercase\r\n data = {'user': 'JUAN'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n err_msg = \"username search should be case insensitive\"\r\n assert \"Juan Jose\" in res.data, err_msg\r\n # Search fullname\r\n data = {'user': 'Jose'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n assert \"Juan Jose\" in res.data, \"fullname should be searchable\"\r\n # Check with uppercase\r\n data = {'user': 'JOsE'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n err_msg = \"fullname search should be case insensitive\"\r\n assert \"Juan Jose\" in res.data, err_msg\r\n # Warning should be issued for non-found users\r\n data = {'user': 'nothingExists'}\r\n res = self.app.post('/admin/users', data=data, follow_redirects=True)\r\n warning = (\"We didn't find a user matching your query: <strong>%s</strong>\" %\r\n data['user'])\r\n err_msg = \"A flash message should be returned for non-found users\"\r\n assert warning in res.data, err_msg", "def test_user_registration(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(1,result,\"User registration successful\")", "def test_get_user_id(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n # for patient\n self.assertEqual(\n PATIENT_ID, self.connection.get_user_id(PATIENT_USERNAME))\n # for doctor\n self.assertEqual(\n DOCTOR_ID, self.connection.get_user_id(DOCTOR_USERNAME))", "def test_func(self):\n return (Student.objects.filter(user=self.request.user).exists())", "def test_get_user_by_uuiduser_uuid_get(self):\n pass", "def test_get_single_user_no_id(self):\n add_user(\"neilb\", \"[email protected]\")\n with self.client:\n response = 
self.client.get('/users/blah')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertIn('User does not exist', data['message'])\n self.assertIn('fail', data['status'])", "def test_func(self, user):\n return self.get_object().admin == user", "def test_user_exists(self):\n # requirments for creating user\n payload = {\n 'email': '[email protected]',\n 'password': 'abcd1234',\n 'name': 'Test',\n }\n\n # call the create function above\n create_user(**payload)\n\n # this will do a HTTP POST request and create a user\n response = self.client.post(CREATE_USER_URL, payload)\n\n # Check if statuscode returns a HTTP400 bad request\n # becos user already exist\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def test_user(self):\n user = User.objects.get(username=\"test01\")\n print(user.username)\n print(user.email)\n self.assertEqual(user.email,\"[email protected]\")\n # cat = Animal.objects.get(name=\"cat\")\n # self.assertEqual(lion.speak(), 'The lion says \"roar\"')\n # self.assertEqual(cat.speak(), 'The cat says \"meow\"')", "def test_register_user_correct(self):\n result = self.client.post(\"/users\", data={\"username\":\"test_user2\"}, follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Player created! Please login\", result.data)", "def test_getUser(self):\n\t\turl = \"/users/2/\"\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data[\"id\"], 2)\n\t\tself.assertEqual(response.data[\"username\"], \"testUser2\")", "def test_no_user_exists(self):\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {})", "def test_user_exists(self):\n data = {'email': self.user.email}\n response = self.client.post(self.url, data=data)\n\n expected_response_code = 200\n self.assertEqual(expected_response_code, response.status_code)\n self.assertTrue(response.data.get('exists'))", "def test_user_view(self):\n with self.app.app_context():\n u = user(save=True)\n\n response = self.client.get('/user/%s' % u.slug)\n eq_(response.status_code, 200)\n\n response = self.client.get('/user/not-a-real-user')\n eq_(response.status_code, 404)", "def test_user_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_get_users(self):\n pass", "def test_fake_user(self):\n user = User.objects.get(username=\"a\")\n\n tester = DropboxAuthTester()\n self.assertFalse(tester.can_auth(user))", "def test_add_user(self):\n pass", "def test_known_user(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n known_user = get_user_model().objects.create_user(\n \"test_auth_backend_user1\",\n email=\"[email protected]\",\n public_username=\"ashley\",\n lti_consumer=consumer,\n lti_remote_user_id=\"ashley\",\n )\n\n user_count = get_user_model().objects.count()\n\n auth_user = self._authenticate(\n {\n \"user_id\": \"643f1625-f240-4a5a-b6eb-89b317807963\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"[email protected]\",\n 
\"lis_person_sourcedid\": \"ashley\",\n },\n passport,\n )\n self.assertEqual(known_user, auth_user)\n self.assertEqual(user_count, get_user_model().objects.count())", "def test_main_with_users(self):\n add_user('michael', '[email protected]')\n add_user('fletcher', '[email protected]')\n with self.client:\n response = self.client.get('/')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'<h1>All Users</h1>', response.data)\n self.assertNotIn(b'<p>No users!</p>', response.data)\n self.assertIn(b'michael', response.data)\n self.assertIn(b'fletcher', response.data)", "def test_000_add_user(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def test_user_is_really_create():\n response = api_helper.get_user(user_name=pytest.test_user.name)\n assert response.status_code == 200\n response_json_data = response.json()[\"data\"]\n assert len(response_json_data) == 1\n check_user_data_in_response(response_json_data[0])\n pytest.test_user.id = response_json_data[0]['id']", "def testGet(self):\n userdict = {'id': 1,\n 'objectID': u'04585bec-28cf-4a21-bc3e-081f3ed62680',\n 'username': u'testuser',\n 'passwordHash': 'hash',\n 'fullname': u'Test User',\n 'email': u'[email protected]',\n 'role': Role.ANONYMOUS.id}\n self.cache.set('user:testuser', json.dumps(userdict))\n result = self.userCache.get(u'testuser')\n user = result.results\n self.assertEqual(1, user.id)\n self.assertEqual('04585bec-28cf-4a21-bc3e-081f3ed62680',\n str(user.objectID))\n self.assertEqual(u'testuser', user.username)\n self.assertEqual('hash', user.passwordHash)\n self.assertEqual(u'Test User', user.fullname)\n self.assertEqual(u'[email protected]', user.email)\n self.assertEqual(Role.ANONYMOUS, user.role)", "def test_get_user_non_exist_id(self):\n print('(' + self.test_get_user_non_exist_id.__name__+')',\n self.test_get_user_non_exist_id.__doc__)\n self.assertIsNone(self.connection.get_user(NON_EXIST_PATIENT_USERNAME))", "def test_regular_user_login(self):\n self.login(\"user\", \"user\")\n self.should_see(\"This is your profile, user.\")", "def testGetUserWithoutData(self):\n self.store.commit()\n with login(u'fluiddb', self.admin.objectID, self.transact) as session:\n deferred = self.facade.getUser(session, u'unknown')\n error = yield self.assertFailure(deferred, TNoSuchUser)\n self.assertEqual(u'unknown', error.name)", "def test_user_username_get(self):\n response = self.client.open('//user/{username}'.format(username='username_example'),\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_user_creation(self):\n self.assertTrue(User.objects.exists())", "def test_user_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n #assert are django checks on http request is 200\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_create_defined_user(self):\r\n self._auto_auth(\r\n username='robot', password='test',\r\n email='[email protected]', full_name=\"Robot Name\"\r\n )\r\n\r\n # Check that the user has the correct info\r\n user = User.objects.get(username='robot')\r\n self.assertEqual(user.username, 'robot')\r\n self.assertTrue(user.check_password('test'))\r\n self.assertEqual(user.email, '[email protected]')\r\n\r\n # Check that the user has a profile\r\n user_profile = UserProfile.objects.get(user=user)\r\n self.assertEqual(user_profile.name, \"Robot Name\")\r\n\r\n # By default, the user 
should not be global staff\r\n self.assertFalse(user.is_staff)", "def test_user_exist(self):\n data = {\n 'email': '[email protected]',\n 'password': 'testtest',\n 'first_name': 'Test test',\n 'last_name': 'Test'\n } \n sigin_in_user(**data)\n res = self.client.post(SIGN_IN_USER_URL, data)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_get_username(self):\r\n user = UserMgr.get(username=u'admin')\r\n self.assertEqual(\r\n user.id,\r\n 1,\r\n \"Should have a user id of 1: \" + str(user.id))\r\n self.assertEqual(\r\n user.username,\r\n 'admin',\r\n \"Should have a username of admin: \" + user.username)", "def test_jenkins_user_shib(self):\n ju = JenkinsUser.objects.get(username=\"shib_id\")\n self.assertEqual(ju.jenkinsuserprofile.shib_uid, \"shib_id\")\n self.assertTrue(ju.jenkinsuserprofile.is_shib_user())", "def test_get_user_by_emailuser_email_get(self):\n pass", "def test_validate_when_user_found(self, view, mget_user):\n assert view.validate() is None", "def test_single_success():\n test_username = \"test_user\"\n\n user = UserFactory.create(username=test_username, is_active=True)\n UserSocialAuthFactory.create(user=user, provider=\"edX\")\n\n assert user.is_active is True\n assert \"retired_email\" not in user.email\n assert UserSocialAuth.objects.filter(user=user).count() == 1\n\n COMMAND.handle(\"retire_users\", users=[test_username])\n\n user.refresh_from_db()\n assert user.is_active is False\n assert \"retired_email\" in user.email\n assert UserSocialAuth.objects.filter(user=user).count() == 0", "def test_set_user_status(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_confirmed_username(self):\n pass", "def test_get_user_id_unknown_user(self):\n print('(' + self.test_get_user_id.__name__+')',\n self.test_get_user_id.__doc__)\n self.assertIsNone(self.connection.get_user_id(\n NON_EXIST_PATIENT_USERNAME))", "def test_02_get_api_with_user_username(self):\n print(\"Verify get users api with username query\")\n response = self.session.typicode_get_api_with_query(self.get_users_api, 'username=' + self.search_username)\n # Validates if response code is 200\n self.assertEqual(response.status_code, 200, \"Received status code \" + str(response.status_code) +\n \" instead of 200\")\n\n # Validates if response is not empty\n self.assertNotEqual(len(response.json()), 0, \"User response is empty\")\n\n # Validates if the username key is present in response\n username_response = get_response_for_given_path(response.json(), self.search_username_key)\n self.assertNotEquals(username_response, False, \"Searched string is not present\")\n\n # Validates if the given username is present in response\n user_present = check_given_value_present_in_list(username_response, self.search_username)\n self.assertTrue(user_present, \"Username \" + self.search_username + \"is not present in the response\")", "def test_empty_second_name_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"[email protected]\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Fill in the second name field please\")", "def getTestUser():\n allUsers = User.objects.all()\n if len(allUsers) > 0 :\n return allUsers[0]\n else :\n return User.objects.create_user(username='profiletester',\n email='[email protected]',\n password='superduperpassword2000')", "def test_signup(self):\n resp = self.client.post(self.signup_url, self.test_credential)\n self.assertEqual(resp.status_code, 
200)\n registed_user = User.objects.filter(username=self.test_credential['username'],\n is_active=False)\n self.assertTrue(registed_user)" ]
[ "0.76748985", "0.7660696", "0.7656335", "0.75119334", "0.7449909", "0.73115385", "0.72937554", "0.72691184", "0.72514313", "0.7239464", "0.7218272", "0.71586215", "0.7152206", "0.71307015", "0.71049184", "0.7088999", "0.70759594", "0.7050742", "0.70313215", "0.7025326", "0.70037955", "0.69977516", "0.6993884", "0.69851875", "0.6979498", "0.6958494", "0.6957096", "0.69545877", "0.69463795", "0.691", "0.6908604", "0.6906426", "0.6893684", "0.6875936", "0.6874798", "0.68556815", "0.6846918", "0.68395305", "0.6837517", "0.6835761", "0.68310356", "0.68066186", "0.6803971", "0.67927474", "0.679041", "0.6788923", "0.67772245", "0.67772245", "0.6771092", "0.676039", "0.67577374", "0.67573", "0.67491126", "0.674583", "0.67294544", "0.6726717", "0.67138946", "0.668761", "0.6686776", "0.6679386", "0.666904", "0.6668766", "0.666441", "0.66606635", "0.6653453", "0.66469944", "0.66436356", "0.6625471", "0.662541", "0.66253877", "0.6617975", "0.66129184", "0.6606892", "0.6606752", "0.6604077", "0.6603851", "0.6597832", "0.6586912", "0.6586281", "0.65610594", "0.65583485", "0.6554554", "0.65508324", "0.6550115", "0.6548714", "0.6544257", "0.6529391", "0.6528047", "0.6522439", "0.6517769", "0.6516132", "0.651333", "0.651333", "0.651333", "0.65104324", "0.6507092", "0.6505283", "0.65025985", "0.65010387", "0.64966404" ]
0.6536369
86
Initialize a new FullyConnectedNet.
def __init__(self, hidden_dims, input_dim=3 * 32 * 32, num_classes=10,
             dropout=0, use_batchnorm=False, reg=0.0,
             weight_scale=1e-2, dtype=np.float32, seed=None):
    self.use_batchnorm = use_batchnorm
    self.use_dropout = dropout > 0
    self.reg = reg
    self.num_layers = 1 + len(hidden_dims)
    self.dtype = dtype
    self.params = {}

    if type(hidden_dims) != list:
        raise ValueError('hidden_dim has to be a list')

    self.L = len(hidden_dims) + 1
    self.N = input_dim
    self.C = num_classes
    dims = [self.N] + hidden_dims + [self.C]

    Ws = {'W' + str(i + 1): weight_scale * np.random.randn(dims[i], dims[i + 1])
          for i in range(len(dims) - 1)}
    b = {'b' + str(i + 1): np.zeros(dims[i + 1])
         for i in range(len(dims) - 1)}

    self.params.update(b)
    self.params.update(Ws)

    # When using dropout we need to pass a dropout_param dictionary to each
    # dropout layer so that the layer knows the dropout probability and the mode
    # (train / test). You can pass the same dropout_param to each dropout layer.

    # Cast all parameters to the correct datatype
    for k, v in self.params.iteritems():
        self.params[k] = v.astype(dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialise_network(self):\n raise NotImplementedError", "def initialize_network(self):\n # intermediate layer size\n ils = int((self.specbinnum + self.numfilters) / 2)\n\n network = lasagne.layers.InputLayer((None, 1, self.specbinnum, self.numtimebins), self.input_var)\n\n network = NormalisationLayer(network, self.specbinnum)\n self.normlayer = network\n\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.numfilters)\n network = batch_norm(network)\n\n network = lasagne.layers.NonlinearityLayer(network, nonlinearity=elu)\n self.latents = network\n network = ZeroOutBackgroundLatentsLayer(self.latents,\n mp_down_factor=self.mp_down_factor,\n numfilters=self.numfilters,\n numtimebins=self.numtimebins,\n background_latents_factor=self.background_latents_factor,\n use_maxpool=self.use_maxpool)\n network, _ = custom_convlayer_2(network, in_num_chans=self.numfilters, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.specbinnum)\n network = batch_norm(network)\n\n # output_size\n num_time_samples = int(audioframe_len/2 * (self.numtimebins + 1))\n # network = batch_norm(DenseLayer(network, num_time_samples)) # MemoryError\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=num_time_samples)\n network, _ = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=num_time_samples, out_num_chans=1)\n network, _ = batch_norm(network)\n\n self.network = network", "def __init__(self, n_input, n_output, hidden_layer_size, reg):\n self.reg = reg\n self.fulllayer1 = FullyConnectedLayer(n_input, hidden_layer_size)\n self.reglayer1 = ReLULayer()\n self.fulllayer2 = FullyConnectedLayer(hidden_layer_size, n_output)", "def __init__(self, *args):\n _snap.TModeNet_swiginit(self, _snap.new_TModeNet(*args))", "def init_net(self):\r\n # initialize the generator network\r\n g_net = Net(\r\n self.architecture['generator'], net_name='gen',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Gen = Routine(g_net)\r\n self.Gen.add_input_layers([64, self.code_size], [0])\r\n self.Gen.seq_links(list(range(g_net.num_layers)))\r\n self.Gen.add_output_layers([g_net.num_layers - 1])\r\n\r\n # initialize the generator network\r\n d_net = Net(\r\n self.architecture['discriminator'], net_name='dis',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Dis = Routine(d_net)\r\n self.Dis.add_input_layers([64] + list(self.architecture['input'][0]), [0])\r\n self.Dis.seq_links(list(range(d_net.num_layers)))\r\n self.Dis.add_output_layers([d_net.num_layers - 1])", "def __init__(self, in_channels=3, n_classes=21):\n super(U_Net, self).__init__()\n\n self.layer_0 = UNet_Encoder_Particular(in_channels, 64)\n\n self.layer_1 = UNet_Encoder(64, 128)\n self.layer_2 = UNet_Encoder(128, 256)\n self.layer_3 = UNet_Encoder(256, 512)\n self.layer_4 = UNet_Encoder(512, 512)\n\n self.layer_7 = UNet_Decoder(1024, 256)\n self.layer_8 = UNet_Decoder(512, 128)\n self.layer_9 = UNet_Decoder(256, 64)\n self.layer_10 = UNet_Decoder(128, 64)\n\n self.layer_11 = UNet_Decoder_Particular(64, n_classes)", "def __init__(self):\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n 
map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()", "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def __init__(self, backboneNet, projection_head) -> None:\n super(SimCLR, self).__init__()\n self.Net = backboneNet\n self.projection_head = projection_head", "def setup_net(self):\n pass", "def __init__(self, **kwargs):\n #super(Net, self).__init__()\n nn.Module.__init__(self)\n # Build CNN\n module, shapes, optim = build_neuron_network(**kwargs)\n self._configuration = kwargs\n self.add_module('cnn', module)\n self.shapes = shapes\n # Loss and optimization\n self.criterion = nn.MSELoss(reduction='mean')\n self.optimizer = optim\n self._kwargs = kwargs", "def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def __init__(self):\n super(CustomNetwork, self).__init__()\n self.fc1 = nn.Linear(28*28, 500)\n self.fc2 = nn.Linear(500, 256)\n self.fc3 = nn.Linear(256, 10)\n self.loss = Loss()", "def __init__(self, n_input, n_output, hidden_layer_size, reg):\n self.reg = reg\n self.input_layer = FullyConnectedLayer(n_input, hidden_layer_size)\n self.relu = ReLULayer()\n self.output_layer = FullyConnectedLayer(hidden_layer_size, n_output)\n self.W_in = None\n self.W_out = None\n self.B_in = None\n self.B_out = None\n # TODO Create necessary layers", "def __init__(self, nclasses, device):\n super(HybridNN, self).__init__(nclasses, device)\n self.data_dev = qml.device(device, wires=self.req_qub_out)\n self.device = device\n self.model_dev = None\n self.nn = None\n self.bias = True", "def __init__(self):\n # TODO\n self.confThreshold = 0.6\n self.nmsThreshold = 0.5\n self.inpWidth = 320\n self.inpHeight = 320\n classesFile = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/coco.names\"\n self.classes = None\n with open(classesFile,'rt') as f:\n self.classes = f.read().rstrip('\\n').split('\\n')\n\n modelConfiguration = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.cfg\"\n modelWeights = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.weights\"\n self.net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)", "def __init__(self, in_channels=3, n_classes=21):\n super(UpNet, self).__init__()\n\n self.layer_1 = UpNetLayer_ParticularEncoder_2(in_channels, 64, 2)\n self.layer_2 = UpNetLayer_Encoder(64, 128, 2)\n self.layer_3 = UpNetLayer_Encoder(128, 256, 3)\n self.layer_4 = UpNetLayer_Encoder(256, 512, 3)\n self.layer_6 
= UpNetLayer_ParticularEncoder(512, 1024, 3)\n\n self.layer_inter = UpNetLayer_Dropout()\n\n self.layer_7 = UpNetLayer_Decoder_Particular(1024, 512, 3)\n self.layer_8 = UpNetLayer_Decoder(512, 256, 3)\n self.layer_9 = UpNetLayer_Decoder(256, 128, 3)\n self.layer_10 = UpNetLayer_Decoder(128, 64, 2)\n self.layer_11 = UpNetLayer_Decoder_Particular_2(64, n_classes, 2)", "def __init__(self):\n super(FcNet, self).__init__()\n\n # get size of some layers\n start_num = 48\n max_num = 200\n mid_num = 50\n end_num = 8\n \n # define regressor\n self.regress = nn.Sequential(\n nn.Linear(start_num,max_num,bias=True),\n nn.Sigmoid(),\n nn.Linear(max_num,mid_num,bias = True),\n nn.Sigmoid(),\n nn.Linear(mid_num,end_num, bias = True),\n nn.Sigmoid()\n )", "def __init__(self, nInputs, nOutputs, hiddenLayersDims, outputActivationFunctions = None, outputActivationDerivatives = None, hiddenActivationFunctions = None,\\\n\t\t\t\t hiddenActivationDerivatives = None): \n\n\t\tself._nInputs = nInputs\n\t\tself._nOutputs = nOutputs\n\n\t\tself._nHiddenLayers, self._nUnitsPerLayer = hiddenLayersDims\n\n\t\tself._outputActivationFunctions = outputActivationFunctions\n\t\tself._outputActivationDerivatives = outputActivationDerivatives\n\n\t\tself._hiddenActivationFunctions = hiddenActivationFunctions\n\t\tself._hiddenActivationDerivatives = hiddenActivationDerivatives\n\n\t\tself.initialiseActivationFunctions()\n\n\t\tself.initialiseNetwork()\n\n\t\tself._nBranches = len(self.collectAllBranches())", "def __init__(self):\n super(SimpleNet, self).__init__()\n\n self.conv_layers = None\n self.fc_layers = None\n self.loss_criterion = None\n\n #######################################################################\n # Student code begins\n #######################################################################\n\n self.conv_layers = nn.Sequential(\n nn.Conv2d(1, 10, kernel_size=5, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(3),\n nn.Conv2d(10, 20, kernel_size=5, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(3)\n )\n\n conv_out = int(20*5*5)\n\n self.fc_layers = nn.Sequential(\n nn.Linear(conv_out, 100),\n nn.Linear(100, 15)\n )\n\n self.loss_criterion = nn.MSELoss(reduction='mean')\n\n #######################################################################\n # Student code ends\n #######################################################################", "def __init__(self):\n self.raw_wires = PyWires.WireNetwork();\n self.__initialize_wires();", "def __init__(self):\n torch.nn.Module.__init__(self)\n # Convolution and pooling layers of VGG-16.\n self.features = torchvision.models.vgg16(pretrained=True).features\n self.features = torch.nn.Sequential(*list(self.features.children())\n [:-1]) # Remove pool5.\n # Linear classifier.\n self.fc = torch.nn.Linear(512**2, 36)", "def __init__(self, netSize):\n\t\t\n\t\t# TRY THIS FOR RANDOM!\n\t\t#\n\t\t#\n\t\t#\n\t\t\n\t\tself.biases = [self.randomArray(i, 1) for i in netSize[1:]] # Biases do not exist for the first layer ! Those are inputs.\n\t\tself.netSize = netSize\n\t\t#Initialize Weights\n\t\t#This initializes the weights for each layer based on the size. The number of rows should be\n\t\t#the number of neurons for the current, and the number of columns should be the same as the number of neurons\n\t\t#in the next layer. There are no weights for the last layer. 
That's the output layer.\n\t\tself.weights \t\t = [self.randomArray(i, j) for i, j in zip(netSize[:-1], netSize[1:]) ]", "def __init__(self, latent_space, input_features):\r\n\r\n self._latent_space = latent_space\r\n self._input_cells = input_features\r\n\r\n self._encoder = None\r\n self._decoder = None\r\n self._autoencoder = None\r\n self._configure_network()", "def __init__(self, num_visible, num_hidden, act_func='logistic'):\n\n print('Initializing network... ', end='')\n sys.stdout.flush()\n\n self.num_visible = num_visible\n self.num_hidden = num_hidden\n \n #self.reconstructed = np.zeros((self.num_examples, self.num_visible))\n\n self.weights = 0.1 * np.random.randn(num_visible, num_hidden)\n self.v_bias = np.zeros((1, num_visible))\n self.h_bias = -4.0 * np.ones((1, num_hidden))\n\n self.w_inc = np.zeros((num_visible, num_hidden))\n self.v_inc = np.zeros((1, num_visible))\n self.h_inc = np.zeros((1, num_hidden))\n\n if act_func == 'chaotic':\n self.act_func = self.chaotic_logistic\n else:\n self.act_func = self.logistic\n\n print('Done!')\n return", "def construct(self):\n self.input_size = self.numplanes * self.boardsize**2\n \n if self.hidden:\n layers = [\n torch.nn.Linear(self.input_size, self.hidden), \n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden, self.boardsize**2)\n ]\n else:\n layers = [torch.nn.Linear(self.input_size, self.boardsize**2)]\n\n self.layers = torch.nn.ModuleList(layers)\n self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-5)\n logging.info(\"Model initialized: %s\", self)", "def __init__(self, layerNeurons, numberOfLayers, initialWeights = None, lowerBound = None, upperBound = None):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons) > 1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)*numberOfLayers\r\n \r\n # Initialise the weights with the initialiser or random values\r\n if initialWeights is None:\r\n if lowerBound is None:\r\n lowerBound=-1/np.sqrt(layerNeurons[0])\r\n if upperBound is None:\r\n upperBound=1/np.sqrt(layerNeurons[0])\r\n self.weights = np.random.uniform(lowerBound, upperBound, totalWeightCount)\r\n else:\r\n assert initialWeights.size == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = initialWeights.view()\r\n \r\n self.weights.shape = (numberOfLayers, -1)\r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n for layerInputDimention, layerOutputDimention in zip(layerNeurons, layerNeurons[1:]):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = batchNetworkLayer(layerInputDimention, layerOutputDimention, numberOfLayers, \r\n self.weights[..., :, layerBlockStart:layerBlockEnd], \r\n self.weights[..., :, layerBlockEnd:layerBiasEnd])\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd", "def __init__(self, *args):\n _snap.TCrossNet_swiginit(self, _snap.new_TCrossNet(*args))", "def __init__(self):\r\n torch.nn.Module.__init__(self)\r\n # Convolution and pooling layers of VGG-16.\r\n self.features = torchvision.models.vgg19_bn(pretrained=False).features\r\n self.features = torch.nn.Sequential(*list(self.features.children())\r\n [:-1]) # Remove pool5.\r\n # Linear classifier.\r\n self.fc = torch.nn.Linear(512**2, 11)", "def __init__(self):\r\n super(HarrisNet, self).__init__()\r\n\r\n image_gradients_layer = ImageGradientsLayer()\r\n\r\n\r\n # (1) ImageGradientsLayer: Compute image gradients Ix Iy. Can be\r\n # approximated by convolving with sobel filter.\r\n # (2) EigenvalueApproxLayer: Compute S_xx, S_yy and S_xy, the output is\r\n # a tensor of size num_image x 3 x width x height\r\n # (3) CornerResponseLayer: Compute R matrix, the output is a tensor of\r\n # size num_image x 1 x width x height\r\n # (4) NMSLayer: Perform non-maximum suppression, the output is a tensor\r\n # of size num_image x 1 x width x height\r\n\r\n layer_1 = ChannelProductLayer()\r\n layer_2 = SecondMomentMatrixLayer()\r\n layer_3 = CornerResponseLayer()\r\n layer_4 = NMSLayer()\r\n\r\n self.net = nn.Sequential(\r\n image_gradients_layer,\r\n layer_1,\r\n layer_2,\r\n layer_3,\r\n layer_4\r\n )", "def init_target_net(self, sess):\n sess.run(self.init_target_net_op)", "def initialize_network(self):\n if self.trainer is None:\n # -- Initialize from beginning and start training, since no model is provided -- #\n super().initialize_network() # --> This updates the corresponding variables automatically since we inherit this class\n \n # -- Create a Multi Head Generic_UNet from the current network using the provided split and first task name -- #\n # -- Do not rely on self.task for initialization, since the user might provide the wrong task (unintended), -- #\n # -- however for self.plans, the user needs to extract the correct plans_file path by himself using always the -- #\n # -- first task from a list of tasks since the network is build using the plans_file and thus the structure might vary -- #\n self.mh_network = MultiHead_Module(Generic_UNet, self.split, self.tasks_list_with_char[0][0], prev_trainer=self.network,\n input_channels=self.num_input_channels, base_num_features=self.base_num_features,\\\n num_classes=self.num_classes, num_pool=len(self.net_num_pool_op_kernel_sizes))\n # -- Add the split to the already_trained_on since it is simplified by now -- #\n self.already_trained_on[str(self.fold)]['used_split'] = self.mh_network.split\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, 
self.extension+'_trained_on.json'))\n return # Done with initialization\n\n # -- Some sanity checks and loads.. -- #\n # -- Check if the trainer contains plans.pkl file which it should have after sucessfull training -- #\n if 'fold_' in self.trainer.output_folder:\n # -- Remove the addition of fold_X from the output_folder, since the plans.pkl is outside of the fold_X directories -- #\n plans_dir = self.trainer.output_folder.replace('fold_', '')[:-1]\n else:\n # -- If no fold_ in output_folder, everything is fine -- #\n plans_dir = self.trainer.output_folder\n \n assert isfile(join(plans_dir, \"plans.pkl\")), \"Folder with saved model weights must contain a plans.pkl file..\"\n\n # -- Check that the trainer type is as expected -- #\n assert isinstance(self.trainer, (nnUNetTrainerV2, nnUNetTrainerMultiHead)), \"The trainer needs to be nnUNetTrainerV2 or nnUNetTrainerMultiHead..\"\n\n # -- If the trainer is already of Multi Head type, there should also be a pkl file with the sets it has already been trained on ! -- #\n if isinstance(self.trainer, nnUNetTrainerMultiHead): # If model was trained using nnUNetTrainerV2, the pickle file won't exist\n self.already_trained_on = load_json(join(self.trained_on_path, self.extension+'_trained_on.json'))\n \n # -- Load the model and parameters -- #\n # -- NOTE: self.trainer is a Multi Head Network, so it has a model, body and heads. -- #\n print(\"Loading trainer and setting the network for training\")\n self.trainer.load_final_checkpoint(train=True) # Load state_dict of the final model\n\n # -- Set mh_network -- #\n # -- Make it to Multi Head network if it is not already -- #\n # -- Use the first task in tasks_joined_name, since this represents the corresponding task name, whereas self.task -- #\n # -- is the task to train on, which is not equal to the one that will be initialized now using a pre-trained network -- #\n # -- (prev_trainer). -- #\n if isinstance(self.trainer, nnUNetTrainerV2):\n self.mh_network = MultiHead_Module(Generic_UNet, self.split, self.tasks_list_with_char[0][0], prev_trainer=self.trainer.network,\n input_channels=self.num_input_channels, base_num_features=self.base_num_features,\\\n num_classes=self.num_classes, num_pool=len(self.net_num_pool_op_kernel_sizes))\n else: # Already Multi Head type\n self.mh_network = self.trainer#.mh_network\n # -- Ensure that the split that has been previously used and the current one are equal -- #\n # -- NOTE: Do this after initialization, since the splits might be different before but still lead to the same level after -- #\n # -- simplification. -- #\n prev_split = self.already_trained_on[str(self.fold)]['used_split']\n assert self.mh_network.split == prev_split,\\\n \"To continue training on the fold {} the same split, ie. 
\\'{}\\' needs to be provided, not \\'{}\\'.\".format(self.fold, self.mh_network.split, prev_split)\n # -- Delete the prev_split --> not necessary anymore -- #\n del prev_split\n \n # -- Set self.network to the model in mh_network --> otherwise the network is not initialized and not in right type -- #\n self.network = self.mh_network.model", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def __init__(\n self, config: SimpleGCNConfig = SimpleGCNConfig(name=\"simplegcn\")\n ):\n super().__init__()\n self.edge_lengthscale = config.edge_lengthscale\n self.weight_edges = config.weight_edges\n\n self.atom_embedding = nn.Linear(\n config.atom_input_features, config.width\n )\n\n self.layer1 = GraphConv(config.width, config.width)\n self.layer2 = GraphConv(config.width, config.output_features)\n self.readout = AvgPooling()", "def __init__(self, *args):\n _snap.TMMNet_swiginit(self, _snap.new_TMMNet(*args))", "def __init__(self, model_config):\n # Training Parameters\n self.__learning_rate = model_config[\"cnnLearningRate\"]\n\n # Network Parameters\n self.__num_classes = model_config[\"numClasses\"]\n self.__weight_decay = 1e-4\n self.__num_gpus = model_config[\"numGpus\"]\n self.__use_csnn = model_config[\"useCsnn\"]\n\n self.__csnn = Csnn(model_config)", "def __init__(self, vgg_net):\n super().__init__()\n # create a conv layer that corresponds to the first linear layer\n linear1 = vgg_net.classifier[0]\n conv = nn.Conv2d(512, 4096, 7, 7)\n\n # copy data into it\n conv.bias.data.copy_(linear1.bias.data)\n conv.weight.data.view(4096, -1).copy_(linear1.weight.data)\n\n # replace the layer in the sequential classifier part\n vgg_net.classifier = nn.Sequential(\n conv, nn.Flatten(1), *vgg_net.classifier[1:]\n )\n\n self.vgg_net = vgg_net", "def __init__(self, *args):\n _snap.TNEANet_swiginit(self, _snap.new_TNEANet(*args))", "def __init__(self, network: Network):\n self.graph = network.graph", "def __init__(self, net, batch=False):\n\n super().__init__()\n self.batch = batch\n self.net = net\n self.input = Input(self.net.layers()[0],\n self.net.layers()[1].inputSize(), batch)\n self.hiddens = []\n for i in range(1, len(net.layers())-1):\n nextLen = net.layers()[i+1].inputSize()\n self.hiddens.append(Hidden(net.layers()[i], nextLen, batch))\n self.output = Output(self.net.layers()[-1])", "def test_init_net_simple(self):\n net = ecn.NeuralNet(2, (2,), 1)\n self.assertEqual(2, len(net.weights.keys()))\n self.assertEqual((2, 3), np.shape(net.weights['h0']))\n self.assertEqual((1, 3), np.shape(net.weights['y']))\n print('Finished testing simple neural net init\\n')", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n conv1_size=5, conv1_n_chan=32, conv1_n_pool=2,\n conv2_size=5, conv2_n_chan=64, conv2_n_pool=2,\n fc1_n_chan=1024, fc1_dropout=0.5, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new 
network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... \")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.conv1_size = conv1_size\n self.conv1_n_chan = conv1_n_chan\n self.conv1_n_pool = conv1_n_pool\n self.conv2_size = conv2_size\n self.conv2_n_chan = conv2_n_chan\n self.conv2_n_pool = conv2_n_pool\n self.fc1_y_size = int( np.ceil( np.ceil(\n self.y_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_x_size = int( np.ceil( np.ceil(\n self.x_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_n_chan = fc1_n_chan\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.conv1_size = net_architecture['conv1_size']\n self.conv1_n_chan = net_architecture['conv1_n_chan']\n self.conv1_n_pool = net_architecture['conv1_n_pool']\n self.conv2_size = net_architecture['conv2_size']\n self.conv2_n_chan = net_architecture['conv2_n_chan']\n self.conv2_n_pool = net_architecture['conv2_n_pool']\n self.fc1_y_size = int( np.ceil( np.ceil(\n self.y_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_x_size = int( np.ceil( np.ceil(\n self.x_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_n_chan = net_architecture['fc1_n_chan']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n 
self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] ]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n # Convert input image to tensor with channel as last dimension\n # x_image = [-1 x im-height x im-width x n-input-channels]\n x_image_temp = tf.reshape(self.x, [-1,\n self.n_input_channels,self.y_res,self.x_res])\n x_image = tf.transpose(x_image_temp, [0,2,3,1])\n\n #########################################################\n # Set up convolutional layer 1\n # W = [im-height x im-width x n-input-channels x n-output-channels])\n self.conv1_shape = [self.conv1_size, self.conv1_size,\n self.n_input_channels, self.conv1_n_chan]\n self.W_conv1 = tf.Variable( tf.truncated_normal(\n shape=self.conv1_shape, stddev=0.1))\n self.b_conv1 = tf.Variable( tf.constant(0.1,\n shape=[self.conv1_n_chan] ))\n\n # Convolve x_image with the weight tensor\n self.conv1_lin = tf.nn.conv2d( x_image, self.W_conv1,\n strides=[1, 1, 1, 1], padding='SAME' )\n\n # Add bias and apply transfer function\n self.conv1_relu = tf.nn.relu( self.conv1_lin + self.b_conv1 )\n\n # Max pooling\n self.conv1_kernel = [1, self.conv1_n_pool, self.conv1_n_pool, 1]\n self.conv1_pool = tf.nn.max_pool( self.conv1_relu,\n ksize=self.conv1_kernel, strides=self.conv1_kernel, padding='SAME')\n\n #########################################################\n # Convolutional layer 2\n self.conv2_shape = [self.conv2_size, self.conv2_size,\n self.conv1_n_chan, self.conv2_n_chan]\n self.W_conv2 = tf.Variable( tf.truncated_normal(\n shape=self.conv2_shape, stddev=0.1 ) )\n self.b_conv2 = tf.Variable( tf.constant(0.1,\n shape=[self.conv2_n_chan] ))\n\n # Convolve x_image with the weight tensor\n self.conv2_lin = tf.nn.conv2d( self.conv1_pool, self.W_conv2,\n strides=[1, 1, 1, 1], padding='SAME' )\n\n # Add bias and apply transfer function\n self.conv2_relu = tf.nn.relu( self.conv2_lin + self.b_conv2 )\n\n # Max pooling\n self.conv2_kernel = [1, self.conv2_n_pool, self.conv2_n_pool, 1]\n self.conv2_pool = tf.nn.max_pool( self.conv2_relu,\n ksize=self.conv2_kernel, strides=self.conv2_kernel, padding='SAME')\n\n\n #########################################################\n # Densely Connected Layer\n # Weights and bias\n self.fc1_shape = [self.fc1_y_size * self.fc1_x_size * self.conv2_n_chan,\n self.fc1_n_chan]\n self.W_fc1 = tf.Variable( tf.truncated_normal(\n shape=self.fc1_shape, stddev=0.1 ) )\n self.b_fc1 = tf.Variable( tf.constant(0.1, shape=[self.fc1_n_chan] ))\n\n # Flatten output from conv2\n self.conv2_pool_flat = tf.reshape(\n self.conv2_pool, [-1, self.fc1_shape[0]] )\n\n # Calculate network step\n self.fc1_relu = tf.nn.relu( tf.matmul( self.conv2_pool_flat,\n self.W_fc1) + self.b_fc1 )\n\n # Set up dropout option for fc1\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.fc1_relu_drop = tf.nn.dropout(self.fc1_relu, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # Weights and bias\n self.fc_out_shape = [self.fc1_n_chan, self.n_output_classes]\n self.W_fc_out = 
tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.fc1_relu_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def __init__(self, in_channels=3, n_classes=21):\n super(SegNet_1, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 1024, 3)\n self.layer_6 = SegnetLayer_Encoder(1024, 1024, 3)\n\n self.layer_7 = SegnetLayer_Decoder(1024, 1024, 3)\n self.layer_8 = SegnetLayer_Decoder(1024, 512, 3)\n self.layer_9 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_10 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_11 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_12 = SegnetLayer_Decoder(64, n_classes, 2)", "def init_efficientnet(num_classes: int) -> nn.Module:\n\n return EfficientNet.from_pretrained('efficientnet-b1', num_classes=num_classes)", "def __init__( self, config: 'bittensor.config' = None ):\n if config == None: config = neuron.config()\n self.config = config; neuron.check_config( self.config ); print ( self.config )\n bittensor.logging (\n config = self.config,\n logging_dir = self.config.neuron.full_path,\n )\n self.device = torch.device(\n device = self.config.neuron.device\n )\n self.wallet = bittensor.wallet(\n config = self.config\n )\n self.dendrite = bittensor.dendrite(\n config = self.config,\n wallet = self.wallet\n )\n self.subtensor = bittensor.subtensor(\n config = self.config\n )\n self.metagraph = bittensor.metagraph(\n config = self.config\n )\n self.axon = bittensor.axon (\n config = self.config,\n wallet = self.wallet,\n forward_callback = self.forward,\n backward_callback = self.backward\n )\n self.dataset = bittensor.dataloader (\n config = self.config\n )\n self.router = SGMOERouter(\n config = self.config\n ).to( self.device )\n self.nucleus = GPT2Nucleus(\n config = self.config,\n routing_callback = self.route\n ).to( self.device )\n self.optimizer = torch.optim.SGD(\n [\n {\"params\": self.router.parameters()},\n {\"params\": self.nucleus.parameters()}\n ],\n lr = self.config.neuron.learning_rate,\n weight_decay = self.config.neuron.weight_decay,\n )\n self.tensorboard = SummaryWriter(\n log_dir = self.config.neuron.tensorboard_dir\n )\n self.mechanism_weights = torch.ones( [0] )\n self.epoch = 0\n self.global_step = 0\n self.epoch_loss = math.inf/2\n self.best_epoch_loss = math.inf", "def test_init(self):\n network = PerceptronNetwork(\n [\n PerceptronLayer.blank(4, 2, 'layer1', 
['a', 'b', 'c', 'd']),\n PerceptronLayer.blank(2, 2, 'layer2', ['a', 'b', 'c', 'd'])\n ]\n )\n self.assertIsNotNone(network)", "def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()", "def __init__(self, functions=None, variables=None, global_resource=None):\n self.ssa = NetworkEnsemble()\n if functions is None:\n self.ssa.functions = dict()\n else:\n self.ssa.functions = functions\n if variables is None:\n self.ssa.variables = dict()\n else:\n self.ssa.variables = variables\n if global_resource is None:\n self.ssa.global_resource = dict()\n else:\n self.ssa.global_resource = global_resource", "def __init__(self, **kwargs):\n super().__init__()\n self.model_conv = models.resnet50(pretrained=True)\n for param in self.model_conv.parameters():\n param.requires_grad = False\n num_ftrs = self.model_conv.fc.in_features\n num_classes = 10\n self.model_conv.fc = nn.Linear(num_ftrs, num_classes)", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n fc1_n_chan=1024, fc1_dropout=0.5, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... 
\")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.fc1_n_chan = fc1_n_chan\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.fc1_n_chan = net_architecture['fc1_n_chan']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] 
]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n #########################################################\n # Densely Connected Layer\n # Weights and bias\n self.fc1_shape = \\\n [self.y_res * self.x_res * self.n_input_channels,\n self.fc1_n_chan]\n self.W_fc1 = tf.Variable( tf.truncated_normal(\n shape=self.fc1_shape, stddev=0.1 ) )\n self.b_fc1 = tf.Variable( tf.constant(0.1, shape=[self.fc1_n_chan] ))\n\n # Calculate network step\n self.fc1_relu = tf.nn.relu( tf.matmul( self.x,\n self.W_fc1) + self.b_fc1 )\n\n # Set up dropout option for fc1\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.fc1_relu_drop = tf.nn.dropout(self.fc1_relu, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # Weights and bias\n self.fc_out_shape = [self.fc1_n_chan, self.n_output_classes]\n self.W_fc_out = tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.fc1_relu_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def __init__(self, *args):\n _snap.TMMNetModeNetI_swiginit(self, _snap.new_TMMNetModeNetI(*args))", "def __init__(self, classes=2622):\n super().__init__()\n self.conv1 = _ConvBlock(3, 64, 64)\n self.conv2 = _ConvBlock(64, 128, 128)\n self.conv3 = _ConvBlock(128, 256, 256, 256)\n self.conv4 = _ConvBlock(256, 512, 512, 512)\n self.conv5 = _ConvBlock(512, 512, 512, 512)\n self.dropout = torch.nn.Dropout(0.5)\n self.fc1 = torch.nn.Linear(7 * 7 * 512, 4096)\n self.fc2 = torch.nn.Linear(4096, 4096)\n self.fc3 = torch.nn.Linear(4096, classes)", "def __init__(self, resnet, num_classes):\n super(FineTune, self).__init__()\n\n # Everything except the last linear layer\n self.features = nn.Sequential(*list(resnet.children())[:-1])\n num_ftrs = resnet.fc.in_features\n self.classifier = nn.Sequential(\n nn.Linear(num_ftrs, num_classes)\n )\n\n # # Freeze those weights\n # for param in self.features.parameters():\n # param.requires_grad = False", "def __init__(self, in_channels=3, n_classes=21):\n super(SegNet, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_6 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_7 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_8 = SegnetLayer_Decoder(256, 128, 3)\n 
self.layer_9 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_10 = SegnetLayer_Decoder(64, n_classes, 2)", "def __init__(self, *args):\n _snap.TModeNetNodeI_swiginit(self, _snap.new_TModeNetNodeI(*args))", "def __init__(self, hparams):\n super(ImagenetTransferLearning, self).__init__()\n self.hparams = hparams\n self.feature_extractor = models.mobilenet_v2(pretrained=True)\n self.feature_extractor.eval()\n\n # Establish classifier\n # self.layer_1 = torch.nn.Linear(hparams[\"input_size\"], 128)\n self.layer_1 = torch.nn.Linear(1000, 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, hparams[\"targets\"])", "def __init__(self):\n super(CNet, self).__init__()\n\n self.init_param_range = (-0.08, 0.08)\n\n ## Lookup tables for the state, action and previous action.\n self.action_lookup = nn.Embedding(3, 128)\n\n # self.state_dict_lookup = nn.Embedding(48, 128)\n self.own_c_lookup = nn.Embedding(129, 128)\n self.own_s_lookup = nn.Embedding(129, 128)\n\n self.th_1_lookup = nn.Embedding(115, 128)\n self.th_2_lookup = nn.Embedding(115, 128)\n self.th_3_lookup = nn.Embedding(115, 128)\n\n self.f_1_lookup = nn.Embedding(96, 128)\n self.f_2_lookup = nn.Embedding(96, 128)\n self.f_3_lookup = nn.Embedding(96, 128)\n self.f_4_lookup = nn.Embedding(96, 128)\n\n self.bu_msg_lookup = nn.Embedding(5, 128)\n\n # self.state_tensor_lookup = nn.Embedding(48, 128)\n self.i_t_lookup = nn.Embedding(24, 128)\n self.lives_lookup = nn.Embedding(10, 128)\n\n self.prev_action_lookup = nn.Embedding(91, 128)\n\n # RNN to approximate the agent’s action-observation history.\n self.rnn = nn.GRU(input_size=128, hidden_size=128, num_layers=2)\n\n # 2 layer MLP with batch normalization, for producing output from RNN top layer.\n self.output = nn.Sequential(\n nn.Linear(128, 128),\n # nn.BatchNorm1d(128),\n nn.ReLU(),\n nn.Linear(128, 90)\n )", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n fc1_dropout=1.0, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... 
\")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] 
]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n # Set up dropout option for inputs\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.x_drop = tf.nn.dropout(self.x, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # Weights and bias\n self.fc_out_shape = \\\n [self.y_res * self.x_res * self.n_input_channels,\n self.n_output_classes]\n self.W_fc_out = tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.x_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def __init__(self):\n super(enc_clf, self).__init__()\n\n self.fc1 = nn.Linear(784, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n self.fc3 = nn.Linear(1024, 512)\n self.fc4 = nn.Linear(512, 10)", "def init_model(self):\n cxnlib.CXNNetInitModel(self.handle)", "def __init__(self, module: torch.nn.Module, loss: torch.nn.Module,\n input_node_name='0',\n output_node_name='output', label_node_name='label',\n loss_node_name='loss',\n events: List[d5.ExecutorEvent] = [],\n device: d5.DeviceType = None, with_outputs = False):\n # Do not call super() here!\n self.network = PyTorchNativeNetwork(module)\n self.devname = 'cuda' if device is None or device.is_gpu() else 'cpu'\n self.events = events\n self.model = module.to(self.devname)\n self.is_training = True\n self.loss = loss.to(self.devname) if loss is not None else None\n self.innode = input_node_name\n self.outnode = output_node_name\n self.labelnode = label_node_name\n self.lossnode = loss_node_name\n self.with_outputs = with_outputs", "def __init__(self, mode, cfg):\n super(DMCM, self).__init__()\n\n self.conv_net = cfg.get_image_net(mode)\n self.sparse_net = cfg.get_genes_net(mode)\n\n # Matrix network does not need weight initialization because there can\n # be no vanishing gradients.\n self.conv_net.apply(_init_weights_xavier)", "def set_network(self, pair_blocks=1, base_channels=512, layers=5):\n\n # store architecture\n self.pair_blocks = pair_blocks\n self.base_channels = base_channels\n self.layers = layers\n\n self.net = Network(pair_blocks, base_channels, layers, self.device)\n self.train_loader.index = 0\n\n self._loaded = False\n self.time_stamp_path = None", "def __init__(self, in_channels=3, in_channels1=3, n_classes=21):\n super(SegNet, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 
256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_6 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_7 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_8 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_9 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_10 = SegnetLayer_Decoder(64, n_classes, 2)\n\n self.layer_11 = SegnetLayer_Encoder(in_channels1, 64, 2)\n self.layer_12 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_13 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_14 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_15 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_16 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_17 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_18 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_19 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_110 = SegnetLayer_Decoder(64, n_classes, 2)\n\n self.layer_1110 = UNet_Decoder_Particular(n_classes * 2, n_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, layerNeurons, initialWeights = None, layerTypes=None, **kwargs):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons)>1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)\r\n \r\n # Initialise the weights with the initializer or random values\r\n if initialWeights is None:\r\n self.weights = np.random.uniform(-1/np.sqrt(layerNeurons[0]), 1/np.sqrt(layerNeurons[0]), totalWeightCount)\r\n else:\r\n assert len(initialWeights) == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = np.array(initialWeights, dtype = np.float64) \r\n \r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n if layerTypes is None or len(layerTypes)<(len(layerNeurons)-1):\r\n layerTypes=[NetworkLayer]*(len(layerNeurons)-1)\r\n \r\n for layerInputDimention, layerOutputDimention, layerType in zip(layerNeurons, layerNeurons[1:], layerTypes):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = layerType(layerInputDimention, layerOutputDimention, \r\n self.weights[..., layerBlockStart:layerBlockEnd], \r\n self.weights[..., layerBlockEnd:layerBiasEnd], **kwargs)\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd\r\n \r\n # Tell the output later to use a different function to calculate the delta \r\n newLayer.calcDelta = newLayer.calcDeltaOutputLayer", "def _build_network(self):\n pass", "def __init__(self, neuron_count):\n # The current state of the thermal network.\n self.current_state = [0.0] * neuron_count\n\n # The weights.\n self.weights = np.zeros( [neuron_count*neuron_count] )\n\n # The neuron count.\n self.neuron_count = neuron_count", "def __init__(self):\n self.topology = None\n self.learningRate = None\n self.momentum = None\n self.name = None\n self.size = None\n #self._hiddenActiv_fun_key = None\n #self._outActiv_fun_key = None\n #self.output_activation = None\n #self.hidden_activation = None", "def __init__(self):\n super().__init__()\n \n # convolutional layers\n self.conv1 = nn.Conv2d(1, 16, kernel_size=3) # 16x(14-2)x(14-2) = 16x12x12\n self.conv2 = nn.Conv2d(16, 32, kernel_size=3) # 32x10x10 => pooling = 32x5x5\n \n # fully connected layers\n self.fc1 = nn.Linear(32 * 5 * 5, 64)\n self.fc2 = nn.Linear(64, 10)\n self.fc3 = nn.Linear(20, 10)\n self.fc4 = nn.Linear(10, 1)\n \n # regularizers\n self.drop = nn.Dropout(0.1)\n self.drop2d = nn.Dropout2d(0.1)\n self.pool = nn.MaxPool2d(kernel_size=2)\n self.bn2d = nn.BatchNorm2d(16, affine=False)\n self.bn = nn.BatchNorm1d(64, affine=False)\n\n # activation functions\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n \n # Initialize weights\n self.apply(self.weights_init)", "def __init__(self):\n super(SCNN, self).__init__()\n\n # Linear classifier.\n self.inplanes = 128\n self._norm_layer = nn.BatchNorm2d\n self.dilation = 1\n self.groups = 1\n self.base_width = 64\n\n self.num_class = 125\n backbone = torchvision.models.resnet34(pretrained=True)\n self.shared_features = nn.Sequential(*list(backbone.children())[0:6])\n #self.realistic_head = nn.Sequential(*list(backbone.children())[6:8])\n # self.synthetic_head = nn.Sequential(nn.Conv2d(128, 128, 3, 2, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 256, 3, 2, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),\n # nn.Conv2d(256, 256, 3, 1, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True))\n\n self.synthetic_head1 = self._make_layer(BasicBlock, 128, 1, stride=2, dilate=False)\n self.synthetic_head2 = self._make_layer(BasicBlock, 256, 1, stride=2, dilate=False)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.classifier = nn.Linear(256, self.num_class)\n\n for m in self.synthetic_head1.modules():\n if isinstance(m, nn.Conv2d):\n 
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.synthetic_head2.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n weight_init(self.classifier)\n\n for param in self.shared_features.parameters():\n param.requires_grad = False", "def __init__(self):\r\n # A dummy layer does nothing\r\n self.weights = np.zeros(shape=(input.shape[1], 10))\r\n bias = np.zeros(shape=(10,))\r\n pass", "def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n", "def __init__(self, c):\n super(DeepConv, self).__init__(self)\n\n self.block_1 = BaseBlock(c, 16)\n self.block_2 = BaseBlock(16, 32)\n self.block_3 = BaseBlock(32, 32)\n\n self._body = nn.Sequential(self.block_1, self.block_2, self.block_3)", "def __init__(self):\n self.layers = []\n self.best_loss = None", "def trainNet():", "def __init__(\n self, state_dim, action_dim, sizes, activations, use_batch_norm: bool = False\n ) -> None:\n super().__init__()\n assert state_dim > 0, \"state_dim must be > 0, got {}\".format(state_dim)\n assert action_dim > 0, \"action_dim must be > 0, got {}\".format(action_dim)\n self.state_dim = state_dim\n self.action_dim = action_dim\n assert len(sizes) == len(\n activations\n ), \"The numbers of sizes and activations must match; got {} vs {}\".format(\n len(sizes), len(activations)\n )\n\n # The last layer gives the concentration of the distribution.\n self.fc = FullyConnectedNetwork(\n [state_dim] + sizes + [action_dim],\n activations + [\"linear\"],\n use_batch_norm=use_batch_norm,\n )", "def __init__(self,\n n_occupancy: int = 3,\n n_neighbor_sites_list: int = 19,\n n_permutation_list: int = 6,\n n_task: int = 1,\n dropout_rate: float = 0.4,\n n_conv: int = 2,\n n_features: int = 44,\n sitewise_n_feature: int = 25,\n **kwargs):\n\n def init_weights(m):\n if isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight)\n\n model = LCNN(n_occupancy, n_neighbor_sites_list, n_permutation_list,\n n_task, dropout_rate, n_conv, n_features,\n sitewise_n_feature)\n model.apply(init_weights)\n loss = L2Loss()\n output_types = ['prediction']\n super(LCNNModel, self).__init__(model,\n loss=loss,\n output_types=output_types,\n **kwargs)", "def __init__(self, N_sym, n_nodes, activations, N_element, bias = True, scaling = None):\n super(MultiLayerNet, self).__init__()\n N_layers = len(n_nodes)\n if N_layers == 0:\n self.net = torch.nn.Linear(N_sym, N_element, bias = bias)\n else:\n layers = []\n for n in range(N_layers):\n if n == 0:\n layers += [torch.nn.Linear(N_sym, n_nodes[n], bias = bias)]\n layers += [activations[n]]\n else:\n layers += [torch.nn.Linear(n_nodes[n-1], n_nodes[n], bias = bias)]\n layers += [activations[n]]\n layers += [torch.nn.Linear(n_nodes[-1], N_element, bias = bias)]\n self.net = torch.nn.Sequential(*layers)\n \n self.scaling = scaling", "def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,\n weight_scale=1e-3, reg=0.0):\n self.params = {}\n self.reg = reg\n\n 
############################################################################\n # TODO: Initialize the weights and biases of the two-layer net. Weights #\n # should be initialized from a Gaussian centered at 0.0 with #\n # standard deviation equal to weight_scale, and biases should be #\n # initialized to zero. All weights and biases should be stored in the #\n # dictionary self.params, with first layer weights #\n # and biases using the keys 'W1' and 'b1' and second layer #\n # weights and biases using the keys 'W2' and 'b2'. #\n ############################################################################\n self.params['W1'] = weight_scale * np.random.randn(input_dim, hidden_dim)\n self.params['b1'] = np.zeros(hidden_dim)\n self.params['W2'] = weight_scale * np.random.randn(hidden_dim, num_classes)\n self.params['b2'] = np.zeros(num_classes)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################", "def __init__(self, *args):\n _snap.TNEGraph_swiginit(self, _snap.new_TNEGraph(*args))", "def test_ctor(self):\r\n # the network model itself\r\n model = densenet.DenseNet(\r\n depth=40,\r\n Block=densenet.BasicBlock,\r\n growth_rate=12,\r\n compression_rate=1.0,\r\n mask=True,\r\n num_classes=100,\r\n )\r\n num_params = model_utils.get_model_num_params(model)\r\n\r\n self.assertAlmostEqual(num_params, 1.06, places=1) # around 1.7\r\n self.assertEqual(model_utils.get_num_conv2d_layers(model), 40)", "def __init__(self, network: Network):\n if LOG[\"ExperimentAI\"]:\n print(\"[ExperimentAI] Initializing AI\")\n self.network = network", "def __init__(self,\n image_channels,\n num_classes):\n super().__init__()\n\n self.model = torchvision.models.resnet18(pretrained=True)\n self.model.fully_connected = nn.Linear(224, 10)", "def _generate_network_initialization(self, graph, memory_manager):\n\n # TODO: To be changed if we want to support multiple outputs\n output_buffer_name = graph.outputs[0].name\n\n ops_to_ignore = ['Reshape', 'Mul']\n\n buffers_allocated = []\n\n buffer_declaration = \"\"\n buffer_declaration += \" pico_cnn::naive::Tensor **kernels;\\n\"\n buffer_declaration += \" pico_cnn::naive::Tensor **biases;\\n\"\n\n constructor_code = \"\"\n #constructor_code += \"Network::Network() {\\n\\n\"\n\n num_layers = 0\n num_kernels = 0\n num_biases = 0\n\n for node in graph.nodes:\n \"\"\"Do not count the reshape layers as the input tensor will only define the dimensions\"\"\"\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n num_layers += 1\n for num, input in enumerate(node.input_tensors):\n if input in buffers_allocated:\n continue\n else:\n tensor = node.input_tensors[input]\n buffers_allocated.append(input)\n if len(tensor.shape) == 1:\n num_biases += 1\n else:\n num_kernels += 1\n\n \"\"\"The arrays kernels and biases will be used to pass only two variables to read_binary_weights\"\"\"\n constructor_code += \" kernels = new pico_cnn::naive::Tensor*[{}]();\\n\".format(num_kernels)\n constructor_code += \" biases = new pico_cnn::naive::Tensor*[{}]();\\n\\n\".format(num_biases)\n\n pos = -1\n pos_kernel = -1\n pos_bias = -1\n\n buffers_allocated.clear()\n\n \"\"\"Iterate over all nodes in the graph and generate the corresponding allocation code.\"\"\"\n for node_id, node in enumerate(graph.nodes):\n\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n pos += 1\n\n buffer_declaration += \" // Layer: \" + node.name + \", 
Operation: \" + node.op_type + \"\\n\"\n constructor_code += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n\n # Allocate memory for kernels and biases\n buffer_declaration += \" // Inputs\\n\"\n constructor_code += \" // Inputs\\n\"\n for num, input in enumerate(node.input_tensors):\n\n if node.op_type in ops_to_ignore:\n continue\n\n if input in buffers_allocated:\n continue\n else:\n buffers_allocated.append(input)\n\n tensor = node.input_tensors[input]\n if len(tensor.shape) == 1:\n pos_bias += 1\n else:\n pos_kernel += 1\n\n buffer = memory_manager.get_buffer(graph, input)\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"KernelAllocation\")\n impl = functionality[0].create(buffer, pos, pos_kernel, pos_bias)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \" // Outputs\\n\"\n constructor_code += \" // Outputs\\n\"\n for num, output in enumerate(node.outputs):\n\n buffer = memory_manager.get_buffer(graph, output)\n\n if output == output_buffer_name:\n buffer_declaration += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n constructor_code += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n continue\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"OutputAllocation\")\n impl = functionality[0].create(buffer)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \"\\n\\n\"\n constructor_code += \"\\n\\n\"\n\n #constructor_code += \"}\\n\"\n\n self.buffer_declaration = buffer_declaration\n self.constructor_code = constructor_code", "def __init__(self, outer_nc, inner_nc, input_nc=None,\n submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n if input_nc is None:\n input_nc = outer_nc\n downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downconv]\n up = [uprelu, upconv, nn.Tanh()]\n model = down + [submodule] + up\n elif innermost:\n upconv = nn.ConvTranspose2d(inner_nc, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv]\n up = [uprelu, upconv, upnorm]\n model = down + up\n else:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upconv, 
upnorm]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n\n self.model = nn.Sequential(*model)", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def __init__(self, settings):\n super(CaffeNet, self).__init__(settings)\n\n self._range_scale = 1.0 # not needed; image already in [0,255]\n\n \n #ULF[todo]: explain, make this a setting\n self._net_channel_swap = (2,1,0)\n #self._net_channel_swap = None\n if self._net_channel_swap:\n self._net_channel_swap_inv = tuple([self._net_channel_swap.index(ii) for ii in range(len(self._net_channel_swap))])\n else:\n self._net_channel_swap_inv = None\n\n\n # (1) import caffe library\n #\n sys.path.insert(0, os.path.join(settings.caffevis_caffe_root, 'python'))\n import caffe\n print 'debug[caffe]: CaffeNet.__init__: using Caffe in', caffe.__file__\n\n # Check if the imported caffe provides all required functions\n self._check_caffe_version(caffe)\n \n # Set the mode to CPU or GPU.\n # Note: in the latest Caffe versions, there is one Caffe object\n # *per thread*, so the mode must be set per thread!\n # Here we set the mode for the main thread; it is also separately\n # set in CaffeProcThread.\n if settings.caffevis_mode_gpu:\n caffe.set_mode_gpu()\n print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): GPU'\n else:\n caffe.set_mode_cpu()\n print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): CPU'\n print 'debug[caffe]: CaffeNet.__init__: Loading the classifier (', settings.caffevis_deploy_prototxt, settings.caffevis_network_weights, ') ...'\n\n\n # (2) load the caffe model\n # \n # ULF[hack]: make Caffe silent - there should be a better\n # (i.e. official) way to do so. 
We only want to suppress\n # the info (like network topology) while still seeing warnings\n # and errors!\n suppress_output = (hasattr(self.settings, 'caffe_init_silent')\n and self.settings.caffe_init_silent)\n\n if suppress_output:\n # open 2 file descriptors\n null_fds = [os.open(os.devnull, os.O_RDWR) for x in xrange(2)]\n # save the current file descriptors to a tuple\n original_fds = os.dup(1), os.dup(2)\n # put /dev/null fds on stdout (1) and stderr (2)\n os.dup2(null_fds[0], 1)\n os.dup2(null_fds[1], 2)\n\n self.net = caffe.Classifier(\n settings.caffevis_deploy_prototxt,\n settings.caffevis_network_weights,\n mean = None, # Set to None for now, assign later # self._data_mean,\n channel_swap = self._net_channel_swap,\n raw_scale = self._range_scale,\n )\n \n if suppress_output:\n # restore file original descriptors for stdout (1) and stderr (2)\n os.dup2(original_fds[0], 1)\n os.dup2(original_fds[1], 2)\n # close the temporary file descriptors\n os.close(null_fds[0])\n os.close(null_fds[1])\n print 'debug[caffe]: CaffeNet.__init__: ... loading completed.'\n\n self._init_data_mean()\n self._check_force_backward_true()", "def __init__(self, *args):\n _snap.TDirNet_swiginit(self, _snap.new_TDirNet(*args))", "def initialize_network_los() -> bool:\n return True", "def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}\n\t\tself.connections = {}", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def initialize_networkHandler(self):\n\t\tself.networkHandler = NetworkHandler(\n\t\t\tself.callbackQueue,\n\t\t\tself.received_order,\n\t\t\tself.set_light_callback,\n\t\t\tself.newOrderQueue,\n\t\t\tself.startedOrderQueue,\n\t\t\tself.lost_connection\n\t\t\t)", "def __init__(self, net, batch):\n self.net = net\n self.train_batch_is(batch)\n self.image_height = len(batch.image_array[0][0])\n 
self.image_width = len(batch.image_array[0][0][0])\n self.net.reset_forward()", "def __init__(self, name, config):\n super(RelationalNetwork, self).__init__(name, RelationalNetwork, config)\n\n # Get key mappings.\n self.key_feature_maps = self.stream_keys[\"feature_maps\"]\n self.key_question_encodings = self.stream_keys[\"question_encodings\"]\n self.key_outputs = self.stream_keys[\"outputs\"]\n\n # Retrieve input sizes from globals.\n self.feature_maps_height = self.globals[\"feature_maps_height\"]\n self.feature_maps_width = self.globals[\"feature_maps_width\"]\n self.feature_maps_depth = self.globals[\"feature_maps_depth\"]\n self.question_encoding_size = self.globals[\"question_encoding_size\"]\n \n # Create \"object\" coordinates.\n self.obj_coords = []\n for h in range(self.feature_maps_height):\n for w in range(self.feature_maps_width):\n self.obj_coords.append((h,w))\n\n # Calculate input size to the g_theta: two \"objects\" + question (+ optionally: image size)\n input_size = 2 * self.feature_maps_depth + self.question_encoding_size\n\n # Create the module list.\n modules = []\n\n # Retrieve dropout rate value - if set, will put dropout between every layer.\n dropout_rate = self.config[\"dropout_rate\"]\n\n # Create the model, i.e. the \"relational\" g_theta network.\n g_theta_sizes = self.config[\"g_theta_sizes\"]\n if type(g_theta_sizes) == list and len(g_theta_sizes) > 1:\n # First input dim.\n input_dim = input_size\n for hidden_dim in g_theta_sizes:\n # Add linear layer.\n modules.append( torch.nn.Linear(input_dim, hidden_dim) )\n # Add activation and dropout.\n modules.append( torch.nn.ReLU() )\n if (dropout_rate > 0):\n modules.append( torch.nn.Dropout(dropout_rate) )\n # Remember input dim of next layer.\n input_dim = hidden_dim\n\n # Add output layer.\n modules.append( torch.nn.Linear(input_dim, hidden_dim) )\n\n self.logger.info(\"Created g_theta network with {} layers\".format(len(g_theta_sizes)+1))\n\n else:\n raise ConfigurationError(\"'g_theta_sizes' must contain a list with numbers of neurons in g_theta layers (currently {})\".format(self.hidden_sizes))\n\n # Export output_size to globals.\n self.output_size = g_theta_sizes[-1]\n self.globals[\"output_size\"] = self.output_size\n\n # Finally create the sequential model out of those modules.\n self.g_theta = torch.nn.Sequential(*modules)", "def __init__(self, nx, nodes):\n if type(nx) is not int:\n raise TypeError(\"nx must be an integer\")\n if nx < 1:\n raise ValueError(\"nx must be a positive integer\")\n if type(nodes) is not int:\n raise TypeError(\"nodes must be an integer\")\n if nodes < 1:\n raise ValueError(\"nodes must be a positive integer\")\n # weights vector for the hidden layer\n # default mean is 0\n # default stddev is 1\n self.__W1 = np.random.normal(size=(nodes, nx))\n # The bias for the hidden layer. Upon instantiation,\n # it should be initialized with 0’s.\n self.__b1 = np.zeros((nodes, 1))\n # The activated output for the hidden layer. 
Upon instantiation,\n # it should be initialized to 0\n self.__A1 = 0\n # weights vector for the output neuron\n # default mean is 0\n # default stddev is 1\n self.__W2 = np.random.normal(size=(1, nodes))\n # bias for the output neuron\n self.__b2 = 0\n # activated output for the output neuron (prediction)\n self.__A2 = 0", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 3, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 5, 0, 1, 3), # Layer 4: Convolution(Layer1)\n (5, 7, 0, 0, 0), # Layer 5: Convolution(Layer4)\n ]", "def __init__(self, in_channels=3):\n super().__init__()\n model_list = nn.ModuleList()\n model_list.append(\n ConvBlock(in_channels, 64, leaky=True, instance_norm=False, bias=True))\n model_list.append(ConvBlock(64, 128, leaky=True,\n instance_norm=True, bias=False))\n model_list.append(ConvBlock(128, 256, leaky=True,\n instance_norm=True, bias=False))\n model_list.append(ConvBlock(256, 512, leaky=True,\n instance_norm=True, bias=False, stride=1))\n model_list.append(nn.Conv2d(512, 1, kernel_size=4,\n stride=1, padding=1, bias=True))\n self.model = nn.Sequential(*model_list)\n\n self._initialize_params()" ]
[ "0.69023705", "0.6850007", "0.6721665", "0.65706", "0.6532473", "0.64673805", "0.64505595", "0.64458954", "0.6434368", "0.64332235", "0.64150655", "0.6372491", "0.636863", "0.6341182", "0.6330495", "0.63025963", "0.6276662", "0.62390316", "0.6235152", "0.61963755", "0.61900246", "0.61785716", "0.6175203", "0.61713195", "0.6170169", "0.615478", "0.614945", "0.61450905", "0.61366504", "0.61155593", "0.6106042", "0.61050206", "0.610115", "0.6089653", "0.60848165", "0.60667264", "0.6065172", "0.6061645", "0.60612935", "0.60600924", "0.6053745", "0.6049151", "0.6041936", "0.6038751", "0.60334927", "0.6027372", "0.6026258", "0.602363", "0.6019186", "0.6014522", "0.60068935", "0.59956425", "0.59808713", "0.5972463", "0.59662366", "0.5958748", "0.5956478", "0.5951118", "0.59432024", "0.59353065", "0.59336025", "0.59329677", "0.59303707", "0.5929328", "0.59269255", "0.5915176", "0.5906102", "0.59045327", "0.5903651", "0.5902044", "0.58976835", "0.5888555", "0.5886091", "0.5883432", "0.58750266", "0.5859851", "0.585297", "0.58522326", "0.5840696", "0.5839813", "0.58355284", "0.5835302", "0.5831391", "0.581727", "0.58137083", "0.5809756", "0.5804382", "0.5799944", "0.57997555", "0.57984453", "0.5796514", "0.57909", "0.57896507", "0.57896507", "0.57896507", "0.5785398", "0.57821995", "0.57678145", "0.5767623", "0.57550913", "0.57536525" ]
0.0
-1
Compute loss and gradient for the fully-connected net.
def loss(self, X, y=None):
    X = X.astype(self.dtype)
    mode = 'test' if y is None else 'train'

    # We store everything in a dictionary called hidden
    hidden = {}
    hidden['h0'] = X.reshape(X.shape[0], np.prod(X.shape[1:]))

    for i in range(self.L):
        idx = i + 1
        # Naming of the variables
        w = self.params['W' + str(idx)]
        b = self.params['b' + str(idx)]
        h = hidden['h' + str(idx - 1)]

        # Compute the forward pass.
        # Special case of the last layer (output)
        if idx == self.L:
            h, cache_h = affine_forward(h, w, b)
            hidden['h' + str(idx)] = h
            hidden['cache_h' + str(idx)] = cache_h
        # For all other layers
        else:
            h, cache_h = affine_relu_forward(h, w, b)
            hidden['h' + str(idx)] = h
            hidden['cache_h' + str(idx)] = cache_h

    scores = hidden['h' + str(self.L)]

    # If test mode, return early
    if mode == 'test':
        return scores

    # Compute the loss
    data_loss, dscores = softmax_loss(scores, y)
    reg_loss = 0
    for w in [self.params[f] for f in self.params.keys() if f[0] == 'W']:
        reg_loss += 0.5 * self.reg * np.sum(w * w)
    loss = data_loss + reg_loss

    # Backward pass
    hidden['dh' + str(self.L)] = dscores
    for i in range(self.L)[::-1]:
        idx = i + 1
        dh = hidden['dh' + str(idx)]
        h_cache = hidden['cache_h' + str(idx)]
        if idx == self.L:
            dh, dw, db = affine_backward(dh, h_cache)
            hidden['dh' + str(idx - 1)] = dh
            hidden['dW' + str(idx)] = dw
            hidden['db' + str(idx)] = db
        else:
            dh, dw, db = affine_relu_backward(dh, h_cache)
            hidden['dh' + str(idx - 1)] = dh
            hidden['dW' + str(idx)] = dw
            hidden['db' + str(idx)] = db

    # W gradients, where we add the regularization term
    list_dw = {key[1:]: val + self.reg * self.params[key[1:]]
               for key, val in hidden.items() if key[:2] == 'dW'}
    # Parameters b
    list_db = {key[1:]: val for key, val in hidden.items() if key[:2] == 'db'}
    # Parameters gamma
    list_dgamma = {key[1:]: val for key, val in hidden.items() if key[:6] == 'dgamma'}
    # Parameters beta
    list_dbeta = {key[1:]: val for key, val in hidden.items() if key[:5] == 'dbeta'}

    grads = {}
    grads.update(list_dw)
    grads.update(list_db)
    grads.update(list_dgamma)
    grads.update(list_dbeta)

    return loss, grads
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_loss_and_gradients(self, X, y):\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n # TODO Set parameter gradient to zeros\n # Hint: using self.params() might be useful!\n self.fulllayer1.W.grad = np.zeros_like(self.fulllayer1.W.grad)\n self.fulllayer1.B.grad = np.zeros_like(self.fulllayer1.B.grad)\n self.fulllayer2.W.grad = np.zeros_like(self.fulllayer2.W.grad)\n self.fulllayer2.B.grad = np.zeros_like(self.fulllayer2.B.grad)\n\n\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model\n res = self.fulllayer1.forward(X)\n res2 = self.reglayer1.forward(res)\n res3 = self.fulllayer2.forward(res2)\n\n loss, grad = softmax_with_cross_entropy(res3, y)\n\n back3 = self.fulllayer2.backward(grad)\n back2 = self.reglayer1.backward(back3)\n back = self.fulllayer1.backward(back2)\n \n # After that, implement l2 regularization on all params\n # Hint: self.params() is useful again!\n\n for params in self.params().keys():\n # print(params)\n # print(self.params()[params].value)\n loc_loss, loc_grad = l2_regularization(self.params()[params].value, self.reg)\n loss += loc_loss\n self.params()[params].grad += loc_grad\n\n return loss", "def compute_loss(self, inputs):\r\n outputs = self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad", "def compute_loss_and_gradients(self, X, y):\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model", "def loss(self, X, y=None, justLoss=False):\n # N = X.shape[0]\n # mode = 'test' if y is None else 'train'\n scores = None\n\n W1, b1 = self.params['W1'], self.params['b1']\n # W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n\n conv_param = {'stride': 1, 'pad': 0}\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n #######################################################################\n # TODO: Implement the forward pass for the convolutional neural net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n #######################################################################\n\n conv1, conv_cache = conv_forward(X, W1, b1, conv_param)\n relu1, relu_cache1 = relu_forward(conv1)\n\n # conv2, conv_cache2 = conv_forward(relu1, W2, b2, conv_param)\n # relu2, relu_cache2 = relu_forward(conv2)\n\n scores, maxpool_cache = max_pool_forward(relu1, pool_param)\n scores, forward_cache = fc_forward(scores, W3, b3)\n \n\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n #######################################################################\n # TODO: Implement the backward pass for the convolutional neural net, #\n # storing the loss and gradients in the loss and grads variables. #\n # Compute data loss using softmax, and make sure that grads[k] holds #\n # the gradients for self.params[k]. 
#\n loss, dscores = softmax_loss(scores, y)\n\n if justLoss:\n return loss\n # print(loss)\n\n\n dx_3, grads['W3'], grads['b3'] = fc_backward(dscores, forward_cache)\n dx_3 = max_pool_backward(dx_3, maxpool_cache)\n\n # dx_2 = relu_backward(dx_3, relu_cache2)\n # dx_2, grads['W2'], grads['b2'] = conv_backward(dx_3, conv_cache2)\n\n dx = relu_backward(dx_3, relu_cache1)\n dx, grads['W1'], grads['b1'] = conv_backward(dx, conv_cache)\n \n \n\n return loss, grads", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n scores = None\n ############################################################################\n # Implementing the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n ############################################################################\n\n l_input = X.copy()\n out = []\n cache = []\n for i in range(self.num_layers - 1):\n # layerwise compute the forward pass and store outputs in out list\n key = ['W' + str(i+1), 'b' + str(i+1)]\n lout, lcache = affine_sigmoid_forward(l_input, self.params[key[0]], self.params[key[1]])\n out.append(lout)\n cache.append(lcache)\n l_input = lout\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n scores, lcache = affine_forward(out[self.num_layers - 2], self.params[key[0]], self.params[key[1]])\n cache.append(lcache)\n \n # regularization parameter compute by summing square of all weight vectors\n R = 0\n for i in range(1, self.num_layers + 1):\n key = 'W' + str(i)\n R += np.sum(np.power(self.params[key], 2))\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n\n ########################\n # Backward pass to compute the loss and gradients\n ########################\n\n loss, dscore = softmax_loss(scores, y)\n # Apply regularization of the loss \n loss = loss + 0.5 * self.reg * R\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n dx, grads[key[0]], grads[key[1]] = affine_backward(dscore, cache[self.num_layers - 1])\n grads[key[0]] += self.reg * self.params[key[0]] \n\n for i in range(self.num_layers - 1, 0, -1):\n key = ['W' + str(i), 'b' + str(i)]\n dx, grads[key[0]], grads[key[1]] = affine_sigmoid_backward(dx, cache[i-1])\n # Apply regularization to the gradients\n grads[key[0]] += self.reg * self.params[key[0]]\n\n return loss, grads", "def _train(self, loss):\n config = ConfigParser.ConfigParser()\n config.read(\"config/conf.cfg\")\n\n learning_rate =float(config.get(\"Common Params\", \"learning_rate\"))\n moment = float(config.get(\"Common Params\", \"moment\"))\n opt = tf.train.AdamOptimizer()\n train_step = opt.minimize(loss)\n return train_step\n\n # grads = opt.compute_gradients(self.total_loss)\n\n # apply_gradient_op = opt.apply_gradients(grads, global_step=self.global_step)\n\n #return apply_gradient_op", "def compute_loss(self):", "def update_network(self, loss_dict):\r\n loss = sum(loss_dict.values())\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()", "def loss(self, X, y=None):\r\n\r\n # Findout if it's trainig or test time\r\n mode = 'train'\r\n if y is None:\r\n mode = 'test'\r\n\r\n # Set the mode for batch normalization and dropout parameters if needed.\r\n if self.use_batch_norm:\r\n for bn_param in self.bn_params:\r\n bn_param['mode'] = mode\r\n if self.use_dropout:\r\n self.dropout_params['mode'] = mode\r\n\r\n # Compute the forward pass fo the cnn.\r\n caches = []\r\n input_layer = X\r\n for i in 
range(1, self.num_conv_layers+1):\r\n w = self.params['W{}'.format(i)]\r\n b = self.params['b{}'.format(i)]\r\n\r\n if self.use_batch_norm:\r\n gamma = self.params['gamma{}'.format(i)]\r\n beta = self.params['beta{}'.format(i)]\r\n layer_score, layer_cache = conv_bn_relu_pool_forward(input_layer, w, b, gamma, beta,\r\n self.conv_params, self.bn_params[i-1], \r\n self.pool_params)\r\n else:\r\n layer_score, layer_cache = conv_relu_pool_forward(input_layer, w, b, self.conv_params, \r\n self.pool_params)\r\n input_layer = layer_score\r\n caches.append(layer_cache)\r\n\r\n # Compute the forward pass for the fully connected net.\r\n num_layers = self.num_conv_layers + self.num_hidden_layers\r\n for i in range(self.num_conv_layers+1, num_layers+1):\r\n w = self.params['W{}'.format(i)]\r\n b = self.params['b{}'.format(i)]\r\n if self.use_batch_norm:\r\n gamma = self.params['gamma{}'.format(i)]\r\n beta = self.params['beta{}'.format(i)]\r\n layer_score, layer_cache = affine_bn_relu_forward(input_layer, w, b, gamma, beta,\r\n self.bn_params[i-1],\r\n dropout=self.use_dropout, \r\n dropout_param=self.dropout_params)\r\n else:\r\n layer_score, layer_cache = affine_relu_forward(input_layer, w, b, dropout=self.use_dropout, \r\n dropout_param=self.dropout_params)\r\n input_layer = layer_score\r\n caches.append(layer_cache)\r\n\r\n # Compute the forward pass for the output layer.\r\n w = self.params['W{}'.format(i+1)]\r\n b = self.params['b{}'.format(i+1)]\r\n scores, output_cache = affine_forward(input_layer, w, b)\r\n\r\n # If testing time return the scores\r\n if mode == 'test':\r\n return scores\r\n\r\n # Compute the loss\r\n loss, dscores = softmax_loss(scores, y)\r\n\r\n # Add regularization to the loss and the corresponding gradient.\r\n grads = {}\r\n for i in range(1, num_layers+2):\r\n w = 'W{}'.format(i)\r\n loss += 0.5 * self.reg * np.sum(self.params[w]**2)\r\n grads[w] = self.reg * self.params[w]\r\n\r\n # Compute the gradients using backprop on the fully connected net.\r\n # Start with the output layer\r\n w = 'W{}'.format(num_layers+1)\r\n b = 'b{}'.format(num_layers+1)\r\n dx, dw, db = affine_backward(dscores, output_cache)\r\n grads[w] += dw\r\n grads[b] = db\r\n for i in range(num_layers, self.num_conv_layers, -1):\r\n cache = caches[i-1]\r\n w = 'W{}'.format(i)\r\n b = 'b{}'.format(i)\r\n if self.use_batch_norm:\r\n gamma = 'gamma{}'.format(i)\r\n beta = 'beta{}'.format(i)\r\n dx, dw, db, dgamma, dbeta = affine_bn_relu_backward(dx, cache, self.use_dropout)\r\n grads[gamma] = dgamma\r\n grads[beta] = dbeta\r\n else:\r\n dx, dw, db = affine_relu_backward(dx, cache)\r\n grads[w] += dw\r\n grads[b] = db\r\n\r\n # Compute the gradeints using backprop on the convolutional layers.\r\n for i in range(self.num_conv_layers, 0, -1):\r\n cache = caches[i-1]\r\n w = 'W{}'.format(i)\r\n b = 'b{}'.format(i)\r\n if self.use_batch_norm:\r\n gamma = 'gamma{}'.format(i)\r\n beta = 'beta{}'.format(i)\r\n dx, dw, db, dgamma, dbeta = conv_bn_relu_pool_backward(dx, cache)\r\n grads[gamma] = dgamma\r\n grads[beta] = dbeta\r\n else:\r\n dx, dw, db = conv_relu_pool_backward(dx, cache)\r\n grads[w] += dw\r\n grads[b] = db\r\n\r\n return loss, grads", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the 
gradient. Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def compute_gradients_and_update(batch_y0, batch_yN):\n with tf.GradientTape() as g:\n pred_y = node_network(tb, batch_y0)\n loss = tf.reduce_mean(tf.abs(pred_y - batch_yN))\n grads = g.gradient(loss, var_list)\n optimizer.apply_gradients(zip(grads, var_list))\n return loss", "def compute_gradients(self):\n wlist = self._neural_net.weights()\n blist = self._neural_net.biases()\n\n nmatrices = len(wlist)\n weight_grad = []\n bias_grad = []\n\n cost_function = self._cost_function\n weight_der = WeightDerivative(neural_net=self._neural_net,\n data_src=self._data_src,\n cost_function=cost_function)\n biase_der = BiasDerivative(neural_net=self._neural_net,\n data_src=self._data_src,\n cost_function=cost_function)\n for layer in range(nmatrices):\n weight_grad.append(np.zeros(wlist[layer].shape))\n bias_grad.append(np.zeros(blist[layer].shape))\n\n rows, cols = wlist[layer].shape\n for i in range(rows):\n for j in range(cols):\n loc = ParameterLocation(layer=layer, row=i, column=j)\n weight_grad[layer][i][j] = weight_der.partial_derivative(loc)\n\n for row in range(rows):\n loc = ParameterLocation(layer=layer, row=row, column=0)\n bias_grad[layer][row] = biase_der.partial_derivative(loc)\n\n return weight_grad, bias_grad", "def loss_gradient(self, x, y):\n x_preproc = self._apply_processing(x)\n x_defences, y_defences = self._apply_defences(x_preproc, y, fit=False)\n\n # Adjust the shape of y for loss functions that do not take labels in one-hot encoding\n if self._reduce_labels:\n y_defences = np.argmax(y_defences, axis=1)\n\n grads = self._loss_grads([x_defences, y_defences])[0]\n grads = self._apply_defences_gradient(x_preproc, grads)\n grads = self._apply_processing_gradient(grads)\n assert grads.shape == x_preproc.shape\n\n return grads", "def build_loss(self):\n import tensorflow as tf\n\n y_1d = [tf.reduce_sum(tf.multiply(self.variables[\"y\"][i], self.variables[\"y_action\"][i]), axis=1) for i in range(len(self.variables[\"y\"]))]\n loss = np.sum([tf.nn.l2_loss(y_1d[i] - self.variables[\"y_true\"]) for i in range(len(y_1d))])\n\n l1_reg = 0\n l2_reg = 0\n\n keys = sorted(self.variables.keys())\n keys = [key for key in keys if critere_keys(key) and \"W\" in key]\n for key in keys:\n l1_reg += tf.reduce_sum(tf.abs(self.variables[key]))\n l2_reg += tf.nn.l2_loss(self.variables[key])\n\n self.loss = loss + self.alpha_reg * l1_reg + self.beta_reg * l2_reg\n\n self.train_step = 
tf.train.RMSPropOptimizer(self.decay_learning_rate,\n decay=0.99, momentum=0., centered=True).minimize(self.loss, global_step=self.global_step)", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def backward(self, input_train, input_train_label):\n batchSize = len(input_train) #liczba obrazow podawanych na wejscie w trakcie jednej iteracji\n weights = self.Weights\n biases = self.Biases\n delta_W = self.delta_W\n delta_B = self.delta_B\n poolParams = self.poolParams\n dW_list = []\n dB_list = []\n dW4 = np.zeros(weights[4].shape)\n dB4 = np.zeros(biases[4].shape)\n dW3 = np.zeros(weights[3].shape)\n dB3 = np.zeros(biases[3].shape)\n dW2 = np.zeros(weights[2].shape)\n dB2 = np.zeros(biases[2].shape)\n dW1 = np.zeros(weights[1].shape)\n dB1 = np.zeros(biases[1].shape)\n dW0 = np.zeros(weights[0].shape)\n dB0 = np.zeros(biases[0].shape)\n loss = 0\n for image in range(batchSize):\n\n X_data = input_train[image]\n X_label = input_train_label[image]\n output_forward, cache = 
self.forward(X_data) \n loss += -1*sum(X_label - np.log(output_forward)) #obliczenie wartosci funkcji straty [cross entropy]\n\n #Propagacja wsteczna gradientu\n dy = -1*(X_label - output_forward)/2\n #print(\"X_label = {} \\t layer7 = {} \\t dy = {}\".format(X_label, output_forward, dy))\n\n [dy, dW, dB ] = fullycon_b(cache[6], np.asarray([dy]).transpose() , weights[4])\n dW4 += dW\n dB4 += dB.flatten() #wektoryzacja macierzy\n dy = act.relu_b(dy.transpose(), cache[6])\n\n [dy, dW, dB ] = fullycon_b(cache[5][:,0], dy, weights[3])\n dW3 += dW\n dB3 += dB.flatten()\n dy = act.relu_b(dy.transpose(), cache[5][:,0]) \n \n [dy, dW, dB ] = convolution_b(cache[4], dy, weights[2])\n dW2 += dW\n dB2 += dB.flatten()\n \n dy = maxpool_b(cache[3], dy)\n dy = act.relu_b(dy, cache[3])\n\n [dy, dW, dB ] = convolution_b(cache[2], dy, weights[1])\n dW1 += dW\n dB1 += dB.flatten()\n \n dy = maxpool_b(cache[1], dy)\n dy = act.relu_b(dy, cache[1]) \n\n [dy, dW, dB ] = convolution_b(np.asarray([cache[0]]), dy, weights[0])\n dW0 += dW\n dB0 += dB.flatten()\n\t\t\t\n dW_list.append(dW4)\n dB_list.append(dB4)\n dW_list.append(dW3)\n dB_list.append(dB3)\n dW_list.append(dW2)\n dB_list.append(dB2)\n dW_list.append(dW1)\n dB_list.append(dB1)\n dW_list.append(dW0)\n dB_list.append(dB0)\n dW_list = dW_list[::-1]\n dB_list = dB_list[::-1]\n \n #Aktualizacja parametrow kazdej z warstw (o ile takie posiada)\n #uczenie z metoda momentum: learning rate = const; alpha = const\n for x in range(len(dW_list)):\n delta_W[x] = alpha*delta_W[x] - eta*dW_list[x]/batchSize\n weights[x] += delta_W[x]\n delta_B[x] = alpha*delta_B[x] - eta*dB_list[x]/batchSize\n biases[x] += delta_B[x]\n #przypisanie nowych wag po aktualiacji wszystkich parametrow\n self.Weights = weights\n self.Biases = biases\n\n #zwrocenie stosunku wartosci f-cji straty do rozmiaru batch'u\n return loss/batchSize", "def train(self) -> None:\n for _ in range(self.epochs):\n for x, y in zip(self.x_train, self.y_train):\n\n weights_gradient = [\n None for weight in self.weights\n ] # Initializing weight gradients for each layer which are going to be used to update the weights in the network.\n\n biases_gradient = [\n None for bias in self.biases\n ] # Initializing bias gradients for each layer which are going to be used to update the biases in the network.\n\n activation = np.expand_dims(x, axis=1)\n activations = [\n activation\n ] # A list for storing all the activations when doing forward propagation\n\n values = (\n []\n ) # A list for storing weight * x + bias values without applying the activation function.\n\n for weight, bias in zip(self.weights, self.biases):\n value = np.dot(weight, activation) + bias\n values.append(value)\n\n activation = self.sigmoid(value)\n activations.append(activation)\n\n \"\"\"\n Calculating the error delta from output layer to be propagated backwards in the network. 
It is calculated\n by taking the derivative of the loss function, which in our case is MSE, and multiply with derivate of\n the sigmoid function applied on the value that entered the last layer of the network.\n \"\"\"\n\n error_delta = (activations[-1] - y) * self.sigmoid_derivative(\n values[-1]\n )\n\n weights_gradient[-1] = np.dot(\n error_delta, activations[-2].T\n ) # Setting error delta multiplied with the second last layer activations as weight gradient for last layer.\n\n biases_gradient[-1] = error_delta # Setting error delta as bias gradient for last layer.\n\n \"\"\"\n This for-loop does the same as the code from line 128 - 136, but for each layer in the network.\n Thus, the error is propagated backwards in the network, and the gradients for each layer are set.\n \"\"\"\n for layer in range(2, self.total_layers):\n error_delta = np.dot(\n self.weights[-layer + 1].T, error_delta\n ) * self.sigmoid_derivative(values[-layer])\n\n weights_gradient[-layer] = np.dot(\n error_delta, activations[-layer - 1].T\n )\n\n biases_gradient[-layer] = error_delta\n\n self.weights = [\n weight - self.lr * weight_gradient\n for weight, weight_gradient in zip(self.weights, weights_gradient)\n ] # Updating the weights of the network by w_i - learning_rate * nabla w_i (w_i is the weight matrix at layer i, and nabla w_i is weight gradient.)\n\n self.biases = [\n bias - self.lr * bias_gradient\n for bias, bias_gradient in zip(self.biases, biases_gradient)\n ] # Updating the biases of the network by b_i - learning_rate * nabla b_i (b_i is the bias vector at layer i, and nabla b_i is weight gradient.)", "def _gradient_descent(self, X, y, epochs, learning_rate, batch_size):\n num_feats = X.shape[1]\n num_samples = X.shape[0]\n\n y = y.reshape(num_samples, 1)\n W = np.random.rand(num_feats, 1)\n training_loss_epochs = []\n\n for ix in range(epochs):\n shuffled_ix = (np.arange(0, len(X)))\n np.random.shuffle(shuffled_ix)\n X = X[shuffled_ix, :]\n y = y[shuffled_ix, :]\n\n for batch_ix in np.arange(0, X.shape[0], batch_size):\n dW = self._compute_gradient(W, X[batch_ix:batch_ix + batch_size], y[batch_ix:batch_ix + batch_size])\n W -= learning_rate * dW\n\n if ix % 10 == 0:\n y_pred = np.dot(X, W)\n training_loss = self.mse(y, y_pred)\n print('epoch {0} : training loss {1}'.format(ix, training_loss))\n training_loss_epochs.append(training_loss[0])\n\n self.weights = W\n self.training_loss = training_loss_epochs\n return None", "def checkBatchGradient():\n\n from mynnet import InputLayer\n\n n,b,d,o = (1, 4, 3, 7) # sequence length, batch size, hidden size, output size\n input_size = 10\n \n lstm = create_cell(input_size, (n,b,d,o))\n\n X = np.random.randn(n,b,input_size)\n c0 = np.random.randn(b,d)\n \n print \"c0:\", c0\n\n # batch forward backward\n H, Ct = lstm.forward(X, c0)\n wrand = np.random.randn(*H.shape)\n loss = np.sum(H * wrand) # weighted sum is a nice hash to use I think\n dH = wrand\n dX, dW, dV, dc0 = lstm.backward(dH)\n\n def fwd():\n h, _ = lstm.forward(X, c0)\n return np.sum(h * wrand)\n\n # now gradient check all\n delta = 1e-7\n rel_error_thr_warning = 1e-2\n rel_error_thr_error = 1\n tocheck = [X, lstm.W, lstm.V, c0]\n grads_analytic = [dX, dW, dV, dc0]\n names = ['X', 'W', 'V', 'c0']\n for j in xrange(len(tocheck)):\n mat = tocheck[j]\n dmat = grads_analytic[j]\n name = names[j]\n # gradcheck\n for i in xrange(mat.size):\n old_val = mat.flat[i]\n mat.flat[i] = old_val + delta\n loss0 = fwd()\n mat.flat[i] = old_val - delta\n loss1 = fwd()\n mat.flat[i] = old_val\n\n grad_analytic = 
dmat.flat[i]\n grad_numerical = (loss0 - loss1) / (2 * delta)\n\n if grad_numerical == 0 and grad_analytic == 0:\n rel_error = 0 # both are zero, OK.\n status = 'OK'\n elif abs(grad_numerical) < 1e-7 and abs(grad_analytic) < 1e-7:\n rel_error = 0 # not enough precision to check this\n status = 'VAL SMALL WARNING'\n else:\n rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)\n status = 'OK'\n if rel_error > rel_error_thr_warning: status = 'WARNING'\n if rel_error > rel_error_thr_error: status = '!!!!! NOTOK'\n\n # print stats\n print '%s checking param %s index %s (val = %+8f), analytic = %+8f, numerical = %+8f, relative error = %+8f' \\\n % (status, name, `np.unravel_index(i, mat.shape)`, old_val, grad_analytic, grad_numerical, rel_error)", "def train_1layer_network(x_train, y_train):\n W = np.random.normal(0, 1, (2, ))\n b = np.random.normal(0, 1, (1, ))\n n_epoch = 1000\n lr = 0.2\n for i in range(n_epoch):\n cost, dW, db = compute_cost_gradient1(x_train, y_train, W, b)\n W -= lr * dW\n b -= lr * db\n print('epoch {}: cost = {}'.format(i+1, cost))\n return W, b", "def _compute_func_grad(self, w):\n W = w.reshape((self.X.shape[1], self.Y.shape[1]))\n self.nll_, self.grad_ = calculate_gradient(self.X, self.Y, W, self.prior, self.weighted,0)", "def calculate_training_loss(self):\n self.network.train()\n self.training_average_loss = self.calculate_average_loss(self.training_dataloader)", "def compute_gradient_and_loss1(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n for j in xrange(num_classes): # for every class\n if j != y[i]: # don't take the correct ground truth index\n term = s[j] - s_y + 1 # max term with Delta = 1, according to Hinge loss formula\n if term > 0: # trick: take only the term > 0, equal to max(0,...) 
formula\n loss += term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW += reg * deriv_abs(W)\n else:\n dW += 2 * reg * W # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################", "def loss(self, X, y=None):\n W1 = self.params['W1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n X, cache_conv = conv_forward(X, W1)\n X, x_relu1 = relu_forward(X)\n X, cache_maxpool = max_pool_forward(X, pool_param)\n N1,C1,H1,W1 = X.shape\n X = X.reshape(N1, C1 * H1 * W1)\n X, cache_fc2 = fc_forward(X, W2, b2)\n X, x_relu2 = relu_forward(X)\n X, cache_fc3 = fc_forward(X, W3, b3)\n scores = X\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. 
#\n ############################################################################\n loss, dx = softmax_loss(X, y)\n dx, dw, db = fc_backward(dx, cache_fc3)\n grads['W3'] = dw\n grads['b3'] = db\n dx = relu_backward(dx, x_relu2)\n dx, dw, db = fc_backward(dx, cache_fc2)\n grads['W2'] = dw\n grads['b2'] = db\n xx, Ind, pp = cache_maxpool\n N2,C2,H2,W2 = xx.shape\n H2 = int(H2/2)\n W2 = int(W2/2)\n dx = dx.reshape(N2,C2,H2,W2)\n dx = max_pool_backward(dx, cache_maxpool)\n dx = relu_backward(dx, x_relu1)\n dx, dw = conv_backward(dx, cache_conv)\n grads['W1'] = dw\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n np.random.seed(10)\n #output_weight = np.random.randn(*output.shape)\n output_weight = np.ones_like(output)\n #print('output_weight',output_weight)\n\n def helper_func(x):\n output = layer.forward(x)\n loss = np.sum(output * output_weight)\n #print('loss',loss)\n d_out = np.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n cnn_out, cnn_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n hidden_out, hidden_cache = affine_relu_forward(cnn_out, W2, b2)\n scores, scores_cache = affine_forward(hidden_out, W3, b3)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n\n # Compute loss and gradients\n loss, dscores = softmax_loss(scores, y)\n dhidden, grads['W3'], grads['b3'] = affine_backward(dscores, scores_cache)\n dcnn, grads['W2'], grads['b2'] = affine_relu_backward(dhidden, hidden_cache)\n dX, grads['W1'], grads['b1'] = conv_relu_pool_backward(dcnn, cnn_cache)\n\n # Regularization\n loss = loss + 0.5*self.reg*np.sum(self.params['W3']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W2']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W1']**2)\n grads['W3'] = grads['W3'] + self.reg * self.params['W3']\n grads['W2'] = grads['W2'] + self.reg * self.params['W2']\n grads['W1'] = grads['W1'] + self.reg * self.params['W1']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def train_step(self):\n\n # Custom gradient\n if self.model.computed_gradient:\n outputs = self.model.compute_all(next(self.dataset_it))\n gradients = outputs[\"gradients\"]\n\n # Compute\n else:\n with tf.GradientTape() as tape:\n outputs = self.model.compute_all(next(self.dataset_it))\n gradients = tape.gradient(outputs[\"loss\"], self.params)\n\n # Apply\n self.optimizer.apply_gradients(zip(gradients, self.params))\n\n return outputs", "def train_2layer_network(x_train, y_train):\n W = np.random.normal(0, 1, (2, ))\n V = np.random.normal(0, 1, (2, ))\n U = np.random.normal(0, 1, (2, ))\n b0 = np.random.normal(0, 1, (1, ))\n b1 = np.random.normal(0, 1, (1, ))\n b2 = np.random.normal(0, 1, (1, ))\n n_epoch = 4000\n lr = 0.3\n for i in range(n_epoch):\n cost, dW, dV, dU, db0, db1, db2 = compute_cost_gradient2(x_train, y_train, W, V, U, b0, b1, b2)\n W -= (lr * dW)\n V -= (lr * dV)\n U -= (lr * dU)\n b0 -= (lr * db0)\n b1 -= (lr * db1)\n b2 -= (lr * db2)\n print('epoch {}: cost = {}'.format(i+1, cost))\n return W, V, U, b0, b1, b2", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def compute_gradient_and_loss(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n \n if term > 0:\n local_loss = term\n \n loss += local_loss\n \n for j in xrange(num_classes): # for every class \n if j != y[i]: # 
don't take the correct ground truth index\n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n# dW += reg * deriv_abs(W) #dW[:,-1]\n# else:\n# dW += 2 * reg * W # l2 derivative formula \n dW[:-1,:] += reg * np.sign((W[:-1,:])) #dW[:,-1]\n else:\n dW[:-1,:] += 2 * reg * W[:-1,:] # l2 derivative formula \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def compute_net_gradients(images, labels, net, optimizer=None, is_net_first_initialized=False):\n _, net_loss = net.compute_loss(\n inputdata=images,\n labels=labels,\n name='shadow_net',\n reuse=is_net_first_initialized\n )\n\n if optimizer is not None:\n grads = optimizer.compute_gradients(net_loss)\n else:\n grads = None\n\n return net_loss, grads", "def loss(self, X_batch, y_batch, learning_rate=1e-3, one_vs_all_index=-1, reg=True):\n #########################################################################\n # TODO: #\n # calculate the loss and the derivative #\n #########################################################################\n loss = 0\n for i in range(X_batch.shape[0]):\n if one_vs_all_index == -1:\n loss += -(y_batch[i] * (np.dot(self.w.T, X_batch[i]))) + np.log(\n 1 + np.exp(np.dot(self.w.T, X_batch[i])))\n else:\n if reg:\n reg = (learning_rate / 2 * X_batch.shape[0]) * np.sum(np.power(self.ws[one_vs_all_index], 2))\n loss += -(y_batch[i] * (np.dot(self.ws[one_vs_all_index].T, X_batch[i]))) + np.log(\n 1 + np.exp(np.dot(self.ws[one_vs_all_index].T, X_batch[i]))) + reg\n else:\n loss += -(y_batch[i] * (np.dot(self.ws[one_vs_all_index].T, X_batch[i]))) + np.log(\n 1 + np.exp(np.dot(self.ws[one_vs_all_index].T, X_batch[i])))\n gradients = np.zeros(X_batch.shape[1])\n if one_vs_all_index == -1:\n dot = np.dot(X_batch, self.w)\n else:\n dot = np.dot(X_batch, self.ws[one_vs_all_index])\n logists = sigmod(dot)\n diff = y_batch - 
logists\n for index in range(X_batch.shape[0]):\n if one_vs_all_index != -1:\n if reg:\n dot = np.dot(X_batch[index], diff[index])\n gradients[1:] += dot[1:] + (learning_rate / X_batch.shape[0]) * self.ws[one_vs_all_index][1:]\n gradients[0] += dot[0]\n else:\n gradients += np.dot(X_batch[index], diff[index])\n else:\n gradients += np.dot(X_batch[index], diff[index])\n\n return loss, gradients / X_batch.shape[0] # 取均值免得步长过大直接nan\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################", "def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def train(network,X,y):\r\n \r\n # Get the layer activations\r\n layer_activations = forward(network,X)\r\n logits = layer_activations[-1]\r\n \r\n # Compute the loss and the initial gradient\r\n loss = softmax_crossentropy_with_logits(logits,y)\r\n loss_grad = grad_softmax_crossentropy_with_logits(logits,y)\r\n \r\n for i in range(1, len(network)):\r\n loss_grad = network[len(network) - i].backward(layer_activations[len(network) - i - 1], loss_grad)\r\n #loss_grad = network[0].backward(X, loss_grad)\r\n return np.mean(loss)", "def 
compute_grad(W, x, y, loss_c, config):\n\n # Lazy import of propper model\n if config.model_type == \"linear_svm\":\n from utils.linear_svm import model_grad\n elif config.model_type == \"logistic_regression\":\n from utils.logistic_regression import model_grad\n else:\n raise ValueError(\"Wrong model type {}\".format(\n config.model_type))\n\n dW, db = model_grad(loss_c, x, y)\n dW += config.reg_lambda * l2_grad(W)\n\n return dW, db", "def add_training_op(self, loss):\n ### YOUR CODE HERE (~1-2 lines)\n opt = tf.train.AdamOptimizer(self.config.lr)\n grads_vars = opt.compute_gradients(loss)\n capped_grads_vars = [(tf.clip_by_value(g, -Config.max_grad_norm, Config.max_grad_norm), v)\n for g, v in grads_vars] # gradient capping\n train_op = opt.apply_gradients(capped_grads_vars, tf.Variable(0, trainable=False))\n ### END YOUR CODE\n return train_op", "def backward_pass(architecture,gradient_layerwise,grad_weights,grad_bias):\n \n for layer in range(len(architecture)-1,-1,-1):\n X_input,X_output,weightsi,biasi,X_input_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imxi = architecture['layer{}'.format(layer+1)]\n# print(\"Operation is:{} and Layer is: {}\".format(operationi,layer+1))\n if operationi == 'softmax': # Last layer -> Dont apply softmax in any layer other than the last layer!\n # not taking gradients here because we need dz_dX(secondlastlayer) which is y_pred - y\n continue\n \n if operationi == 'conv_bn_relu' or operationi == 'conv_relu' or operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if operationi__1 == 'softmax':\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # .\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # .\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input_im2col)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input_im2col)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi #\n elif operationi__1 == 'maxpool': # need to do something here to fix the problem\n None\n\n elif 'flatten' in operationi__1:\n # we currently have dz_doutput of flatten -> we want dz_doutput of the conv_bn_relu before flatten\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2] # weights2\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput of flatten\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5] # i\n try:\n dz_dXi = torch.t(weightsi__1).mm(dz_dXi__1)\n except:\n dz_dXi = weightsi__1.mm(dz_dXi__1)\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in 
operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n\n dz_dXi = torch.reshape(dz_dXi,(output_shapei[1]*output_shapei[2],-1))\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n dz_dweightsi = X_input_im2col.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n dz_dbi = dz_dXi\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)# Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi) # Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi) # Can also set this to layer like in line ~800\n \n else:\n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dX2 -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n \n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n if 'sigmoid' in operationi__1: # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi__1: # ...\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dXi = torch.reshape(dz_dXi,(output_shape_current_layer[1]*output_shape_current_layer[2],-1))\n dz_dbi = torch.reshape(dz_dXi,bias_current_layer.shape)\n dz_dweightsi = X_im2col_current_layer.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n \n if operationi == 'maxpool':\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n \n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n try:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n except:\n Y = torch.t(weightsi__1).mm(dz_dXi__1) # Ensuring valid matrix multiplication here\n \n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n 
X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n \n if operationi__1 == 'conv_sigmoid' or operationi__1 == 'conv_bn_sigmoid': # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n else:\n dz_dXi[X_output <= 0] = 0\n\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n \n dz_dXinput = torch.zeros((X_input.shape))\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+1)][0] # output = output of maxpool\n\n dz_dXoutput = torch.reshape(dz_dXoutput,(output_shapei[0],X_input_im2col.shape[2]))\n \n for i in range(output_shapei[0]):\n for j in range(X_input_im2col.shape[2]):\n Xi2ci = X_im2col_current_layer[i,:,:]\n idx = torch.argmax(Xi2ci[:,j]).item()\n value = imxi[i][(idx,j)]\n dz_dXinput[value[0],value[1],value[2]] += float(dz_dXoutput[i,j])\n\n# dz_dXinput = torch.reshape(dz_dXinput,output_shapei)\n \n X_prev_im2col = architecture['layer{}'.format(layer)][4]\n X_output_prev = architecture['layer{}'.format(layer)][1]\n X_output_prev = torch.reshape(X_output_prev,dz_dXinput.shape)\n X_input_prev = architecture['layer{}'.format(layer)][0]\n prev_bias = architecture['layer{}'.format(layer)][3]\n output_shape_prev = architecture['layer{}'.format(layer)][6]\n prev_operation = architecture['layer{}'.format(layer)][9]\n \n if prev_operation == 'conv_sigmoid' or prev_operation == 'conv_bn_sigmoid':\n dz_dXinput *= sigmoid(X_output_prev)*(1-sigmoid(X_output_prev)) # Taking the derivative of the sigmoid function\n else:\n dz_dXinput[X_output_prev <= 0] = 0\n \n if len(dz_dXinput.shape) == 3:\n dz_dXinput = torch.reshape(dz_dXinput,(-1,output_shape_prev[0]))\n \n dz_dbi = torch.reshape(dz_dXinput,prev_bias.shape)\n dz_dweightsi = X_prev_im2col.mm(dz_dXinput)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer)][0] = torch.Tensor(dz_dXinput) # ...\n \n if 'flatten_dense' in operationi:\n \n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n \n if operationi__1 == 'softmax':\n \n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n X_output = torch.reshape(X_output,(-1,1))\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if 'sigmoid' in operationi:\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi 
# Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # Can also set this to layer like in line ~800\n \n else:\n # Have to modify and test this before implementation -> Specifically\n # the backprop implementation is not consistent with the ones above\n #\n X_output = torch.reshape(X_output,(-1,1))\n weights__i = architecture['layer{}'.format(layer+2)][2]\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+2)][0]\n dz_dXoutput = torch.reshape(torch.Tensor(dz_dXoutput),X_output.shape)\n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n\n if 'relu' in operationi:\n dz_dXoutput[X_output<0] = 0\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n if 'sigmoid' in operationi:\n dz_dXoutput*= sigmoid(X_output)*(1-sigmoid(X_output))\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n else:\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n \n unflattened_Xinput = architecture['layer{}'.format(layer+1)][0]\n dz_dXinput = torch.reshape(dz_dXinput,unflattened_Xinput.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXinput)\n \n if gradient_layerwise['layer{}'.format(layer+1)][1] is not None:\n try:\n grad_weights['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][1]\n except:\n grad_weights['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][1])\n if gradient_layerwise['layer{}'.format(layer+1)][2] is not None:\n try:\n grad_bias['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][2]\n except:\n grad_bias['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][2])\n \n gc.collect()\n return", "def compute_gradients(self, inputs, targets, hprev):\n n = len(inputs)\n loss = 0\n\n # Dictionaries for storing values during the forward pass\n aa, xx, hh, oo, pp = {}, {}, {}, {}, {}\n hh[-1] = np.copy(hprev)\n\n # Forward pass\n for t in range(n):\n xx[t] = np.zeros((self.vocab_len, 1))\n xx[t][inputs[t]] = 1 # 1-hot-encoding\n\n aa[t], hh[t], oo[t], pp[t] = self.evaluate_classifier(hh[t-1], xx[t])\n\n loss += -np.log(pp[t][targets[t]][0]) # update the loss\n\n # Dictionary for storing the gradients\n grads = {\"W\": np.zeros_like(self.W), \"U\": np.zeros_like(self.U),\n \"V\": np.zeros_like(self.V), \"b\": np.zeros_like(self.b),\n \"c\": np.zeros_like(self.c), \"o\": np.zeros_like(pp[0]),\n \"h\": np.zeros_like(hh[0]), \"h_next\": np.zeros_like(hh[0]),\n \"a\": np.zeros_like(aa[0])}\n\n # Backward pass\n for t in reversed(range(n)):\n grads[\"o\"] = np.copy(pp[t])\n grads[\"o\"][targets[t]] -= 1\n\n grads[\"V\"] += grads[\"o\"]@hh[t].T\n grads[\"c\"] += 
grads[\"o\"]\n\n grads[\"h\"] = np.matmul(self.V.T , grads[\"o\"] )+ grads[\"h_next\"]\n grads[\"a\"] = np.multiply(grads[\"h\"], (1 - np.square(hh[t])))\n\n grads[\"U\"] += np.matmul(grads[\"a\"], xx[t].T)\n grads[\"W\"] += np.matmul(grads[\"a\"], hh[t-1].T)\n grads[\"b\"] += grads[\"a\"]\n\n grads[\"h_next\"] = np.matmul(self.W.T, grads[\"a\"])\n\n # Drop redundant gradients\n grads = {k: grads[k] for k in grads if k not in [\"o\", \"h\", \"h_next\", \"a\"]}\n\n # Clip the gradients\n for grad in grads:\n grads[grad] = np.clip(grads[grad], -5, 5)\n\n # Update the hidden state sequence\n h = hh[n-1]\n\n return grads, loss, h", "def loss_grad(dataset, params):\n grads = [grad(dataset[0][i], dataset[1][i], params) for i in range(len(dataset[0]))]\n return np.mean(grads, axis=0)", "def loss(self, X, y=None):\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the two-layer net, computing the #\n # class scores for X and storing them in the scores variable. #\n ############################################################################\n hid1, hid1cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, scorecache = affine_forward(hid1, self.params['W2'], self.params['b2'])\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If y is None then we are in test mode so just return scores\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the two-layer net. Store the loss #\n # in the loss variable and gradients in the grads dictionary. Compute data #\n # loss using softmax, and make sure that grads[k] holds the gradients for #\n # self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n loss, dscores = softmax_loss(scores, y)\n loss += 0.5 * self.reg *( np.sum(self.params['W1']**2) + np.sum(self.params['W2']**2) )\n\n dhid1, grads['W2'], grads['b2'] = affine_backward(dscores, scorecache)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dhid1, hid1cache)\n\n grads['W1'] += self.reg * self.params['W1']\n grads['W2'] += self.reg * self.params['W2']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n activation = []\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n ### Forward propagation\n activation.append(data)\n\n # Hidden layer inputs: (N, Dx) * (Dx, H) -> N x H\n z = np.dot(activation[-1], W1) + b1 \n # Activations, inputs to the final layer. 
\n activation.append(sigmoid(z)) # output of the hidden layer, activation\n # Final layer outputs: ( N x H ) * ( H, Dy) -> (N, Dy)\n z = np.dot(activation[-1], W2) + b2\n activation.append( softmax(z) )\n\n # Cross-entropy cost\n\n y_p = activation[-1]\n activation = activation[:-1] # remove activation data (output)\n\n cost = -np.sum(labels * np.log(y_p))\n \n error = []\n \n ### backward propagation\n sigma = (y_p - labels)\n error.append(sigma)\n\n gradb2 = np.sum(error[-1], axis=0)\n gradW2 = np.dot(activation[-1].T, error[-1])\n\n #\n sigma = np.dot(W2, error[-1].T)\n sigma = sigma.T * sigmoid_grad(activation[-1])\n activation = activation[:-1] # remove activation data ( hidden layer )\n\n error.append(sigma)\n\n gradb1 = np.sum(error[-1], axis=0)\n gradW1 = np.dot(activation[-1].T, error[-1])\n\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), \n gradW2.flatten(), gradb2.flatten()))\n \n return cost, grad", "def update_grhs():\n init_gradient()\n costs_per_batch = []\n for i in range(n_train_batches):\n c = update_gradient_batch(i,*args)\n costs_per_batch.append(c)\n return numpy.mean(costs_per_batch,axis=0)", "def check_gradient(self, x, y):\n x = x.transpose()\n y = y.transpose()\n layers_copy = deepcopy(self.layers)\n epsilon = 10 ** -4\n a, layer = self.forward_propagation(x)\n delta = self.calculate_delta(a, y, layer)\n self.backpropagation(delta=delta, theta=layer.theta)\n previous_layer_output = x\n for layer in self.layers:\n theta_copy = deepcopy(layer.theta)\n real_theta_size = theta_copy.shape\n delta = layer.delta\n dc_dtheta = np.outer(previous_layer_output, delta).transpose()\n previous_layer_output = layer.a\n R, C = theta_copy.shape\n for i in range(R):\n for j in range(C):\n theta_plus = deepcopy(theta_copy)\n theta_plus[i, j] += epsilon\n layer.theta = theta_plus\n a_plus, l_plus = self.forward_propagation(x)\n err_plus = self.calculate_loss(a_plus, y)\n theta_minus = deepcopy(theta_copy)\n theta_minus[i, j] -= epsilon\n layer.theta = theta_minus\n a_minus, l_minus = self.forward_propagation(x)\n err_minus = self.calculate_loss(a_minus, y)\n limit = (err_plus - err_minus)/(2*epsilon)\n grad_diff = abs(dc_dtheta[i,j] - limit)\n assert grad_diff < 10 ** -6, f\"Diff {grad_diff} is too big.\"\n layer.theta = theta_copy", "def ComputeGradients(self, input_data: list, target_output_data: list):\n delta = 1e-6\n normal_cost = self.Cost(input_data, target_output_data)\n\n # Evaluate Gradient for Hidden Layer Biases\n for i in range(self.hidden_layer_biases.shape[0]):\n original_bias_value = self.hidden_layer_biases[i]\n self.hidden_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_layer_biases[i] = original_bias_value\n self.hidden_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Output Layer Biases\n for i in range(self.output_layer_biases.shape[0]):\n original_bias_value = self.output_layer_biases[i]\n self.output_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.output_layer_biases[i] = original_bias_value\n self.output_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for i in range(self.input_to_hidden_weights.shape[0]):\n for h in range(self.input_to_hidden_weights.shape[1]):\n original_bias_value = self.input_to_hidden_weights[i, h]\n self.input_to_hidden_weights[i, h] += delta\n plusdelta_cost = 
self.Cost(input_data, target_output_data)\n self.input_to_hidden_weights[i, h] = original_bias_value\n self.input_to_hidden_weights_gradient[i, h] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for h in range(self.hidden_to_output_weights.shape[0]):\n for o in range(self.hidden_to_output_weights.shape[1]):\n original_bias_value = self.hidden_to_output_weights[h, o]\n self.hidden_to_output_weights[h, o] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_to_output_weights[h, o] = original_bias_value\n self.hidden_to_output_weights_gradient[h, o] = (plusdelta_cost - normal_cost) / delta", "def train(net):\n\n # Set SGD hyperparameters\n n_iter = 200 # number of iterations of SGD\n learning_rate = 1e-3 # learning rate for SGD\n momentum = .99 # momentum parameter for SGD\n batch_size = 100 # number of data points in each mini-batch\n\n # Initialize binary cross-entropy loss function\n loss_fn = nn.BCELoss()\n\n # Initialize SGD optimizer with momentum\n optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=momentum)\n\n # Placeholder to save loss at each iteration\n track_loss = []\n\n # Loop over iterations\n for i in range(n_iter):\n\n # Sample minibatch of oriented grating stimuli\n stimuli, tilt = sample_stimuli(batch_size)\n\n # Evaluate loss and update network weights\n out = net(stimuli) # predicted probability of tilt right\n loss = loss_fn(out, tilt) # evaluate loss\n optimizer.zero_grad() # clear gradients\n loss.backward() # compute gradients\n optimizer.step() # update weights\n \n # Keep track of loss at each iteration\n track_loss.append(loss.item())\n\n # Track progress\n if (i + 1) % (n_iter / 10) == 0:\n print('iteration %i | loss: %.3f | percent correct: %.2f%%' % (i + 1, loss.item(), 100 * pcorrect(out, tilt)))\n \n # Plot loss\n plt.plot(track_loss)\n plt.xlabel('iterations of SGD')\n plt.ylabel('binary cross-entropy loss')\n plt.xlim([0, None])\n plt.ylim([0, None])\n plt.show()", "def backward(\n self, X: np.ndarray, y: np.ndarray, lr: float, reg: float = 0.0\n ) -> float:\n y_hat = self.forward(X)\n\n y_one_hot = self.one_hot_encode(y)\n loss = CrossEntropy.forward(y_one_hot, y_hat)\n\n d_layer = CrossEntropy.backward(y, y_hat)\n\n w_grads = []\n b_grads = []\n\n for idx, layer in reversed(list(enumerate(self.layers))):\n # Not output layer\n if (idx + 1) < len(self.layers):\n next_layer = self.layers[idx + 1]\n\n d_layer = d_layer.dot(next_layer.w.T)\n d_layer = layer.activation_func.backward(d_layer, layer.activated_out)\n\n d_w = layer.linear_in.T.dot(d_layer) + 2 * reg * layer.w\n d_b = np.sum(d_layer, axis=0)\n\n w_grads.insert(0, d_w)\n b_grads.insert(0, d_b)\n\n self.optimizer.step(self.layers, w_grads, b_grads, lr)\n\n if self.norm_weights:\n w_norm = max(np.linalg.norm(l.w) for l in self.layers) / len(self.layers)\n b_norm = max(np.linalg.norm(l.w) for l in self.layers) / len(self.layers)\n for layer in self.layers:\n layer.w /= w_norm\n layer.b /= b_norm\n\n return loss", "def computeGradient(self, X, y, w):\n n = len(X)\n if self.loss == 'linear':\n gradient = -2 * np.dot(X.T, (y - X.dot(w)))\n elif self.loss == 'logistic':\n g = self.logistic(X, w)\n gradient = -2 * np.dot(X.T, (y - g) * g * (1 - g))\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = ((np.dot(X, w) >= 0).astype(int) != y)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = -np.dot(usedX.T, usedY)\n elif self.loss 
== 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = (np.dot(X, w) * newY < 1)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = 2 * w - self.C * np.dot(usedX.T, usedY)\n gradient[0] = gradient[0] + 2 * w[0]\n\n return gradient", "def compile(self, optimizer, lr):\n \n #clip_morm = 0.1\n self.loss_f = None\n with self.graph.as_default():\n \n tvars = tf.trainable_variables()\n ft_vars = [v for v in tvars if \"_fe\" in v.name] \n lab_vars = [v for v in tvars if \"_dc\" not in v.name]\n dom_vars = [v for v in tvars if \"_lp\" not in v.name]\n\n print()\n print(\" ft updates:\", ft_vars)\n print(\"96x3 updates:\", lab_vars)\n print(\" 1x3 updates:\", dom_vars)\n print()\n\n # `tf.nn.softmax_cross_entropy_with_logits` is deprcated.\n # https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits\n self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.labels, logits=self.output, name='cross_entropy')\n self.loss_adv = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.labels_adv, logits=self.output_adv, name='cross_entropy_adv')\n \n #grads_and_vars = optimizer.compute_gradients(loss, var_list=tf_vars)\n #clipped_grads_and_vars = [(tf.clip_by_norm(grad, clip_norm=clip_norm), var) for grad, var in grads_and_vars]\n \n self.loss_fe = - lam * self.loss_adv\n # `tf.control_dependencies` is necessary if `tf.layers.batch_normalization` is in the model\n # https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization\n self.train_step_adv = optimizer(lr).minimize(self.loss_fe, name='minimize_fe', var_list=ft_vars)\n self.train_step = optimizer(lr).minimize(self.loss, name='minimize', var_list=lab_vars)\n self.train_step_adv = optimizer(lr).minimize(self.loss_adv, name='minimize_adv', var_list=dom_vars)\n\n # Initialize all `tf.Variable`.\n self.session.run(tf.global_variables_initializer())", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.use_dropout:\n self.dropout_param['mode'] = mode\n if self.normalization=='batchnorm':\n for bn_param in self.bn_params:\n bn_param['mode'] = mode\n ############################################################################\n # TODO: Implement the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n # #\n # When using dropout, you'll need to pass self.dropout_param to each #\n # dropout forward pass. #\n # #\n # When using batch normalization, you'll need to pass self.bn_params[0] to #\n # the forward pass for the first batch normalization layer, pass #\n # self.bn_params[1] to the forward pass for the second batch normalization #\n # layer, etc. 
#\n ############################################################################\n cache = {} # 需要存储反向传播需要的参数\n cache_dropout = {}\n hidden = X\n for i in range(self.num_layers - 1):\n if self.normalization == 'batchnorm':\n hidden,cache[i+1] = affine_bn_relu_forward(hidden,\n self.params['W' + str(i+1)],\n self.params['b' + str(i+1)],\n self.params['gamma' + str(i+1)],\n self.params['beta' + str(i+1)],\n self.bn_params[i])\n elif self.normalization == 'layernorm':\n hidden, cache[i + 1] = affine_ln_relu_forward(hidden,\n self.params['W' + str(i + 1)],\n self.params['b' + str(i + 1)],\n self.params['gamma' + str(i + 1)],\n self.params['beta' + str(i + 1)],\n self.bn_params[i])\n else:\n hidden , cache[i+1] = affine_relu_forward(hidden,self.params['W' + str(i+1)],\n self.params['b' + str(i+1)])\n if self.use_dropout:\n hidden , cache_dropout[i+1] = dropout_forward(hidden,self.dropout_param)\n # 最后一层不用激活层\n scores, cache[self.num_layers] = affine_forward(hidden , self.params['W' + str(self.num_layers)],\n self.params['b' + str(self.num_layers)])\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n ############################################################################\n # TODO: Implement the backward pass for the fully-connected net. Store the #\n # loss in the loss variable and gradients in the grads dictionary. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # When using batch/layer normalization, you don't need to regularize the scale #\n # and shift parameters. #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. 
#\n ############################################################################\n loss, grads = 0.0, {}\n loss, dS = softmax_loss(scores , y)\n # 最后一层没有relu激活层\n dhidden, grads['W' + str(self.num_layers)], grads['b' + str(self.num_layers)] \\\n = affine_backward(dS,cache[self.num_layers])\n loss += 0.5 * self.reg * np.sum(self.params['W' + str(self.num_layers)] * self.params['W' + str(self.num_layers)])\n grads['W' + str(self.num_layers)] += self.reg * self.params['W' + str(self.num_layers)]\n\n for i in range(self.num_layers - 1, 0, -1):\n loss += 0.5 * self.reg * np.sum(self.params[\"W\" + str(i)] * self.params[\"W\" + str(i)])\n # 倒着求梯度\n if self.use_dropout:\n dhidden = dropout_backward(dhidden,cache_dropout[i])\n if self.normalization == 'batchnorm':\n dhidden, dw, db, dgamma, dbeta = affine_bn_relu_backward(dhidden, cache[i])\n grads['gamma' + str(i)] = dgamma\n grads['beta' + str(i)] = dbeta\n elif self.normalization == 'layernorm':\n dhidden, dw, db, dgamma, dbeta = affine_ln_relu_backward(dhidden, cache[i])\n grads['gamma' + str(i)] = dgamma\n grads['beta' + str(i)] = dbeta\n else:\n dhidden, dw, db = affine_relu_backward(dhidden, cache[i])\n grads['W' + str(i)] = dw + self.reg * self.params['W' + str(i)]\n grads['b' + str(i)] = db\n return loss, grads", "def compute_gradient_and_loss2(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if term > 0: # trick: take only the term > 0, equal to max(0,...) 
formula\n local_loss = term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n loss += local_loss \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW[:,-1] += reg * deriv_abs(W[:,-1]) #dW[:,-1]\n else:\n dW[:,-1] += 2 * reg * W[:,-1] # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def _compute_gradients(self, config):\n with tf.GradientTape() as tape:\n all_loss = self._compute_loss(**config)\n # Compute gradients wrt input image\n total_loss = all_loss[0]\n return tape.gradient(total_loss, config['init_image']), all_loss", "def gradient(self, inputs):\n raise NotImplementedError", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_relu_pool_forward(x, w, b, conv_param, pool_param): return out, cache;\n out, cache['layer1'] = layer_utils.conv_relu_pool_forward(X, W1, b1, conv_param, pool_param) \n # def affine_relu_forward(x, w, b): return out, cache;\n out, cache['layer2'] = layer_utils.affine_relu_forward(out, W2, b2)\n # def affine_forward(x, w, b): return out, cache;\n scores, cache['layer3'] = layers.affine_forward(out, W3, b3)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW3, db3 = layers.affine_backward(dscores, cache['layer3']) \n # def affine_relu_backward(dout, cache): return dx, dw, db;\n dout, dW2, db2 = layer_utils.affine_relu_backward(dout, cache['layer2'])\n # def conv_relu_pool_backward(dout, cache): return dx, dw, db;\n dout, dW1, db1 = layer_utils.conv_relu_pool_backward(dout, cache['layer1'])\n\n # reg\n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def loss_function(self, train_head, train_tail, train_relation, train_head_corrupted, train_tail_corrupted):\n\n # train_head = tf.nn.l2_normalize(train_head, 1)\n # train_tail = tf.nn.l2_normalize(train_tail, 1)\n # train_head_corrupted = tf.nn.l2_normalize(train_head_corrupted, 1)\n # train_tail_corrupted = tf.nn.l2_normalize(train_tail_corrupted, 1)\n\n # loss = tf.reduce_mean(\n # tf.maximum(self.dict_paras['margin']\n # + self.distance(tf.add(train_head, train_relation), train_tail)\n # - self.distance(tf.add(train_head_corrupted, train_relation), train_tail_corrupted), 0.))\n\n loss = tf.reduce_mean(self.distance(tf.add(train_head, train_relation), train_tail))\n\n return loss", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n\n # pass conv_param to the forward pass for the convolutional layer\n # Padding and stride chosen to preserve the input spatial size\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n h1, c1 = conv_forward_im2col(X, W1, b1, 
conv_param) #\n h1, r1 = relu_forward(h1)\n h1, p1 = max_pool_forward_fast(h1, pool_param) #\n max_pool_shape = h1.shape\n h1 = h1.reshape(X.shape[0], -1)\n h2, c2 = affine_relu_forward(h1, W2, b2)\n scores, c3 = affine_forward(h2, W3, b3)\n\n if y is None:\n return scores\n\n loss, dx = softmax_loss(scores, y)\n\n loss += self.reg / 2 * (self.params['W1']**2).sum()\n loss += self.reg / 2 * (self.params['W2']**2).sum()\n loss += self.reg / 2 * (self.params['W3']**2).sum()\n\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n \n grads = {}\n dx, grads['W3'], grads['b3'] = affine_backward(dx, c3)\n grads['W3'] += self.reg * self.params['W3']\n dx, grads['W2'], grads['b2'] = affine_relu_backward(dx, c2)\n dx = dx.reshape(max_pool_shape)\n dx = max_pool_backward_fast(dx, p1)\n dx = relu_backward(dx, r1)\n dx, grads['W1'], grads['b1'] = conv_backward_im2col(dx, c1)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n num_FC = self.num_FC\n num_CNN = self.num_CNN\n total_layer = self.num_FC + self.num_CNN\n \n cache = {}\n pre_layer_output = X\n for i in range(0, num_CNN):\n W_name = \"W\" + str(i)\n b_name = \"b\" + str(i)\n conv_param_name = \"conv_param\" + str(i)\n gamma_name = \"gamma\" + str(i)\n beta_name = \"beta\" + str(i)\n bn_param_name = \"bn_param\" + str(i)\n pool_param_name = \"pool_param\" + str(i)\n\n w = self.params[W_name]\n b = self.params[b_name]\n conv_param = self.fix_params[conv_param_name]\n gamma = self.params[gamma_name]\n beta = self.params[beta_name]\n bn_param = self.fix_params[bn_param_name]\n pool_param = self.fix_params[pool_param_name]\n \n pre_layer_output, cache_layer = cnn_batch_relu_pool_forward(pre_layer_output, \n w, b, conv_param, \n gamma, beta, bn_param, \n pool_param)\n cache[i] = cache_layer\n \n for i in range(0, num_FC):\n W_name = \"W\" + str(i + num_CNN)\n b_name = \"b\" + str(i + num_CNN)\n gamma_name = \"gamma\" + str(i + num_CNN)\n beta_name = \"beta\" + str(i + num_CNN)\n bn_param_name = \"bn_param\" + str(i + num_CNN)\n drop_name = \"drop_ratio\" + str(i + num_CNN)\n\n w = self.params[W_name]\n b = self.params[b_name]\n gamma = self.params[gamma_name]\n beta = self.params[beta_name]\n bn_param = self.fix_params[bn_param_name]\n dropout_param = self.fix_params[drop_name]\n\n pre_layer_output, cache_layer = affine_batch_relu_drop_forward(pre_layer_output, \n w, b, \n gamma, beta, bn_param, \n dropout_param)\n cache[i + num_CNN] = cache_layer\n \n W_name = \"W\" + str(total_layer)\n b_name = \"b\" + str(total_layer)\n w = self.params[W_name]\n b = self.params[b_name]\n scores, cache[total_layer] = affine_forward(pre_layer_output, w, b)\n if y is None:\n return scores\n \n loss, 
grads = 0, {}\n \n loss, dUpLayer = softmax_loss(scores, y)\n loss = loss + 0.5 * self.reg * np.sum(w**2)\n \n cache_layer = cache[total_layer]\n dUpLayer, grads[W_name], grads[b_name] = affine_backward(dUpLayer, cache_layer)\n grads[W_name] = grads[W_name] + self.reg * self.params[W_name]\n\n for i in range(0, num_FC):\n layer_index = num_FC + num_CNN -1 - i\n W_name = \"W\" + str(layer_index)\n b_name = \"b\" + str(layer_index)\n gamma_name = \"gamma\" + str(layer_index)\n beta_name = \"beta\" + str(layer_index)\n\n cache_layer = cache[layer_index]\n dUpLayer, grads[W_name], grads[b_name], grads[gamma_name], grads[beta_name] = affine_batch_relu_drop_backward(dUpLayer, cache_layer)\n\n loss = loss + 0.5 * self.reg * np.sum(self.params[W_name]**2)\n grads[W_name] = grads[W_name] + self.reg * self.params[W_name]\n grads[gamma_name] = grads[gamma_name] + self.reg * self.params[gamma_name]\n\n for i in range(0, num_CNN):\n\n layer_index = num_CNN -1 - i\n\n W_name = \"W\" + str(layer_index)\n b_name = \"b\" + str(layer_index)\n conv_param_name = \"conv_param\" + str(layer_index)\n gamma_name = \"gamma\" + str(layer_index)\n beta_name = \"beta\" + str(layer_index)\n\n cache_layer = cache[layer_index]\n dUpLayer, grads[W_name], grads[b_name], grads[gamma_name], grads[beta_name] = cnn_batch_relu_pool_backward(dUpLayer, cache_layer)\n\n loss = loss + 0.5 * self.reg * np.sum(self.params[W_name]**2)\n grads[W_name] = grads[W_name] + self.reg * self.params[W_name]\n grads[gamma_name] = grads[gamma_name] + self.reg * self.params[gamma_name]\n \n return loss, grads", "def loss(self, X, y=None):\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\t\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size = W1.shape[2]\n\t\tconv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\t\tscores = None\n\t\t############################################################################\n\t\t# TODO: Implement the forward pass for the three-layer convolutional net, #\n\t\t# computing the class scores for X and storing them in the scores\t\t\t\t\t #\n\t\t# variable.\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tz1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n\t\tz2, cache2 = affine_relu_forward(z1, W2, b2)\n\t\ty3, cache3 = affine_forward(z2, W3, b3)\n\t\tscores = y3\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\t############################################################################\n\t\t# TODO: Implement the backward pass for the three-layer convolutional net, #\n\t\t# storing the loss and gradients in the loss and grads variables. Compute #\n\t\t# data loss using softmax, and make sure that grads[k] holds the gradients #\n\t\t# for self.params[k]. 
Don't forget to add L2 regularization!\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W3'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W1'], 2).sum())\n\n\t\tdx3, grads['W3'], grads['b3'] = affine_backward(dout, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = affine_relu_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_pool_backward(dx2, cache1)\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\treturn loss, grads", "def calculate_validation_loss(self):\n self.network.train()\n self.validation_average_loss = self.calculate_average_loss(self.validation_dataloader)", "def train_CNN(self, train_dataset, validation_dataset, lr = 0.01, epochs_num = 100, batch_size = 40, alpha = 0, momentum = 0.9):\n threshold = 0.5\n # optimizer = SGD(self.parameters(), lr = lr, weight_decay = alpha, momentum = momentum)\n optimizer = Adam(self.parameters())\n\n criterion = nn.BCELoss()\n\n train_losses = []\n\n validation_losses = []\n\n f1_scores_validations = []\n precisions_validations = []\n recalls_validations = []\n\n epochs = []\n\n start = time.time()\n\n remaining_time = 0\n\n train_dataloader = DataLoader(train_dataset, batch_size = batch_size, collate_fn = PPD.collate_data)\n best_f1score_validation = 0\n patience = 0\n for epoch in range(epochs_num):\n\n super(CNN, self).train()\n\n for i_batch, sample_batched in enumerate(train_dataloader):\n\n input = sample_batched[0]\n\n target = sample_batched[1].float()\n #optimizer.zero_grad() clears x.grad for every parameter x in the optimizer. It’s important to call this before loss.backward(), otherwise you’ll accumulate the gradients from multiple passes.\n self.zero_grad()\n\n output = self(input)\n\n train_loss = criterion(output, target)\n #loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. These are accumulated into x.grad for every parameter x. In pseudo-code: x.grad += dloss/dx\n train_loss.backward()\n #optimizer.step updates the value of x using the gradient x.grad. 
For example, the SGD optimizer performs: x += -lr * x.grad\n optimizer.step()\n\n super(CNN, self).eval()\n\n validation_segments, validation_labels = PPD.collate_data(validation_dataset)\n\n validation_loss = criterion(self(validation_segments.long()), validation_labels.float())\n\n f1_scores_validation = self.f1_score(self(validation_segments.long()), validation_labels.float(), threshold)[0]\n precisions_validation = self.f1_score(self(validation_segments.long()), validation_labels.float(), threshold)[1]\n recalls_validation = self.f1_score(self(validation_segments.long()), validation_labels.float(), threshold)[2]\n\n if (ceil(f1_scores_validation * 100) / 100) <= (ceil(best_f1score_validation * 100) / 100):\n patience = patience + 1\n else:\n best_f1score_validation = f1_scores_validation\n patience = 0\n\n\n end = time.time()\n\n remaining_time = remaining_time * 0.90 + ((end - start) * (epochs_num - epoch + 1) / (epoch + 1)) * 0.1\n\n remaining_time_corrected = remaining_time / (1 - (0.9 ** (epoch + 1)))\n\n epoch_str = \"last epoch finished: \" + str(epoch)\n\n progress_str = \"progress: \" + str((epoch + 1) * 100 / epochs_num) + \"%\"\n\n time_str = \"time: \" + str(remaining_time_corrected / 60) + \" mins\"\n\n sys.stdout.write(\"\\r\" + epoch_str + \" -- \" + progress_str + \" -- \" + time_str)\n\n sys.stdout.flush()\n\n train_losses.append(train_loss.item())\n\n validation_losses.append(validation_loss.item())\n\n f1_scores_validations.append(f1_scores_validation)\n precisions_validations.append(precisions_validation)\n recalls_validations.append(recalls_validation)\n\n epochs.append(epoch)\n # if patience == 15:\n # break\n\n print(\"\\n\" + \"Training completed. Total training time: \" + str(round((end - start) / 60, 2)) + \" mins\")\n\n return epochs, train_losses, validation_losses, f1_scores_validations, precisions_validations, recalls_validations", "def _optimize(self):\n # Retrieve all trainable variables\n train_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n\n # Compute the gradient (return a pair of variable and their respective gradient)\n grads = self.optimizer.compute_gradients(loss=self.loss, var_list=train_variables)\n self.train_dis = self.optimizer.apply_gradients(grads, global_step=self.global_step)", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n ### YOUR CODE HERE: forward propagation\n affine_1 = np.dot(data, W1) + b1\n sigmoid_1 = sigmoid(affine_1)\n affine_2 = np.dot(sigmoid_1, W2) + b2\n scores = sigmoid(affine_2)\n cost = - np.sum(np.multiply(np.log(softmax(scores)), labels))\n ### END YOUR CODE\n \n ### YOUR CODE HERE: backward propagation\n cross_entropy_grad_ = cross_entropy_grad(scores, labels)\n sigmoid_2_grads = sigmoid_input_grad(cross_entropy_grad_, scores)\n x_2_grad, gradW2, gradb2 = affine_grads(sigmoid_2_grads, sigmoid_1, W2, b2)\n sigmoid_1_grads = sigmoid_input_grad(x_2_grad, sigmoid_1)\n x_1_grad, gradW1, gradb1 = affine_grads(sigmoid_1_grads, data, W1, b1)\n ### END YOUR CODE\n \n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), \n gradW2.flatten(), gradb2.flatten()))\n return cost, grad", "def 
model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def grad_loss_wrt_w(self, x, y):\n (N, D) = x.shape\n k1 = np.matmul(x, np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n dr = (1 + np.exp(1 * y1 * k1))\n nr = -y1 * x\n c1 = nr/dr\n #(N1,D1) = self.w.shape\n #c2 = np.zeros((N1,D1))\n #for i in range(N):\n # c2[i-1] = c1[i-1,:] + c1[i,:]\n #l_w = c2/N\n l_w1 = np.mean(c1,axis=0)\n return l_w1\n\n\n #raise NotImplementedError", "def backward_pass(total_loss):\n\n # Get the tensor that keeps track of step in this graph or create one if not there\n global_step = tf.train.get_or_create_global_step()\n\n # Print summary of total loss\n tf.summary.scalar('Total_Loss', total_loss)\n\n # Decay the learning rate\n dk_steps = int((FLAGS.epoch_size / FLAGS.batch_size) * 75)\n lr_decayed = tf.train.cosine_decay_restarts(FLAGS.learning_rate, global_step, dk_steps)\n\n # Compute the gradients. NAdam optimizer came in tensorflow 1.2\n opt = tf.contrib.opt.NadamOptimizer(learning_rate=lr_decayed, beta1=FLAGS.beta1,\n beta2=FLAGS.beta2, epsilon=0.1)\n\n # Compute the gradients\n gradients = opt.compute_gradients(total_loss)\n\n # Apply the gradients\n train_op = opt.apply_gradients(gradients, global_step, name='train')\n\n # Add histograms for the trainable variables. i.e. the collection of variables created with Trainable=True\n for var in tf.trainable_variables():\n tf.summary.histogram(var.op.name, var)\n\n # Maintain average weights to smooth out training\n variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_avg_decay, global_step)\n\n # Applies the average to the variables in the trainable ops collection\n variable_averages_op = variable_averages.apply(tf.trainable_variables())\n\n with tf.control_dependencies([train_op, variable_averages_op]): # Wait until we apply the gradients\n dummy_op = tf.no_op(name='train') # Does nothing. 
placeholder to control the execution of the graph\n\n return dummy_op", "def D_loss_basic(self, netD, real, fake):\n # Real\n D_real = netD(real)\n D_real_loss = self.GANLoss(D_real, True, True)\n # fake\n D_fake = netD(fake)\n D_fake_loss = self.GANLoss(D_fake, False, True)\n # loss for discriminator\n D_loss = (D_real_loss + D_fake_loss) * 0.5\n # gradient penalty for wgan-gp\n if self.gan_mode == 'wgangp':\n gradient_penalty, gradients = base_function.cal_gradient_penalty(netD, real, fake)\n D_loss +=gradient_penalty\n\n D_loss = D_loss * self.loss_d_weight\n D_loss.backward()\n\n return D_loss", "def loss(self, X, y=None):\r\n mode = 'test' if y is None else 'train'\r\n\r\n if self.dropout_param is not None:\r\n self.dropout_param['mode'] = mode\r\n if self.use_batchnorm:\r\n for bn_param in self.bn_params:\r\n bn_param[mode] = mode\r\n\r\n\r\n W1, b1 = self.params['W1'], self.params['b1']\r\n W2, b2 = self.params['W2'], self.params['b2']\r\n W3, b3 = self.params['W3'], self.params['b3']\r\n gamma1, beta1 = self.params['gamma1'], self.params['beta1']\r\n gamma2, beta2 = self.params['gamma2'], self.params['beta2']\r\n # pass conv_param to the forward pass for the convolutional layer\r\n filter_size = W1.shape[2]\r\n conv_param = {'stride': 1, 'pad': int((filter_size - 1) / 2)}\r\n\r\n # pass pool_param to the forward pass for the max-pooling layer\r\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\r\n\r\n scores = None\r\n ############################################################################\r\n # TODO: Implement the forward pass for the three-layer convolutional net, #\r\n # computing the class scores for X and storing them in the scores #\r\n # variable. #\r\n ############################################################################\r\n alpha = 0.1\r\n csrp1, csrp1_cache = conv_sbn_lrelu_pool_forward(X, W1, b1, gamma1, beta1, self.bn_params[0], conv_param, pool_param, alpha)\r\n abr1, abr1_cache = affine_bn_lrelu_forward(csrp1, W2, b2, gamma2, beta2, self.bn_params[1], alpha)\r\n scores, out_cache = affine_forward(abr1, W3, b3)\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n if y is None:\r\n return scores\r\n\r\n loss, grads = 0, {}\r\n ############################################################################\r\n # TODO: Implement the backward pass for the three-layer convolutional net, #\r\n # storing the loss and gradients in the loss and grads variables. Compute #\r\n # data loss using softmax, and make sure that grads[k] holds the gradients #\r\n # for self.params[k]. Don't forget to add L2 regularization! 
#\r\n ############################################################################\r\n loss, dp = softmax_loss(scores, y)\r\n loss += 0.5 * self.reg * np.sum(\r\n np.sum(W1 ** 2) + np.sum(W2 ** 2) + np.sum(W3 ** 2)\r\n )\r\n dp, dw3, db3 = affine_backward(dp, out_cache)\r\n dp, dw2, db2, dgamma2, dbeta2 = affine_bn_lrelu_backward(dp, abr1_cache)\r\n dp, dw1, db1, dgamma1, dbeta1 = conv_sbn_lrelu_pool_backward(dp, csrp1_cache)\r\n grads['W1'] = dw1 + self.reg * W1\r\n grads['W2'] = dw2 + self.reg * W2\r\n grads['W3'] = dw3 + self.reg * W3\r\n grads['b1'] = db1\r\n grads['b2'] = db2\r\n grads['b3'] = db3\r\n grads['gamma2'] = dgamma2\r\n grads['gamma1'] = dgamma1\r\n grads['beta2'] = dbeta2\r\n grads['beta1'] = dbeta1\r\n \r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n return loss, grads", "def loss(self, X, y=None):\n\t\tmode = 'test' if y is None else 'train'\n\t\tif self.dropout_param is not None:\n\t\t\tself.dropout_param['mode'] = mode\n\t\tif self.use_batchnorm:\n\t\t\tfor bn_param in self.bn_params:\n\t\t\t\tbn_param[mode] = mode\n\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\tW5, b5 = self.params['W5'], self.params['b5']\n\t\t\n\t\tgamma1, beta1 = self.params['gamma1'], self.params['beta1']\n\t\tgamma2, beta2 = self.params['gamma2'], self.params['beta2']\n\t\tgamma3, beta3 = self.params['gamma3'], self.params['beta3']\t\n\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size1 = W1.shape[2]\n\t\tconv_param1 = {'stride': 1, 'pad': (filter_size1 - 1) / 2}\n\t\tfilter_size2 = W2.shape[2]\n\t\tconv_param2 = {'stride': 1, 'pad': (filter_size2 - 1) / 2}\n\t\t\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\t\t\n\t\tscores = None\n\t\n\t\t# Convolutional layers\t\n\t\tz1, cache1 = conv_relu_forward(X, W1, b1, conv_param1)\n\t\tz2, cache2 = conv_relu_pool_forward(z1, W2, b2, conv_param2, pool_param)\n\t\tz3, cache3 = spatial_batchnorm_forward(z2, gamma1, beta1, self.bn_params[1])\n\n\t\t# Fully Connected layers\n\t\tz4, cache4 = affine_relu_bn_forward(z3, W3, b3, gamma2, beta2, self.bn_params[2])\n\t\tz4, cache9 = dropout_forward(z4, self.dropout_params)\n\n\t\t# Output layer\n\t\tz6, cache6 = affine_forward(z4, W5, b5)\n\t\tz7, cache7 = batchnorm_forward(z6, gamma3, beta3, self.bn_params[3])\n\t\t#z8, cache8 = dropout_forward(z7, self.dropout_params)\n\t\tscores = z7\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W1'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W5'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W3'], 2).sum())\n\t\t\n\t\t#dx8 = dropout_backward(dout, cache8)\n\t\tdx7, grads['gamma3'], grads['beta3'] = batchnorm_backward(dout, cache7)\n\t\tdx6, grads['W5'], grads['b5'] = affine_backward(dx7, cache6)\n\t\tdx6 = dropout_backward(dx6, cache9)\n\t\tdx4, grads['W3'], grads['b3'], grads['gamma2'], grads['beta2'] = affine_relu_bn_backward(dx6, cache4)\n\t\t\n\t\tdx3, grads['gamma1'], grads['beta1'] = spatial_batchnorm_backward(dx4, cache3)\n\t\tdx2, grads['W2'], 
grads['b2'] = conv_relu_pool_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_backward(dx2, cache1)\n\t\t\n\t\treturn loss, grads", "def update(self, loss=None, inputs=None, targets=None, outputs=None):\n\n # TODO: add gradient accumulation\n\n self.optimizer.zero_grad(set_to_none=self.none_grad)\n\n if self.grad_scaler:\n self.grad_scaler.scale(loss).backward()\n self.grad_scaler.step(self.optimizer)\n\n if self.clip_grad:\n self.grad_scaler.unscale_(self.optimizer)\n self.clip_grad(self.model.parameters())\n self.grad_scaler.update()\n else:\n loss.backward()\n\n if self.clip_grad:\n self.clip_grad(self.model.parameters())\n\n self.optimizer.step()", "def worker(D,graph=None):\n\n if graph ==None:\n graph = tf.Graph()\n # Build Tensorflow graph which computes gradients of the model with one mini-batch of examples\n with graph.as_default():\n \n # Get input and labels for learning from D\n inputs, labels = D\n logits = mdnn.CNN_model(inputs,graph)\n \n # Calculate loss.\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,logits=logits))\n \n optimizer = tf.train.GradientDescentOptimizer(0.1)\n grads = optimizer.compute_gradients(loss)\n with tf.variable_scope(\"\",reuse=True):\n grads_var = {var.op.name:tf.Variable(tf.zeros(var.get_shape()),trainable=False,name=var.op.name+\"_grad\",collections=[\"W_grad\"]) for _,var in grads}\n train_op = [grads_var[var.op.name].assign(grad) for grad,var in grads]\n \n # Build an initialization operation.\n init = tf.global_variables_initializer()\n\n \n # Tensorflow op to update parameters from PS\n get_W = df.get_w(graph,\"W_global\")\n\n\n with tf.Session() as sess:\n #Initialize the TF variables\n sess.run([init])\n tf.train.start_queue_runners(sess=sess)\n iteration = 0\n s = sck.socket(sck.AF_INET, sck.SOCK_STREAM)\n s.connect((FLAGS.ip_PS, FLAGS.port))\n \n while iteration < FLAGS.iter_max:\n #Get the parameters from the PS\n com.send_msg(s,\"\",\"GET_W\")\n cmd,data= com.recv_msg(s)\n iteration,W= com.decode_variables(data)\n s.close()\n \n #Update the parameters\n sess.run(get_W,{key+\"_delta:0\":value for key,value in W.items()})\n \n #Compute gradients stored in Tensorflow variables\n inp,log,lab,loss_values,_ =sess.run([inputs,logits,labels,loss,train_op])\n\n print \"Loss\",loss_values\n \n #Encode the update with the local timer (iteration)\n update = com.encode_variables(sess,\"W_grad\",iteration,compression=FLAGS.compression_rate)\n \n #Push the update to PS\n s = sck.socket(sck.AF_INET, sck.SOCK_STREAM)\n s.connect((FLAGS.ip_PS, FLAGS.port))\n \n com.send_msg(s,update,\"PUSH\")\n print \"Worker\",FLAGS.id_worker,\" is closed\"", "def fully_connected_model(input_size, num_labels, num_hidden_nodes,\n valid_dataset, test_dataset, batch_size,\n learning_rate, beta = 0.0, dropout_prob = 0.0,\n exp_decay = None, method = 'gd'):\n def create_model(weights, inputs, labels = None):\n hidden_units = inputs\n num_hidden_layers = len(weights) // 2 - 1\n regularisation_term = tf.zeros([1])\n\n for l in range(num_hidden_layers):\n cur_weights = weights[2*l]\n cur_biases = weights[2*l + 1]\n\n hidden_units = tf.nn.relu(tf.matmul(hidden_units, cur_weights) + cur_biases)\n if labels is not None:\n # If labels are specified, the graph will be used for training,\n # so we apply dropout.\n hidden_units = tf.nn.dropout(hidden_units, 1 - dropout_prob)\n\n regularisation_term = regularisation_term + tf.nn.l2_loss(cur_weights)\n\n # Output layer.\n cur_weights = weights[-2]\n cur_biases = weights[-1]\n 
out_logits = tf.matmul(hidden_units, cur_weights) + cur_biases\n out_prob = tf.nn.softmax(out_logits)\n regularisation_term = regularisation_term + tf.nn.l2_loss(cur_weights)\n\n if labels is not None:\n # Only when training.\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(out_logits, labels))\n loss = loss + beta * regularisation_term\n return out_prob, loss\n\n return out_prob\n\n graph = tf.Graph()\n with graph.as_default():\n tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, input_size))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n\n # Variables.\n weights = create_fully_connected_weights(input_size, num_labels, num_hidden_nodes)\n\n # Training computation.\n train_prediction, loss = create_model(weights, tf_train_dataset, tf_train_labels)\n valid_prediction = create_model(weights, tf_valid_dataset)\n test_prediction = create_model(weights, tf_test_dataset)\n\n # Optimizer.\n global_step = tf.Variable(0)\n\n if exp_decay is not None:\n learning_rate = tf.train.exponential_decay(\n learning_rate, global_step,\n exp_decay['decay_steps'], exp_decay['decay_rate'], exp_decay['staircase'])\n\n optimizer = None\n if method == 'gd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(\n loss, global_step=global_step)\n elif method == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(\n loss, global_step=global_step)\n else:\n raise Exception('Unknown optimiser.')\n\n tf_graph = {\n 'graph': graph,\n 'data_ph': tf_train_dataset,\n 'labels_ph': tf_train_labels }\n tf_predictions = [train_prediction, valid_prediction, test_prediction]\n\n return tf_graph, optimizer, loss, tf_predictions", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.use_dropout:\n self.dropout_param['mode'] = mode\n if self.normalization=='batchnorm':\n for bn_param in self.bn_params:\n bn_param['mode'] = mode\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n # #\n # When using dropout, you'll need to pass self.dropout_param to each #\n # dropout forward pass. #\n # #\n # When using batch normalization, you'll need to pass self.bn_params[0] to #\n # the forward pass for the first batch normalization layer, pass #\n # self.bn_params[1] to the forward pass for the second batch normalization #\n # layer, etc. 
#\n ############################################################################\n if not self.use_dropout:\n if self.normalization is None: # {affine-relu} X (L-1) - affine - softmax\n cache, scores = self._AffRelu_Loss(X)\n elif self.normalization is \"batchnorm\":\n cache, scores = self._AffBatchRelu_Loss(X)\n elif self.normalization is \"layernorm\":\n cache, scores = self._AffLayerRelu_Loss(X)\n else:\n if self.normalization is None: # {affine-relu} X (L-1) - affine - softmax\n cache, scores = self._AffReluDrop_Loss(X)\n elif self.normalization is \"batchnorm\":\n cache, scores = self._AffBatchReluDrop_Loss(X)\n elif self.normalization is \"layernorm\":\n cache, scores = self._AffLayerReluDrop_Loss(X)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n \n ############################################################################\n # TODO: Implement the backward pass for the fully-connected net. Store the #\n # loss in the loss variable and gradients in the grads dictionary. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # When using batch/layer normalization, you don't need to regularize the scale #\n # and shift parameters. #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n \n loss, dscores = softmax_loss(scores, y)\n if not self.use_dropout:\n if self.normalization is None: # {affine-relu} X (L-1) - affine - softmax\n grads, l2_loss = self._AffRelu_Backprop(dscores, cache)\n loss += l2_loss\n elif self.normalization is \"batchnorm\":\n grads, l2_loss = self._AffBatchRelu_Backprop(dscores, cache)\n loss += l2_loss\n elif self.normalization is \"layernorm\":\n grads, l2_loss = self._AffLayerRelu_Backprop(dscores, cache)\n loss += l2_loss\n else:\n if self.normalization is None: # {affine-relu} X (L-1) - affine - softmax\n grads, l2_loss = self._AffReluDrop_Backprop(dscores, cache)\n loss += l2_loss\n elif self.normalization is \"batchnorm\":\n grads, l2_loss = self._AffBatchReluDrop_Backprop(dscores, cache)\n loss += l2_loss\n elif self.normalization is \"layernorm\":\n grads, l2_loss = self._AffLayerReluDrop_Backprop(dscores, cache)\n loss += l2_loss\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def __cnnNetFn(self, input, is_training):\n with tf.variable_scope('CNN'):\n conv1 = tf.layers.conv2d(input, 32, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv1_bn = tf.layers.batch_normalization(conv1)\n conv2 = tf.layers.conv2d(conv1_bn, 32, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv2_bn = tf.layers.batch_normalization(conv2)\n conv2_pool = tf.layers.max_pooling2d(conv2_bn, 2, 2, padding='SAME')\n conv2_drop = tf.layers.dropout(conv2_pool, rate=0.2, training=is_training)\n\n conv3 = 
tf.layers.conv2d(conv2_drop, 64, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv3_bn = tf.layers.batch_normalization(conv3)\n conv4 = tf.layers.conv2d(conv3_bn, 64, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv4_bn = tf.layers.batch_normalization(conv4)\n conv4_pool = tf.layers.max_pooling2d(conv4_bn, 2, 2, padding='SAME')\n conv4_drop = tf.layers.dropout(conv4_pool, rate=0.3, training=is_training)\n\n conv5 = tf.layers.conv2d(conv4_drop, 128, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv5_bn = tf.layers.batch_normalization(conv5)\n conv6 = tf.layers.conv2d(conv5_bn, 128, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv6_pool = tf.layers.max_pooling2d(conv6, 2, 2, padding='SAME')\n\n csnn_features = tf.stop_gradient(self.__csnn.getTrainOp(input))\n csnn_features = tf.identity(csnn_features)\n if self.__use_csnn:\n joint_features = tf.concat((conv6_pool, csnn_features), axis=3)\n else:\n joint_features = conv6_pool\n\n conv6_bn = tf.layers.batch_normalization(joint_features)\n\n conv7 = tf.layers.conv2d(conv6_bn, 256, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv7_bn = tf.layers.batch_normalization(conv7)\n conv8 = tf.layers.conv2d(conv7_bn, 256, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv8_bn = tf.layers.batch_normalization(conv8)\n conv8_pool = tf.layers.max_pooling2d(conv8_bn, 2, 2, padding='SAME')\n conv8_drop = tf.layers.dropout(conv8_pool, rate=0.4, training=is_training)\n\n flat = tf.contrib.layers.flatten(conv8_drop)\n logits = tf.layers.dense(flat, self.__num_classes)\n return logits, csnn_features", "def test_network_fine_tuning_loss(self):\n height = 128\n width = 128\n num_features = 3\n batch_size = 2\n\n # Create the graph.\n input_image_a = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n input_image_b = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n final_flow, previous_flows = self.pwc_net.get_forward(input_image_a, input_image_b)\n\n image_a = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_a[:, 10:height - 10, 10:width - 10, :] = 1.0\n image_b = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_b[:, 5:height - 5, 5:width - 5, :] = 1.0\n dummy_flow = np.ones(shape=[batch_size, height, width, 2], dtype=np.float32)\n\n self.sess.run(tf.global_variables_initializer())\n trainable_vars = tf.trainable_variables(scope='pwc_net')\n\n # Check that the gradients are flowing.\n grad_op = tf.gradients(tf.reduce_mean(final_flow), trainable_vars + [input_image_a, input_image_b])\n for grad in grad_op:\n self.assertNotEqual(grad, None)\n\n # Get the losses.\n gt_placeholder = tf.placeholder(shape=[None, height, width, 2], dtype=tf.float32)\n training_loss = self.pwc_net.get_fine_tuning_loss(previous_flows, gt_placeholder)\n # Check the loss.\n loss_value = self.sess.run(training_loss, feed_dict={input_image_a: image_a, input_image_b: image_b,\n gt_placeholder: dummy_flow})\n self.assertNotAlmostEqual(loss_value[0], 0.0)\n\n # Check the gradients.\n loss_grad_ops = tf.gradients(training_loss, trainable_vars + 
[input_image_a, input_image_b])\n self.assertGreater(len(loss_grad_ops), 0)\n for grad in loss_grad_ops:\n self.assertNotEqual(grad, None)\n grads = self.sess.run(loss_grad_ops, feed_dict={input_image_a: image_a, input_image_b: image_b,\n gt_placeholder: dummy_flow})\n for grad in grads:\n self.assertNotAlmostEqual(0.0, np.sum(grad))", "def loss(self, X, y=None, reg=0.0):\n\n self.layers = []\n layers = self.layers\n layers.append(X)\n\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n H, C = W2.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n mid = np.maximum(0, X.dot(W1) + b1.reshape(1, -1)) # activation\n scores = mid.dot(W2) + b2.reshape(1, -1)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. So that your results match ours, multiply the #\n # regularization loss by 0.5 #\n #############################################################################\n exp_score = np.exp(scores)\n exp_score_sum = exp_score.sum(axis=1)\n correct_score = exp_score[np.arange(N), y]\n probability = (correct_score / exp_score_sum).reshape(-1, 1)\n loss = -np.log(probability).sum()\n\n loss /= N\n loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. 
For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n des = np.tile((-correct_score / np.square(exp_score_sum)).reshape(-1, 1), (1, C))\n des[np.arange(N), y] += 1.0 / exp_score_sum\n dsoftmax = des * (-np.ones((mid.shape[0], 1)) / probability) * np.exp(scores)\n\n # W2\n grads['W2'] = mid.T.dot(dsoftmax)\n grads['W2'] /= N\n grads['W2'] += reg * W2\n\n # b2\n grads['b2'] = np.ones_like(b2.reshape(1, -1)) * dsoftmax\n grads['b2'] = np.mean(grads['b2'], axis=0).reshape(-1)\n\n # W1\n binary = np.zeros_like(mid)\n binary[mid > 0] = 1\n grads['W1'] = X.T.dot(binary * dsoftmax.dot(W2.T)) # chain rule, compute dmid/dW1 * dscore/dmid * dsoftmax\n grads['W1'] /= N\n grads['W1'] += reg * W1\n\n # b1\n grads['b1'] = np.ones_like(b1.reshape(1, -1)) * binary * dsoftmax.dot(W2.T)\n grads['b1'] = np.mean(grads['b1'], axis=0).reshape(-1)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def loss_total(self, mask):\n\n def loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. \n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss", "def optimizer(self):\n \n # taken from https://github.com/germain-hug/Deep-RL-Keras/blob/master/DDPG/actor.py\n # I believe this is a work around to get keras to learn **given a gradient**\n # As opposed to bunch of x_train, y_trains?\n \n #Inputs\n state_pl = self.model.input\n action_grads_pl = K.placeholder(shape=(None,1)) \n \n #Find grad_(pars) mu(state)\n mu_pl = self.model.output\n pars = self.model.trainable_weights\n pars_grad_mu = tf.gradients(mu_pl, pars, -action_grads_pl)\n \n #grads_and_pars = zip(pars_grad_mu, pars) #keras needs this form\n #updates = tf.train.AdamOptimizer(self.lr).apply_gradients(grads_and_pars)\n\n # The gradients as defined above work on my mac, but not ubuntu.\n # Below I am trying a workaround. I changed the keras source code \n # To get this working. Specifically, I make the optimizer.get_updates()\n # function accept custom gradients. 
It was easy to do.\n \n opt = Adam(self.lr)\n loss = pars_grad_mu #placeholder, I won't use it\n updates = opt.get_updates(loss = loss, params = pars, grads = pars_grad_mu)\n\n return K.function(inputs = [state_pl, action_grads_pl], outputs = [], updates = updates)\n #return K.function(inputs = [state_pl, action_grads_pl], outputs = [updates])", "def train(self, features, labels, optimizer, loss_scale=None):\n loss, gradients = self.compute_gradients(\n features,\n labels,\n optimizer,\n loss_scale=loss_scale,\n )\n optimizer.apply_gradients(list(zip(gradients, self.trainable_weights)))\n return loss", "def gradient(self, node, output_grad):\r\n return [output_grad]\r\n \"\"\"higher accuracy notice notice here\"\"\"", "def backward_val(self):\n self.loss_similarity = [NCC(warped_img, self.batch_fixed) for warped_img in self.warped_img_list]\n self.loss_similarity_mean = torch.mean(torch.stack(self.loss_similarity))\n self.loss_smooth = [GradNorm(disp_map) for disp_map in self.disp_list]\n self.loss_smooth_mean = torch.mean(torch.stack(self.loss_smooth))\n if len(self.strain_compensated_list) > 1:\n self.loss_consistency_strain = [NCC(self.strain_compensated_list[t-1][:,:,143:-143,:], self.strain_compensated_list[t][:,:,143:-143,:]) for t in range(1, len(self.strain_compensated_list))]\n self.loss_consistency_strain_mean = torch.mean(torch.stack(self.loss_consistency_strain))\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha + (1 - self.loss_consistency_strain_mean) * self.beta\n else:\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha", "def _add_train_op(self):\n self._lr_rate = tf.maximum(\n self._hps.min_lr, # min_lr_rate.\n tf.train.exponential_decay(self._hps.lr, self.global_step, 30000, 0.98))\n \n \n # Take gradients of the trainable variables w.r.t. 
the loss function to minimize\n loss_to_minimize = self._total_loss if self._hps.coverage else self._loss\n tvars = tf.trainable_variables()\n gradients = tf.gradients(loss_to_minimize, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n\n # Clip the gradients\n with tf.device(self._get_gpu(self._num_gpus-1)):\n grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)\n\n # Add a summary\n tf.summary.scalar('global_norm', global_norm)\n\n # Apply adagrad optimizer\n if self._hps.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n self._hps.lr, initial_accumulator_value=self._hps.adagrad_init_acc)\n\n elif self._hps.optimizer == 'adam': \n # Adam\n optimizer = tf.train.AdamOptimizer()\n \n elif self._hps.optimizer == 'sgd':\n # SGD\n optimizer = tf.train.GradientDescentOptimizer(self._lr_rate)\n tf.summary.scalar('learning rate', self._lr_rate)\n \n else:\n raise Exception('Invalid optimizer: ', self._hps.optimizer)\n\n with tf.device(self._get_gpu(self._num_gpus-1)):\n self._train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=self.global_step, name='train_step')", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w_new = w - gamma*grad\n #grad is for debugging purpose\n return loss, w_new,grad", "def _batch_gradient_descent(self, X, y, lr, epochs):\n\n # Initialize the bias and weights.\n _, n = X.shape\n self.bias = 0\n self.weights = np.random.normal(size=n)\n\n for i in range(epochs):\n # Calculate and sum the gradient delta of each sample\n grad_bias, grad_weights = self._get_gradient(X, y)\n\n # Show the gradient of each epoch.\n grad = (grad_bias + grad_weights.mean()) / 2\n print(\"Epochs %d gradient %.3f\" % (i + 1, grad), flush=True)\n\n # Update the bias and weight by gradient of current epoch\n self.bias += lr * grad_bias\n self.weights += lr * grad_weights", "def backward_G(self):\n # Calculate regularzation loss to make transformed feature and target image feature in the same latent space\n self.loss_reg_gen = self.loss_reg * self.opt.lambda_regularization\n\n # Calculate l1 loss \n loss_app_gen = self.L1loss(self.img_gen, self.input_P2)\n self.loss_app_gen = loss_app_gen * self.opt.lambda_rec \n \n # parsing loss\n label_P2 = self.label_P2.squeeze(1).long()\n #print(self.input_SPL2.min(), self.input_SPL2.max(), self.parsav.min(), self.parsav.max())\n self.loss_par = self.parLoss(self.parsav,label_P2)# * 20. \n self.loss_par1 = self.L1loss(self.parsav, self.input_SPL2) * 100 \n\n # Calculate GAN loss\n base_function._freeze(self.net_D)\n D_fake = self.net_D(self.img_gen)\n self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g\n\n # Calculate perceptual loss\n loss_content_gen, loss_style_gen = self.Vggloss(self.img_gen, self.input_P2) \n self.loss_style_gen = loss_style_gen*self.opt.lambda_style\n self.loss_content_gen = loss_content_gen*self.opt.lambda_content\n\n total_loss = 0\n\n for name in self.loss_names:\n if name != 'dis_img_gen':\n #print(getattr(self, \"loss_\" + name))\n total_loss += getattr(self, \"loss_\" + name)\n total_loss.backward()", "def loss(self, X, y=None, lambda_reg=0.0):\n \n # Unpack variables from the params dictionary\n N, D = X.shape\n\n # Compute the forward pass\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. 
#\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n scores, cache_list = self.network_forward(X)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n \n #############################################################################\n # TODO: Compute for the loss. This should include L2 regularization for #\n # the weights of each layer. #\n #############################################################################\n loss_softmax, dloss_softmax = self.softmax_cross_entropy_loss(scores, y)\n loss = loss_softmax\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n \n #############################################################################\n # TODO: Compute the derivatives of the weights and biases. Store the #\n # results in the grads dictionary. For example, grads['W1'] should store #\n # the gradient on the weights W of the first layer, and be a matrix of #\n # same size. #\n #############################################################################\n grads = self.network_backward(dloss_softmax, cache_list)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def SoftEntropy(nn_last_layer, correct_label, learning_rate): \n \n loss = tf2.math.reduce_sum( tf2.nn.softmax_cross_entropy_with_logits(tf2.stop_gradient(correct_label), nn_last_layer))\n \n #obtain training operation\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate, epsilon = 1e-8) #Note default value of epsilon 1e-8 results in instability after few epochs\n \n #clip the gradients\n gvs = optimizer.compute_gradients(loss)\n #capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]\n training_operation = optimizer.apply_gradients(gvs)\n\n return training_operation, loss", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n output_weight = CP.cp.random.randn(*output.shape)\n\n def helper_func(x):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n loss = CP.cp.sum(output * output_weight)\n d_out = CP.cp.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def run(self, arch : 'str'):\n if not hasattr(self, arch):\n print(\"Unrecognized neural net type %s\" % arch)\n sys.exit(1)\n\n # placeholders for feature vector and labels\n x = tf.placeholder(tf.float32, shape=[None, 784])\n y_ = tf.placeholder(tf.float32, shape=[None, 10])\n \n # dropout probability\n self.keep_prob = tf.placeholder(tf.float32)\n\n # reshape input image\n self.x_image = tf.reshape(x, [-1,28,28,1])\n \n # get the output node from the architecture-defining object\n obj = getattr(self, arch)()\n y_conv = obj.arch(self)\n\n # define the loss function here (TODO: parameterize?). 
We use cross-entropy\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))\n\n # define the gradient update method here (TODO: parameterize?)\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\n correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # summary-op for tensorboard\n summary_op = tf.scalar_summary(\"training accuracy\", accuracy)\n self.summary_writer.add_graph(self.sess.graph)\n\n # init tensorflow variables \n self.sess.run(tf.initialize_all_variables())\n\n # stochastic gradient descent (mini-batch training)\n # TODO: parameterize numbers used in here)\n for i in range(500):\n batch = self.data.train.next_batch(50)\n\n # gather summary and write, every 100 steps\n if i%100 == 0:\n\n summary_op_str = self.sess.run(summary_op, feed_dict={\n x:batch[0], y_: batch[1], self.keep_prob: 1.0})\n self.summary_writer.add_summary(summary_op_str, i)\n print(summary_op_str)\n\n self.sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], self.keep_prob: 0.5})\n\n # get test accuracy\n print(\"test accuracy %g\"%(self.sess.run(accuracy, feed_dict={\n x: self.data.test.images, y_: self.data.test.labels, self.keep_prob: 1.0})))\n\n self.sess.close()", "def backPropagate(self):\n\n # application of the chain rule to find derivative of the loss function with respect to weights2 and weights1\n d_weights2 = np.dot(self.layer1.T, (2*(self.y - self.output) * sigmoid_derivative(self.output)))\n d_weights1 = np.dot(self.input.T, (np.dot(2*(self.y - self.output) * sigmoid_derivative(self.output), self.weights2.T) * sigmoid_derivative(self.layer1)))\n\n # update the weights with the derivative (slope) of the loss function\n self.weights1 += d_weights1\n self.weights2 += d_weights2", "def policy_gradient(self, loader):\n\n net = nn.DataParallel(self.agent, device_ids=self.gpus)\n total_steps = len(loader)\n \n for step_idx, src in enumerate(tqdm(loader, desc='Calculating policy gradient...', leave=False)):\n\n # Decode fragments and smiles, and get loss\n frags, smiles, loss = self.getBatchOutputs(net, src)\n \n # Get rewards\n reward = self.env.getRewards(smiles, frags=frags)\n\n # Filter out molecules with multiple fragments by setting reward to 0\n if self.no_multifrag_smiles:\n reward = np.array([r if s.count('.') == 0 else [0] for s,r in zip(smiles, reward)])\n reward = torch.Tensor(reward).to(self.device)\n \n # Train model with policy gradient\n self.optim.zero_grad()\n loss = loss * ( reward - self.beta )\n loss = -loss.mean()\n loss.backward()\n self.optim.step()\n\n self.monitor.saveProgress(step_idx, None, total_steps, None, loss=loss.item())\n \n return loss.item()", "def compute_bp_update(self, loss, retain_graph=False):\n\n if self.bias is not None:\n grads = torch.autograd.grad(loss, [self.weights, self.bias],\n retain_graph=retain_graph)\n else:\n grads = torch.autograd.grad(loss, self.weights,\n retain_graph=retain_graph)\n\n return grads", "def verify_gradients(self):\n\n print 'WARNING: calling verify_gradients reinitializes the learner'\n\n rng = np.random.mtrand.RandomState(1234)\n\n self.seed = 1234\n self.sizes = [4, 5]\n self.initialize(20, 3)\n example = (rng.rand(20) < 0.5, 2)\n input, target = example\n epsilon = 1e-6\n self.lr = 0.1\n self.decrease_constant = 0\n\n self.fprop(input, target)\n self.bprop(input, target) # compute gradients\n\n import copy\n emp_grad_weights = copy.deepcopy(self.weights)\n\n for h in 
range(len(self.weights)):\n for i in range(self.weights[h].shape[0]):\n for j in range(self.weights[h].shape[1]):\n self.weights[h][i, j] += epsilon\n a = self.fprop(input, target)\n self.weights[h][i, j] -= epsilon\n\n self.weights[h][i, j] -= epsilon\n b = self.fprop(input, target)\n self.weights[h][i, j] += epsilon\n\n emp_grad_weights[h][i, j] = (a - b) / (2. * epsilon)\n\n print 'grad_weights[0] diff.:', np.sum(np.abs(self.grad_weights[0].ravel() - emp_grad_weights[0].ravel())) / \\\n self.weights[0].ravel().shape[0]\n print 'grad_weights[1] diff.:', np.sum(np.abs(self.grad_weights[1].ravel() - emp_grad_weights[1].ravel())) / \\\n self.weights[1].ravel().shape[0]\n print 'grad_weights[2] diff.:', np.sum(np.abs(self.grad_weights[2].ravel() - emp_grad_weights[2].ravel())) / \\\n self.weights[2].ravel().shape[0]\n\n emp_grad_biases = copy.deepcopy(self.biases)\n for h in range(len(self.biases)):\n for i in range(self.biases[h].shape[0]):\n self.biases[h][i] += epsilon\n a = self.fprop(input, target)\n self.biases[h][i] -= epsilon\n\n self.biases[h][i] -= epsilon\n b = self.fprop(input, target)\n self.biases[h][i] += epsilon\n\n emp_grad_biases[h][i] = (a - b) / (2. * epsilon)\n\n print 'grad_biases[0] diff.:', np.sum(np.abs(self.grad_biases[0].ravel() - emp_grad_biases[0].ravel())) / \\\n self.biases[0].ravel().shape[0]\n print 'grad_biases[1] diff.:', np.sum(np.abs(self.grad_biases[1].ravel() - emp_grad_biases[1].ravel())) / \\\n self.biases[1].ravel().shape[0]\n print 'grad_biases[2] diff.:', np.sum(np.abs(self.grad_biases[2].ravel() - emp_grad_biases[2].ravel())) / \\\n self.biases[2].ravel().shape[0]", "def _create_loss_op(self):\n # 1.) The reconstruction loss, which forces the NN towards reconstructing more accurately the\n # given input. This function is configurable, but usually it is the Bernoulli negative log-likelihood.\n if self.cost_function == 'abs':\n reconstr_loss = tf.reduce_sum(tf.abs(self.x_decoded - self.x_in), 1)\n elif self.cost_function in ('mse', 'l2', 'square'):\n reconstr_loss = tf.reduce_sum(tf.squared_difference(self.x_in, self.x_decoded), 1)\n elif self.cost_function in ('xentropy', 'log'):\n reconstr_loss = \\\n -tf.reduce_sum(self.x_in * tf.log(1e-10 + self.x_decoded)\n + (1 - self.x_in) * tf.log(1e-10 + 1 - self.x_decoded),\n 1)\n else:\n raise ValueError(self.cost_function, \"Unknown cost function name!\")\n\n # 2.) The latent loss, which is defined as the Kullback Leibler divergence\n ## between the distribution in latent space induced by the encoder on\n # the data and some prior. This acts as a kind of regularizer.\n # This can be interpreted as the number of \"nats\" required\n # for transmitting the the latent space distribution given\n # the prior.\n latent_loss = -0.5 * tf.reduce_sum(1. 
+ self.z_log_sigma_sq\n - tf.square(self.z_mean)\n - tf.exp(self.z_log_sigma_sq), 1)\n\n self.loss_op = tf.reduce_mean(reconstr_loss + latent_loss) # average over batch\n tf.add_to_collection(\"losses\", self.loss_op)\n\n if self.learning_rate is not None:\n global_step = tf.train.get_or_create_global_step()\n self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(\n self.loss_op,\n global_step=global_step,\n var_list=tf.get_collection(self.training_scope) if self.training_scope is not None else None)\n\n tf.add_to_collection(\"train_ops\", self.train_op)\n tf_logging.info(\"Added AdamOptimizer with learning rate: %.8f\" % self.learning_rate)\n\n tf.summary.scalar(\"latent_loss\", tf.reduce_mean(latent_loss))\n tf.summary.scalar(\"reconstruction_loss\", tf.reduce_mean(reconstr_loss))\n tf.summary.scalar(\"vae_loss\", self.loss_op)", "def update(network: nn.Module, data: DataLoader, loss: nn.Module, \n opt: optim.Optimizer) -> list:\n error_list = []\n counter = 0\n for x, y in data:\n \n print(x.float()[0])\n pred = network(x.float())\n print(pred, y.float())\n\n le = loss(pred, y.float())\n error_list.append(le)\n opt.zero_grad()\n le.backward()\n opt.step()\n counter += 1\n \n print(\"Loss: \", torch.mean(torch.tensor(error_list)) )\n \n return torch.tensor(error_list)", "def train(self, num_epochs: int):\n learn_rate = 0.02\n\n images, labels = self._mn_data.load_training()\n indices = [i for i in range(len(images))]\n\n for epoch in range(num_epochs):\n random.shuffle(indices) # Avoids modifying the actual lists\n epoch_cost = 0\n i = 0\n\n # Go through the training data in batches\n while i < len(indices):\n print(i, \"---------------------------------------------------------\")\n\n if i >= 800:\n break\n\n start = i\n end = i + batch_size\n batch_indices = indices[start:end]\n\n dw = [[[0 for _ in range(perceptron.size_w())] for perceptron in layer] for layer in self._network]\n db = [[0 for _ in layer] for layer in self._network]\n\n # Take a single image from the batch\n for index in batch_indices:\n # print(\"ex\")\n result = self.feed_forward(images[index])\n epoch_cost += self.cost(result, labels[index]) # Creates self._desired_changes\n\n # Backpropagate starting from the last (output) layer\n for j in range(len(self._network)-1, -1, -1):\n layer = self._network[j]\n prev_act_values = self._layer_inputs[j]\n function_name = layer[0].get_activation().name()\n\n if j > 0:\n next_desired_changes = [0.0 for _ in self._network[j-1]]\n else:\n next_desired_changes = None\n\n if function_name == \"relu\":\n leakage = self._relu.get_leakage()\n\n # Look at each perceptron\n for k in range(len(layer)):\n perceptron = layer[k]\n dc_da = self._desired_changes[k]\n\n if function_name == \"sigmoid\":\n dc_da *= self._sigmoid(perceptron.z) * (1 - self._sigmoid(perceptron.z))\n # print(perceptron.z, sig_delta)\n # print(dc_da)\n db[j][k] -= dc_da * learn_rate\n\n # For each weight\n for l in range(len(perceptron.weights)):\n dw[j][k][l] -= dc_da * prev_act_values[l] * learn_rate\n\n if next_desired_changes:\n next_desired_changes[l] += dc_da * perceptron.weights[l]\n\n elif function_name == \"relu\":\n dc_da *= leakage if perceptron.z < 0 else 1\n db[j][k] -= dc_da * learn_rate\n\n # For each weight\n for l in range(len(perceptron.weights)):\n dw[j][k][l] -= dc_da * prev_act_values[l] * learn_rate\n\n if next_desired_changes:\n next_desired_changes[l] += dc_da * perceptron.weights[l]\n\n # print(\"dcda\", dc_da)\n\n if next_desired_changes:\n # print(\"nd\", 
next_desired_changes)\n self._desired_changes = next_desired_changes\n\n # End of sample image loop\n # print(dw[1:])\n # break\n\n # Update weights and biases\n for j in range(len(self._network)):\n layer = self._network[j]\n\n for k in range(len(layer)):\n perceptron = layer[k]\n\n perceptron.change_weights_and_bias(dw[j][k], db[j][k])\n\n # print(dw[1:])\n # print(db)\n\n i += batch_size\n\n print(\"Epoch {} completed out of {} with loss {}\".format(epoch + 1, num_epochs, epoch_cost))", "def update(self):\n\n # Update W (gradient should be up-to-date)\n _projected_step(self.W, self.gW, 1.0 / self.lipschitz_W())\n\n # Update H (need to recompute residuals since W was updated).\n self.cache_resids()\n self.cache_gH()\n _projected_step(self.H, self.gH, self.step_size)\n\n # Update residuals and gradient computation for W (for next iteration).\n self.cache_resids()\n self.cache_gW()\n\n # Return loss\n return self.loss", "def train(X_train, y_train, X_test, y_test, net):\n \n # convert X, y to tensors:\n X_train = torch.tensor(X_train, dtype=torch.float32)\n y_train = torch.tensor(y_train, dtype=torch.float32)\n \n X_test = torch.tensor(X_test, dtype=torch.float32)\n y_test = torch.tensor(y_test, dtype=torch.float32)\n\n # iterator:\n train_set = TensorDataset(X_train, y_train)\n train_loader = DataLoader(train_set, batch_size, shuffle=True)\n\n test_set = TensorDataset(X_test, y_test)\n test_loader = DataLoader(test_set, batch_size, shuffle=True)\n\n # optimizer:\n optimizer = torch.optim.Adam(net.parameters(), lr=lr)\n loss = nn.MSELoss()\n\n # loss accumulator:\n time_line = []\n train_metric = []\n test_metric = []\n\n # loop:\n for epoch in range(epochs):\n # update parameters:\n for Xb, yb in train_loader:\n train_ls = loss(net(Xb), yb)\n optimizer.zero_grad()\n train_ls.backward()\n optimizer.step()\n # update train and test losses:\n with torch.no_grad():\n if not epoch % 50:\n time_line.append(epoch)\n metric = 0\n for Xb, yb in train_loader:\n metric += loss(net(Xb), yb) / batch_size\n train_metric.append(metric)\n metric = 0\n for Xb, yb in test_loader:\n metric += loss(net(Xb), yb) / batch_size\n test_metric.append(metric)\n # verbose:\n print('Epoch: ', epoch)\n\n # final report of the losses: \n print('Train loss.....{0:6.3f}'.format(train_metric[-1]))\n print('Test loss......{0:6.3f}'.format(test_metric[-1]))\n\n # plot losses with respect to epochs:\n plt.plot(time_line, train_metric, color='b')\n plt.plot(time_line, test_metric, color='r')\n plt.show()", "def run_code_for_training_with_CrossEntropy_and_BCE_Losses(self, net):\n filename_for_out1 = \"performance_numbers_\" + str(self.dl_studio.epochs) + \"label.txt\"\n filename_for_out2 = \"performance_numbers_\" + str(self.dl_studio.epochs) + \"regres.txt\"\n FILE1 = open(filename_for_out1, 'w')\n FILE2 = open(filename_for_out2, 'w')\n net = copy.deepcopy(net)\n net = net.to(self.dl_studio.device)\n criterion1 = nn.CrossEntropyLoss()\n# criterion2 = self.dl_studio.DetectAndLocalize.IOULoss(self.dl_studio.batch_size)\n criterion2 = nn.BCELoss()\n optimizer = optim.SGD(net.parameters(), \n lr=self.dl_studio.learning_rate, momentum=self.dl_studio.momentum)\n for epoch in range(self.dl_studio.epochs): \n running_loss_labeling = 0.0\n running_loss_regression = 0.0 \n for i, data in enumerate(self.train_dataloader):\n gt_too_small = False\n inputs, bbox_gt, labels = data['image'], data['bbox'], data['label']\n if self.dl_studio.debug_train and i % 1000 == 999:\n print(\"\\n\\n[iter=%d:] Ground Truth: \" % (i+1) + \n ' '.join('%5s' 
% self.dataserver_train.class_labels[labels[j].item()] for j in range(self.dl_studio.batch_size)))\n inputs = inputs.to(self.dl_studio.device)\n labels = labels.to(self.dl_studio.device)\n bbox_gt = bbox_gt.to(self.dl_studio.device)\n optimizer.zero_grad()\n outputs = net(inputs)\n outputs_label = outputs[0]\n bbox_pred = outputs[1]\n if self.dl_studio.debug_train and i % 500 == 499:\n inputs_copy = inputs.detach().clone()\n inputs_copy = inputs_copy.cpu()\n bbox_pc = bbox_pred.detach().clone()\n bbox_pc[bbox_pc<0] = 0\n bbox_pc[bbox_pc>31] = 31\n _, predicted = torch.max(outputs_label.data, 1)\n print(\"[iter=%d:] Predicted Labels: \" % (i+1) + \n ' '.join('%10s' % self.dataserver_train.class_labels[predicted[j].item()] \n for j in range(self.dl_studio.batch_size)))\n for idx in range(self.dl_studio.batch_size):\n i1 = int(bbox_gt[idx][1])\n i2 = int(bbox_gt[idx][3])\n j1 = int(bbox_gt[idx][0])\n j2 = int(bbox_gt[idx][2])\n k1 = int(bbox_pc[idx][1])\n k2 = int(bbox_pc[idx][3])\n l1 = int(bbox_pc[idx][0])\n l2 = int(bbox_pc[idx][2])\n print(\" gt_bb: [%d,%d,%d,%d]\"%(j1,i1,j2,i2))\n print(\" pred_bb: [%d,%d,%d,%d]\"%(l1,k1,l2,k2))\n inputs_copy[idx,0,i1:i2,j1] = 255\n inputs_copy[idx,0,i1:i2,j2] = 255\n inputs_copy[idx,0,i1,j1:j2] = 255\n inputs_copy[idx,0,i2,j1:j2] = 255\n inputs_copy[idx,2,k1:k2,l1] = 255 \n inputs_copy[idx,2,k1:k2,l2] = 255\n inputs_copy[idx,2,k1,l1:l2] = 255\n inputs_copy[idx,2,k2,l1:l2] = 255\n self.dl_studio.display_tensor_as_image(\n torchvision.utils.make_grid(inputs_copy, normalize=True),\n \"see terminal for TRAINING results at iter=%d\" % (i+1))\n mask_regress = torch.zeros(self.dl_studio.batch_size,32,32,requires_grad=False)\n mask_gt = torch.zeros(self.dl_studio.batch_size, 32,32)\n for k,out_regres in enumerate(bbox_pred):\n x1,y1,x2,y2 = bbox_pred[k].tolist()\n x1_gt,y1_gt,x2_gt,y2_gt = bbox_gt[k].tolist()\n x1,y1,x2,y2 = [int(item) if item >0 else 0 for item in (x1,y1,x2,y2)]\n x1_gt,y1_gt,x2_gt,y2_gt = [int(item) if item>0 else 0 for item in (x1_gt,y1_gt,x2_gt,y2_gt)]\n if abs(x1_gt - x2_gt)<5 or abs(y1_gt-y2_gt) < 5: gt_too_small = True\n mask_regress_np = np.zeros((32,32), dtype=bool)\n mask_gt_np = np.zeros((32,32), dtype=bool)\n mask_regress_np[y1:y2,x1:x2] = 1\n mask_gt_np[y1_gt:y2_gt, x1_gt:x2_gt] = 1\n mask_regress[k,:,:] = torch.from_numpy(mask_regress_np)\n mask_regress.reqiures_grad=True\n mask_gt[k,:,:] = torch.from_numpy(mask_gt_np)\n mask_gt.reqiures_grad=True \n loss_labeling = criterion1(outputs_label, labels)\n loss_labeling.backward(retain_graph=True) \n loss_regression = criterion2(mask_regress, mask_gt)\n loss_regression.requires_grad = True\n loss_regression.backward()\n optimizer.step()\n running_loss_labeling += loss_labeling.item() \n running_loss_regression += loss_regression.item() \n if i % 1000 == 999: \n avg_loss_labeling = running_loss_labeling / float(1000)\n avg_loss_regression = running_loss_regression / float(1000)\n print(\"[epoch:%d, batch:%5d] loss_labeling: %.3f loss_regression: %.3f \" % (epoch + 1, i + 1, avg_loss_labeling, avg_loss_regression))\n FILE1.write(\"%.3f\\n\" % avg_loss_labeling)\n FILE1.flush()\n FILE2.write(\"%.3f\\n\" % avg_loss_regression)\n FILE2.flush()\n running_loss_labeling = 0.0\n running_loss_regression = 0.0\n print(\"\\nFinished Training\\n\")\n self.save_model(net)" ]
[ "0.7155436", "0.6939661", "0.685114", "0.68466926", "0.68327856", "0.67759883", "0.6716115", "0.66500854", "0.662701", "0.66258705", "0.660631", "0.65530854", "0.65456945", "0.6514191", "0.6513278", "0.6499993", "0.6499993", "0.6494454", "0.6474171", "0.6471904", "0.64706767", "0.64699215", "0.6407085", "0.639998", "0.6371353", "0.6371325", "0.63648796", "0.6362781", "0.635086", "0.63299835", "0.63286054", "0.6321742", "0.6311118", "0.6303046", "0.6292093", "0.6288903", "0.62879723", "0.628514", "0.62725353", "0.6264534", "0.6262274", "0.6259101", "0.62562335", "0.6254723", "0.62225384", "0.62220407", "0.6217594", "0.6213984", "0.62075037", "0.6206612", "0.620414", "0.61957824", "0.6189516", "0.61873925", "0.6185612", "0.6185347", "0.6183471", "0.6176331", "0.6175878", "0.6172858", "0.61725134", "0.616729", "0.6163433", "0.6163367", "0.61604106", "0.6155571", "0.61545783", "0.61443555", "0.6143194", "0.61431026", "0.6126707", "0.61259085", "0.61241734", "0.61206514", "0.6107361", "0.61025673", "0.60986465", "0.60931313", "0.6086483", "0.60749304", "0.60748893", "0.6069296", "0.60484076", "0.6039241", "0.6035911", "0.60268795", "0.60209024", "0.60203034", "0.60201365", "0.6020052", "0.6010823", "0.60090125", "0.60013086", "0.59933025", "0.59902334", "0.5988095", "0.5987372", "0.5984102", "0.5983399", "0.59820974" ]
0.6169516
61
n = 2  1x2
0 1

n = 3  2x2
0 1
2 x

n = 4  2x2
0 1
2 3

n = 5  2x3
0 1 2
3 4 x

n = 6  2x3

n=7  3x3
def filled_grid(n):
    i = 0
    r, c = 1, 1
    while r * c < n:
        if i % 2:
            r += 1
        else:
            c += 1
        i += 1
    return r, c
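The snippet above alternates between adding a column and adding a row until the grid has at least n cells, which matches the shapes in the query (1x2 for n = 2, 2x2 for n = 3 and 4, 2x3 for n = 5 and 6, 3x3 for n = 7). The short sketch below is an illustrative aside rather than part of the record: it restates the function so it runs standalone, and the driver loop and the 'x' placeholder for unused cells are assumptions added only to reproduce the query's examples.

# Minimal standalone sketch: restates filled_grid and prints the layouts
# from the query. The loop and the 'x' placeholder are illustrative
# additions, not taken from the dataset itself.
def filled_grid(n):
    i = 0
    r, c = 1, 1
    while r * c < n:          # grow until the grid holds at least n cells
        if i % 2:
            r += 1            # odd step: add a row
        else:
            c += 1            # even step: add a column
        i += 1
    return r, c

for n in range(2, 8):
    r, c = filled_grid(n)
    print(f"n = {n}  {r}x{c}")
    cells = [str(k) if k < n else "x" for k in range(r * c)]
    for row in range(r):
        print(" ".join(cells[row * c:(row + 1) * c]))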
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sw(n):\n return 4*n*n + 2*n + 1", "def nw(n):\n return 4*n*n + 1", "def ne(n):\n return 4*n*n - 2*n + 1", "def problem():\n size = 1001\n return sum(n**2 * 4 - 6 * n + 6 for n in range(3, size+1, 2)) + 1", "def heptagonal(n: int) -> int:\n return int(n * (5 * n - 3) / 2)", "def problem9_naive(n):\n for a in range(4, n, 4):\n for b in range(3, n - a):\n c = n - a - b\n if a ** 2 + b ** 2 == c ** 2:\n return a * b * c\n return None", "def hexagonal(n: int) -> int:\n return int(n * (2 * n - 1))", "def binary_compositions(n):\n return productrange(*[2]*(n-1))", "def pentagon(n) -> int:\n\n return (n * (3 * n - 1)) // 2", "def sum_of_three_squares(n):\n special = {1:(1, 0, 0), 2:(1, 1, 0), 3:(1, 1, 1), 10: (1, 3, 0), 34: (3, 3, 4), 58:(3, 7, 0),\n 85:(6, 7, 0), 130:(3, 11, 0), 214:(3, 6, 13), 226:(8, 9, 9), 370:(8, 9, 15),\n 526:(6, 7, 21), 706:(15, 15, 16), 730:(1, 27, 0), 1414:(6, 17, 33), 1906:(13, 21, 36),\n 2986: (21, 32, 39), 9634: (56, 57, 57)}\n\n v = 0\n\n if n == 0:\n return (0, 0, 0)\n\n v = multiplicity(4, n)\n n //= 4**v\n\n if n % 8 == 7:\n return\n\n if n in special.keys():\n x, y, z = special[n]\n return _sorted_tuple(2**v*x, 2**v*y, 2**v*z)\n\n s, _exact = integer_nthroot(n, 2)\n\n if _exact:\n return (2**v*s, 0, 0)\n\n x = None\n\n if n % 8 == 3:\n s = s if _odd(s) else s - 1\n\n for x in range(s, -1, -2):\n N = (n - x**2) // 2\n if isprime(N):\n y, z = prime_as_sum_of_two_squares(N)\n return _sorted_tuple(2**v*x, 2**v*(y + z), 2**v*abs(y - z))\n return\n\n if n % 8 in (2, 6):\n s = s if _odd(s) else s - 1\n else:\n s = s - 1 if _odd(s) else s\n\n for x in range(s, -1, -2):\n N = n - x**2\n if isprime(N):\n y, z = prime_as_sum_of_two_squares(N)\n return _sorted_tuple(2**v*x, 2**v*y, 2**v*z)", "def solve_for_edge_dimensionality(n):\n return int(round(np.sqrt(2 * n + 2.25) - 1.5))", "def sol(n):\n p = 1\n res = 0\n \n while n:\n p*=5\n if n&1:\n res+=p\n n=n>>1\n return res%1000000007", "def find(n):\n tn = int(n / 2)\n s = 2 * tn + 1\n count = 0\n line_list = []\n for i in range(1, n + 1):\n for j in range(i + 1, n + 1):\n if i + j != s:\n line_list.append(str(i) + \" \" + str(j))\n count += 1\n\n return count, line_list", "def McNuggets(n):\n # Your Code Here\n for c in xrange( n/20+2):\n for b in xrange( (n-20*c)/9+2):\n for a in xrange ((n-20*c-9*b)/6 +2):\n if (6*a + 9*b + 20*c) == n :\n return True\n return False", "def hexagonal_number(n):\n return n * (2 * n - 1)", "def solve(n=10):\n return sum(M_N_S(n, d)[2] for d in range(10))", "def sum_of_squares(n):\n return (n * (n+1) * (2*n + 1)) / 6", "def solution(n: int) -> int:\n sizearr = n + 1\n\n # create zero-filled multi_arr\n multi_arr = [[0 for x in range(sizearr)] for n in range(sizearr)]\n\n # base value is always skipped after being padded\n multi_arr[0][0] = 1\n for last in range(1, sizearr):\n for next in range(0, sizearr):\n multi_arr[last][next] = multi_arr[last - 1][next]\n if next >= last:\n multi_arr[last][next] += multi_arr[last - 1][next - last]\n\n return multi_arr[n][n] - 1", "def McNuggets(n):\n # Your Code Here\n\n high = n//6+1\n\n if n != 0:\n for i in range(high):\n for j in range(high):\n for k in range(high):\n if 6*k + 9*j + 20*i == n:\n return True\n\n return False\n\n else:\n return False", "def farey(n):\n a, b, c, d = 0, 1, 1, n\n #yield a, b\n while (c <= n):\n k = (n + b) // d\n a, b, c, d = c, d, (k*c-a), (k*d-b)\n yield a, b", "def octagonal(n: int) -> int:\n return int(n * (3 * n - 2))", "def get_squares(n):\n\n return sum([i * i for i in range(n)])", "def algorithm_p(n):\n 
partition = [0]*n\n last_replaced = 0\n partition[last_replaced] = n\n idx = last_replaced - (n == 1)\n\n while True:\n yield partition[0:last_replaced + 1]\n if idx < 0:\n return\n if partition[idx] == 2:\n partition[idx] = 1\n idx -= 1\n last_replaced += 1\n partition[last_replaced] = 1\n else:\n replacement = partition[idx] - 1\n partition[idx] = replacement\n n = last_replaced - idx + 1\n last_replaced = idx + 1\n while n > replacement:\n partition[last_replaced] = replacement\n last_replaced += 1\n n -= replacement\n partition[last_replaced] = n\n idx = last_replaced - (n == 1)", "def orderByIncreasingBitCount(n):\n res = [0] # freebie\n biggest = 2**n - 1\n for i in range(1, n):\n for j in range(1, biggest):\n if hamming_weight(j) == i:\n res.append(j)\n res.append(biggest) # another freebie\n return res", "def countArrangement(self, n: int) -> int:\n def iter_digit(n):\n while n:\n yield n % 2\n n //= 2\n\n @lru_cache(None)\n def dfs(i, remains):\n if i == n+1:\n return 1\n cnt = 0\n for j, d in enumerate(iter_digit(remains)):\n if d == 0:\n continue\n if j%i == 0 or i%j == 0:\n remains ^= 2**j\n cnt += dfs(i+1, remains)\n remains ^= 2**j\n return cnt\n\n # starting from 11..10 (length is n+1)\n return dfs(1, 2**(n+1)-2)", "def compute(n):\n if n == 1:\n return 1\n else:\n i = find_i(n)\n return 2 * compute(n - i) + 2 ** i - 1", "def go(n):\n if 0 == n:\n return (1, 0)\n else:\n x, y = go(n // 2)\n if n % 2 == 0:\n return (x+y, y)\n else:\n return (x, x+y)", "def compute_pattern(n):\n for x in range(1,n):\n for y in range(x, x*2):\n print(y, end= \" \")\n print()", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n for i in range(1,int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k//3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]", "def hpint_perm(n):\n c_new = []\n D_new = []\n H_new = []\n for i in range(2 ** n - 1):\n c_new_i = np.zeros((n, 1))\n binStr = bin(i + 1)[2:]\n for j in range(len(binStr)):\n c_new_i[n - 1 - j][0] = int(binStr[len(binStr) - 1 - j])\n c_new.append(c_new_i)\n D_new_i = np.diag(np.transpose(c_new_i)[0])\n D_new.append(D_new_i)\n H_new_i = np.diag(np.transpose(c_new_i * (-2) + 1)[0])\n H_new.append(H_new_i)\n\n return c_new, D_new, H_new", "def solution2(n):\n ones = 0\n while n > 0:\n if n & 1:\n ones += 1\n n = n >> 1\n\n return 0 if ones % 2 == 0 else 1", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def print_triangular_numbers(n):\r\n\r\n\tfor i in range(1, n+1):\r\n\t\tsum = int((i / 2)*(1 + i))\r\n\t\tprint(i, \"\\t\", sum)", "def H(n,x):\n if(n == 0):\n hn = 1\n elif(n == 1):\n hn = 2*x\n else:\n Hnm1 = 1; Hn = 2*x\n for i in range(1,n):\n H = 2*x*Hn - 2*i*Hnm1\n Hnm1 = Hn\n Hn = H\n hn = H\n return hn", "def primes_from_2_to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]", "def I (self, n):", "def amount_of_stairs(n):\n\n matrix = [[0] * n for i in range(n)]\n\n for i in range(0, n):\n for j in range(1, i):\n matrix[i][j] = sum(matrix[i - j - 1][:j])\n matrix[i][i] = 1\n\n # 
print_matrix(matrix)\n return sum(matrix[n-1])", "def base(x, y, n):\n curRow = [i for i in range(0, n)]\n for N in range(2,n):\n curRow[N] = curRow[N-1]*(N-1)\n for _ in range(3, x+y):\n lastRow = curRow[:]\n for N in range(1, n):\n curRow[N] = curRow[N-1]*(N-1) + lastRow[N-1]\n return curRow[n-1]", "def combin(n, k):\n\tif k > n//2:\n\t\tk = n-k\n\tx = 1\n\ty = 1\n\ti = n-k+1\n\twhile i <= n:\n\t\tx = (x*i)//y\n\t\ty += 1\n\t\ti += 1\n\treturn x", "def square_difference(n):\n\n return n*(n+1)*(3*n+2)*(n-1)/12", "def sixn(m):\n if m <= 2:\n return ()\n if m > 2:\n yield 2\n if m > 3:\n yield 3\n for n in count(1):\n x = 6 * n - 1\n y = x + 2\n if x < m:\n yield x\n else:\n break\n if y < m:\n yield y\n else:\n break", "def count_square_sums(n):\n if n == 0: return 1\n total = 4*( sum(1 for i in divisors(n) if i % 4 == 1) \n - sum(1 for i in divisors(n) if i % 4 == 3) )\n ## Remove duplicate countings if n > 0\n ## Eight duplicates: (+/-a, +/-b) (+/-b, +/-a) \n ## Four duplicates: (0,+1), (0,-1), (+1,0), (-1,0)\n ## Four duplicates: (+/-1,+/-1)\n flg = 0\n if is_square(n): flg += 1\n if is_square(n/2) and (n % 2 == 0): flg += 1\n return (total + 4*flg)/8", "def landau2(n):\n\n factors = primish(n)\n\n # TODO: I have no idea here...\n\n #assert sum(factors) <= n\n return product(factors)", "def solve(n: int) -> None:\n count_triangles = 3 * n * n\n for x in range(1, n+1):\n for y in range(1, x+1):\n xy_gcd = gcd(x, y)\n move_x, move_y = x // xy_gcd, y // xy_gcd\n i = 1\n while y + i * move_x <= n and x - i * move_y >= 0:\n count_triangles += 1 + int(x != y)\n i += 1\n i = 1\n while y - i * move_x >= 0 and x + i * move_y <= n:\n count_triangles += 1 + int(x != y)\n i += 1\n print(count_triangles)", "def generatePrimesFrom2(n):\n sieve = bytearray([True]) * (n//2+1)\n for i in range(1,int(n**0.5)//2+1):\n if sieve[i]:\n sieve[2*i*(i+1)::2*i+1] = bytearray((n//2-2*i*(i+1))//(2*i+1)+1)\n return [2, *compress(range(3,n,2), sieve[1:])]", "def calc(n):\r\n \r\n #Initialize a list of zeroes\r\n moves = [0] * (n + 1)\r\n for i in range(2, n + 1):\r\n #The option always exists to just add one to the previous number\r\n #Number of moves is equal to the previous value plus one\r\n minmoves = moves[i - 1] + 1\r\n \r\n #If the number is divisible by two, check to see if multiplying some previous number by two is more efficient\r\n if i % 2 == 0:\r\n minmoves = min(minmoves, moves[i // 2] + 1)\r\n \r\n #Same as above\r\n if i % 3 == 0:\r\n minmoves = min(minmoves, moves[i // 3] + 1)\r\n moves[i] = minmoves\r\n \r\n #Reconstruct the path we took to get to n\r\n path = []\r\n while n > 1:\r\n #At each iteration, we find where the number of moves is one less than our current moves\r\n path.append(n)\r\n if moves[n] - moves[n-1] == 1:\r\n n -= 1\r\n elif n % 2 == 0 and moves[n] - moves[n // 2] == 1:\r\n n //= 2\r\n elif n % 3 == 0 and moves[n] - moves[n // 3] == 1:\r\n n //= 3\r\n path.append(1)\r\n path.reverse()\r\n return \"{}\\n{}\".format(moves[-1], \" \".join([str(x) for x in path]))", "def primesfrom2to(n):\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]", "def primesfrom2to( n ):\n sieve = numpy.ones( n / 3 + ( n % 6 == 2 ), dtype = numpy.bool )\n for i in range( 1, int( ( n ** 0.5 ) / 3 ) + 1 ):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ k * k / 3 ::2 * k] = False\n sieve[k * ( k - 2 * ( i & 1 ) + 4 ) / 3::2 * 
k] = False\n return numpy.r_[2, 3, ( ( 3 * numpy.nonzero( sieve )[0][1:] + 1 ) | 1 )]", "def primesfrom2to(n):\n sieve = numpy.ones(n/3 + (n%6==2), dtype=numpy.bool)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)/3::2*k] = False\n return numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def pentagonal(n: int) -> int:\n return int(n * (3 * n - 1) / 2)", "def combinations(n) -> float:\r\n c = math.factorial(n) / (math.factorial(2) * math.factorial(n - 2))\r\n return c", "def primesfrom2to(n):\n sieve = numpy.ones(n//3 + (n%6 == 2), dtype=numpy.bool)\n for i in range(1, int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[k*k//3::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def decompose(n):\n binary_rep = list(bin(n)[2:])\n binary_rep.reverse()\n s = 0\n while(binary_rep[s] == \"0\"): ##find last occurance of a bit 1\n s += 1\n return (s, n>>s) # = n/(2**s))", "def primesfrom2to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=np.bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]", "def polygonal_number(s, n):\n return (n*n*(s-2)-n*(s-4))/2", "def count_tilings(n: int) -> int:\n if n < 5:\n # handle recursive base case\n return 2**(n - 1)\n else:\n # place each tile at end of row and recurse on remainder\n return (count_tilings(n - 1) +\n count_tilings(n - 2) +\n count_tilings(n - 3) +\n count_tilings(n - 4))", "def sum_of_four_squares(n):\n if n == 0:\n return (0, 0, 0, 0)\n\n v = multiplicity(4, n)\n n //= 4**v\n\n if n % 8 == 7:\n d = 2\n n = n - 4\n elif n % 8 in (2, 6):\n d = 1\n n = n - 1\n else:\n d = 0\n\n x, y, z = sum_of_three_squares(n)\n\n return _sorted_tuple(2**v*d, 2**v*x, 2**v*y, 2**v*z)", "def primes2(n):\n n, correction = n-n%6+6, 2-(n%6>1)\n sieve = [True] * (n//3)\n for i in range(1,int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k//3 ::2*k] = [False] * ((n//6-k*k//6-1)//k+1)\n sieve[k*(k-2*(i&1)+4)//3::2*k] = [False] * ((n//6-k*(k-2*(i&1)+4)//6-1)//k+1)\n return [2,3] + [3*i+1|1 for i in range(1,n//3-correction) if sieve[i]]", "def makePermutations(n):\n\thalf = n // 2\n\tfull = half * 2\n\tswap = np.random.rand(half) > 0.5\n\tpx = np.arange(n)\n\tpx[:full:2] += swap\n\tpx[1:full:2] -= swap\n\treturn px", "def solution(n):\n i = 1\n j = 2\n sum = 0\n while j <= n:\n if j % 2 == 0:\n sum += j\n i, j = j, i + j\n\n return sum", "def primes2(n):\r\n n, correction = n - n % 6 + 6, 2 - (n % 6 > 1)\r\n sieve = [True] * (n // 3)\r\n for i in range(1, int(n ** 0.5) // 3 + 1):\r\n if sieve[i]:\r\n k = 3 * i + 1 | 1\r\n sieve[k * k // 3::2 * k] = [False] * ((n // 6 - k * k // 6 - 1) // k + 1)\r\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = [False] * (\r\n (n // 6 - k * (k - 2 * (i & 1) + 4) // 6 - 1) // k + 1)\r\n return [2, 3] + [3 * i + 1 | 1 for i in range(1, n // 3 - correction) if sieve[i]]", "def primes2(n):\n correction = (n%6>1)\n n = {0:n,1:n-1,2:n+4,3:n+3,4:n+2,5:n+1}[n%6]\n sieve = [True] * (n/3)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k]=[False]*((n/6-(k*k)/6-1)/k+1)\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k]=[False]*((n/6-(k*k+4*k-2*k*(i&1))/6-1)/k+1)\n return [2,3] + [3*i+1|1 for i in xrange(1,n/3-correction) if sieve[i]]", "def sieve8(n):\n prime = np.ones(n//3 + 
(n%6==2), dtype=np.bool)\n for i in range(3, int(n**.5) + 1, 3):\n if prime[i // 3]:\n p = (i + 1) | 1\n prime[ p*p//3 ::2*p] = False\n prime[p*(p-2*(i&1)+4)//3::2*p] = False\n result = (3 * prime.nonzero()[0] + 1) | 1\n result[0] = 3\n return np.r_[2,result]", "def _n_choose_2(n):\n return (n * (n - 1)) // 2", "def primes2(n):\n n, correction = n-n%6+6, 2-(n%6>1)\n sieve = [True] * (n/3)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = [False] * ((n/6-k*k/6-1)/k+1)\n sieve[k*(k-2*(i&1)+4)/3::2*k] = [False] * ((n/6-k*(k-2*(i&1)+4)/6-1)/k+1)\n return [2,3] + [3*i+1|1 for i in xrange(1,n/3-correction) if sieve[i]]", "def McNuggets(n):\n a = 0\n b = 0\n c = 0\n\n while 6*a + 9*b + 20*c < n:\n while 6*a + 9*b + 20*c < n:\n while 6*a + 9*b + 20*c < n:\n c += 1\n if 6*a + 9*b + 20*c == n:\n print a, b, c\n return True\n c = 0\n b += 1\n if 6*a + 9*b + 20*c == n:\n print a, b, c\n return True\n b = 0\n a += 1\n\n\n return False", "def solve(n=1000):\r\n return str(sum(x**x for x in range(1, n + 1)))[-10:]", "def primes(n):\n sieve = bytearray([True]) * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = bytearray((n-i*i-1)//(2*i)+1)\n return [2,*compress(range(3,n,2), sieve[1:])]", "def J (self, n):", "def get_triangle_numbers(n):\n r = []\n for i in xrange(1, n + 1):\n t = ((i * (i + 1)) / 2)\n r.append(t)\n return r", "def generate(n):\n \n m1 = np.zeros((n, n), dtype = int)\n m2 = np.zeros((n, n), dtype = int)\n \n for i in range(n):\n for j in range(n):\n m1[i][j] = (j % 32)\n m2[i][j] = (j % 64)\n \n return m1,m2", "def solve(n, seq):\n\n return sum(seq) - (n-1) * (n-2) / 2", "def get_probable_prime(n: int) -> [int]:\n return [6*n-1, 6*n+1]", "def primesfrom3to(n):\n sieve = numpy.ones(n//2, dtype=numpy.bool)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = False\n return 2*numpy.nonzero(sieve)[0][1::]+1", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n if n <= 3:\n return n\n else:\n i = 3\n x, y, z = 1, 2, 3\n new = 1\n while i < n:\n new = z + (2*y) + (3*x)\n x, y, z = y, z, new \n i += 1\n return new", "def triangleNumber(n):\n return sum(range(n+1))", "def collatz(n):\n if n%2==0: return n/2\n else: return 3*n+1", "def bits(n):\n\n # Create a list of the first 1,000 binary numbers\n binary_list = reverse_binary_list()\n\n # Start by calculating number of 1's for n\n n_ones = num_of_ones(n, binary_list)\n\n # Calculate number of 1's for next value\n next_ones = 0\n while n_ones != next_ones:\n n = n + 1\n next_ones = num_of_ones(n, binary_list)\n\n return(n)", "def Arn(r, n):\n ret = 1\n for t in range(n, n-r+1-1, -1):\n ret *= t\n return ret", "def sol(n, mem):\n if mem[n] != -1:\n return mem[n]\n \n mem[n] = 0\n for i in range(2, n+1):\n mem[n]+=sol(n-i, mem)*sol(i-2, mem)\n \n return mem[n]", "def McNuggets(n):\n a=0\n b=0\n c=0\n while 6*a + 9*b + 20*c < n:\n for a in range((n//6)+1):\n for b in range((n//9)+1):\n for c in range ((n//20)+1):\n if 6*a + 9*b + 20*c == n:\n return print(True)\n if 6*a + 9*b + 20*c != n:\n return print(False)", "def solution3(n):\n res = []\n while n > 0:\n m = int(math.sqrt(n))**2\n res.append(m)\n n -= m\n return res", "def triangular_number(n):\n return n*(n+1) / 2", "def cycles(n, m):\n if n <= 2:\n return 1\n k = 2\n tk = 3\n pk = 2\n while k < n - 1:\n tk1 = (2 * pk * tk * tk) % m\n pk1 = (2 * pk * pk * tk) % m\n tk = tk1\n pk = pk1\n k += 1\n return (pk * pk * pk) % m", "def prod_of_nth(n):\n factorial = 1\n for i in range(1,n+1):\n factorial *= i\n 
return factorial", "def collatz(n):\n iterCount = 0\n while(n != 1):\n if(n & 1):\n n = 3 * n + 1\n else:\n n //= 2\n iterCount += 1\n return iterCount", "def sumn(n):\n return n * (n + 1) // 2", "def number_of_trees_of_order(n):\n if n < 2:\n return n\n result = 0\n for k in range(1, n):\n result += k * number_of_trees_of_order(k) * _s(n-1, k)\n return result // (n - 1)", "def plus_table(n):\n return [[(i + j) % n for i in range(n)] for j in range(n)]", "def triple_step_simplified(n):\n\ta = 0\n\tb = 0\n\tc = 1\n\tfor i in range(n):\n\t\ttemp = a + b + c\n\t\ta, b, c = b, c, temp\n\treturn temp", "def make_magic_square(N): # part a\n if N % 2 == 0:\n print('N must be odd.')\n my_magic_square = np.zeros((N, N))\n i = 0\n j = np.ceil(N / 2.).astype(int)\n n = 1\n while n <= N**2:\n my_magic_square[i, j] = n\n n += 1\n i_next =\n j_next =\n if my_magic_square[i_next, j_next] > 0:\n i =\n else:\n i =\n j =\n return my_magic_square", "def fn(n):\n if n == 0: return 1\n return sum(fn(i)*fn(n-i-1) for i in range(n))", "def sieve_of_eratosthenes(n):\n res = [2]\n i = 3\n marked = set()\n while i <= n**.5:\n if i not in marked:\n res.append(i)\n j = 0\n while j <= n/i:\n marked.add(i + j*i)\n j += 1\n i += 2\n while i <= n:\n if i not in marked:\n res.append(i)\n i += 2\n return res", "def primesfrom2to(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def sum_of_squares(n):\n result = i = 0\n while i < n:\n result += i\n i += 1\n return result", "def pentagonal(n):\n return (n * ((3 * n) - 1)) / 2", "def primesToNumber(n):\r\n sieve = [True] * n\r\n for i in xrange(3,int(n**0.5)+1,2):\r\n if sieve[i]:\r\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\r\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def phi(n):\n sieve = [i if i & 1 else i // 2 for i in range(n + 1)]\n for i in range(3, n + 1, 2):\n if sieve[i] == i:\n for j in range(i, n + 1, i):\n sieve[j] = (sieve[j] // i) * (i - 1)\n\n return sieve", "def fatorial(n):\r\n if n <= 1: return 1\r\n return n * fatorial(n-1)", "def McNuggets(n):\n # Your Code Here\n\n for a in range(0, n/6+1):\n for b in range(0, n/9+1):\n for c in range(0, n/20+1):\n if 6*a+9*b+20*c == n:\n return True\n return False" ]
[ "0.73914665", "0.72387326", "0.6959924", "0.6865498", "0.67776674", "0.67680246", "0.6764694", "0.6735738", "0.66464823", "0.6636954", "0.6585204", "0.6544291", "0.65394866", "0.65350693", "0.6500963", "0.64812386", "0.6394155", "0.63908386", "0.6298539", "0.6288296", "0.6287545", "0.6281248", "0.6278076", "0.6264418", "0.6263395", "0.62578285", "0.62576455", "0.6255471", "0.6251453", "0.6237787", "0.62373495", "0.6234304", "0.6231247", "0.62252504", "0.62166876", "0.6215387", "0.6212877", "0.6207923", "0.6206577", "0.6205234", "0.6198643", "0.6195229", "0.61927426", "0.61909", "0.61896205", "0.61797464", "0.61759865", "0.6175829", "0.6164646", "0.61574924", "0.61398476", "0.61379886", "0.6133654", "0.6129447", "0.61232847", "0.6117089", "0.61154294", "0.6109949", "0.610063", "0.6100003", "0.6099472", "0.6098241", "0.6097107", "0.608331", "0.6078928", "0.6077362", "0.6064554", "0.60559076", "0.60555804", "0.6055001", "0.60527015", "0.60506684", "0.60485965", "0.6048127", "0.60475785", "0.6046266", "0.604606", "0.6043249", "0.6034051", "0.6032778", "0.60308754", "0.60266334", "0.60256016", "0.60255164", "0.6023531", "0.6015765", "0.60052174", "0.5995204", "0.599478", "0.5993817", "0.59897697", "0.5983057", "0.59817785", "0.59813666", "0.59783113", "0.59688026", "0.59670335", "0.5962673", "0.5956016", "0.5948218" ]
0.621542
35
A convenience function to get planet position from SPICE
def planetPositionVelocity(planetName, time, ephemerisPath = '/supportData/EphemerisData/pck00010.tpc', observer = 'SSB', frame = 'J2000'):
    pyswice.furnsh_c(bskPath + '/supportData/EphemerisData/de430.bsp')
    pyswice.furnsh_c(bskPath + '/supportData/EphemerisData/naif0012.tls')  #load leap seconds
    pyswice.furnsh_c(bskPath + ephemerisPath)
    positionVelocity = pyswice.spkRead(planetName, time, frame, observer)
    position = positionVelocity[0:3] * 1000
    velocity = positionVelocity[3:6] * 1000
    pyswice.unload_c(bskPath + ephemerisPath)
    return position, velocity  # [m], [m/s]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_planet(coords):\n r_theta = output_coord_to_r_theta(coords)\n input_coords = r_theta_to_input_coords(r_theta)\n return input_coords", "def get_position(self, t0):\n my_pos_x=np.random.uniform(-20, 20)\n my_pos_y=np.random.uniform(-20, 20)\n r=np.array([my_pos_x, my_pos_y])\n x_y=np.zeros(shape=(self.no_planets-1, 2))\n tol=1e-5\n diff=np.zeros(self.no_planets-1)\n for k in range(self.no_planets-1):\n r1=np.linalg.norm(r)\n r2=np.linalg.norm(r-self.positionFunction(t0)[:, k])\n r3=np.linalg.norm(r-self.positionFunction(t0)[:, k+1])\n x1=0\n y1=0\n x2=self.positionFunction(t0)[0,k]\n y2=self.positionFunction(t0)[1,k]\n x3=self.positionFunction(t0)[0,k+1]\n y3=self.positionFunction(t0)[1, k+1]\n x,y,difference=self.triangulate_analytic(x1,y1,r1,x2,y2,r2,x3,y3,r3)\n x_y[k, 0]=x\n x_y[k, 1]=y\n diff[k]=difference\n if (diff > tol).any():\n print diff.max()\n print \"Oh no, one failed :(\"\n sys.exit(1)\n print \"My pos x:\", my_pos_x\n print \"My pos y:\", my_pos_y\n #return x1, y1, r1, x2, y2, r2, x3, y3, r3", "def spot_coords(self,spot):\n if spot == '1':\n return (330 - 60 ,335 - 15)\n if spot == '2':\n return (419 - 60, 335 - 15)\n if spot == '3':\n return (591 - 60, 159 - 15)\n if spot == '4':\n return (588 - 60, 248 - 15)", "def get_position(self, position):", "def position(self, name, tdb):\n if name == 'earth':\n return self._interpolate_earth(tdb, differentiate=False)\n else:\n return self._interpolate(name, tdb, differentiate=False)", "def _player_loc():\n return _to_my_vec3(_get_mc().player.getTilePos())", "def test_get_position():\n pos = get_position(date, lng, lat)\n assert np.isclose(pos['azimuth'], -2.5003175907168385)\n assert np.isclose(pos['altitude'], -0.7000406838781611)", "def get_position(self):\n position = (self.position_x * SPRITE_SIZE, self.position_y * SPRITE_SIZE)\n return position", "def _get_plunger_position(self, position):\n try:\n value = self.positions[position]\n if isinstance(value, (int, float, complex)):\n return value\n else:\n raise RuntimeError(\n 'Plunger position \"{}\" not yet calibrated'.format(\n position))\n except KeyError:\n raise RuntimeError(\n 'Plunger position \"{}\" does not exist'.format(\n position))", "def get_position(self):\n pos_or_org = self.position.to_object\n if pos_or_org is None:\n return None\n elif pos_or_org.portal_type == 'position':\n return pos_or_org\n else:\n return None", "def s(self, position: Vector) -> float:\n return self.local_coordinates(position)[0]", "def get_position(self):\n return self.proposition.team_a if self.position else self.proposition.team_b", "def _get_site_pos(dset):\n # TODO hjegei: Workaround -> better would it be if Position object can handle LLH as input format!!!\n x, y, z = gnss.llh2xyz(np.deg2rad(dset.lat), np.deg2rad(dset.lon), dset.height)\n return np.stack((x, y, z), axis=1)", "def get_player_position(self):\n raise NotImplementedError", "def get_pos(self):\n return (self.x/3, 3**0.5*self.y/3, self.r/3)", "def _random_spherical_position(u):\n n = u.size\n nhalf = n // 2\n cos_t = 2 * u[:nhalf] - 1\n phi = 2 * np.pi * u[nhalf:]\n\n sin_t = np.sqrt((1.0 - cos_t * cos_t))\n\n x = sin_t * np.cos(phi)\n y = sin_t * np.sin(phi)\n z = cos_t\n\n return x, y, z", "def player_location(self):\n x = 0\n y = 0\n for line in self.grid:\n for i in line:\n if i == \"P\":\n return x, y\n \n y+=1\n x += 1\n y = 0", "def get_position(self):\n return parsegeometry(self.geometry())[2:]", "def get_center_position( peg, position_on_peg):\n x = Hanoi.FIRST_TOWER_X + peg * Hanoi.DISTANCE_BETWEEN_TOWER\n 
y = position_on_peg * Hanoi.DISK_HEIGHT + 0.5 * Hanoi.DISK_HEIGHT\n return (x, y)", "def get_obst_position(obst):\n pos = Vector2()\n pos.x = obst.position.x\n pos.y = obst.position.y\n return pos", "def get_position(self, name):\n from scoop.editorial.models import Position\n try:\n return self.positions.get(name=name)\n except Position.DoesNotExist:\n return None", "def solar_longitude(cls, tee):\n return cls.true_position(tee, cls.SIDEREAL_YEAR, 14/360, cls.ANOMALISTIC_YEAR, 1/42)", "def get_terrapos_position():\n dset = parsers.parse_key(\"terrapos_output_position\").as_dataset()\n dset.add_time(\"time\", val=dset.gpsweek, val2=dset.gpssec, scale=\"gps\", fmt=\"gps_ws\")\n dset.add_position(\"site_pos\", time=\"time\", itrs=_get_site_pos(dset))\n\n return dset", "def getVerticePosition(self):\n #def getvoxelpos(model,scale,dims,translate,i,j,k): #centroid!\n return(self.X,self.Y,self.Z)", "def get_position():\n function = LegacyFunctionSpecification()\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.IN)\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.OUT, \n description = \"The current position vector of the particle\")\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n function.must_handle_array = True\n return function", "def get_position():\n\n return character['Position']", "def get_position(self, unit='degree'):\n print(\"GET POSITION\")\n res = self._send_command(\n \"RP;\",\n fb_required=True,\n res_pattern=\"POS:\"\n )\n # The received answer is expected to be something like\n # POS:<float:tilt>,<float:rot>\n tilt = float(res.split(':')[1].split(',')[0])\n rot = float(res.split(':')[1].split(',')[1])\n return (tilt, rot)", "def get_slot_position(self, slot: DeckSlotName) -> Point:\n slot_def = self.get_slot_definition(slot)\n position = slot_def[\"position\"]\n\n return Point(x=position[0], y=position[1], z=position[2])", "def getPoint(self):\n return Point(*self.position)", "def _position_cylindrical2spherical(pos):\n\n rho=pos[:,0]\n theta_cylindrical=pos[:,1]\n z=pos[:,2]\n\n r=np.sqrt(rho**2+z**2)\n theta_spherical=np.arctan2(rho,z)\n phi=theta_cylindrical\n\n return np.dstack((r,theta_spherical,phi))[0]", "def PosPlanet (self, deltaT):\n\n for planet in self.planets:\n position = planet.position + (planet.velocity * deltaT)\n planet.position = position #Each body's resulting position is updated to the body's information defined in the Particle class.", "def at(self, pos):\n return self.world.at(pos)", "def getpos(self):\n return self.pos.cartesianas()", "def position(self):\n return pm.datatypes.Point(self.transform.getTranslation(ws=True))", "def find_player_position(labyrinth: Labyrinth) -> Tuple[int, int]:\n for row in range(0, len(labyrinth)):\n for col in range(0, len(labyrinth[0])):\n if labyrinth[row][col] == Labyrinth.START:\n return row, col\n\n # todo: handle exception, if there is no field holding 'S' then something is wrong\n return -1, -1", "def random_position():\n path = (\n os.path.dirname(__file__)\n + os.sep\n + \"templates\"\n + os.sep\n + \"data\"\n + os.sep\n + \"taxi_stations.json\"\n )\n with open(path) as f:\n stations = json.load(f)[\"features\"]\n pos = random.choice(stations)\n coords = [pos[\"geometry\"][\"coordinates\"][1], pos[\"geometry\"][\"coordinates\"][0]]\n lat = float(\"{0:.6f}\".format(coords[0]))\n lng = float(\"{0:.6f}\".format(coords[1]))\n return [lat, 
lng]", "def position(t, x, y):\n return x * exp(-t * y) * sin(2 * pi * t)", "def get_pos(self):\n current_angles = self.chain.get_current_values()\n current_xyz, current_rpy = self._solver.forward_solve(current_angles)\n return current_xyz, current_rpy", "def get_position(self):\n return self.position", "def get_position(self):\n return self.position", "def my_location(state):\n return state['gladiators'][state['current_player']]['pos']", "def calcul_point_plan_projection(cls,cx,cy,cz,spx,spy,axe_x,axe_y):\n projX=gs.Vector3(spx*axe_x.x,spx*axe_x.y,spx*axe_x.z)\n projY=gs.Vector3(spy*axe_y.x,spy*axe_y.y,spy*axe_y.z)\n point=gs.Vector3(projX+projY)+gs.Vector3(cx,cy,cz)\n return point", "def get_spawn_xyz(self):\n return self.X, self.Y, self.Z", "def get_position(self):\n response = self.__send_and_receive(protocol.GET_COOR)\n value = self.__gen_response_value(response)\n if value:\n parse_cmd = self.__parse_cmd(response, [\"x\", \"y\", \"z\"])\n coordinate = [parse_cmd[\"x\"], parse_cmd[\"y\"], parse_cmd[\"z\"]]\n return coordinate\n else:\n return False", "def parsenwspt(text):\n lat = int(text[0:4]) / 100\n lon = int(text[4:])\n if lon < 1000:\n lon += 10000\n return (lon / -100, lat)", "def get_orbit_coords(orbit, geo=False, Npts=250, units_rm=True, sim_mars_r=3396.0,\n adjust_spherical=True, return_time=False):\n Nskip = 2 #10000/Npts\n data = pd.read_csv(orbit_dir+'orbit_{0:04d}.csv'.format(orbit))[::Nskip]\n pos = np.array([data['x'], data['y'], data['z']])\n time = data['time'].values\n time_adj = (time-time[0])/(time[-1]-time[0])\n alt = data['altitude']\n \n if adjust_spherical:\n pos = adjust_spherical_positions(pos, alt, sim_mars_r)\n\n if units_rm:\n pos = pos/sim_mars_r\n\n if return_time: return (pos,time, time_adj)\n else: return pos", "def get_pos(self) -> WAVector:\n pass", "def get_position(self):\n raise NotImplementedError()", "def spherical(self, x, y):\n\t\twhile x >= self.planet.width or x < 0 or y >= self.planet.height or y < 0:\n\t\t\t#change x if x is out of boundary\n\t\t\tif x >= self.planet.width:\n\t\t\t\tx -= (self.planet.width)\n\t\t\telif x < 0:\n\t\t\t\tx += (self.planet.width)\n\t\t\t#change y if y is out of boundary\n\t\t\tif y >= self.planet.height:\n\t\t\t\ty -= (self.planet.height)\n\t\t\telif y < 0:\n\t\t\t\ty += (self.planet.height)\n\t\treturn x, y", "def position(self, t):\n return vector_add(self.origin, self.direction.scale(t))", "def get_coord(tic):\n try:\n catalog_data = Catalogs.query_object(objectname=\"TIC\"+tic, catalog=\"TIC\")\n ra = catalog_data[0][\"ra\"]\n dec = catalog_data[0][\"dec\"]\n # print(catalog_data.keys())\n # print(catalog_data[0][\"GAIA\"])\n return ra, dec\n except:\n \tprint(\"ERROR: TIC not found in Simbad\")", "def get_pos(self, frame):\n frame = self.perspective_shift(frame)\n \n puck_mask = self.color_mask(frame, self.color_green, thresh=15)\n striker_mask = self.color_mask(frame, self.color_orange, thresh=25, blur=5)\n \n puck_loc, _ = self.find_centroids(puck_mask)\n striker_locs, _ = self.find_centroids(striker_mask, 2)\n \n p_pos = self.abs_to_meter(puck_loc[0])\n # cases: (pos,pos), (pos,None), (None,None)\n if striker_locs[0] is not None:\n pos_1 = self.abs_to_meter(striker_locs[0])\n pos_2 = self.abs_to_meter(striker_locs[1])\n s1_pos = pos_1 if pos_1[1]<0 else pos_2\n s2_pos = pos_2 if pos_1[1]<0 else pos_1\n else:\n s1_pos, s2_pos = None, None \n \n return [p_pos, s1_pos, s2_pos]", "def get_pos(x, y):\r\n return normalize(x) // 2, normalize(y) // 4", "def get_position_on_game(self):\n return (self.peg, 
self.position_on_peg)", "def determine_animal_pos(self, plot, latitude, longitude):\r\n x = convert_fraction_lat(\r\n\r\n str(return_values(plot, latitude)\r\n )\r\n )[0] * self.space.x_max\r\n\r\n y = convert_fraction_long(\r\n str(return_values(plot, longitude)\r\n )\r\n )[0] * self.space.y_max\r\n pos = (x, y)\r\n return pos", "def get_coord(self):\n return self.coord", "def getAtlasPos(self):\n return self.atlasPos", "def get_position(game_object: GameObject) -> Union[CommonVector3, None]:\n if game_object is None:\n return None\n # noinspection PyBroadException\n try:\n return CommonVector3.from_vector3(game_object.position)\n except:\n return None", "def cart2spher(x: np.ndarray, y: np.ndarray,\n z: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n indexes = np.where((x == 0) & (y == 0))[0]\n if indexes.size:\n x[indexes] = np.nan\n y[indexes] = np.nan\n lat = np.arctan2(z, np.sqrt(x * x + y * y))\n lon = np.arctan2(y, x)\n if indexes.size:\n lon[indexes] = 0\n lat[indexes] = np.pi * 0.5 * np.sign(z[indexes])\n return np.degrees(lon), np.degrees(lat)", "def position(t):\n return c + tangent_vec * 7 * t ** 2", "def _get_random_pos_on_a_side(self):\n pass", "def __LocationOf(self, vial):\n location = vial.getLabel()\n \n if location in Instrument.__reagentPositionMap.keys():\n # Override label\n location = Instrument.__reagentPositionMap[location]\n\n return (vial.getSector(), location)", "def _world_point(self, point_3d):\n return self.obj.matrix_world @ point_3d", "def getPosition(self,space=SPACE_JOINT):\n\n return self.robot.position(space)", "def position(square):\n first = square[0]\n second = square[1]\n col = parseCol(first)\n row = parseRow(second)\n return (row, col)", "def get_position(self):\n ret = _pal.Vec3()\n _pal.lib.geometry_get_position(self._geometry, ret)\n return [x for x in ret]", "def xyz(self, i):\n return self.xp[i], self.yp[i], self.zp[i]", "def _get_pos(ind_id, sim_id):\n\n # get current position of 'head'\n x, y = p.getBasePositionAndOrientation(ind_id, physicsClientId=sim_id)[0][0:2]\n return x, y", "def getPosition(self, time: float, view: Optional[Str] = ...) 
-> CVec3:\n ...", "def get_params_proj(ima, xform = \"xform.projection\"):\n\tt = ima.get_attr(xform)\n\td = t.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],-d[\"tx\"],-d[\"ty\"]", "def cart_to_loc(cart):\n return (cart[2] * slice_size) + (cart[1] * grid_size) + cart[0]", "def particle_pos(particle, time):\n return particle.pos + particle.dir * particle.speed * (time - particle.time)", "def get_position_for(self, ticker):\n return position.Position.from_account_id_and_ticker(self.id, ticker)", "def pos(self):\n return Point(self.currentItem.mapFromScene(self._scenePos))", "def pos(self):\n return Point(self.currentItem.mapFromScene(self._scenePos))", "def pos(self):\n return Point(self.currentItem.mapFromScene(self._scenePos))", "def get_position(self):\n return self.__position", "def make_planet(npix0,osf,xc,yc,rplanet,map0):\n npix=int(np.floor(npix0*osf))\n make_planet_c(npix,npix,xc*osf,yc*osf,rplanet*osf,map0)\n planet=map0.copy().reshape((npix0,osf,npix0,osf))\n planet=planet.mean(axis=3).mean(axis=1)\n return planet", "def define_spot(self,mpos):\n mpos_coord = ((mpos[0] - 199)/87, (mpos[1] - 116)/87)\n if mpos_coord == (1,2):\n spot = \"1\"\n return spot\n if mpos_coord == (2,2):\n spot = \"2\" \n return spot\n if mpos_coord == (4,0):\n spot = \"3\"\n return spot\n if mpos_coord == (4,1):\n spot = \"4\" \n return spot\n else:\n return False", "def _pose_from_odom(self, odom): \n pose = odom.pose.pose.position\n return [pose.x, pose.y, pose.z]", "def get_pos(self) -> tuple:\n return self.rect.center", "def make_position(data) -> Position:\n return (data[\"x\"], data[\"y\"])", "def get_position(self):\n return (self.x_pos, self.y_pos)", "def _getCoords(self):\n\n if self._coords is not None:\n return self._coords[self._acsi]", "def m_location_get(self) -> Point:\n pass", "def getCurrentStagePosition(self):\n \n # send query\n self.sendCMDstring(\"/cli:python /app:matrix /sys:1 /cmd:getinfo /dev:stage\")\n # wait for and parse response\n resp=self.readandparseCAM()[0]\n if resp['dev']=='stage':\n if nolayoutmodule:\n sp = (float(resp['xpos']),float(resp['ypos']),float(resp['zpos']))\n else:\n sp = layout.StagePosition()\n sp[:] = (float(resp['xpos']),float(resp['ypos']),float(resp['zpos']))\n return sp\n else:\n return None", "def position2(t):\n return 98.0 * exp(-t * 2.0 / 13.0) * sin(2 * pi * t)", "def get_pos(self) -> tuple:\n return self.pos", "def get_origin(self):\n return self.coord_cls(x=0, y=0, z=0, system=self)", "def get_pos(self):\r\n return self.pos", "def local_coordinates(self, position: np.ndarray) -> Tuple[float, float]:\n raise NotImplementedError()", "def get_center(self,lonlat=False):\n lon, lat = np.asarray(self.rotator.rots[0][0:2])*180/pi\n if lonlat: return lon,lat\n else: return pi/2.-lat*dtor, lon*dtor", "def get_pos(self):\n return (self.x, self.y)", "def random_spherepos(n):\n signs = np.sign(rand.uniform(-1,1,size=n))\n thetas = Angle(np.arccos(rand.uniform(size=n)*signs),unit=u.rad) #random b/w 0 and 180\n phis = Angle(rand.uniform(0,2*np.pi,size=n),unit=u.rad)\n c = SkyCoord(phis,thetas,1,representation='physicsspherical')\n return c", "def get_position(self):\n return self._position", "def getTilePos(self, pos = None):\n\n if not pos:\n pos = self.actor.getPos()\n \n for i in range(len(pos)):\n pos[i] = int(math.floor( (pos[i] + self.dimensions[i]) / 2.0))\n #pos[i] = int(math.floor( pos[i] / 2.0))\n\n return pos", "def _get_random_position(self):\n return (random.randrange(0, self.maze.width),\n random.randrange(0, 
self.maze.height))", "def _position_spherical2cylindrical(pos):\n \n\n r=pos[:,0]\n theta_spherical=pos[:,1]\n phi_spherical=pos[:,2]\n\n if any(theta_spherical>np.pi) or any(theta_spherical<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. Exiting.\"\n\n rho=r*np.sin(theta_spherical)\n theta_cylindrical=phi_spherical\n z=r*np.cos(theta_spherical)\n\n return np.dstack((rho,theta_cylindrical,z))[0]", "def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]", "def getSpawnLocations(team):\n if team == 2:\n tSpawnEntities = es.createentitylist(\"info_player_terrorist\")\n locations = []\n for key in tSpawnEntities:\n xyz = tuple(tSpawnEntities[key][\"CBaseEntity.m_vecOrigin\"].split(\",\"))\n locations.append(xyz)\n return tuple(locations)\n elif team == 3:\n ctSpawnEntities = es.createentitylist(\"info_player_counterterrorist\")\n locations = []\n for key in ctSpawnEntities:\n xyz = tuple(ctSpawnEntities[key][\"CBaseEntity.m_vecOrigin\"].split(\",\"))\n locations.append(xyz)\n return tuple(locations)\n else:\n return False" ]
[ "0.6685541", "0.63394314", "0.6268292", "0.6207987", "0.6067741", "0.60477245", "0.60084856", "0.59766847", "0.59688926", "0.5960511", "0.5945551", "0.5903077", "0.5866284", "0.5810986", "0.57815737", "0.577838", "0.5764995", "0.5735745", "0.5732618", "0.5730515", "0.57264274", "0.5712607", "0.5711692", "0.5711228", "0.5708364", "0.56993055", "0.569569", "0.56875163", "0.56858826", "0.56848276", "0.56847924", "0.56779116", "0.5660199", "0.56472415", "0.5635045", "0.56199974", "0.56182224", "0.5615896", "0.5615688", "0.5615688", "0.5606501", "0.5598538", "0.55914706", "0.5581623", "0.5552826", "0.5546287", "0.55391854", "0.5537225", "0.5532899", "0.54978955", "0.54961985", "0.5482859", "0.5470176", "0.54690504", "0.54674965", "0.5460283", "0.5456375", "0.5444388", "0.5437684", "0.5431098", "0.54265356", "0.5423008", "0.54218477", "0.5418755", "0.5417297", "0.54161274", "0.5412543", "0.53994787", "0.53925097", "0.5388053", "0.53838456", "0.53821796", "0.5374623", "0.53738", "0.53738", "0.53738", "0.5372912", "0.5369006", "0.53659683", "0.5364767", "0.536389", "0.5361594", "0.5361478", "0.53557026", "0.5352131", "0.5347428", "0.5344589", "0.53354347", "0.5334772", "0.53265506", "0.5325757", "0.53256017", "0.5324849", "0.532309", "0.53192353", "0.5318525", "0.53132546", "0.5310341", "0.53077745", "0.5306449" ]
0.61307
4
Backup the git refs.
def backup_ref(self):
    # Back ourselves up!
    backup_ref="refs/backups/{0}-{1}-{2}".format(self.ref_type, self.ref_name, int( time.time() ))
    command = ("git", "update-ref", backup_ref, self.old_sha1)
    process = subprocess.Popen(command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backup(self):\n\n\t\twith temp_dir(self.path):\n\t\t\t# only if changes made\n\t\t\tcheck = sp.check_output(['git', 'status', '--porcelain'])\n\t\t\t# check if untracked files\n\t\t\tuntracked = sp.check_output(['git', 'ls-files', '--others', '--exclude-standard'])\n\n\t\t\tif check:\n\t\t\t\tif untracked:\n\t\t\t\t\t# just add them all ... probably a better/safer/more direct way to do this\n\t\t\t\t\t_ = sp.check_output(['git', 'add', '.'])\n\t\t\t\t_ = sp.check_output([\n\t\t\t\t\t\t\"git\", \"commit\", \"-am\", f\"AUTO update on {dt.date.today().isoformat()}\"])\n\n\t\t\t# presumes that there is a remote!\n\t\t\toutput = sp.check_output([\n\t\t\t\t\t\"git\", \"push\"],\n\t\t\t\t\tstderr=sp.STDOUT\n\t\t\t\t\t)\n\n\t\t\treturn output.decode()\n\t\t\t# else:\n\t\t\t# \treturn 'No changes to commit'", "def _stash_and_checkout(repo, version):\n repo.git.stash()\n repo.git.checkout(version)\n repo.git.clean(\"-df\")", "def backup_database():\n db_path = os.path.join(config.cum_dir, 'cum.db')\n backup_path = os.path.join(config.cum_dir, 'cum.db.bak')\n copyfile(db_path, backup_path)", "def backup(self):\n self.logger.info(\"Backing up current version of model...\")\n self.save_checkpoint(filename='backup.pth.tar')", "def backup(self):\r\n print('Backing up old files...')\r\n\r\n # Connect with SSH-PubKey and execute backup script\r\n subprocess.run(\r\n ['ssh',\r\n '-i', self.ssh_key,\r\n '-o', 'StrictHostKeyChecking=no',\r\n 'robot@{}'.format(self.settings['ip']),\r\n 'robolab-backup'\r\n ])\r\n\r\n print('Done.')", "def backup():\n backup_shift(os, config.utils.tasks.backup_depth)\n if config.utils.tasks.secret_key is None:\n shutil.copyfile(config.core.database_name, config.core.database_name+'.1')\n else:\n data = get_encrypted_database()\n with open(config.core.database_name+'.1', 'wb') as f:\n f.write(data)", "def backup(self):\n import datetime\n suffix = datetime.datetime.now().strftime('%Y-%m-%d--%H-%M-%S')\n self.host.run(\"test -f '%s' && cp --archive '%s' '%s.%s'\" % (\n esc1(self.remote_path), esc1(self.remote_path), esc1(self.remote_path), esc1(suffix)), use_sudo=self.use_sudo)", "def revert(self, ref):\n self._git.head.commit = ref\n self._git.head.reset(index=True, working_tree=True)", "def _save_state(self):\n with open(os.path.join(self._workdir, '.git', 'drover'), 'wb') as f:\n cPickle.dump(self, f)", "def __makeBackup(self):\n pass #FIXME!!!", "def backup(self):\n self.rollback_steps.insert(0, self.mongos.start_balancer)\n self.run_step(self.mongos.stop_balancer, 2)\n\n self.run_step(self.wait_for_locks)\n\n self.rollback_steps.insert(0, self.finish_shards_maintenance)\n self.run_step(self.prepare_shards_maintenance)\n\n self.run_step(self.backup_dump)\n\n self.rollback_steps.remove(self.finish_shards_maintenance)\n self.run_step(self.finish_shards_maintenance, 2)\n\n self.rollback_steps.remove(self.mongos.start_balancer)\n self.run_step(self.mongos.start_balancer, 4) # it usually starts on\n # the second try\n\n if self.backup_bucket is not None:\n run(\"rmdir %s\" % self.backup_path)\n\n logging.info(\"Finished successfully\")", "def __gitHouseKeeping(self):\n self.vcs.gitHouseKeeping(self.project.getProjectPath())", "def makeBackup(self):\n #--File Path\n original = self.path\n #--Backup\n backup = self.path+'.bak'\n shutil.copy(original,backup)\n #--First backup\n firstBackup = self.path+'.baf'\n if not os.path.exists(firstBackup):\n shutil.copy(original,firstBackup)", "def backup(ctx, project, origin, force):\n\n if not check_main_conf(ctx):\n return\n\n if 
origin is not None and project is None:\n click.echo(\"--project option is required when --origin is set.\")\n return\n\n bkp = ctx.obj[\"bkp\"]\n\n if not os.path.exists(ctx.obj[\"PROJECTS_DIR\"]):\n click.echo(\"Projects directory doesn't exists at %s\" % ctx.obj[\"PROJECTS_DIR\"])\n return\n\n if project is not None:\n bkp.project_load(project_name=project)\n bkp.backup(origin=origin, force=force)\n else:\n for file in os.listdir(ctx.obj[\"PROJECTS_DIR\"]):\n if file.endswith(\".conf\"):\n project_name = file.replace(\".conf\", \"\")\n bkp.project_load(project_name=project_name)\n bkp.backup(origin=origin, force=force)", "def git_upgraded_pkgs(self):\n\n self.extract_from_cachedir()\n self.etc_commits.added.commit()\n\n cherry_pick_sha = None\n if self.etc_commits.cherry_pick.rpaths:\n self.etc_commits.cherry_pick.commit()\n cherry_pick_sha = self.repo.git_cmd('rev-list -1 HEAD --')\n\n # Clean the working area of the files that are not under version\n # control.\n self.repo.git_cmd('clean -d -x -f')\n\n # Update the master-tmp branch with new files.\n if self.master_commits.added.rpaths:\n self.repo.checkout('master-tmp')\n for rpath in self.master_commits.added.rpaths:\n repo_file = os.path.join(self.repodir, rpath)\n if os.path.lexists(repo_file):\n warn('adding %s to the master-tmp branch but this file'\n ' already exists' % rpath)\n copy_file(rpath, self.root_dir, self.repodir,\n repo_file=repo_file)\n self.master_commits.added.commit()\n\n return cherry_pick_sha", "def backup(self):\n\n for filename in self.filenames[:]:\n if not filename.endswith(\".\"+self.PYTHON_EXTENSION):\n continue\n origfilename = filename + \".\" + self.BACKUP_EXTENSION\n if origfilename not in self.filenames:\n shutil.copy(filename, origfilename)\n self.filenames.append(origfilename)", "def automatic_backup(self):\n\n if self.observationId:\n logging.info(\"automatic backup\")\n self.save_project_activated()", "def save_backup(\n self):\n self.backup = self.data", "def hard_reset_branches(args):\n checkout_branches(args)\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n print >>sys.stderr, \"Hard resetting tracking branch in project: %s\" % name\n repo = GitRepo(workdir_for_project(project))\n repo.check_command([\"reset\", \"--hard\", project.remote_refspec])", "def flush_repo():\n server = get_server()\n run(\"rm -rf %(project_name)s\" % env)\n git.clone()\n server.setup()", "def dump_refs(args):\n man = load_manifest()\n first = True\n for (name, project) in man.projects.iteritems():\n if not first: print\n first = False\n print \"Project %s:\" % name\n\n repo = GitRepo(workdir_for_project(project))\n print \" HEAD: %s\" % repo.rev_parse(\"HEAD\")\n print \" Symbolic: %s\" % repo.current_branch()\n project_status(project, indent=2)\n\n repo = get_manifest_repo()\n if repo:\n print\n print \"Manifest repo:\"\n print \" HEAD: %s\" % repo.rev_parse(\"HEAD\")\n print \" Symbolic: %s\" % repo.current_branch()\n repo_status(repo,\n repo.current_branch(),\n \"origin/\" + repo.current_branch(),\n indent=2)\n check_dirty_repo(repo, indent=2)", "def svn_fs_hotcopy(*args):\r\n return _fs.svn_fs_hotcopy(*args)", "def __gitStashClear(self):\n self.vcs.gitStashClear(self.project.getProjectPath())", "def backup_database(self):\n backup_file = \"{}-{}.sql\".format(\n config.DATABASE_NAME, datetime.today().strftime(\"%Y-%m-%d--%H%M\")\n )\n backup_uri = \"{}/{}\".format(config.DATABASE_BACKUP_BUCKET, backup_file)\n step = \"Backing Up Database:\\nbackup={}\".format(backup_uri)\n try:\n 
self.slacker.send_thread_reply(step)\n backup_command = [\n \"gcloud\",\n \"sql\",\n \"export\",\n \"sql\",\n config.DATABASE_INSTANCE_NAME,\n backup_uri,\n \"--database={}\".format(config.DATABASE_NAME),\n \"--verbosity=debug\",\n ]\n subprocess.run(backup_command, check=True)\n except Exception as e:\n self.raise_step_error(step=step, error=e)", "def backup(self, backup):\n self._backup = backup", "def __restoreBackup(self):\n pass #FIXME!!!", "def __gitStashBranch(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitStashBranch(self.project.getProjectPath()) or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Create Branch\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def __gitStashDrop(self):\n self.vcs.gitStashDrop(self.project.getProjectPath())", "def clean(self):\n self.run(['git', 'reset', '--hard', 'HEAD'])\n self.run(['git', 'clean', '-fdx'])\n self.run(['git', 'checkout', 'origin/master'])", "def backups(self, backups):\n\n self._backups = backups", "def git_sync(commit_ish, force, last_tag, reset, url, directory):\n git_sync_(url, directory, commit_ish, force=force, last_tag=last_tag, reset=reset)", "def backup(config_file, bakfile):\n return _backup_config(config_file, bakfile)", "def svn_fs_hotcopy_berkeley(*args):\r\n return _fs.svn_fs_hotcopy_berkeley(*args)", "def flush():\n\timport cPickle as pickle\n\timport shutil\n\tfrom yblib.primitives import Primitives, StoragePool\n\n\top = optparse.OptionParser(\"Usage: ybadmin flush [opts] <checkpoint file>\")\n\top.add_option('-p', dest='purge', action='store_true', default=False,\n\t\thelp=\"Purge spool. 
Deletes files after successful replication.\")\n\n\t(opts, args) = op.parse_args()\n\n\ttry:\n\t\t(chkfile,) = args\n\texcept ValueError:\n\t\terr(op.format_help(), os.EX_USAGE)\n\n\t(in_use, to_copy) = pickle.load(file(chkfile))\n\n\tto_copy.extendleft(list(in_use))\n\n\tyb_config = load_yb_config(CONF_FILE)\n\tPrimitives.configure()\n\n\tfor cfile in to_copy:\n\t\tcan_delete = opts.purge\n\t\tsrc = cfile.path\n\n\t\tfor pool in StoragePool:\n\t\t\tdst = pool.dup_file(cfile).path\n\n\t\t\ttry:\n\t\t\t\tshutil.copy2(src, dst)\n\t\t\t\tprint \"Copied %s to %s\" % (src, dst)\n\t\t\texcept (shutil.Error, IOError), exc:\n\t\t\t\tcan_delete = False\n\t\t\t\tprint \"FAILED COPY FROM %s to %s REASON: %s\" % (src, dst, exc)\n\n\t\tif can_delete:\n\t\t\tos.unlink(src)\n\t\t\tprint \"Deleted %s\" % src", "def _push(self, src: str, dst: str) -> None:\n force = False\n if src.startswith(\"+\"):\n src = src[1:]\n force = True\n present = [self._refs[name][1] for name in self._refs]\n present.extend(self._pushed.values())\n # before updating the ref, write all objects that are referenced\n objects = git.list_objects(src, present)\n try:\n # upload objects in parallel\n pool = multiprocessing.pool.ThreadPool(processes=self._processes)\n res = pool.imap_unordered(Binder(self, \"_put_object\"), objects)\n # show progress\n total = len(objects)\n self._trace(\"\", level=Level.INFO, exact=True)\n for done, _ in enumerate(res, 1):\n pct = int(float(done) / total * 100)\n message = \"\\rWriting objects: {:3.0f}% ({}/{})\".format(pct, done, total)\n if done == total:\n message = \"%s, done.\\n\" % message\n self._trace(message, level=Level.INFO, exact=True)\n except Exception:\n if self.verbosity >= Level.DEBUG:\n raise # re-raise exception so it prints out a stack trace\n else:\n self._fatal(\"exception while writing objects (run with -v for details)\\n\")\n sha = git.ref_value(src)\n error = self._write_ref(sha, dst, force)\n if error is None:\n _write(\"ok %s\" % dst)\n self._pushed[dst] = sha\n else:\n _write(\"error %s %s\" % (dst, error))", "def backup():\n # Backup the WordPress database.\n db('backup')\n\n # Copy teh wp-config.php file from the server.\n get(os.path.join(env.wordpress_path, 'wp-config.php'),\n './backups/wp-config.php')\n\n now = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n\n theme_list = wp_cli('theme list --format=csv')\n plugin_list = wp_cli('plugin list --format=csv')\n\n # Backup the installed themes\n #with open('./backups/themes.csv', 'w') as f:\n # f.write(theme_list)\n\n # Backup the installed plugins\n #with open('./backups/plugins.csv', 'w') as f:\n # f.write(plugin_list)", "def __gitStashSave(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitStashSave(self.project.getProjectPath()) or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Save Stash\"),\n self.tr(\"\"\"The project should be reread. 
Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def analisa_backup(self):\n #obtem as pastas que precisam ser criadas na pasta de destino \n self.obter_pastas_inexistentes_no_destino()\n \n #enquanto existir subpasta na pasta atual, faça backup recursivo\n while(self.sub_pastas_nao_finalizadas):\n \n #pega ultima pasta da lista\n sub_pasta = self.sub_pastas_nao_finalizadas[-1]\n \n #cria um objeto backup, a partir da criação de um objeto pasta \n #(com base na subpasta de origem) e de uma sub_pasta de mesmo nome\n #na pasta de destino não é necessário que esta subpasta, na pasta \n #de destino exista. Após a criação do objeto Backup, chama-se o \n #método analisa_backup, que por meio de recursão varre todas as \n #subpastas verificando a necessidade de backup\n Backup(Pasta(sub_pasta),Pasta(os.path.join(self.pasta_destino.obter_caminho(),os.path.basename(sub_pasta)))).analisa_backup()\n \n #subpasta finalizada, retira a mesma da lista\n self.sub_pastas_nao_finalizadas.pop()\n\n #Pega somente o nome do arquivo a partir de uma lista com os caminhos \n #absolutos de cada arquivo\n arquivos_origem = [os.path.basename(arq) \\\n for arq in self.pasta_origem.obter_lista_arquivos()]\n \n arquivos_destino = [os.path.basename(arq) \\\n for arq in self.pasta_destino.obter_lista_arquivos()]\n\n for arquivo_origem in arquivos_origem:\n \n #Verifica se o arquivo de origem esta na pasta de destino, \n #caso não esteja, deve ser feito o backup\"\n if arquivo_origem not in arquivos_destino:\n \n #insere na lista operacao, um tupla (arquivo,pasta_origem,\n #pasta_destino) de modo a que no final o backup seja realizado\"\n self.__class__.operacoes_copiar.append(\\\n (arquivo_origem,\\\n self.pasta_origem.obter_caminho(),\\\n self.pasta_destino.obter_caminho())\\\n )", "def backup_database():\n logger.info(\"start database_backup\")\n management.call_command('dbbackup', compress=True)\n logger.info(\"end database_backup\")", "def dir_backup():\n return abspath('back')", "def autosave(path):\n cmd = git.cmd.Git(path)\n if cmd.status(porcelain=True):\n cmd.add(\".\")\n cmd.commit(message=f\"{datetime.now()} autosave\")\n cmd.push()\n print(f\"[ OK ] {datetime.now()} saved {path} \")", "def _backup_meta_data(meta_path: Path) -> None:\n meta_path = meta_path.resolve()\n backup_meta_path = meta_path.parent / (meta_path.name + \".bak\")\n i = 0\n while backup_meta_path.exists():\n backup_meta_path = backup_meta_path.with_suffix(\".bak{}\".format(i))\n i += 1\n shutil.copy(str(meta_path), str(backup_meta_path))", "def backup_all_db():\n filename = BACKUP_DIR + \"/\" + str(datetime.datetime.now().isoformat()) + \".yaml\"\n with open(filename, 'w+') as base_fp:\n for model in [Framework, Project, Document, Component, Arch, # Meta models\n WorkItem, AutoCase, Linkage, Bug, AutoCaseFailure]:\n base_fp.write(serializers.serialize('yaml', model.objects.all(), fields=model._min_dump))", "def back(self):\n\n self.root.bom_compare_old = self.old_entry.get()\n self.root.bom_compare_save = self.save_entry.get()\n self.root.bom_compare_new = self.new_entry.get()\n\n self.root.back(BomCompare)", "def undo(backup):\r\n backup.load_backup()\r\n backup.undo_moves()", "async def module_command_backup(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n file = parsed.args[\"name\"]\n file = file.with_suffix(f\"{file.suffix}.sqlite\")\n await self.database_create_backup(file)\n await ctx.core_command_backup(parsed, file)", "def 
backup_data():\n\ttry:\n\t\tos.chdir(backup_directory)\n\texcept:\n\t\tprint(\"Backup folder does not exist!\")\n\tfor directory in directories:\n\t\tshutil.rmtree('./'+directory)\n\tos.chdir('..')\n\tfor directory in directories:\n\t\tprint(\"Backing up data for label '{}'...\".format(directory))\n\t\tshutil.copytree('./'+directory, backup_directory+'/'+directory)\n\tprint(\"Backup complete!\")", "def restore_backup():\n\tfor directory in directories:\n\t\tshutil.rmtree('./'+directory)\n\tfor directory in directories:\n\t\tprint(\"Restoring data for label '{}'...\".format(directory))\n\t\tshutil.copytree(backup_directory+'/'+directory, './'+directory)\n\tprint(\"Data restoration complete!\")", "def web_backup():\n conf = config.utils\n if conf.tasks.secret_key is None:\n upload_path = config.core.database_name\n file = None\n else:\n file = tempfile.NamedTemporaryFile(delete=False)\n file.write(get_encrypted_database())\n file.close()\n upload_path = file.name\n\n factory = ftplib.FTP_TLS if conf.tls else ftplib.FTP\n # noinspection PyDeprecation\n with ftputil.FTPHost(conf.ftp.host, conf.ftp.username, conf.ftp.password,\n session_factory=factory, use_list_a_option=False) as host:\n backup_shift(host, conf.tasks.web_backup_depth)\n host.upload(upload_path, config.core.database_name+'.1')\n if file is not None:\n os.unlink(file.name)", "def createBackup(self, filename):\n if (not os.path.isfile(filename + '.bak')) and os.path.isfile(filename):\n with open(filename + '.bak', 'wb') as bakf:\n with open(filename, 'rb') as oldf:\n bakf.write(oldf.read())\n print(filename + \" backed up\")", "def push_backup(args: Arguments) -> None:\n\n files = get_files_from_previous_backup(args.site)\n bucket = get_bucket(args)\n\n for path in files:\n upload_file(\n path=path,\n site_name=args.site,\n bucket=bucket,\n bucket_directory=args.bucket_directory,\n )\n\n print(\"Done!\")", "def backup_changed(changed, name):\n with open(os.path.join(CHANGED_PICKLE_PATH, name), 'wb') as f:\n pickle.dump(changed, f, -1)", "def backup(ctx):\n config_path = ctx.obj['config_path']\n logger = ctx.obj['logger']\n\n config = Config(config_path)\n scheduler = BlockingScheduler(\n executors={'default': ThreadPoolExecutor(max_workers=1)}\n )\n\n for job in config.jobs.values():\n logger.info(f'filesystem={job.filesystem} '\n f'cron=\"{job.cron}\" '\n 'msg=\"Adding job.\"')\n scheduler.add_job(job.start, 'cron', **job.cron, coalesce=True)\n\n try:\n scheduler.start()\n except (KeyboardInterrupt, SystemExit):\n pass", "def test_backup_merge(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self._take_n_backups(n=self.backupset.number_of_backups)\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n backup_count = 0\n \"\"\" remove last 6 chars of offset time in backup name\"\"\"\n if output and output[0]:\n bk_info = json.loads(output[0])\n bk_info = bk_info[\"repos\"][0]\n else:\n return False, \"No output content\"\n\n if bk_info[\"backups\"]:\n for i in range(0, len(bk_info[\"backups\"])):\n backup_name = bk_info[\"backups\"][i][\"date\"]\n if self.debug_logs:\n print(\"backup name \", backup_name)\n print(\"backup set \", self.backups)\n if backup_name in self.backups:\n backup_count += 1\n self.log.info(\"{0} matched in info command output\".format(backup_name))\n self.assertEqual(backup_count, len(self.backups), \"Initial number of backups did not match\")\n 
self.log.info(\"Initial number of backups matched\")\n self.backupset.start = randrange(1, self.backupset.number_of_backups)\n self.backupset.end = randrange(self.backupset.start + 1, self.backupset.number_of_backups + 1)\n status, output, message = self.backup_merge(check_for_panic=True)\n if not status:\n self.fail(message)\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n backup_count = 0\n if output and output[0]:\n bk_info = json.loads(output[0])\n bk_info = bk_info[\"repos\"][0]\n else:\n return False, \"No output content\"\n if bk_info[\"backups\"]:\n for i in range(0, len(bk_info[\"backups\"])):\n backup_name = bk_info[\"backups\"][i][\"date\"]\n if self.debug_logs:\n print(\"backup name \", backup_name)\n print(\"backup set \", self.backups)\n backup_count += 1\n if backup_name in self.backups:\n self.log.info(\"{0} matched in info command output\".format(backup_name))\n else:\n self.fail(\"Didn't expect backup date {0} from the info command output\" \\\n \" to be in self.backups (the list of exepected backup dates\" \\\n \" after a merge)\".format(backup_name))\n\n self.assertEqual(backup_count, len(self.backups), \"Merged number of backups did not match\")\n self.log.info(\"Merged number of backups matched\")", "def restore(self, clean=False):\n\n for origfilename in self.filenames[:]:\n if not origfilename.endswith(\".\"+self.BACKUP_EXTENSION):\n continue\n filename = origfilename.strip(\".\"+self.BACKUP_EXTENSION)\n shutil.copy(origfilename, filename)\n self.filenames.append(filename)\n if clean:\n os.remove(origfilename)", "def stash_changes(c): # , stash_name: str):\n c.run(f\"git stash push -q --keep-index\")", "def backup_project():\n _require_environment()\n\n # Unless explicitly provided, uses local Django settings to\n # extract username/password to access remote database\n database = env.project.get('database', None)\n if not database:\n django.settings_module(env.project['settings'])\n database = django_settings.DATABASES['default']\n\n # Remote side\n with prefix(_django_prefix()):\n with cd(_django_project_dir()):\n # Creates dir to store backup, avoiding existing similar names\n dirname = '../backup/%s_%s' % (datetime.date.today().strftime('%Y%m%d'), env.environment)\n path = dirname\n index = 0\n while files.exists(path) or files.exists('%s.tar.gz' % path):\n index += 1\n path = '%s.%s' % (dirname, index)\n run('mkdir -p %s' % path)\n\n # Backup MySQL\n run('mysqldump %s -u %s -p%s %s > %s/%s.sql' % (\n '-h %s' % database['HOST'] if database.get('HOST', None) else '',\n database['USER'],\n database['PASSWORD'],\n database['NAME'],\n path,\n env.project['project'],\n ))\n\n # Backup extra files\n extra_backup_files = env.project.get('extra_backup_files', [])\n for file in extra_backup_files:\n run('cp -R %s %s/' % (file, path))\n\n # Create .tar.gz and removes uncompressed files\n with hide('stdout'):\n run('tar -czvf %s.tar.gz %s/' % (path, path))\n run('rm -rf %s/' % path)\n\n # Download backup?\n if console.confirm('Download backup?'):\n return get('%s.tar.gz' % path, '../backup')", "def on_btnBackup_clicked(self, widget):\n try:\n variables.filechooserbackup.show()\n variables.neobackup = funcionesvar.backup()\n variables.neobackup = str(os.path.abspath(variables.neobackup))\n\n except:\n print('error abrir file choorse backup')", "def save(self):\n\n err = C.git_remote_save(self._remote)\n check_error(err)", "def backup(self):\n success = False\n try:\n with open(self.pickle_filename, 'wb') as brain_data:\n 
pickle.dump(self, brain_data)\n with open('{0}.bak'.format(self.pickle_filename), \n 'wb') as brain_data_bak:\n pickle.dump(self, brain_data_bak)\n except IOError as err:\n print('File error: {0} encountered while saving brain data'.\n format(err))\n except pickle.PickleError as perr: \n print('Pickling error: {0} encountered while saving brain data'.\n format(perr)) \n else:\n success = True\n return success", "def _backup(self, parsed_args):\n if self.backup:\n dep_sys = self.document['deploymentSystem']\n dep_path = self.document['deploymentPath']\n backup_dep_path = dep_path + '.' + str(seconds())\n\n print_stderr('Backing up agave://{}/{}'.format(dep_sys, dep_path))\n start_time = milliseconds()\n self.messages.append(\n ('backup', 'src: agave://{}/{}'.format(dep_sys, dep_path)))\n self.messages.append(\n ('backup', 'dst: agave://{}/{}'.format(dep_sys,\n backup_dep_path)))\n\n try:\n # TODO - only do this if dep_path exists, otherwise an Exception will be raised\n manage.move(dep_path,\n system_id=dep_sys,\n destination=backup_dep_path,\n agave=self.tapis_client)\n print_stderr('Finished ({} msec)'.format(milliseconds() -\n start_time))\n return True\n except Exception as exc:\n if self.ignore_errors:\n self.messages.append(('backup', str(exc)))\n print_stderr('Failed ({} msec)'.format(milliseconds() -\n start_time))\n return False\n else:\n raise\n\n return True", "def backup(self, outdir=None):\n import os\n if outdir is None:\n import time\n outdir = os.path.join('backup',time.strftime('%Y%m%d-%H%M'))\n cmd = 'time mongodump -c \"%s\" -h %s:%s -d mfdb -o \"%s\"'%(\n self.collection.name, self.db.host, self.db.port, outdir)\n print cmd\n os.system(cmd)", "def backup_files(self):\n backup_path = os.path.join(self.backupdir, self.get_timestamp().replace(':', '-'))\n try:\n if not os.path.exists(backup_path):\n self.make_path(backup_path)\n if not os.path.exists(backup_path):\n raise IOError('Path was not made correctly')\n else:\n self.print_to_log('Backup path: %s' % backup_path)\n for item in self.file_list:\n try:\n self.print_to_log('Backing up file: %s' % item)\n shutil.copy(item, backup_path)\n except IOError, why:\n self.error = 2\n self.print_to_log(str(why))\n self.print_to_log('Unable to archive file: %s continuing' % item)\n except IOError, why:\n self.print_to_log(str(why))\n self.print_to_log('Quiting with out archiving')\n self.error = 1", "def git():\n pass", "def __gitStashApply(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitStashApply(self.project.getProjectPath()) or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Restore Stash\"),\n self.tr(\"\"\"The project should be reread. 
Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def step_back(\n self):\n if self.backup != None:\n self.data = self.backup", "def fullBackup(backupName, verify, doTheBackup = True):\n backup(backupName, full = True, verify = verify, verifyIncrementally = False, doTheBackup = doTheBackup)", "def backupRM(purge=False):\n print(\"Backing up your remarkable files\")\n if purge:\n shutil.rmtree(\"/Users/lisa/Documents/remarkableBackup\" + remContent)\n print(\"deleted old files\")\n backupCommand = \"\".join([\"scp -r \", remarkableUsername, \"@\", remarkableIP,\n \":\", remarkableDirectory, \" \",\n remarkableBackupDirectory])\n #print(backupCommand)\n os.system(backupCommand)", "def backup():\n\n\t# Sound backup alarm.\n\t#Sound.tone([(1000, 500, 500)] * 3)\n\n\t# Turn backup lights on:\n\tLeds.set_color(Leds.RIGHT, Leds.RED)\n\tLeds.set_color(Leds.LEFT, Leds.RED)\n\n\t# Stop both motors and reverse for 1.5 seconds.\n\t# `run-timed` command will return immediately, so we will have to wait\n\t# until both motors are stopped before continuing.\n\tstop()\n\trightMotor.run_timed(duty_cycle_sp=-75, time_sp=750)\n\tleftMotor.run_timed(duty_cycle_sp=-75, time_sp=750)\n\n\t# When motor is stopped, its `state` attribute returns empty list.\n\t# Wait until both motors are stopped:\n\twhile any(m.state for m in (leftMotor, rightMotor)):\n\t\tsleep(0.1)\n\n\t# Turn backup lights off:\n\tLeds.set_color(Leds.RIGHT, Leds.GREEN)\n\tLeds.set_color(Leds.LEFT, Leds.GREEN)", "def checkout_latest():\n with cd(env.repo_path):\n run('git checkout %(branch)s;' % env)\n run('git pull origin %(branch)s' % env)", "def backup_config(self):\n prev_config = set()\n for src in self.config:\n dst = '%s.prev' % src\n LOGGER.debug('Backing up %s to %s', src, dst)\n\n try:\n shutil.copy(src, dst)\n\n except IOError as e:\n if e.errno != errno.ENOENT:\n raise\n\n # If the config file is missing, we can skip backing it up.\n LOGGER.warning('File %s missing, skipping backup', src)\n\n else:\n prev_config.add(dst)\n return prev_config", "def checkout_branches(args):\n\n ensure_tracking_branches([])\n if check_dirty([]) and '-f' not in args:\n raise Exception(\"Cannot checkout new branches with dirty projects.\")\n \n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n print >>sys.stderr, \"Checking out tracking branch in project: %s\" % name\n repo = GitRepo(workdir_for_project(project))\n # Check that sucker out\n repo.check_command([\"checkout\", project.tracking_branch])", "def sync_from_upstream(self):\n if not self.missing_branches:\n self.log(f\"All branches are synced, nothing to do here.\")\n return\n\n with tempfile.TemporaryDirectory() as tmpdir:\n src_path = Path(tmpdir) / self.deb_model.src\n self.deb_model.base.clone(cwd=tmpdir)\n for branch in self.missing_branches:\n self.log(f\"Processing branch {branch}\")\n self.deb_model.base.checkout(branch, new_branch=True, cwd=str(src_path))\n\n changelog_fn = src_path / \"debian/changelog\"\n changelog_fn_tpl = src_path / \"debian/changelog.in\"\n\n k8s_major_minor = semver.VersionInfo.parse(branch.lstrip(\"v\"))\n\n changelog_context = {\n \"deb_version\": f\"{str(k8s_major_minor)}-0\",\n }\n\n self.log(f\"Writing template vars {changelog_context}\")\n changelog_out = changelog_fn_tpl.read_text()\n changelog_out = self.render(changelog_fn_tpl, changelog_context)\n changelog_fn.write_text(changelog_out)\n\n self.log(f\"Committing {branch}\")\n self.deb_model.base.add([str(changelog_fn)], cwd=str(src_path))\n 
self.deb_model.base.commit(\n f\"Creating branch {branch}\", cwd=str(src_path)\n )\n self.deb_model.base.push(ref=branch, cwd=str(src_path))", "def copy_db():\n with cd(\"/tmp\"), lcd(\"/tmp\"):\n sudo(\"pg_dump gsi > /tmp/latest.sql\", user=\"postgres\")\n run(\"tar zcvf latest.sql.tgz latest.sql\")\n get(\"/tmp/latest.sql.tgz\", \"latest.sql.tgz\")\n sudo(\"rm /tmp/latest.sql.tgz /tmp/latest.sql\")", "def __gitStashPop(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitStashPop(self.project.getProjectPath()) or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Restore Stash\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def backup(backupName, full, verify, verifyIncrementally = False, doTheBackup = True):\n testRestoreDir = localenv.backups.testRestoreDir\n backupDetails = localenv.backups.backups[backupName]\n backupMap = getBackupMap(backupName)\n BackupOperations.doBackup (backupDetails.source, backupMap, testRestoreDir, full = full, \n verify = verify, verifyIncrementally = verifyIncrementally, \n doTheBackup = doTheBackup, \n recordTrigger = localenv.backups.recordTrigger)", "def __gitCreateArchive(self):\n self.vcs.gitCreateArchive(self.project.getProjectPath())", "def reset_backup_folder(self):\n pass", "def _save(self):\n if not os.path.exists(gitrepo.DEFAULT_REPOSITORY_PATH):\n # there is no data yet --> nothing to save\n return\n\n self.stack.serialize(DEFAULT_STACK)\n self.backlog.serialize(DEFAULT_QUEUE)\n # self.blocked.serialize(DEFAULT_LIMBO)\n self.sleeping.serialize(DEFAULT_DORM)", "def _pushbookmark(pushop):\n if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:\n return\n pushop.stepsdone.add(b'bookmarks')\n ui = pushop.ui\n remote = pushop.remote\n\n for b, old, new in pushop.outbookmarks:\n action = b'update'\n if not old:\n action = b'export'\n elif not new:\n action = b'delete'\n\n with remote.commandexecutor() as e:\n r = e.callcommand(\n b'pushkey',\n {\n b'namespace': b'bookmarks',\n b'key': b,\n b'old': hex(old),\n b'new': hex(new),\n },\n ).result()\n\n if r:\n ui.status(bookmsgmap[action][0] % b)\n else:\n ui.warn(bookmsgmap[action][1] % b)\n # discovery can have set the value form invalid entry\n if pushop.bkresult is not None:\n pushop.bkresult = 1", "def ensure_sync_master_branch(self):\n # TODO(robertocn): Investigate what causes the states mentioned in the\n # docstring in the first place.\n self.api.m.git('update-ref', 'refs/heads/master',\n 'refs/remotes/origin/master')\n self.api.m.git('checkout', 'master', cwd=self.api.m.path['checkout'])", "def branched_repo(tmp_path_factory):\n tmpdir = tmp_path_factory.mktemp(\"branched_repo\")\n git_repo = GitRepoFixture.create_repository(tmpdir)\n git_repo.add(\n {\n \"del_master.py\": \"original\",\n \"del_branch.py\": \"original\",\n \"del_index.py\": \"original\",\n \"del_worktree.py\": \"original\",\n \"mod_master.py\": \"original\",\n \"mod_branch.py\": \"original\",\n \"mod_both.py\": \"original\",\n \"mod_same.py\": \"original\",\n \"keep.py\": \"original\",\n },\n commit=\"Initial commit\",\n )\n branch_point = git_repo.get_hash()\n git_repo.add(\n {\n \"del_master.py\": None,\n \"add_master.py\": \"master\",\n \"mod_master.py\": \"master\",\n \"mod_both.py\": \"master\",\n \"mod_same.py\": \"same\",\n },\n commit=\"master\",\n )\n 
git_repo.create_branch(\"branch\", branch_point)\n git_repo.add(\n {\n \"del_branch.py\": None,\n \"mod_branch.py\": \"branch\",\n \"mod_both.py\": \"branch\",\n \"mod_same.py\": \"same\",\n },\n commit=\"branch\",\n )\n git_repo.add(\n {\"del_index.py\": None, \"add_index.py\": \"index\", \"mod_index.py\": \"index\"}\n )\n (git_repo.root / \"del_worktree.py\").unlink()\n (git_repo.root / \"add_worktree.py\").write_bytes(b\"worktree\")\n (git_repo.root / \"mod_worktree.py\").write_bytes(b\"worktree\")\n return git_repo", "def _pushb2checkbookmarks(pushop, bundler):\n if not _pushing(pushop) or pushop.force:\n return\n b2caps = bundle2.bundle2caps(pushop.remote)\n hasbookmarkcheck = b'bookmarks' in b2caps\n if not (pushop.outbookmarks and hasbookmarkcheck):\n return\n data = []\n for book, old, new in pushop.outbookmarks:\n data.append((book, old))\n checkdata = bookmod.binaryencode(pushop.repo, data)\n bundler.newpart(b'check:bookmarks', data=checkdata)", "def dump(self, check_hash=False):\n if is_fresh(self.name, self._content_hash):\n return\n write_file(self.name, self.content, overwrite=True)\n if check_hash:\n self._content_hash = None\n self.save()", "def backup(self):\n logging.info('Executing NCBI Blast backup')\n backup_folder = self.create_backup_dir()\n if not backup_folder:\n logging.error('Failed to create backup folder.')\n return False\n # Copy only README files for future reference\n app_readme_file = self.config['readme_file']\n ncbi_readme_file = self.info_file_name\n try:\n shutil.copy2(app_readme_file, backup_folder)\n shutil.copy2(ncbi_readme_file, backup_folder)\n except Exception as e:\n logging.exception('NCBI Blast Backup did not succeed. Error: {}'\n .format(e))\n return False\n return True", "def backup():\n\n # Sound backup alarm.\n spkr = Sound()\n spkr.tone([(1000, 500, 500)] * 3)\n\n # Turn backup lights on:\n leds = Leds()\n\n for light in ('LEFT', 'RIGHT'):\n leds.set_color(light, 'RED')\n\n # Stop both motors and reverse for 1.5 seconds.\n # `run-timed` command will return immediately, so we will have to wait\n # until both motors are stopped before continuing.\n for m in motors:\n m.stop(stop_action='brake')\n m.run_timed(speed_sp=-500, time_sp=1500)\n\n # When motor is stopped, its `state` attribute returns empty list.\n # Wait until both motors are stopped:\n while any(m.state for m in motors):\n sleep(0.1)\n\n # Turn backup lights off:\n for light in ('LEFT', 'RIGHT'):\n leds.set_color(light, 'GREEN')", "def post_backup(self, backup, manifest_file):\n pass", "def test_multiple_branches(self, tmpgitdir):\n with tmpgitdir.join('file_a.txt').open('w') as handle:\n handle.write('first file')\n\n subprocess.check_call(['git', 'add', '.'])\n subprocess.check_call(['git', 'commit', '-m', 'first'])\n\n subprocess.check_call(['git', 'checkout', '-b', 'testbranch'])\n\n with tmpgitdir.join('file_b.txt').open('w') as handle:\n handle.write('second file')\n\n subprocess.check_call(['git', 'add', '.'])\n subprocess.check_call(['git', 'commit', '-m', 'second'])\n\n assert git_head_ref_name(tmpgitdir) == 'testbranch'", "def rollback(folder_name, with_subfolders):\n process_backups(folder_name, with_subfolders, lambda x: copy2(x, x[:-4]))", "def do_after_dump(self, dump_items):\n # note that it's possible for links in \"latest\" to point to\n # files from different runs, in which case the checksum files\n # will have accurate checksums for the run for which it was\n # produced, but not the other files. 
FIXME\n for htype in Checksummer.HASHTYPES:\n dfname = DumpFilename(\n self.wiki, None, self.checksummer.get_checksum_filename_basename(htype))\n self.symlinks.save_symlink(dfname)\n self.symlinks.cleanup_symlinks()\n for item in dump_items:\n self.runinfo.save_dump_runinfo(RunInfo.report_dump_runinfo(dump_items))\n if item.to_run():\n dump_names = item.list_dumpnames()\n if type(dump_names).__name__ != 'list':\n dump_names = [dump_names]\n if item._parts_enabled:\n # if there is a specific part we are doing, we want to only clear out\n # old files for that part, because new files for the other\n # parts may not have been generated yet.\n partnum = item._partnum_todo\n else:\n partnum = None\n\n checkpoint = None\n if item._checkpoints_enabled:\n if item.checkpoint_file is not None:\n # if there's a specific checkpoint file we are\n # rerunning, we would only clear out old copies\n # of that very file. meh. how likely is it that we\n # have one? these files are time based and the start/end pageids\n # are going to fluctuate. whatever\n checkpoint = item.checkpoint_file.checkpoint\n\n for dump in dump_names:\n self.symlinks.remove_symlinks_from_old_runs(\n self.wiki.date, dump, partnum, checkpoint, onlyparts=item.onlyparts)\n\n self.feeds.cleanup_feeds()", "def rsync_reference_files(self):\n self.cmd(\"{rsync_cmd} {remote} {local} && gunzip {local}/*.gz\"\n .format(\n rsync_cmd=self.cmds[\"rsync\"],\n remote=self.files[\"remote_reference_dir\"],\n local=self.local_reference_dir,\n ),\n shell=True)", "def refresh():\n git.fetch()\n output = str(git.merge('--ff-only')).strip()\n if output != 'Already up to date.':\n print(output)\n git.fetch('--tags')", "def perform_backup(repo, archive_name, config, logger):\n repo.backup(archive_name, config['backup_source_paths'])\n\n integrity_failure = False\n for check in config.get('check_files', []):\n check_command = [os.path.join('check_commands', check['command'])]\n path = os.path.join(\n config['working_directory'],\n check['path'].lstrip('/')\n )\n\n check_command.extend(check['arguments'])\n check_command.append(path)\n\n repo.restore_file_from_archive(archive_name, check['path'])\n\n proc = subprocess.Popen(\n check_command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n stdout, stderr = proc.communicate()\n for line in stdout.splitlines():\n logger.info(line)\n if proc.returncode != 0:\n logger.error('Backup integrity check failed!')\n output = logger.error\n integrity_failure = True\n else:\n output = logger.warning\n for line in stderr.splitlines():\n output(line)\n\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.unlink(path)\n\n # Make sure we fail noisily if for whatever reason the archive has become\n # corrupted.\n repo.check()\n repo.check_archive(archive_name)\n\n if integrity_failure:\n raise CheckFailure('Backup file checks failed.')", "def backup_tables(tables, backup_filename):\n tables_switches = \" \".join(f\"-t {table}\" for table in tables)\n jobs = cpu_count()\n cmd = f\"pg_dump {tables_switches} -j {jobs} -Fc > {backup_filename}\"\n pg_dump = run(cmd, shell=True, capture_output=True)\n if pg_dump.returncode != 0:\n webhook_url = environ.get(\"SLACK_WEBHOOK_URL\")\n if webhook_url:\n msg = \"Failed to {cmd}:\\n{pg_dump.stderr.decode()}\"\n notify_via_slack(webhook_url, msg)\n exit(pg_dump.returncode)", "def archive_backup(self):\n\n # Archiving the Training script\n shutil.copyfile(self.script_path, self.save_path + '/0-' + os.path.basename(self.script_path))\n os.chmod(self.save_path + '/0-' + 
os.path.basename(self.script_path), 0o755)\n # Archiving the src folder\n pkg_path = os.path.dirname(arch_src)\n backup_path = os.path.join(self.save_path, 'src_backup')\n shutil.make_archive(backup_path, 'gztar', pkg_path)\n\n # Archiving the Environment Info\n env_info = collect_env.get_pretty_env_info()\n with open(self.save_path + '/env_info.txt', 'w') as f:\n f.write(env_info)", "def back(self):\n self.book.back()\n self.book.save()\n self.save()", "def test_backup_restore_with_ops(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n initial_gen = copy.deepcopy(gen)\n initial_keys = []\n for x in initial_gen:\n initial_keys.append(x[0])\n self.log.info(\"Start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.ops_type = self.input.param(\"ops-type\", \"update\")\n self.log.info(\"Create backup repo \")\n self.backup_create()\n for i in range(1, self.backupset.number_of_backups + 1):\n self._backup_restore_with_ops()\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n\n if self.compact_backup and self.ops_type == \"delete\":\n self.log.info(\"Start to compact backup \")\n self.backup_compact_validate()\n self.log.info(\"Validate deleted keys\")\n self.backup_compact_deleted_keys_validation(initial_keys)\n\n self.log.info(\"start restore cluster \")\n restored = {\"{0}/{1}\".format(start, end): \"\"}\n for i in range(1, self.backupset.number_of_backups + 1):\n self.backupset.start = start\n self.backupset.end = end\n self._backup_restore_with_ops(backup=False, compare_function=\">=\")\n if self.backupset.number_of_backups == 1:\n continue\n while \"{0}/{1}\".format(start, end) in restored:\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n restored[\"{0}/{1}\".format(start, end)] = \"\"", "def __removeBackup(self):\n pass #FIXME!!", "def reset(self, depth=0):\n\n if self.ref_type(self.default_ref) == 'branch':\n branch = self.truncate_ref(self.default_ref)\n branch_output = fmt.ref_string(branch)\n if not self.existing_local_branch(branch):\n return_code = self._create_branch_local_tracking(branch, self.remote, depth=depth, fetch=True)\n if return_code != 0:\n message = colored(' - Failed to create tracking branch ', 'red') + branch_output\n self._print(message)\n self._exit(message)\n return\n elif self._is_branch_checked_out(branch):\n self._print(' - Branch ' + branch_output + ' already checked out')\n else:\n self._checkout_branch_local(branch)\n remote_output = fmt.remote_string(self.remote)\n if not self.existing_remote_branch(branch, self.remote):\n message = colored(' - No existing remote branch ', 'red') + remote_output + ' ' + branch_output\n self._print(message)\n self._exit(message)\n self.fetch(self.remote, ref=self.default_ref, depth=depth)\n self._print(' - Reset branch ' + branch_output + ' to ' + remote_output + ' ' + branch_output)\n remote_branch = self.remote + '/' + branch\n self._reset_head(branch=remote_branch)\n elif self.ref_type(self.default_ref) == 'tag':\n self.fetch(self.remote, ref=self.default_ref, depth=depth)\n self._checkout_tag(self.truncate_ref(self.default_ref))\n elif self.ref_type(self.default_ref) == 'sha':\n self.fetch(self.remote, ref=self.default_ref, depth=depth)\n 
self._checkout_sha(self.default_ref)", "def backup_remote():\n remote_filename = get_backup_filename(hostname=env.host_string)\n print(\"Remote filename: \" + remote_filename)\n\n with cd('bookmarker'):\n run('source env/bin/activate && ' + BACKUP_COMMAND + remote_filename)\n # scp the remote backup file to local.\n get(remote_filename, remote_filename)\n\n return remote_filename" ]
[ "0.6952309", "0.6250587", "0.6083144", "0.6023908", "0.6018048", "0.5927529", "0.5912871", "0.58550334", "0.5830668", "0.5823843", "0.5808333", "0.5635343", "0.5619511", "0.5558526", "0.5550184", "0.54987204", "0.54584205", "0.54354334", "0.54245806", "0.53884566", "0.5356423", "0.5344026", "0.5339792", "0.53226286", "0.53070134", "0.5303594", "0.53003895", "0.5298812", "0.5298441", "0.52726203", "0.5262915", "0.52319324", "0.5207645", "0.51841843", "0.51703054", "0.51578486", "0.51438683", "0.51366764", "0.51298493", "0.5115149", "0.51148844", "0.51091194", "0.5091682", "0.5084122", "0.50769424", "0.50752896", "0.5074597", "0.5061824", "0.50541574", "0.5046171", "0.5041557", "0.50408435", "0.50257695", "0.5013014", "0.4986673", "0.49841446", "0.49782503", "0.49603972", "0.49582323", "0.4952453", "0.4951818", "0.49461418", "0.49432623", "0.49337462", "0.49286854", "0.49233475", "0.49167418", "0.49110532", "0.49047148", "0.4899021", "0.48961985", "0.48941538", "0.48854783", "0.4881136", "0.4879211", "0.4862393", "0.48573726", "0.48538223", "0.48529744", "0.4851481", "0.48411158", "0.48401427", "0.4840098", "0.4836437", "0.48345974", "0.48242033", "0.4804516", "0.48038453", "0.48025435", "0.48022485", "0.47895417", "0.478342", "0.477935", "0.47792217", "0.47729427", "0.47725844", "0.47722355", "0.4771875", "0.47712436", "0.4770994" ]
0.7884595
0
Whether the audit failed (True) or passed (False).
def audit_failed(self):
    return self.__failed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hasFailed(self):\n record = self.getRunRecord().getRecord(\"run\")\n return record.state is FAIL", "def is_failed(self):\n\n return self._state == \"FAILED\"", "def is_failed(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n return self._action('is-failed').succeeded", "def passed(self):\n return not self.failed()", "def is_failing(self):\n return self.current_state == self.States.FAILED", "def didFail(self):\n return self.state in (\"cancelled\", \"failed\")", "def __bool__(self) -> bool:\n return self.failed", "def is_successful(self):\n for item in self.summary:\n if item['task_status'] is False:\n return testcase.TestCase.EX_TESTCASE_FAILED\n\n return super().is_successful()", "def isfailure(self):\n\n return self.proc.returncode != 0", "def didFail(self):\n return self._state in self._FailedStates", "def failed(self) -> bool:\n return not self.ok", "def was_successful(self):\n return self.data.exception_type is None or \\\n self.data.exception_type in TestOutcome.POSITIVE_RESULTS", "def is_successful(self):\n skips = self.details.get(\"skipped_number\", 0)\n if skips > 0 and self.deny_skipping:\n return testcase.TestCase.EX_TESTCASE_FAILED\n if self.tests_count and (\n self.details.get(\"tests_number\", 0) != self.tests_count):\n return testcase.TestCase.EX_TESTCASE_FAILED\n return super().is_successful()", "def failed(self):\n if len(self.progress) > 0:\n return self.progress[-1].status == TestStatus.canceled\n return False", "def has_failed(self):\n return self._error is not None", "def had_error(self):\n return self.data.exception_type == TestOutcome.ERROR", "def failed(self):\n return len(self.failed_outputs) > 0 or len(self.errors) > 0", "def task_is_failure(task):\n\n if task and task.state == 'FAILURE':\n return True\n return False", "def is_failing(self):\n if self.data.exception_type is None:\n return False\n\n if self.mode in (MODE_CRITICAL, MODE_FINALLY) and \\\n self.data.exception_type not in TestOutcome.POSITIVE_RESULTS:\n return True\n\n if self.mode in (MODE_OPTIONAL,) and \\\n self.data.exception_type not in TestOutcome.UNCRITICAL_RESULTS:\n return True\n\n return False", "def indicate_failure(self):\n pass", "def passed(self):\n if self.result == RESULT_PASS:\n return True\n\n return False", "def is_failed_user_data_retrieval(self):\n return self._tag == 'failed_user_data_retrieval'", "def failed(self):\n output = self.__call__()\n return output.failed", "def failed_roboscript(self) -> bool:\n return pulumi.get(self, \"failed_roboscript\")", "def zero_failures(self) -> bool:\n return abs(self.failurerate) < 1e-7", "def _job_was_successful(self, status):\n success = True\n\n # https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/Event\n for event in status[\"metadata\"][\"events\"]:\n\n logger.debug(event[\"description\"])\n\n # Does it always result in fail for other failure reasons?\n if \"failed\" in event:\n success = False\n action = event.get(\"failed\")\n logger.debug(\"{}: {}\".format(action[\"code\"], action[\"cause\"]))\n\n elif \"unexpectedExitStatus\" in event:\n action = event.get(\"unexpectedExitStatus\")\n\n if action[\"exitStatus\"] != 0:\n success = False\n\n # Provide reason for the failure (desc includes exit code)\n msg = \"%s\" % event[\"description\"]\n if \"stderr\" in action:\n msg += \": %s\" % action[\"stderr\"]\n logger.debug(msg)\n\n return success", "def failed(self):\n return self.joe.dead", "def has_failures_or_errors(self):\r\n return (self._num_failures() > 0) or 
(self._num_script_errors() > 0)", "def server_failure(self, resp):\n return resp[0] in FAILURE_CODES", "def result(self):\n result = True\n if self.state != \"error\":\n if self.tests_run < len(self.tests):\n result = False\n else:\n failed = [test for test in self.tests if test.test_result == False]\n if failed:\n result = False\n else:\n result = False\n\n return result", "def is_success(self):\n return self.status_code >= 200 and self.status_code < 300 and self.uuid", "def __bool__(self):\n return not self.err", "def __bool__(self):\n return self.is_successful", "def is_successful(self):\n return self._is_successful(self.denoising_label)", "def test_case_01(self):\n if True:\n self.fail()", "def test_invalid(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n with pytest.raises(AssertionError):\n key.audit(5, 1, 1, 1)", "def successful(self) -> bool:\n pass", "def audit_only(self) -> bool:\n result = True\n for effect in self.allowed_effects:\n if effect not in [\"disabled\", \"audit\", \"auditifnotexists\"]:\n result = False\n return result", "def is_success(self):\r\n if self.status_code < 400:\r\n return True\r\n return False", "def is_success(self):\r\n if self.status_code < 400:\r\n return True\r\n return False", "def infrastructure_failure(self) -> bool:\n return pulumi.get(self, \"infrastructure_failure\")", "def is_error(self):\r\n if self.status not in [STATUS_CODES['200'], ]:\r\n return True\r\n else:\r\n return False", "def passed(self):\n return self.is_executed and self.is_executed_ok and self.is_equal_result", "def get_success_flag(self):\n return True", "def _is_fail(self):\n failed = False\n for obj in self.world_state.objects:\n failed = failed or obj.lost\n return failed", "def has_failures(self):\n for fail_cache in self._failures.values():\n if fail_cache:\n return True\n return False", "def _episode_success(self, observations):\n dist = self._env.get_metrics()[\"object_to_goal_distance\"]\n if (\n abs(dist) > self._success_distance\n or observations[\"gripped_object_id\"] != -1\n ):\n return False\n return True", "def successful(self):\n return self._exc_info is not None and self._exc_info[1] is None", "def has_failed_outputs(self):\n return False", "def is_last_update_failed(self):\n return self._data.get('last_update_failed')", "def has_error(self, response):\n return response.find(' Matched') == -1 and response.find(' Failed') == -1", "def has_error(self):\n return self.status == 'OK'", "def _check_error(self):\n\n if self.error_code_test != 0:\n return False\n else:\n return True", "def conditionFailed(self):\n result = Activatable(self.effects, condition=AlwaysFalseCondition()).canActivate(self.game)\n self.assertFalse(result, \"The Activatable should not be activatable\")", "def is_successful(self):\n\t\treturn randint(1, 100) <= self.get_success_probability()", "def test_audit(user, is_program, has_company):\n enrollment = (\n ProgramEnrollmentFactory.create()\n if is_program\n else CourseRunEnrollmentFactory.create()\n )\n if has_company:\n enrollment.company = CompanyFactory.create()\n\n enrollment.save_and_log(user)\n\n expected = {\n \"active\": enrollment.active,\n \"change_status\": enrollment.change_status,\n \"created_on\": format_as_iso8601(enrollment.created_on),\n \"company\": enrollment.company.id if has_company else None,\n \"company_name\": enrollment.company.name if has_company 
else None,\n \"email\": enrollment.user.email,\n \"full_name\": enrollment.user.name,\n \"id\": enrollment.id,\n \"order\": enrollment.order.id,\n \"text_id\": enrollment.program.readable_id\n if is_program\n else enrollment.run.courseware_id,\n \"updated_on\": format_as_iso8601(enrollment.updated_on),\n \"user\": enrollment.user.id,\n \"username\": enrollment.user.username,\n }\n if not is_program:\n expected[\"edx_enrolled\"] = enrollment.edx_enrolled\n expected[\"run\"] = enrollment.run.id\n else:\n expected[\"program\"] = enrollment.program.id\n assert (\n enrollment.get_audit_class().objects.get(enrollment=enrollment).data_after\n == expected\n )", "def _failed(self, msg):\n self.log(msg)\n self.result.passed = False\n self.result.add_error(msg)\n self.log(u\"Failed\")", "def is_success(self):\n return self.type_id == STATE_SUCCESS", "def is_successful(self) -> bool:\n return bool(self.result_state and self.result_state.is_successful())", "def succeed(self) -> bool:\n return self.errorCode is None or len(self.errorCode) < 1", "def is_on(self) -> bool:\n return self._heater.status[\"is_failed\"]", "def IsOk(self):\r\n \r\n return True", "def test_failed():\n assert False", "def failed_on(self):\n return self._failed_on", "def violated(self) -> bool:\n ...", "def failed_roboscript(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"failed_roboscript\")", "def is_successful(self):\n try:\n if self.is_skipped:\n return TestCase.EX_TESTCASE_SKIPPED\n assert self.criteria\n assert self.result is not None\n if (not isinstance(self.result, str) and\n not isinstance(self.criteria, str)):\n if self.result >= self.criteria:\n return TestCase.EX_OK\n else:\n # Backward compatibility\n # It must be removed as soon as TestCase subclasses\n # stop setting result = 'PASS' or 'FAIL'.\n # In this case criteria is unread.\n self.__logger.warning(\n \"Please update result which must be an int!\")\n if self.result == 'PASS':\n return TestCase.EX_OK\n except AssertionError:\n self.__logger.error(\"Please run test before checking the results\")\n return TestCase.EX_TESTCASE_FAILED", "def failed(self):\n\t\tpass", "def is_error(self):\n return self.type_id == STATE_ERROR", "def succeeded(self):\n return self.current_reward == 300", "def error_out(self) -> bool:\n return self._action == 'error'", "def alerted(self) -> bool:\n\t\treturn self._raw_result['data']['alerted']", "def check_rpt_status(self) -> bool:\n return self.allele == self.fasta_alt", "def MayPassTest(self):\n session.console.info('Test results for output volume %r: %r',\n self._output_volumes[self._output_volume_index],\n self._test_results[self._output_volume_index])\n if self._test_results[self._output_volume_index]:\n return True\n return False", "def _IsBuildFailed(build_data):\n if (build_data.get('results') in FAILED and\n not _IsBuildSuccessful(build_data)):\n return True\n return False", "def check_result(self, result):\n self.log.info(\"--check_result, result= %s\", result)\n if result[0]['exit_status'] != 0:\n self.fail(\"##Error detected from check_result\")\n else:\n self.log.info(\"--check_result passed\")", "def is_success(self):\n return self.current_state == self.States.SUCCEEDED", "def is_success(self):\n return self._tag == 'success'", "def test_get_unsuccessful_ta():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n list_failed, list_else = ta.get_unsuccessful_ta('ta_status_bool')\n\n assert list_else[0] == 
ta.source.data['ta_status_bool'][0]\n assert np.isnan(list_failed[0])", "def on_failure(self):\n if self.args.disable_rollback is True:\n on_failure = None\n else:\n on_failure = self.args.on_failure\n return on_failure", "def is_attempted(self):\r\n return self.attempts > 0", "def test_login_failure(self):\n self.client.login(username=self.username, password='AWrongPassword')\n # 2: creation and login\n self.assertTrue(AuditTrail.objects.count() >= 2)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_INFO)", "def infrastructure_failure(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"infrastructure_failure\")", "def print_tcase_failed(self,testcaseName,reasonFailed):\n\n # go throuht the test case objects\n\tfor t in self.testcases:\n\t\t\n\t\ttName = t.name\n\t\tif tName == testcaseName:\n\t\t\t#print tName\n\t\t\tt.status = \"Failed\"\n\t\t\tt.reasonPassed = reasonFailed\n self.print_summary()\n raise TestCaseFailed (\"Testcase '%s' Failed, reason '%s\"%(testcaseName,reasonFailed))\n sys.exit(1)\n return 1\n\n\traise ViriValuePassedError(\"Testcase '%s' doesnt seem to be run but print failed called\"%testcaseName)", "def is_success(self) -> bool:\n tasks_by_status = self.tasks_by_status()\n return set(tasks_by_status) <= {TaskStatus.SUCCESS}", "def is_last_job_failed(self):\n return self._data.get('last_job_failed')", "def __failure(error):\n description = error.describe()\n # check for exceptions\n if any([ex in description for ex in exceptions]):\n return False, description\n # check for and remove unexpected data, returning the removed data\n unexpected_prop = unexpected_prop_regex.search(description)\n if unexpected_prop:\n prop = unexpected_prop.group(1)\n data = { prop: error.instance[prop] }\n del error.instance[prop]\n return False, data\n # check for invalid data item, return index\n item_error = item_error_regex.search(description)\n if item_error:\n rt, index = item_error.group(1), int(item_error.group(2))\n if rt == record_type:\n return False, index\n # no exceptions met => failure\n return True, description", "def failure(self):\n self.logger.debug(\"Logging failure for %s\", self.key)\n self.failures = self.driver.failure(self.key)", "def has_errors(self) -> bool:", "def passed(test: bool) -> str:\n return 'passed' if test else 'failed'", "def test_audit_only_not_expired(self):\n CourseDurationLimitConfig.objects.create(enabled=True, enabled_as_of=datetime(2010, 1, 1, tzinfo=UTC))\n audit_only_course = CourseFactory.create()\n self.create_user_for_course(audit_only_course, CourseUserType.ENROLLED)\n response = self.client.get(course_home_url(audit_only_course))\n assert response.status_code == 200\n self.assertContains(response, TEST_COURSE_TOOLS)\n self.assertNotContains(response, TEST_BANNER_CLASS)", "def hasErrors(self):\n return False", "def failed(self, id, err=''):\n\n records = self.db.get_table()\n index = -1\n\n for i in range(0, len(records)):\n if str(records[i][\"id\"]) == str(id):\n index = i\n \n if index == -1:\n return None\n\n records[index][\"status\"] = \"failed\"\n if 'end-time' in records[index]:\n records[index][\"end-time\"] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if 'comments' in records[index]:\n records[index][\"comments\"] += \" failed{ \" + err + \" };\"\n\n self.db.update_row(index, records[index])\n\n _log.info('Test %s marked as failed with message %s.' 
% (str(id), str(err)))\n \n return records[index]", "def check_if_actuall(self) -> bool:\n\n return self.last_date >= self.get_last_image_date()", "def check_status(self):", "def failure(self) -> 'outputs.EndConditionResponse':\n return pulumi.get(self, \"failure\")", "def failed(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"failed\")", "def failed(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"failed\")", "def need_attention(self):\n msg = [\"not staged\", \"behind\", \"ahead\", \"Untracked\"]\n status_msg = self.status()\n if any([each in status_msg for each in msg]):\n return True\n return False", "def testComplete(self, fail):\n if not fail:\n print \"Test Complete\"\n return self.__g.SUCCESS\n else:\n print \"Test Failed\"\n return self.__g.FAIL" ]
[ "0.70987207", "0.7077051", "0.6967613", "0.6923564", "0.68601596", "0.6790287", "0.67796767", "0.6752796", "0.67493594", "0.67102164", "0.6676905", "0.667684", "0.6542292", "0.64718133", "0.6417293", "0.64025366", "0.63911164", "0.634746", "0.6297389", "0.62367797", "0.62313586", "0.6224354", "0.6202326", "0.617334", "0.61608654", "0.6149201", "0.6146332", "0.61417973", "0.6123227", "0.6112711", "0.61126167", "0.610612", "0.6102712", "0.6096416", "0.60545295", "0.60129374", "0.6005591", "0.5981058", "0.59579706", "0.59579706", "0.5951122", "0.59329134", "0.592341", "0.5913023", "0.59009826", "0.58931446", "0.5885462", "0.58728045", "0.5872288", "0.5870117", "0.5868274", "0.5857305", "0.5856742", "0.58522433", "0.5812072", "0.58069324", "0.5801258", "0.5783449", "0.5776561", "0.57643676", "0.57635945", "0.5759404", "0.57556456", "0.5752655", "0.57506937", "0.5719528", "0.5715914", "0.57065684", "0.56858075", "0.56842554", "0.56809396", "0.5680632", "0.56757295", "0.56700784", "0.56653833", "0.5665317", "0.56591654", "0.5653168", "0.5640025", "0.5638559", "0.5619451", "0.56166905", "0.56141496", "0.56077164", "0.5585725", "0.55830663", "0.5581887", "0.5581884", "0.5580646", "0.5580539", "0.55732465", "0.55687547", "0.55531496", "0.55526316", "0.554085", "0.5532937", "0.55321854", "0.55321854", "0.5529933", "0.5528526" ]
0.76317364
0
Audit the commit for proper end-of-line characters. The UNIX type EOL is the only allowed EOL character.
def audit_eol(self):
    # Regex's....
    re_commit = re.compile("^\xff(.+)\xff$")
    re_filename = re.compile("^diff --(cc |git a\/.+ b\/)(.+)$")
    blocked_eol = re.compile(r"(?:\r\n|\n\r|\r)$")

    # Bool to allow special files such as vcards to bypass the check
    eol_allowed = False

    # Do EOL audit!
    process = get_change_diff( self.repository, ["-p"] )
    for line in process.stdout:
        commit_change = re.match( re_commit, line )
        if commit_change:
            commit = commit_change.group(1)
            continue

        file_change = re.match( re_filename, line )
        if file_change:
            filename = file_change.group(2)
            eol_violation = False
            eol_allowed = False

            # Check if it's an allowed mimetype
            # First - check with the mimetypes system, to see if it can tell
            guessed_type, _ = mimetypes.guess_type(filename)
            if guessed_type in self.ALLOWED_EOL_MIMETYPES:
                eol_allowed = True
                continue

            # Second check: by file extension
            # NOTE: This uses the FIRST dot as extension
            splitted_filename = filename.split(os.extsep)

            # Check if there's an extension or not
            # NOTE This assumes that files use dots for extensions only!
            if len(splitted_filename) > 1:
                extension = splitted_filename[1]
                if extension in self.ALLOWED_EOL_EXTENSIONS:
                    eol_allowed = True
                    continue

        # Unless they added it, ignore it
        if not line.startswith("+"):
            continue

        if re.search( blocked_eol, line ) and not eol_violation:
            # Is this an allowed filename?
            if eol_allowed:
                continue

            # Failure has been found... handle it
            eol_violation = True
            self.__log_failure(commit, "End of Line Style (non-Unix): " + filename);
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eol(self):\n if self.current not in EOL:\n self.on_parser_error(\"EOL expected\")\n self.maybe_eol()", "def _output_commit_line(self): # noqa: C901, E501 pylint: disable=too-many-branches\n seen_this = False\n chars_written = 0\n for i in range(self.num_columns + 1):\n if i == self.num_columns:\n if seen_this:\n break\n col_commit = self.commit\n else:\n col = self.columns[i]\n col_commit = self.columns[i].commit\n\n if col_commit == self.commit:\n seen_this = True\n self.buf += '*'\n chars_written += 1\n\n if self.num_parents > 2:\n chars_written += self._draw_octopus_merge()\n elif seen_this and self.num_parents > 2:\n self._write_column(col, '\\\\')\n chars_written += 1\n elif seen_this and self.num_parents == 2:\n # This is a 2-way merge commit. There is no\n # GraphState.PRE_COMMIT stage for 2-way merges, so this is the\n # first line of output for this commit. Check to see what the\n # previous line of output was.\n #\n # If it was GraphState.POST_MERGE, the branch line coming into\n # this commit may have been '\\', and not '|' or '/'. If so,\n # output the branch line as '\\' on this line, instead of '|'.\n # This makes the output look nicer.\n if (self.prev_state == GraphState.POST_MERGE and\n self.prev_commit_index < i):\n self._write_column(col, '\\\\')\n else:\n self._write_column(col, '|')\n chars_written += 1\n else:\n self._write_column(col, '|')\n chars_written += 1\n self.buf += ' '\n chars_written += 1\n\n self._pad_horizontally(chars_written)\n if self.num_parents > 1:\n self._update_state(GraphState.POST_MERGE)\n elif self._is_mapping_correct():\n self._update_state(GraphState.PADDING)\n else:\n self._update_state(GraphState.COLLAPSING)", "def test_no_final_eol(self, env: yaenv.Env):\n from tempfile import mkstemp\n env.envfile = mkstemp()[-1]\n with open(env, 'w') as f:\n f.write('EOL=no')\n env['BLANK'] = ''\n with open(env, 'r') as f:\n assert len(f.readlines()) == 2", "def eat_EOL(self):\n # print(\"Start eating EOL\")\n self.eat(EOL)\n while self.current_token.type == EOL:\n self.eat(EOL)\n # print(\"Stop eating EOL\")", "def escape_eol_chars(options):\n pass", "def log(self, chars):\n self.insert(END, chars+'\\n')\n self.see(END)\n self.update()", "def _(event):\n if line.is_multiline:\n line.newline()\n else:\n if line.validate():\n cli_ref().line.add_to_history()\n cli_ref().set_return_value(line.document)", "def _endline(line):\n return line.rstrip() + '\\n'", "def test_message_truncated_correctly_commit_log_entry(self):\n commit = collection_models.CollectionCommitLogEntryModel.create(\n 'b', 0, 'committer_id', 'a', 'a' * 400, [{}],\n constants.ACTIVITY_STATUS_PUBLIC, False)\n commit.collection_id = 'b'\n commit.update_timestamps()\n commit.put()\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)\n\n # Ensure nothing happens to messages of proper length.\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)", "def deal_lines(self, lines, conf):\n if lines == ['']:\n print \"NO new %s commit!\" % conf\n else:\n for line in lines:\n if re.search('\\d+ files? 
changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n parts = line.split(' ', 2)\n commit_id = parts[0]\n self.current_commit = commit_id\n stamp = int(parts[1])\n ti = datetime.datetime.fromtimestamp(float(stamp))\n s_time = datetime.datetime.fromtimestamp(float(0))\n if self.start_date == s_time:\n self.start_date = ti\n elif self.start_date > ti:\n self.start_date = ti\n author, mail = parts[2].split('<', 1)\n message = mail.split('> ', 1)[1]\n mail = mail.split('>', 1)[0]\n if re.search(': ', message) is not None:\n messagetype = message.split(': ', 1)[0]\n if messagetype not in CLASSIFICATION:\n messagetype = 'OTR'\n else:\n messagetype = 'OTR'\n if commit_id not in self.commit_dictionary:\n self.commit_dictionary[commit_id]\\\n = [commit_id, mail,\n stamp, messagetype,\n messagetype, 0, 0, 0, 0]\n # [files, inserted, deleted, total_lines]\n if mail not in self.author_dictionary:\n self.author_dictionary[mail] = [author,\n mail, 0, 0,\n 0, 0, 1,\n stamp]\n # [files,inserted,deleted,total_lines,commit,stamp]\n else:\n self.author_dictionary[mail][6] += 1\n if stamp > self.author_dictionary[mail][7]:\n self.author_dictionary[mail][7] = stamp\n self.total_patches += 1\n except:\n print 'Warning: unexpected line \"%s\"' % line\n else:\n if conf == 'no_merges':\n try:\n commit_id = self.current_commit\n numbers = self.getstatsummarycounts(line)\n if len(numbers) == 3:\n (files, inserted, deleted) = \\\n map(lambda el: int(el), numbers)\n total_lines = inserted - deleted\n self.commit_dictionary[commit_id][5] = files\n self.commit_dictionary[commit_id][6] = inserted\n self.commit_dictionary[commit_id][7] = deleted\n self.commit_dictionary[commit_id][8] = total_lines\n self.author_dictionary[mail][2] += files\n self.author_dictionary[mail][3] += inserted\n self.author_dictionary[mail][4] += deleted\n self.author_dictionary[mail][5] += total_lines\n self.total_lines_inserted += inserted\n self.total_lines_deleted += deleted\n self.total_lines += total_lines\n self.current_commit = None\n except:\n print 'Warning: unexpected line \"%s\"' % line", "def test_end_of_line_single_char_last_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last non-blank line\n \n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last non-blank line\n \n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"7.0\", \"7.0\"),\n after_sel=(\"7.1\", \"7.1\"),\n command_name=\"end-of-line\",\n )", "def test_dos_eol():\n import figleaf, figleaf.annotate_html\n \n figleaf.start()\n execfile(os.path.join(thisdir, 'tst_dos_eol.py'))\n figleaf.stop()\n\n coverage = figleaf.get_data().gather_files()\n\n tmpdir = tempfile.mkdtemp('.figleaf')\n\n try:\n figleaf.annotate_html.report_as_html(coverage, tmpdir, [], {})\n finally:\n files = glob.glob('%s/*' % (tmpdir,))\n for f in files:\n os.unlink(f)\n os.rmdir(tmpdir)", "def maybe_eol(self):\n if self.current == CR:\n self.next()\n if self.current == LF:\n self.next()\n elif self.current == LF:\n self.next()", "def convert_line_endings():\n files = []\n for ext in [\n \".py\",\n \".sh\",\n \"Dockerfile\",\n \".txt\",\n \".csv\",\n \".mhd\",\n \".gitignore\",\n ]:\n files.extend(Path(\".\").glob(f\"**/*{ext}\"))\n\n for file in files:\n with open(str(file), \"rb\") as f:\n lines = f.read()\n\n lines = lines.replace(EOL_WIN, EOL_UNIX).replace(EOL_MAC, EOL_UNIX)\n\n with open(str(file), \"wb\") as f:\n f.write(lines)", "def do_EOF(self, line):\n print()\n 
models.storage.save()\n return True", "def __convertEOL(self):\n aw = self.activeWindow()\n aw.convertEols(aw.eolMode())", "def do_EOF(self, line):\n print(\"\")\n return True", "def fix_line_endings(fname, eol=b'\\n'):\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)", "def logwrite(self, line):\n sql = b\"update log set log_text=concat(log_text,'\" + self.__timestamp() + line + \"') where log_id=\" + self.logid +\";\\n\"\n self.logme.stdin.write(sql)\n self.logme.stdin.flush()\n return True", "def _check_last_character(line_index, input_line, code_character):\n global _total_lines_of_code\n if input_line.endswith(code_character):\n _code_lines.append(line_index)\n _total_lines_of_code += 1", "def GetEOLChar(self):\n m_id = self.GetEOLMode()\n if m_id == wx.stc.STC_EOL_CR:\n return u'\\r'\n elif m_id == wx.stc.STC_EOL_CRLF:\n return u'\\r\\n'\n else:\n return u'\\n'", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def do_EOF(self, line):\n return True", "def eol(self):\n return self.pos == len(self.tokens)", "def git_append(msg):\n pipe = Popen('git log -1 --pretty=%B', stdout=PIPE, shell=True)\n old_msg = pipe.stdout.read()\n new_msg = '%s\\n%s' % (old_msg.rstrip(), msg)\n\n pipe = Popen('git commit --amend --file=-', stdin=PIPE, shell=True)\n pipe.communicate(new_msg)", "def do_EOF(self, line):\n print()\n return True", "def expect_eol(self):\n if self.length != 0:\n raise ParseError('Spurius words after parsing instruction')", "def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < 
len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if 
commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n\n return commit_info", "def process(self):\n\n form = cgi.FieldStorage()\n commit = self.read_commit(form)\n\n print(\"Content-Type: text/plain; charset='utf-8'\\r\")\n print(\"Cache-Control: max-age=60\\r\")\n if form.getfirst(\"download\", \"false\") == \"true\":\n print(\"Content-Disposition: attachment; filename=\\\"patch.txt\\\"\\r\")\n\n print(\"\\r\")\n\n print((\"#\" + json.dumps(PostsaiCommitViewer.format_commit_header(commit), default=convert_to_builtin_type)))\n sys.stdout.flush()\n PostsaiCommitViewer.dump_commit_diff(commit)", "def FixLineEndingsForWindows(self,str):\n # TODO: this should not really be part of this class\n if str[-2:]==\"\\r\\n\":\n return str\n if str[-1:]==\"\\n\":\n return str[:-1]+\"\\r\\n\"\n else:\n return str + \"\\r\\n\"", "def do_eof(self, line):\n print \"\"\n return True", "def open_editor_to_amend_commit():\n command = f\"git commit --amend\"\n logging.debug(f\"Executing command: {command}\")\n p = subprocess.Popen(command, shell=True)\n p.communicate()", "def test_EOF(self):\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"EOF\")\n out = f.getvalue()\n self.assertTrue(len(out) == 1)\n self.assertEqual(\"\\n\", out)\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"EOF fake\")\n msj = f.getvalue().strip()\n self.assertFalse(len(msj) == 1)\n self.assertEqual(\"\", msj)", "def test_archive_commitlog_point_in_time_ln(self):\n self.run_archive_commitlog(restore_point_in_time=True, archive_command='ln')", "def lf(self):\n self._write('\\n')", "def _clean_commit(self, line):\n cleaned_line = {\n 'repo': line['origin'],\n 'hash': line['data_commit'],\n 'author': line['data_Author'],\n 'category': \"commit\",\n 'created_date': utils.str_to_dt_data(line['data_AuthorDate']),\n 'commit': line['data_Commit'],\n 'commit_date': utils.str_to_dt_data(line['data_CommitDate']),\n 'files_no': 
len(line['data_files']),\n 'refs': line['data_refs'],\n 'parents': line['data_parents'],\n 'files': line['data_files']\n }\n\n actions = 0\n for file in line['data_files']:\n if 'action' in file:\n actions += 1\n cleaned_line['files_action'] = actions\n\n try:\n non_merge = math.isnan(line['data_Merge'])\n\n except (TypeError, KeyError):\n non_merge = False\n\n cleaned_line['merge'] = not non_merge\n return cleaned_line", "def test_end_of_buffer(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.3\", \"1.3\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"end-of-buffer\",\n )", "def test_kill_to_end_of_line_after_last_visible_char(self):\n before_b = \"\"\"\\\n line 1\n # The next line contains two trailing blanks.\n line 3 \n line 4\n \"\"\"\n after_b = \"\"\"\\\n line 1\n # The next line contains two trailing blanks.\n line 3line 4\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.6\", \"3.6\"),\n after_sel=(\"3.6\", \"3.6\"),\n command_name=\"kill-to-end-of-line\",\n )", "def test_end_of_line_internal_blank_line(self):\n before_b = \"\"\"\\\n first line\n\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.0\", \"2.0\"),\n after_sel=(\"2.0\", \"2.0\"),\n command_name=\"end-of-line\",\n )", "def git_commit(self, msg):\n self.git_repo.git.add(all=True)\n self.git_repo.git.commit(message='[dots] {}'.format(msg))", "def fix_end(self, node):\n if node.header.tokens[0].type == Token.SEPARATOR:\n indent = node.header.tokens[0]\n else:\n indent = Token(Token.SEPARATOR, self.formatting_config.separator)\n node.end = End([indent, Token(Token.END, \"END\"), Token(Token.EOL)])", "def test_end_of_line_blank_last_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last non-blank line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last non-blank line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"7.0\", \"7.0\"),\n after_sel=(\"7.0\", \"7.0\"),\n command_name=\"end-of-line\",\n )", "def commit_history(cli):\n result = []\n record = OrderedDict()\n for line in cli.splitlines():\n r = re.search(' ([A-Z][a-z]+(?: ID)?): (.*?) +([A-Z][a-z]+): (.*)', line)\n if not r:\n continue\n record[r.group(1)] = r.group(2)\n record[r.group(3)] = r.group(4)\n if r.group(3) == 'Comment':\n result.append(record)\n record = OrderedDict()\n return result", "def deleteLastChar (self) :\r\n c = self.data_.drop();\r\n # Notice the \\n so we can notice when new lines begin\r\n if (c=='\\n') :\r\n self.lineNumber_ -= 1\r\n # Find last \\n ... 
if we can\r\n index_of_last_newline = -1\r\n for ii in xrange(0, len(self.data_)) :\r\n if (self.data_.peek(len(self.data_)-ii-1)=='\\n') :\r\n index_of_last_newline = ii\r\n break \r\n \r\n self.charNumber_ = index_of_last_newline\r\n if (index_of_last_newline==-1) : self.charNumber = 80\r\n else :\r\n self.charNumber_-=1;", "def _output_skip_line(self):\n self.buf += '...'\n self._pad_horizontally(3)\n\n if self.num_parents >= 3 and self.commit_index < self.num_columns - 1:\n self._update_state(GraphState.PRE_COMMIT)\n else:\n self._update_state(GraphState.COMMIT)", "def test_diff_git_line_without_a_b_quotes(self):\n diff = (\n b'diff --git \"foo\" \"foo\"\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def postcmd(self, stop, line):\n\n if line and shlex.split(line)[0] == 'commit':\n # for the moment, self.target is the indication of a successful creation\n if self.target:\n return True\n else:\n return False\n\n return AttributeEditor.postcmd(self, stop, line)", "def test_very_verbose_output_not_truncated(self, monkeypatch):\n hooks = setup_hooks(very_verbose=True)\n line_length = 20\n monkeypatch.setattr(\n \"repobee_junit4._output._truncate_lines\",\n partial(_output._truncate_lines, max_len=line_length),\n )\n\n result = hooks.act_on_cloned_repo(FAIL_REPO)\n\n lines = result.msg.split(os.linesep)\n assert len(lines) > 1\n # the first line can be somewhat longer due to staus message\n # and color codes\n assert any([len(line) > line_length for line in lines[1:]])", "def tprint_raw(self, cmd, end='\\n'):\n self.fileHandle.write(cmd + end)", "def multiline_carriage_return(event):\n b = event.cli.current_buffer\n carriage_return(b, event.cli)", "def _update_end_lineno():\n if origin:\n record.origin.line_end = lineno", "def main():\n smart_commit_msg_filename = SMART_COMMIT_MSG_FILENAME\n paths = get_staged_paths()\n if not len(paths):\n raise Exception(\"did you even add anything to staging\")\n paths += [smart_commit_msg_filename]\n mr_edited_file = max(paths, key=lambda k: os.path.getmtime(k))\n if mr_edited_file == smart_commit_msg_filename:\n print(git_commit())\n else:\n print(\"Update the patch notes!\")", "def commit(self, msg):\n self.runtime.logger.info('Commit config: {}'.format(msg))\n with Dir(self.runtime.metadata_dir):\n exectools.cmd_assert([\"git\", \"add\", \".\"])\n exectools.cmd_assert([\"git\", \"commit\", \"--allow-empty\", \"-m\", msg])", "def test_verbose_output_is_truncated(self, monkeypatch):\n hooks = setup_hooks(verbose=True)\n line_length = 20\n monkeypatch.setattr(\n \"repobee_junit4._output._truncate_lines\",\n partial(_output._truncate_lines, max_len=line_length),\n )\n\n result = hooks.act_on_cloned_repo(FAIL_REPO)\n\n lines = result.msg.split(os.linesep)[1:] # skip summary line\n assert len(lines) > 1\n # the first line can be somewhat longer due to staus message\n # and color codes\n assert all([len(line) <= line_length for line in lines[1:]])", "def test_opt_charsetEndOfLine(self):\n line = b\"CHARSET UTF-8\"\n identifier, remainder = self.server.opt_charset(line)\n 
self.assertEqual(identifier, b\"UTF-8\")\n self.assertEqual(remainder, b\"\")", "def writeline(self, line):\n self.sendall((six.text_type(line) + u'\\r\\n').encode(self.encoding))", "def amend_commit_with_file(tmp_file_name):\n command = f\"git commit --amend --allow-empty -F {tmp_file_name}\"\n logging.debug(f\"Executing command: {command}\")\n p = subprocess.Popen(command, shell=True)\n p.communicate()", "def test_end_of_line(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.0\", \"1.0\"),\n after_sel=(\"1.10\", \"1.10\"),\n command_name=\"end-of-line\",\n )", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def eos(comment):\n comment=re.sub(r'(\\W+/.)',r'\\1 \\n',comment,flags=re.IGNORECASE)\n return comment", "def _get_line(self):\n line = self.file.readline(self.maxline + 1)\n if len(line) > self.maxline:\n print(f\"ERROR: got more than {self.maxline} bytes\")\n if not line:\n print(\"Received EOF\")\n if line[-2:] == CRLF:\n line = line[:-2]\n elif line[-1:] in CRLF:\n line = line[:-1]\n return line + CRLF", "def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()", "def end_of_line():\n d = get_app().current_buffer.document\n at_end = d.is_cursor_at_the_end_of_line\n last_line = d.is_cursor_at_the_end\n\n return bool(at_end and not last_line)", "def __read_last_lines(self) -> str:\n with open(LOGFILE_OPENINGS, \"r\", encoding=\"utf-8\") as f:\n last_lines = f.readlines()[-10:]\n return \" 🌸 \" + \"\\n🌸 \".join(\n map(lambda l: repr(LogLine.from_line(l)), last_lines)\n )", "def process_IN_CLOSE_WRITE(self, event):\n self.git.post_change(event.pathname, commit_msg=\"dotfile_tracker update: \"+event.pathname)", "def is_eof(line):\n return line == \"\"", "def getEOLComment(self, address: ghidra.program.model.address.Address) -> unicode:\n ...", "def ensure_ending_newline(self, text):\n if text and text[-1] != '\\n':\n return text + '\\n'\n else:\n return text", "def svn_diff_hunk_readline_modified_text(*args):\n return _diff.svn_diff_hunk_readline_modified_text(*args)", "def post_command(self) -> str:\n rtn = ''\n if self.terminator:\n rtn += self.terminator\n\n if self.suffix:\n rtn += ' ' + self.suffix\n\n if self.pipe_to:\n rtn += ' | ' + self.pipe_to\n\n if self.output:\n rtn += ' ' + self.output\n if self.output_to:\n rtn += ' ' + self.output_to\n\n return rtn", "def test_nextLineAtEnd(self):\n s = 'hello world'\n self.widget.buffer = s\n self.widget.setInputHistory(History(['first', 'second', 'last']))\n self.widget.keystrokeReceived('\\x0e', None)\n self.assertEqual(self.widget.buffer, s)\n self.assertEqual(self.widget.cursor, 0)", "def test_get_git_commit(self):\n git_commit = get_git_commit()\n # output format: ['fafdb957049917ede565cebc58b29899f597fb5a', 'Fri Mar 29 11:09:50 2019 -0400']\n self.assertEqual(len(git_commit[0]), 
40)\n self.assertEqual(len(git_commit[1].split()), 6)", "def _(event):\n buffer = event.current_buffer\n\n if buffer.document.is_cursor_at_the_end_of_line:\n buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=False)\n else:\n buffer.cursor_position += buffer.document.get_end_of_line_position()", "def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))", "def _padding_line(self):\n if self.state != GraphState.COMMIT:\n self._next_line()\n return\n\n # Output the row containing this commit\n # Iterate up to and including self.num_columns, since the current commit\n # may not be in any of the existing columns. 
(This happens when the\n # current commit doesn't have any children that we have already\n # processed.)\n for i in range(self.num_columns):\n col = self.columns[i]\n self._write_column(col, '|')\n if col.commit == self.commit and self.num_parents > 2:\n self.buf += ' ' * (self.num_parents - 2) * 2\n else:\n self.buf += ' '\n\n self._pad_horizontally(self.num_columns)\n\n # Update self.prev_state since we have output a padding line\n self.prev_state = GraphState.PADDING", "def test_newlinesBeforeLineBreaking(self):\n # Because MAX_COMMAND_LENGTH includes framing characters, this long\n # line is slightly longer than half the permissible message size.\n longline = \"o\" * (irc.MAX_COMMAND_LENGTH // 2)\n\n self.client.msg(\"foo\", longline + \"\\n\" + longline)\n self.assertEqual(\n self.client.lines, [\"PRIVMSG foo :\" + longline, \"PRIVMSG foo :\" + longline]\n )", "def test_diff_git_line_without_a_b_and_spaces_changed(self):\n diff = (b'diff --git foo bar1 foo bar2\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n')\n\n with self.assertRaises(DiffParserError) as cm:\n self.tool.get_parser(diff).parse()\n\n self.assertTrue(str(cm.exception).startswith(\n 'Unable to parse the \"diff --git\" line'))", "def test_diff_git_line_without_a_b_and_spaces_quotes(self):\n diff = (\n b'diff --git \"foo bar1\" \"foo bar1\"\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo bar1',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo bar1',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def _newLine(self, usePos = True):", "def end_of_line():\r\n set_point(point().end_of_line())", "def test_write_quotes_unix(self):\n\n # Set up the\n self.config[api.APP_NAME]['line_separator'] = 'unix'\n\n path = tests.test_util.init_quotefile(self.tempdir, \"quotes1.txt\")\n quotes = api.read_quotes(path)\n\n # Call write_quotes to write file\n api.write_quotes(path, quotes)\n\n # Verify unix line separator used when file written\n with open(path, \"rb\") as openfile:\n whole_file = openfile.read().decode(\"utf-8\")\n expected = \"The Linux philosophy is 'Laugh in the face of danger'. Oops. Wrong One. 'Do it yourself'. Yes, that's it. | Linus Torvalds | | U\\n\" + \\\n \"The depressing thing about tennis is that no matter how good I get, I'll never be as good as a wall. | Mitch Hedberg | | U\\n\" + \\\n \"Ask for what you want and be prepared to get it. | Maya Angelou | | U\\n\" + \\\n \"They that can give up essential liberty to obtain a little temporary safety deserve neither liberty nor safety. 
| Ben Franklin | | U\\n\"\n self.assertEqual(expected, whole_file)", "def test_diff_git_line_without_a_b_and_spaces(self):\n diff = (\n b'diff --git foo bar1 foo bar1\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo bar1',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo bar1',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def end(self, commit: bool) -> None:\n ...", "def test_emptyline(self):\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"\\n\")\n out = \"\"\n self.assertEqual(out, f.getvalue())\n\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\" \\n\")\n out = \"\"\n self.assertEqual(out, f.getvalue())", "def __init__(self):\n super(LineEnd, self).__init__(r\"$\", regex.MULTILINE)", "def writeLog(msg, addEndline=True):\n\n with open(LOG_FILE, \"a\") as f:\n f.write(\"\\n\")\n f.write(msg)\n \n if addEndline == True:\n f.write(\"\\n---------------------------------------------\\n\")", "def commit(self) -> None:\n if self._edits and len(self._edits) > 0:\n self._update(\"\\n;\\n\".join(self._edits))\n self._edits = None", "def newLineEvent(self, line):\n self.newLine_callback(line)", "def parseCommit() -> str:\n cmd_tag = f\"git --no-pager diff --diff-filter=ACMR --name-only HEAD~1 HEAD\"\n print(f\"COMMAND: {cmd_tag}\")\n print(\"\", flush=True)\n fileList = subprocess.check_output(cmd_tag, shell=True)\n return fileList.decode('utf-8').splitlines()", "def commit(self):\n # PEP 249\n pass", "def output_line(self, line, eol=b'\\r\\n'):\n self.queue_output(bytes(line, 'utf-8') + eol)", "def _writeline(self, data):\n self._write(data+chr(13)+chr(10))", "def test_no_body_max_line_length_option_ignored(self, custom_config):\n del custom_config['body']['max_line_length']\n check = CommitMessagesCheck(CheckConfig('whatever', 'error', **custom_config))\n result = check.run(\n {\n 'commits': [\n {\n 'stats': {'total': 2},\n 'message': 'xxxxx\\n\\n{}'.format('A' * 1000),\n 'sha': 'aa',\n 'url': '',\n }\n ]\n }\n )[0]\n assert result.success is True", "def commit(self):\n run('git', 'add', '.')\n run('git', 'commit', '-a', '-m', 'updates')", "def end_of_line_marker(self,event):\n for child in self.app.children:\n child.source.SetViewEOL(event.IsChecked())\n self.set('ViewEol',event.IsChecked())", "def ignore_newline(self, t):\n self.lineno += t.value.count('\\n')", "def _output_pre_commit_line(self):\n assert self.num_parents >= 3, 'not enough parents to add expansion row'\n num_expansion_rows = (self.num_parents - 2) * 2\n\n # self.expansion_row tracks the current expansion row we are on.\n # It should be in the range [0, num_expansion_rows - 1]\n assert (0 <= self.expansion_row < num_expansion_rows), \\\n 'wrong number of expansion rows'\n\n # Output the row\n seen_this = False\n chars_written = 0\n for i in range(self.num_columns):\n col = self.columns[i]\n if col.commit == self.commit:\n seen_this = True\n self._write_column(col, '|')\n self.buf += ' ' * self.expansion_row\n chars_written += 1 + self.expansion_row\n elif seen_this and (self.expansion_row == 0):\n # This is the first line of the pre-commit output. 
If the\n # previous commit was a merge commit and ended in the\n # GraphState.POST_MERGE state, all branch lines after\n # self.prev_commit_index were printed as \"\\\" on the previous\n # line. Continue to print them as \"\\\" on this line. Otherwise,\n # print the branch lines as \"|\".\n if (self.prev_state == GraphState.POST_MERGE and\n self.prev_commit_index < i):\n self._write_column(col, '\\\\')\n else:\n self._write_column(col, '|')\n chars_written += 1\n elif seen_this and (self.expansion_row > 0):\n self._write_column(col, '\\\\')\n chars_written += 1\n else:\n self._write_column(col, '|')\n chars_written += 1\n self.buf += ' '\n chars_written += 1\n\n self._pad_horizontally(chars_written)\n\n # Increment self.expansion_row, and move to state GraphState.COMMIT if\n # necessary\n self.expansion_row += 1\n if self.expansion_row >= num_expansion_rows:\n self._update_state(GraphState.COMMIT)", "def dump_commit_diff(commit):\n\n for file in commit:\n if file[4] == \"\" or \".\" not in file[4]:\n sys.stdout.flush()\n print((\"Index: \" + file[3] + \" deleted\\r\"))\n sys.stdout.flush()\n else:\n subprocess.call([\n \"cvs\",\n \"-d\",\n file[8],\n \"rdiff\",\n \"-u\",\n \"-r\",\n PostsaiCommitViewer.calculate_previous_cvs_revision(file[4]),\n \"-r\",\n file[4],\n file[3]])", "def on_commit_comment(self, payload):\n pass" ]
[ "0.6151211", "0.6065239", "0.57468516", "0.5741316", "0.5723494", "0.5639385", "0.5574638", "0.5561204", "0.5554823", "0.55486727", "0.553186", "0.5530341", "0.55275774", "0.5481987", "0.54660696", "0.540383", "0.5398025", "0.5388231", "0.53565466", "0.53498983", "0.5348075", "0.53350383", "0.53350383", "0.53350383", "0.5301547", "0.5287191", "0.52785945", "0.5272429", "0.5216899", "0.5199439", "0.51965606", "0.51958996", "0.5182201", "0.51658374", "0.5151651", "0.5131205", "0.5123198", "0.5110141", "0.5095969", "0.5095288", "0.50862616", "0.50832283", "0.5070124", "0.50539064", "0.5050461", "0.5045376", "0.5043486", "0.5032805", "0.50019383", "0.4998493", "0.49959862", "0.4994089", "0.4983356", "0.49682492", "0.49674013", "0.49418634", "0.49400648", "0.49352655", "0.49313456", "0.49303907", "0.49302313", "0.49287176", "0.49250227", "0.49109894", "0.49083823", "0.49077713", "0.49035642", "0.49013", "0.4892948", "0.48900348", "0.48793516", "0.4862601", "0.48596126", "0.4858863", "0.4855316", "0.48519164", "0.48485437", "0.48484778", "0.4840892", "0.48404077", "0.48399553", "0.48392496", "0.48383242", "0.4836133", "0.48309672", "0.48260388", "0.48030323", "0.48012698", "0.47939387", "0.4791329", "0.47811088", "0.47797728", "0.47680917", "0.47644937", "0.47640824", "0.47640216", "0.47631606", "0.47631282", "0.47598407", "0.47589427" ]
0.83223575
0
Audit the file names in the commit.
def audit_filename(self): for commit in self.repository.commits.values(): for filename in commit.files_changed: if commit.files_changed[ filename ]["change"] not in ["A","R","C"]: continue for restriction in self.filename_limits: if re.search(restriction, filename): self.__log_failure(commit.sha1, "Invalid filename: " + filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n continue\n\n # As a special case, allow the name 'GitHub' for certain repositories\n if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist:\n self.__log_warning(commit.sha1, \"Commit has username 'GitHub' (web merge of PR); allowing anyway\")\n continue\n\n # Check to see if the name contains spaces - if not - it is probably misconfigured....\n if \" \" not in name.strip():\n self.__log_failure(commit.sha1, \"Non-full name: \" + name)\n continue", "def commit_names(self, commit):\n return []", "def dump_commit_diff(commit):\n\n for file in commit:\n if file[4] == \"\" or \".\" not in file[4]:\n sys.stdout.flush()\n print((\"Index: \" + file[3] + \" deleted\\r\"))\n sys.stdout.flush()\n else:\n subprocess.call([\n \"cvs\",\n \"-d\",\n file[8],\n \"rdiff\",\n \"-u\",\n \"-r\",\n PostsaiCommitViewer.calculate_previous_cvs_revision(file[4]),\n \"-r\",\n file[4],\n file[3]])", "def get_modified_files(repo, args):\n commit = repo.commit(args.commit)\n return commit.stats.files", "def commit_detail(self, commit):\n\n files_changes = {\n diff.a_path for diff in commit.diff()\n }\n\n return {\n 'id': commit.hexsha,\n 'date': time.strftime(\n \"%a %b %d %H:%M:%S %Y\",\n time.gmtime(commit.committed_date)\n ),\n 'message': commit.message,\n 'author_name': commit.author.name,\n 'author_email': commit.author.email,\n 'files_change_number': len(files_changes)\n }", "def commit (files):\n\n version = get_tag(comp_versions, 'ACE')\n root_path = get_path()\n files = [i[len(root_path):] if i.startswith(root_path) else i for i in files]\n\n print (\"Committing the following files for \" + version + ':', \" \".join (files))\n\n if opts.take_action:\n for file in files:\n print (\"Adding file \" + file + \" to commit\")\n ex (\"cd $DOC_ROOT/ACE_TAO && git add \" + file)\n\n ex (\"cd $DOC_ROOT/ACE_TAO && git commit -m\\\"\" + version + \"\\\"\")", "def saveStatResults(self, changes, file_stats):\n\n # commit_obj = rpc.RpcProxy('software_dev.commit')\n fchange_obj = rpc.RpcProxy('software_dev.filechange')\n \n commit_ids = []\n for chg in changes:\n if not chg.number:\n continue\n commit_ids.append(chg.number)\n \n while len(commit_ids) and len(file_stats):\n cid = commit_ids.pop() # so, we attribute the stats to the\n # last commit that matches their files\n fc_ids = fchange_obj.search([('commit_id','=', cid)])\n fcres = fchange_obj.read(fc_ids, ['filename'])\n # We read all the filenames that belong to the commit and\n # then try to see if we have any stats for them.\n if not fcres:\n continue\n for fcd in fcres:\n fcstat = file_stats.pop(fcd['filename'], False)\n if not fcstat:\n continue\n # now, we have a filechange.id and stats\n fchange_obj.write(fcd['id'], fcstat)", "def audit_eol(self):\n\n # Regex's....\n re_commit = re.compile(\"^\\xff(.+)\\xff$\")\n re_filename = re.compile(\"^diff --(cc |git a\\/.+ b\\/)(.+)$\")\n blocked_eol = re.compile(r\"(?:\\r\\n|\\n\\r|\\r)$\")\n\n # Bool to allow special files such as vcards to bypass the check\n eol_allowed = False\n\n\n # Do EOL audit!\n process = get_change_diff( self.repository, [\"-p\"] )\n for line in process.stdout:\n commit_change = re.match( re_commit, line )\n if commit_change:\n commit = commit_change.group(1)\n continue\n\n file_change = re.match( re_filename, line )\n if file_change:\n 
filename = file_change.group(2)\n eol_violation = False\n eol_allowed = False\n\n # Check if it's an allowed mimetype\n # First - check with the mimetypes system, to see if it can tell\n guessed_type, _ = mimetypes.guess_type(filename)\n if guessed_type in self.ALLOWED_EOL_MIMETYPES:\n eol_allowed = True\n continue\n\n # Second check: by file extension\n # NOTE: This uses the FIRST dot as extension\n splitted_filename = filename.split(os.extsep)\n # Check if there's an extension or not\n # NOTE This assumes that files use dots for extensions only!\n if len(splitted_filename) > 1:\n extension = splitted_filename[1]\n if extension in self.ALLOWED_EOL_EXTENSIONS:\n eol_allowed = True\n\n continue\n\n # Unless they added it, ignore it\n if not line.startswith(\"+\"):\n continue\n\n if re.search( blocked_eol, line ) and not eol_violation:\n # Is this an allowed filename?\n if eol_allowed:\n continue\n\n # Failure has been found... handle it\n eol_violation = True\n self.__log_failure(commit, \"End of Line Style (non-Unix): \" + filename);", "def touched_files(self, parent):", "async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')", "def file_changes(self):\n new = []\n changed = []\n deleted = []\n parent = self.parent_tag\n # Loop through the files and find the ones that have changed\n for relative_path, file_dict in self.checksum[\"files\"].items():\n if relative_path not in parent[\"files\"]:\n new.append(relative_path)\n elif file_dict[\"checksum\"] != parent[\"files\"][relative_path][\"checksum\"]:\n changed.append(relative_path)\n # Loop through the parent files and see which files have been deleted\n for relative_path in parent[\"files\"].keys():\n if relative_path not in self.checksum[\"files\"]:\n deleted.append(relative_path)\n return {\"new\": new, \"changed\": changed, \"deleted\": deleted}", "def amend_commit_with_file(tmp_file_name):\n command = f\"git commit --amend --allow-empty -F {tmp_file_name}\"\n logging.debug(f\"Executing command: {command}\")\n p = subprocess.Popen(command, shell=True)\n p.communicate()", "def _warn_about_git_filters(files):\n repository = project_context.repository\n\n src_attrs = []\n dst_attrs = []\n\n for path, attrs in repository.get_attributes(*files).items():\n src = Path(path)\n dst = files[src].relative_to(project_context.path)\n src = src.relative_to(project_context.path)\n attrs_text = \"\"\n for name, value in attrs.items():\n if value == \"unset\":\n attrs_text += f\" -{name}\"\n elif value == \"set\":\n attrs_text += f\" {name}\"\n else:\n attrs_text += f\" {name}={value}\"\n\n src_attrs.append(f\"{str(src)}{attrs_text}\")\n dst_attrs.append(f\"{str(dst)}{attrs_text}\")\n\n if src_attrs:\n src_attrs_str = \"\\n\\t\".join(src_attrs)\n dst_attrs_str = \"\\n\\t\".join(dst_attrs)\n communication.warn(\n f\"There are custom git attributes for the following files:\\n\\t{src_attrs_str}\\n\"\n f\"You need to edit '.gitattributes' and add the following:\\n\\t{dst_attrs_str}\"\n )", "def get_files_changed():\n files_list = []\n test = os.popen('git show --name-only')\n repo_location = os.popen('git rev-parse --show-toplevel')\n repo_location = repo_location.readlines()\n repo_location = repo_location[0]\n repo_location = repo_location.replace('\\n', '')\n if \"Not a git repository\" in repo_location:\n files_list.append(\"Not a git repository\")\n return files_list\n files_list.append(repo_location.split('/')[-1])\n output = test.readlines()\n for a in range(6, len(output)):\n 
files_list.append(output[a].replace('\\n', ''))\n return files_list", "def get_filenames_in_commit(git_reference: str = \"\"):\n c = cmd.run(f\"git show --name-only --pretty=format: {git_reference}\")\n if c.return_code == 0:\n return c.out.strip().split(\"\\n\")\n else:\n raise GitCommandError(c.err)", "def _log_changed_names(changed_names: Iterable[Tuple[str, str]]) -> None:\n if not changed_names:\n return\n from .utils import logger\n\n logger.warning(\"New names:\")\n for orig_name, new_name in changed_names:\n logger.warning(\"* %r -> %r\", orig_name, new_name)", "def audit_emails_in_metadata(self):\n\n # Iterate over commits....\n disallowed_domains = [\"localhost\", \"localhost.localdomain\", \"(none)\", \"bombardier.com\", \"rail.bombardier.com\"]\n for commit in self.repository.commits.values():\n for email_address in [ commit.committer_email, commit.author_email ]:\n # Extract the email address, and reject them if extraction fails....\n extraction = re.match(\"^(\\S+)@(\\S+)$\", email_address)\n if not extraction:\n self.__log_failure(commit.sha1, \"Seemingly invalid email address: \" + email_address)\n continue\n\n # Don't allow domains which are disallowed...\n domain = extraction.group(2)\n if domain in disallowed_domains:\n self.__log_failure(commit.sha1, \"Email address using a blocked domain: \" + email_address)\n continue\n\n # Ensure they have a valid MX/A entry in DNS....\n try:\n dns.resolver.query(domain, \"MX\")\n except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel):\n try:\n dns.resolver.query(domain, \"A\")\n except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel, dns.resolver.NXDOMAIN):\n self.__log_failure(commit.sha1, \"Email address has an invalid domain : \" + email_address)\n except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):\n self.__log_failure(commit.sha1, \"Email address has an invalid domain : \" + email_address)", "def FormatSubversionPropertyChanges(filename, props):\r\n prop_changes_lines = [\r\n \"Property changes on: %s\" % filename,\r\n \"___________________________________________________________________\"]\r\n for key, value in props:\r\n prop_changes_lines.append(\"Added: \" + key)\r\n prop_changes_lines.append(\" + \" + value)\r\n return \"\\n\".join(prop_changes_lines) + \"\\n\"", "def get_changed_files_from(old_commit_sha, new_commit_sha):\n return check_output(\n \"git diff-tree --no-commit-id --name-only -r {0}..{1}\".format(\n old_commit_sha,\n new_commit_sha\n ).split(\" \")\n ).decode('utf-8').strip()", "def changed(self, filename='.md5', glob=None):\n if glob is not None:\n filename += '.glob-' + ''.join(ch.lower()\n for ch in glob if ch.isalpha())\n return changed(self, filename, glob=glob)", "def audit(self, message):\n channel = self.config.get('AUDIT_CHANNEL', False)\n log_file = self.config.get('AUDIT_FILE', False)\n if channel: outputs.append([channel, message])\n if log_file:\n with open(log_file, 'a') as f: f.write(message)\n logging.warning('AUDIT: ' + message)", "def _audit_cli_args(self):\n\n args = [\n \"--operation=audit\",\n \"--operation=status\",\n \"--logtostderr\",\n ]\n\n return args", "def main():\n smart_commit_msg_filename = SMART_COMMIT_MSG_FILENAME\n paths = get_staged_paths()\n if not len(paths):\n raise Exception(\"did you even add anything to staging\")\n paths += [smart_commit_msg_filename]\n mr_edited_file = max(paths, key=lambda k: os.path.getmtime(k))\n if mr_edited_file == smart_commit_msg_filename:\n print(git_commit())\n else:\n print(\"Update the patch 
notes!\")", "def onApply(self, event):\n\n # Rename all of the files based on the substitution.\n for (old, new) in zip(self.m_diskNames, self.m_newNames):\n if old != new:\n old = os.path.join(self.m_curPath, old)\n new = os.path.join(self.m_curPath, new)\n try:\n os.rename(old, new)\n except OSError:\n pass\n\n # Now we out the lists so that what the user sees after this\n # reflects what's on disk.\n self.m_diskNames[:] = []\n self.m_newNames[:] = []\n\n # Update.\n self.updateDiskFileList()", "def _add_commit_sha1_to_lists(self):\n sha1_num_commits = \"-\" + self.commit_number\n sha1_args = [sha1_num_commits, \"--pretty=%h\"]\n # git log -[N] --pretty=%h ===> newline delimited list of SHA1 x N commit\n sha1_string = self.git.log(sha1_args)\n # do not modify to os.linesep, Win fails tests with this change\n self.commit_sha1_list = sha1_string.split(\"\\n\")", "def get_changed_files(self, old_commit, new_commit):\n if old_commit is not None and not self.pygit.descendant_of(\n new_commit, old_commit\n ):\n raise ValueError(\"Second commit must be a descendant of first commit\")\n\n old_index = pygit2.Index()\n new_index = pygit2.Index()\n if old_commit is not None:\n old_tree = self.pygit.get(old_commit).tree\n old_index.read_tree(old_tree)\n else:\n # This is a special hash that represents an empty tree\n old_tree = self.pygit.get(\"4b825dc642cb6eb9a060e54bf8d69288fbee4904\")\n\n new_tree = self.pygit.get(new_commit).tree\n new_index.read_tree(new_tree)\n\n for patch in self.pygit.diff(old_tree, new_tree):\n if patch.delta.status_char() != \"M\":\n continue\n\n if not patch.delta.new_file.path.startswith(\"locales/\"):\n continue\n\n old_file_oid = old_index[patch.delta.old_file.path].oid\n new_file_oid = new_index[patch.delta.new_file.path].oid\n old_file = self.pygit.get(old_file_oid)\n new_file = self.pygit.get(new_file_oid)\n yield patch.delta.new_file.path, old_file.data, new_file.data", "def stage_changes(c):\n c.run(f\"git add -u\")", "def get_filenames(commit: git.Commit) -> List[str]:\n\n if not commit.parents:\n return []\n diffs = commit.tree.diff(commit.parents[0])\n # Sometimes a path is in A and not B but we want all filenames.\n return sorted(\n {diff.a_path for diff in diffs if diff.a_path is not None}\n | {diff.b_path for diff in diffs if diff.b_path is not None}\n )", "def log_revision(self, revision):\n to_file = self.to_file\n\n date_str = format_date(revision.rev.timestamp,\n revision.rev.timezone or 0,\n self.show_timezone,\n date_fmt='%Y-%m-%d',\n show_offset=False)\n\n authors = revision.rev.get_apparent_authors()\n to_file.write('%s %s\\n\\n' % (date_str, \", \".join(authors)))\n\n if revision.delta is not None and revision.delta.has_changed():\n for c in revision.delta.added + revision.delta.removed + \\\n revision.delta.modified:\n path, = c[:1]\n to_file.write('\\t* %s:\\n' % (path,))\n for c in revision.delta.renamed:\n oldpath, newpath = c[:2]\n # For renamed files, show both the old and the new path\n to_file.write('\\t* %s:\\n\\t* %s:\\n' % (oldpath, newpath))\n to_file.write('\\n')\n\n if not revision.rev.message:\n to_file.write('\\tNo commit message\\n')\n else:\n message = revision.rev.message.rstrip('\\r\\n')\n for l in message.split('\\n'):\n to_file.write('\\t%s\\n' % (l.lstrip(),))\n to_file.write('\\n')", "def changed_files(self, base=None, remote=None, single_commit=None):\n if single_commit:\n cmd = ['git', 'diff', '{}^!'.format(single_commit), '--name-only']\n elif base and remote:\n if base == 'WORKING':\n cmd = ['git', 'diff', remote, 
'--name-only']\n elif base == 'INDEX':\n cmd = ['git', 'diff', '--staged', remote, '--name-only']\n else:\n cmd = ['git', 'diff', base, remote, '--name-only']\n else:\n raise HTTPError(400, 'Either single_commit or (base and remote) must be provided')\n\n \n response = {}\n try:\n stdout = subprocess.check_output(\n cmd, \n cwd=self.root_dir,\n stderr=subprocess.STDOUT\n )\n response['files'] = stdout.decode('utf-8').strip().split('\\n')\n response['code'] = 0\n except CalledProcessError as e:\n response['message'] = e.output.decode('utf-8')\n response['code'] = e.returncode\n\n return response", "def commit(self, file) -> dict:\n cl = ChatLog(file)\n for log in self.__logs:\n cl.log = log\n result = cl.create()\n if result[\"affect\"] != 1:\n self.__failed_logs.append(log)\n return {\n \"import\": len(self.__logs) - len(self.__failed_logs),\n \"failed\": len(self.__failed_logs)\n }", "def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None\n for dirpath, _dirnames, filenames in os.walk(os.path.join(data_context().content.root, '.git')):\n paths = [os.path.join(dirpath, filename) for filename in filenames]\n files.extend((path, os.path.relpath(path, data_context().content.root)) for path in paths)", "def add_audit(self, entity_name, object_name, operation,\n data, auth_ctx, session):", "def _git_commit_all(message=''):\n\n # Remove deleted files\n result = local('git ls-files --deleted -z', capture=True)\n for path in result.split('\\x00'):\n if len(path.strip()) > 0:\n local('git rm %s' % path, capture=True)\n\n # Add new files\n local('git add .', capture=True)\n\n # Commit\n with settings(warn_only=True):\n if not message:\n message = \"$(date)\"\n local('git commit -m \"%s\"' % message)", "def deal_lines(self, lines, conf):\n if lines == ['']:\n print \"NO new %s commit!\" % conf\n else:\n for line in lines:\n if re.search('\\d+ files? 
changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n parts = line.split(' ', 2)\n commit_id = parts[0]\n self.current_commit = commit_id\n stamp = int(parts[1])\n ti = datetime.datetime.fromtimestamp(float(stamp))\n s_time = datetime.datetime.fromtimestamp(float(0))\n if self.start_date == s_time:\n self.start_date = ti\n elif self.start_date > ti:\n self.start_date = ti\n author, mail = parts[2].split('<', 1)\n message = mail.split('> ', 1)[1]\n mail = mail.split('>', 1)[0]\n if re.search(': ', message) is not None:\n messagetype = message.split(': ', 1)[0]\n if messagetype not in CLASSIFICATION:\n messagetype = 'OTR'\n else:\n messagetype = 'OTR'\n if commit_id not in self.commit_dictionary:\n self.commit_dictionary[commit_id]\\\n = [commit_id, mail,\n stamp, messagetype,\n messagetype, 0, 0, 0, 0]\n # [files, inserted, deleted, total_lines]\n if mail not in self.author_dictionary:\n self.author_dictionary[mail] = [author,\n mail, 0, 0,\n 0, 0, 1,\n stamp]\n # [files,inserted,deleted,total_lines,commit,stamp]\n else:\n self.author_dictionary[mail][6] += 1\n if stamp > self.author_dictionary[mail][7]:\n self.author_dictionary[mail][7] = stamp\n self.total_patches += 1\n except:\n print 'Warning: unexpected line \"%s\"' % line\n else:\n if conf == 'no_merges':\n try:\n commit_id = self.current_commit\n numbers = self.getstatsummarycounts(line)\n if len(numbers) == 3:\n (files, inserted, deleted) = \\\n map(lambda el: int(el), numbers)\n total_lines = inserted - deleted\n self.commit_dictionary[commit_id][5] = files\n self.commit_dictionary[commit_id][6] = inserted\n self.commit_dictionary[commit_id][7] = deleted\n self.commit_dictionary[commit_id][8] = total_lines\n self.author_dictionary[mail][2] += files\n self.author_dictionary[mail][3] += inserted\n self.author_dictionary[mail][4] += deleted\n self.author_dictionary[mail][5] += total_lines\n self.total_lines_inserted += inserted\n self.total_lines_deleted += deleted\n self.total_lines += total_lines\n self.current_commit = None\n except:\n print 'Warning: unexpected line \"%s\"' % line", "def _find_changes(self):\n added = set()\n modified = set()\n existing_files = set()\n for dirpath_str, _, filenames in walk(str(self.path)):\n dirpath = Path(dirpath_str)\n for filename in filenames:\n if filename == DB_FILENAME:\n continue\n abs_filename = (dirpath / filename).absolute()\n if abs_filename in self.entries:\n entry = self.entries[abs_filename]\n existing_files.add(entry)\n st = lstat(str(abs_filename))\n if entry != st:\n modified.add(entry)\n else:\n try:\n entry = HashEntry(abs_filename)\n entry.update_attrs()\n added.add(entry)\n except FileNotFoundError:\n # If file was removed between listing and processing,\n # just treat it as if it never existed\n # We have nothing to compare it to anyway\n pass\n removed = set(self.entries.values()) - existing_files\n return added, removed, modified", "def _get_relevant_files(self, local_repo_path):\n r = GitRepo(local_repo_path)\n all_commits = r.git.log('--name-only', '--pretty=format:').split()\n counted_commits = Counter(all_commits)\n # Sort the files according to the number of commits they appear in\n sorted_commits = sorted(counted_commits.items(),\n key=lambda x: x[1],\n reverse=True)\n # Return the file names sorted per commits number\n return list(zip(*sorted_commits))[0]", "def handleCommit(self):\n filePath = self.filesList.selectedItems()[0].text(2)\n self.filesList.doCommit(filePath)\n logging.debug(\"Committing to \" + filePath)\n # Special case for PDFs\n # 
obj = self.filesList.getFileObj(filePath)\n # if obj.type == 'Pdf':\n # self.printPdfPersonalData(filePath, 'Pdf', \n # self.filesList.getOrigPath(filePath))\n self.pDataCancelButton.hide()\n self.pDataCommitButton.hide()\n self.resetCursor()", "def _on_watch_changes(self, *changes):\n self.dirty = self._git.is_dirty()\n if self._watcher:\n for change in self._watcher.changes:\n for tracker in self._trackers:\n tracked_path = Path(self._git.working_dir) / change[\"path\"]\n if tracker.path.resolve() == tracked_path.resolve():\n tracker._on_file_change(None)\n return [\n dict(a_path=diff.a_path, b_path=diff.b_path, change_type=diff.change_type)\n for diff in self._git.index.diff(None)\n ] + [\n dict(a_path=None, b_path=ut, change_type=\"U\")\n for ut in self._git.untracked_files\n ]", "def calc_test(commits, author):\n\topen('modifications.csv', 'w').close()\n\t\n\tfor count, commit in enumerate(commits):\n\t\t# status update\n\t\tif (count + 1) % 5 == 0:\n\t\t\tprint commit, '.. ..', count + 1, ' / ', len(commits)\n\n\t\t\t# getting every blob from a given commit\n\t\tquery = ('for x in $(echo ' + commit + ' | ~/lookup/getValues c2b | ' +\n\t\t\t# splitting it and discarding the newlines and the commit's hash\n\t\t\t'awk -v RS=\"[;\\\\n]\" 1 | tail -n+2); do ' +\n\t\t\t# We look up the content's of each blob, and discard the STDERR,\n\t\t\t# in the case of trying to look up a blob that does not exist in the database\n\t\t\t'echo $x | ~/lookup/showCnt blob 2> /dev/null; done | ' +\n\t\t\t# We search for the use of a unit testing library, using the above regex, and\n\t\t\t# keeping the first result only, since that is enough to know that the commit contains\n\t\t\t# a unit testing file, to make the execution faster\n\t\t\t'egrep -m 1 \"' + final_reg + '\"')\n\t\tif bash(query): # if contains unit testing lib\n\t\t\tout = bash('echo ' + commit + ' | ~/lookup/getValues c2P')\n\t\t\tmain_proj = out.strip().split(';')[1]\n\t\t\ttime = search(commit, 'commit')[2]\n\n\t\t\t# at this point we could search the parent's tree for the existence of tests, but this\n\t\t\t# would require recursively looking at every directory and parsing every file in the tree, so, due\n\t\t\t# to the complexity, we skip it and consider it a modification instead of a possible introduction\n\n\t\t\tf = open(\"modifications.csv\", \"a\")\n\t\t\tprint 'modification'\n\t\t\tf.write(author + ', ' + 'TEST' + ', ' + str(time) + ', ' + main_proj + '\\n')\n\t\t\tf.close()\n\t\t\tprint 'wrote: -->', commit", "def get_commit_change_stats(self, commit_url='', full_name='', commit_sha=''):\n if commit_url == '' and (commit_sha == '' and full_name == ''):\n raise BaseException('commit url could not be generated. 
Commit url, commit sha and full name not set')\n return None\n url = commit_url\n if url == '':\n url = COMMIT_DETAILS.format(commit_sha=commit_sha, full_name=full_name)\n url = self.get_full_url(url)\n\n json_data = loads(self.get_from_net(url))\n stats = {'additions': 0, 'deletions': 0}\n if 'stats' in json_data:\n stats['additions'] = json_data['stats']['additions']\n stats['deletions'] = json_data['stats']['deletions']\n\n return stats", "def fixupFileNames(process):\n if not hasattr(process.source, \"fileNames\"):\n process.source.fileNames = cms.untracked.vstring()\n return", "def AddSubversionPropertyChange(filename):\r\n if self.options.emulate_svn_auto_props and IsFileNew(filename):\r\n svnprops = GetSubversionPropertyChanges(filename)\r\n if svnprops:\r\n svndiff.append(\"\\n\" + svnprops + \"\\n\")", "def commits() -> None:\n project = get_project(require=True)\n commits_data = request('get', f'/api/v0/projects/{project.id}/commits/').json()\n current_commit = None\n try:\n current_commit = get_current_commit(project.directory)\n except Exception:\n pass\n\n # Filter out ad-hoc executions (and remove the adhocness marker)\n commits_data = [commit for commit in commits_data if not commit.pop('adhoc', False)]\n\n # Mark the current commit\n for commit in commits_data:\n if commit['identifier'] == current_commit:\n commit['identifier'] += ' (current)'\n\n print_table(commits_data)", "def calc_CI(commits, author):\n\t# delete contents\n\topen('modifications.csv', 'w').close()\n\topen('introductions.csv', 'w').close()\n\n\tfor count, commit in enumerate(commits):\n\t\t# status update\n\t\tif (count + 1) % 50 == 0:\n\t\t\tprint commit, '.. ..', count + 1, ' / ', len(commits)\n\n\t\t# c2f does seems to result in a tie error, so c2b and b2f is used instead\t\t\n\t\t#getting the blobs\n\t\tquery = (\"for x in $(echo \" + commit + \" | ~/lookup/getValues c2b |\" +\n\t\t\t# splitting on the semicolon and discarding the newlines\n\t\t\t\" awk -v RS='[;\\\\n]' 1 |\" +\n\t\t\t# discarding the commit's hash (it appears before the blobs' hashes)\n\t\t\t\" tail -n+2); do\" +\n\t\t\t\t# for each blob, we look up it's filename\n\t\t\t\t\" echo $x | ~/lookup/getValues b2f;\" + \n\t\t\t\" done |\" +\n\t\t\t# we discard the first field of the results (blobs' hash)\n\t\t\t\" cut -d ';' -f2 |\" +\n\t\t\t# we check whether one of the modified files is a CI configuration file\n\t\t\t\" egrep '\" + \"|\".join(ci_files) + \"'\")\n\t\tresult = bash(query)\n\t\tif result:\n\t\t\tout = bash('echo ' + commit + ' | ~/lookup/getValues c2P')\n\t\t\tmain_proj = out.strip().split(';')[1]\n\t\t\ttime = search(commit, 'commit')[2]\n\t\t\t\t\n\t\t\tif check_if_introduction(commit, result):\n\t\t\t\tf = open(\"introductions.csv\", \"a\")\n\t\t\t\tprint 'introduction'\n\t\t\telse:\n\t\t\t\tf = open(\"modifications.csv\", \"a\")\n\t\t\t\tprint 'modification'\n\t\t\tf.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\\n')\n\t\t\tf.close()\n\t\t\tprint 'wrote: -->', commit", "def last_modified_commit(*paths, **kwargs):\n return check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%h',\n '--',\n *paths\n ], **kwargs).decode('utf-8')", "def calc_CI_diff(commits, author):\n\t# delete contents\n\topen('modifications.csv', 'w').close()\n\topen('introductions.csv', 'w').close()\n\n\tfor count, commit in enumerate(commits):\n\t\t#status update\n\t\tif (count + 1) % 50 == 0:\n\t\t\tprint commit, '.. 
..', count + 1, ' / ', len(commits)\n\n\t\t# cmputeDiff2.perl seems to produce junk to the stdout occasionally\n\t\tdiff = bash(\"echo \" + commit + \" | ssh da4 ~/lookup/cmputeDiff2.perl\")\n\n\t\t# if a CI configuration file is in the diff\n\t\tif re.search(\"|\".join(ci_files), diff):\n\t\t\tout = bash('echo ' + commit + ' | ~/lookup/getValues c2P')\n\t\t\tmain_proj = out.strip().split(';')[1]\n\t\t\ttime = search(commit, 'commit')[2]\n\n\t\t\tfor blob in diff.split():\n\t\t\t\t# looking for the CI config blob and checking if parent blob exists\n\t\t\t\tif re.search(\"|\".join(ci_files), blob):\n\t\t\t\t\t# if we have both an introduction and a modification\n\t\t\t\t\t# in the same commit, we count it as an introduction\n\t\t\t\t\tif blob.endswith(';'):\n\t\t\t\t\t# if we don't have the parent blob, after the last semicolon,\n\t\t\t\t\t# it is an introduction\n\t\t\t\t\t\tf = open(\"introductions.csv\", \"a\")\n\t\t\t\t\t\tprint 'introduction'\n\t\t\t\t\telse:\n\t\t\t\t\t\tf = open(\"modifications.csv\", \"a\")\n\t\t\t\t\t\tprint 'modification'\n\t\t\t\t\tbreak\n\t\t\tf.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\\n')\n\t\t\tf.close()\n\t\t\tprint 'wrote: -->', commit", "def gen_changes_file_name(cls, package, version, arch, mbd_type=TYPE_DEFAULT):\n return \"{p}_{v}{x}_{a}.changes\".format(p=package,\n v=mini_buildd.misc.strip_epoch(version),\n a=arch,\n x=cls.TYPE2FILENAME_ID[mbd_type])", "def status():\n if not check_for_wit():\n raise NoWitError(f'No .wit folder exists in {os.getcwd()}')\n if not os.path.exists(refs_path):\n print('No files have been committed yet')\n return False\n print(f'Current commit ID: {get_current_commit_id()}')\n print('Changes to be committed:')\n print('-' * 20)\n for num, file in enumerate(get_files_to_be_committed()):\n print(f'{num + 1}: {file}')\n print('\\n')\n print('Changes not staged for commit')\n print('-' * 20)\n for num, file in enumerate(get_files_not_staged()):\n print(f'{num + 1}: {file}')\n for file in deleted_files:\n print(f'{file} - deleted from main folder')\n print('\\n')\n print('Untracked files')\n print('-' * 20)\n for num, file in enumerate(get_untracked_files()):\n print(f'{num + 1}: {file}')", "def _git_diff_files(ref=\"master\"):\n result = []\n command = [\"git\", \"diff\", \"--name-status\", \"%s\" % (ref)]\n exit_code, output = _execute(command)\n if exit_code != 0:\n print(\"Failed to diff files.\")\n sys.exit(1)\n\n for line in output.decode(\"utf-8\").splitlines():\n parts = line.split(\"\\t\")\n action = parts[0]\n name = parts[-1]\n action = action.lower()\n result.append((action, name))\n\n return result", "def contributions_by_file(self, owner, repo, start=None, end=None):\n df = []\n for commit in self.__api.get_repo((owner + \"/\" + repo)).get_commits(since=start,until=end):\n for file in commit.files:\n try:\n df.append({'user': commit.author.login, 'file': file.filename, 'additions': file.additions, 'deletions': file.deletions, 'total': file.changes})\n except AttributeError:\n pass\n\n df = pd.DataFrame(df)\n\n df.groupby([\"file\" ,\"user\"]).sum()\n\n return df", "def get_changed_files():\n upstream = \"origin/master\"\n local_commit = subprocess.check_output(\n \"git rev-list HEAD ^{} -- 2>/dev/null | tail -1\".format(upstream),\n shell=True).strip().decode()\n diff_base = subprocess.check_output(\n ['git', 'rev-parse', local_commit +\n '^']).strip().decode() if local_commit else \"HEAD\"\n files = subprocess.check_output(['git', 'diff', '--name-only',\n 
diff_base]).strip().decode().split('\\n')\n\n repo = subprocess.check_output(['git', 'rev-parse',\n '--show-toplevel']).strip().decode()\n # add prefixes so that all and targets can be specified relative to FUCHSIA_DIR\n if repo.endswith('topaz'):\n files = [os.path.join('topaz', p) for p in files]\n elif repo.endswith('third_party/go'):\n files = [os.path.join('third_party/go', p) for p in files]\n\n return files", "def changed_names(self, directory):\n return [os.path.split(p)[1] for p in self.changed_paths(directory)]", "def do_files(self, args):\n file_names = self.regexprutils.get_file_names()\n print 'File names:'\n for name in file_names:\n print ' %s' % (name, )", "def auditAgainstMetadata(syn, synId, metaDf, refCol, cols2Check,fileExts):\n entityMissMetadata = []\n incorrectAnnotated = {}\n missingAnno = {}\n print \"Check annotations against metadata.\\n\"\n starting = syn.get(synId,downloadFile = False)\n if not is_container(starting):\n print \"%s is a File \\n\" % synId\n _helperAuditMetadata(syn,starting,metaDf,refCol,cols2Check,fileExts,\n entityMissMetadata,incorrectAnnotated,missingAnno)\n noMeta = False\n if len(entityMissMetadata):\n noMeta = True\n return noMeta,incorrectAnnotated.keys(),missingAnno.keys()\n else:\n directory = synu.walk(syn,synId)\n for dirpath,dirname,filename in directory:\n for i in filename:\n temp = syn.get(i[1],downloadFile = False)\n print \"Getting File %s ...\" % i[1]\n _helperAuditMetadata(syn,temp,metaDf,refCol,cols2Check,fileExts,\n entityMissMetadata,incorrectAnnotated,missingAnno)\n return entityMissMetadata,incorrectAnnotated,missingAnno", "def file_summaries(self):\n summaries = {}\n if 'data' not in self.coverage_data:\n return summaries\n for info in self.coverage_data['data']:\n totals = info.get('totals', None)\n if totals:\n summaries['totals'] = totals\n for finfo in info['files']:\n filename = finfo.get('filename', None)\n if not filename:\n continue # :|\n if filename in summaries:\n raise ClangCoverageFilenameCollision(\n \"colliding file name: {}\".format(filename))\n summaries[filename] = finfo.get('summary', None)\n return summaries", "def parseCommit() -> str:\n cmd_tag = f\"git --no-pager diff --diff-filter=ACMR --name-only HEAD~1 HEAD\"\n print(f\"COMMAND: {cmd_tag}\")\n print(\"\", flush=True)\n fileList = subprocess.check_output(cmd_tag, shell=True)\n return fileList.decode('utf-8').splitlines()", "def files_changed(revish: Text,\n ignore_rules: Optional[Sequence[Text]] = None,\n include_uncommitted: bool = False,\n include_new: bool = False\n ) -> Tuple[List[Text], List[Text]]:\n files = repo_files_changed(revish,\n include_uncommitted=include_uncommitted,\n include_new=include_new)\n if not files:\n return [], []\n\n return exclude_ignored(files, ignore_rules)", "def _format_to_link(self, commit):\n return os.path.join(self.mount, \"commits-by-hash\", self._hash_updir(commit), commit) + \"/\"", "def status() -> None:\n wit = WitStatus()\n\n print(f'Commit ID: {wit.last_commit_id}')\n\n if wit.last_commit_id:\n full_changes = wit.get_changes_to_be_committed()\n print(f'Changes to be committed: {\", \".join(full_changes)}')\n\n changed, untracked = wit.compare_two_list_files(\n wit.original_files, wit.stage_files,\n wit.parent_wit_dir, wit.stage_dir\n )\n print(f'Changes not staged for commit: {\", \".join(changed)}')\n print(f'Untracked files: {\", \".join(untracked)}')", "def setup_audit_log(cfg=CFG):\n if not runez.DRYRUN and not runez.log.file_handler:\n runez.log.setup(\n file_format=\"%(asctime)s %(timezone)s 
[%(process)d] %(context)s%(levelname)s - %(message)s\",\n file_level=logging.DEBUG,\n file_location=cfg.meta.full_path(\"audit.log\"),\n greetings=\":: {argv}\",\n rotate=\"size:500k\",\n rotate_count=1,\n )", "def edited_file_locations(self):", "def extract_commit_modified_file_features(self, hash_list):\n\n # modified_file_repo_dict [dict<commit hash, list<modified files>>] -- modified files list for each commit hash\n modified_file_repo_dict = self.extract_modified_file_repo(hash_list)\n\n pro_modified_file_dict = {}\n num_modified_file_dict = {}\n\n nsd_similarity_obj = nsd_similarity.NSDSimilarity(repodir=self.repo_dir)\n extension_set = nsd_similarity_obj.extension_set\n\n if self.verbose > 0:\n len_commit_hash = len(modified_file_repo_dict)\n\n for idx_commit_hash, commit_hash in enumerate(modified_file_repo_dict.keys()):\n\n if self.verbose > 0:\n if (idx_commit_hash%1000)==0:\n print(\"modified file feature -- Done {0}/{1}\".format(idx_commit_hash, len_commit_hash))\n\n cnt = 0\n for f_path in modified_file_repo_dict[commit_hash]:\n root, ext = os.path.splitext(f_path)\n if ext in extension_set:\n cnt += 1\n\n len_modified_file_repo_dict = len(modified_file_repo_dict[commit_hash])\n if len_modified_file_repo_dict==0:\n pro_modified_file_dict[commit_hash] = 0\n else:\n pro_modified_file_dict[commit_hash] = cnt/len_modified_file_repo_dict\n num_modified_file_dict[commit_hash] = cnt\n\n return pro_modified_file_dict, num_modified_file_dict", "def commit(self):\n run('git', 'add', '.')\n run('git', 'commit', '-a', '-m', 'updates')", "def commit_from_svn_log_entry(entry, files=None, keep_author=False):\r\n # This will use the local timezone for displaying commit times\r\n timestamp = int(entry['date'])\r\n svn_date = str(datetime.fromtimestamp(timestamp))\r\n # Uncomment this one one if you prefer UTC commit times\r\n #svn_date = \"%d 0\" % timestamp\r\n if keep_author:\r\n options = [\"ci\", \"--force-log\", \"-m\", entry['message'] + \"\\nDate: \" + svn_date, \"--username\", entry['author']]\r\n else:\r\n options = [\"ci\", \"--force-log\", \"-m\", entry['message'] + \"\\nDate: \" + svn_date + \"\\nAuthor: \" + entry['author']]\r\n if files:\r\n options += list(files)\r\n run_svn(options)", "def detailed_log(self, selected_hash, current_path):\n p = Popen(\n [\"git\", \"log\", \"-1\", \"--stat\", \"--numstat\", \"--oneline\", selected_hash],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n result = []\n note = [0] * 3\n count = 0\n temp = \"\"\n line_array = my_output.decode(\"utf-8\").splitlines()\n length = len(line_array)\n INSERTION_INDEX = 0\n DELETION_INDEX = 1\n MODIFIED_FILE_PATH_INDEX = 2\n if length > 1:\n temp = line_array[length - 1]\n words = temp.split()\n for i in range(0, len(words)):\n if words[i].isdigit():\n note[count] = words[i]\n count += 1\n for num in range(1, int(length / 2)):\n line_info = line_array[num].split()\n words = line_info[2].split(\"/\")\n length = len(words)\n result.append(\n {\n \"modified_file_path\": line_info[MODIFIED_FILE_PATH_INDEX],\n \"modified_file_name\": words[length - 1],\n \"insertion\": line_info[INSERTION_INDEX],\n \"deletion\": line_info[DELETION_INDEX],\n }\n )\n\n if note[2] == 0 and length > 1:\n if \"-\" in temp:\n exchange = note[1]\n note[1] = note[2]\n note[2] = exchange\n\n return {\n \"code\": p.returncode,\n \"modified_file_note\": temp,\n \"modified_files_count\": note[0],\n \"number_of_insertions\": note[1],\n 
\"number_of_deletions\": note[2],\n \"modified_files\": result,\n }\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git log_1\",\n \"message\": my_error.decode(\"utf-8\"),\n }", "def get_changed_files(path_to_repository, ignore_subrepositories):\n diff = _get_diff_to_last_commit(path_to_repository, ignore_subrepositories)\n return [item.b_path for item in diff if item.change_type in _CHANGE_TYPES_CONSIDERED_FOR_PRECOMMIT]", "def make_log_entries(commits, git_repo):\n entries = []\n # Add header\n author = git_repo.get_author_info()\n entries.append(\"* %s %s <%s> %s\" % \\\n (datetime.datetime.now().strftime(\"%a %b %d %Y\"),\n author.name, author.email, get_version(git_repo,\n commits[0])))\n for commit in commits:\n commit_info = git_repo.get_commit_info(commit)\n entries.append(\"- %s\" % commit_info[\"subject\"])\n return entries", "def __gitStatistics(self):\n self.vcs.gitStatistics(self.project.getProjectPath())", "def commit_data(self, file_list, comment=None):\n if not comment:\n comment = 'Svn2: autocommit'\n\n # First try to update\n if not self.Update():\n self.logger.error(\"Failed to update svn repository, refusing to commit changes\")\n return\n\n #FIXME - look for conflicts?\n\n for fname in file_list:\n stat = self.client.status(fname)\n self.client.add([f.path for f in stat \\\n if f.text_status == pysvn.wc_status_kind.unversioned])\n try:\n self.revision = self.client.checkin([self.datastore], comment,\n recurse=True)\n self.revision = self.client.update(self.datastore, recurse=True)[0]\n self.logger.info(\"Svn2: Commited changes. At %s\" %\n self.revision.number)\n except Exception, err:\n # try to be smart about the error we got back\n details = None\n if \"callback_ssl_server_trust_prompt\" in str(err):\n details = \"SVN server certificate is not trusted\"\n elif \"callback_get_login\" in str(err):\n details = \"SVN credentials not cached\"\n\n if details is None:\n self.logger.error(\"Svn2: Failed to commit changes\",\n exc_info=1)\n else:\n self.logger.error(\"Svn2: Failed to commit changes: %s\" %\n details)", "def get_files_to_be_committed():\n current_staging_hashes = get_all_path_hashes(staging_path)\n head_path = get_wit_path(keyword=get_current_commit_id())\n head_hashes = get_all_path_hashes(path=head_path)\n new_file_hashes = []\n files_to_be_committed = []\n for staging_hash in current_staging_hashes:\n if staging_hash not in head_hashes:\n new_file_hashes.append(staging_hash)\n files_to_be_committed = [staging_hash_decoder(h) for h in new_file_hashes]\n return files_to_be_committed", "def git_append(msg):\n pipe = Popen('git log -1 --pretty=%B', stdout=PIPE, shell=True)\n old_msg = pipe.stdout.read()\n new_msg = '%s\\n%s' % (old_msg.rstrip(), msg)\n\n pipe = Popen('git commit --amend --file=-', stdin=PIPE, shell=True)\n pipe.communicate(new_msg)", "def getChanges():", "def on_file_changed(self, path):\n\t\tpass", "def _pkg_changes(self, local=False, **kwargs):\n seen = set()\n for pkg in self.parse_git_log(self.location, pkgs=True, **kwargs):\n atom = pkg.atom\n key = (atom, pkg.status)\n if key not in seen:\n seen.add(key)\n self.data.setdefault(atom.category, {}).setdefault(\n atom.package, {}).setdefault(pkg.status, []).append((\n atom.fullver,\n pkg.commit.commit_date,\n pkg.commit.hash if not local else pkg.commit,\n ))", "def audit(self, database=None):\n listOfErrors = []\n listOfWarnings = []\n\n for e in self.children:\n err, war = e.audit(database)\n listOfErrors += err\n listOfWarnings += war\n return listOfErrors, listOfWarnings", 
"def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def committers_changes(self) -> Iterator[CommitterChange]:\n for committer_change in self._yaml[\"committers\"]:\n # Start ignoring PyLintBear\n match action := CommitterActions(committer_change[\"action\"]):\n case CommitterActions.ADDITION:\n yield CommitterChange(\n name=committer_change[\"name\"],\n action=action,\n link=committer_change[\"link\"],\n email=committer_change[\"email\"],\n company=committer_change[\"company\"],\n committer_id=committer_change[\"id\"],\n timezone=committer_change[\"timezone\"],\n )\n case CommitterActions.DELETION:\n yield CommitterChange(\n name=committer_change[\"name\"],\n action=action,\n link=committer_change[\"link\"],\n )\n # Stop ignoring", "def _get_changed_files():\n if not ci_diff_helper:\n return None\n\n try:\n config = ci_diff_helper.get_config()\n except OSError: # Not on CI.\n return None\n\n changed_files = ci_diff_helper.get_changed_files('HEAD', config.base)\n\n changed_files = set([\n './{}'.format(filename) for filename in changed_files])\n\n return changed_files", "def commit_history(cli):\n result = []\n record = OrderedDict()\n for line in cli.splitlines():\n r = re.search(' ([A-Z][a-z]+(?: ID)?): (.*?) +([A-Z][a-z]+): (.*)', line)\n if not r:\n continue\n record[r.group(1)] = r.group(2)\n record[r.group(3)] = r.group(4)\n if r.group(3) == 'Comment':\n result.append(record)\n record = OrderedDict()\n return result", "def update_logs(event, log, action_log, error_log):\n\tif event[\"type\"] == \"error\":\n\t\t#Update the error log file\n\telse:\n\t\t# event[\"type\"] == \"action\"\n\t\t#Update action file", "def change_files(self):\n change_files = []\n change_files_url = self._url + '/files' + OAUTH_TOKEN\n change_files_data = json.load(urllib2.urlopen(change_files_url))\n for item in change_files_data:\n change_files.append(item['filename'])\n return change_files", "def all_changed_files(self):\n return [path_to_file_type(os.path.join(self.path, p)) for p in self.changed_paths() if p]", "def on_modified(self, event):\n \n if not event.is_directory: \n\n file_name = os.path.basename(event.src_path)\n \n if file_name not in self.ignore_files:\n parent = os.path.dirname(event.src_path)\n file_id = list(filter(lambda f: f[\"name\"] == file_name, self.filesystem[parent][\"files\"]))[0][\"id\"]\n self.gapy.update_file(file_id, path=parent)\n self.gapy.logger.info(\"The file {} was modified, the content was updated\".format(file_name, parent))\n print(f\"\\nThe file {file_name} was modified and synchronized\")", "def print_file_stats(self):\n\n # current epoch time, file number, filename, filesize, trans secs, status\n print(f\"TRANS_STATS_FILE: {time.time()} {self.batchvals['numfiles']} {self.filevals['filename']} {self.filevals['numbytes']} {self.filevals['end_time'] - self.filevals['start_time']} {self.filevals['status']}\")", "def test_two_commits(self, tmpgitdir):\n with tmpgitdir.join('file_a.txt').open('w') as handle:\n handle.write('first file')\n\n subprocess.check_call(['git', 'add', '.'])\n 
subprocess.check_call(['git', 'commit', '-m', 'first'])\n first_hash = subprocess.check_output(\n ['git', 'show', '-s', '--format=format:%H']).decode()\n\n with tmpgitdir.join('file_b.txt').open('w') as handle:\n handle.write('second file')\n\n subprocess.check_call(['git', 'add', '.'])\n subprocess.check_call(['git', 'commit', '-m', 'second'])\n second_hash = subprocess.check_output(\n ['git', 'show', '-s', '--format=format:%H']).decode()\n\n assert is_git_ancestor(tmpgitdir, first_hash, second_hash)\n assert not is_git_ancestor(tmpgitdir, second_hash, first_hash)", "def history(name):\n from pybel.resources.arty import get_knowledge_history\n from pybel.resources.document import get_bel_knowledge_hash\n\n for path in get_knowledge_history(name):\n h = get_bel_knowledge_hash(path.as_posix())\n click.echo('{}\\t{}'.format(path, h))", "def process(self):\n\n form = cgi.FieldStorage()\n commit = self.read_commit(form)\n\n print(\"Content-Type: text/plain; charset='utf-8'\\r\")\n print(\"Cache-Control: max-age=60\\r\")\n if form.getfirst(\"download\", \"false\") == \"true\":\n print(\"Content-Disposition: attachment; filename=\\\"patch.txt\\\"\\r\")\n\n print(\"\\r\")\n\n print((\"#\" + json.dumps(PostsaiCommitViewer.format_commit_header(commit), default=convert_to_builtin_type)))\n sys.stdout.flush()\n PostsaiCommitViewer.dump_commit_diff(commit)", "def mtime(name):", "def write_commits(self, logfile, repo, commits):\n csv__writer = writer(logfile, delimiter=',', quoting=QUOTE_MINIMAL)\n\n for c in commits:\n try:\n csv__writer.writerow([\n repo['full_name'],\n repo['language'],\n c['user'],\n c['login'],\n c['date'],\n c['additions'],\n c['deletions']\n ])\n except UnicodeEncodeError, ue:\n #TODO Handle unicode errors since csv lib doesn't like to work with unicode\n print str(ue)\n continue", "def git_removed_files(self):\n\n etc_tracked = self.repo.tracked_files('etc-tmp')\n for rpath in etc_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.etc_commits.removed.rpaths.append(rpath)\n self.etc_commits.removed.commit()\n\n master_tracked = self.repo.tracked_files('master-tmp')\n for rpath in master_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.master_commits.removed.rpaths.append(rpath)\n self.master_commits.removed.commit()", "def test_host_file_audit(host):\n with host.sudo():\n host.run(\"touch /etc/hosts\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/hosts\\\"\")\n assert audit_log.stdout", "def test_diff_with_tabs_after_filename(self):\n diff = (\n b'diff --git a/README b/README\\n'\n b'index 712544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'f88b7f15c03d141d0bb38c8e49bb6c411ebfe1f1 100644\\n'\n b'--- a/README\\t\\n'\n b'+++ b/README\\t\\n'\n b'@ -1,1 +1,1 @@\\n'\n b'-blah blah\\n'\n b'+blah\\n'\n b'-\\n'\n b'1.7.1\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'README',\n orig_file_details=b'712544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'README',\n modified_file_details=b'f88b7f15c03d141d0bb38c8e49bb6c411ebfe1f1',\n old_unix_mode='100644',\n new_unix_mode='100644',\n insert_count=1,\n delete_count=2,\n data=diff)", "def get_changed_files(\n cls,\n repo: git.Repo,\n begin_sha: str,\n end_sha: str\n ) -> dict:\n _changed_files = {}\n _diff_begin_commit = repo.commit(begin_sha)\n _diff_end_commit = 
repo.commit(end_sha)\n\n if _diff_begin_commit.committed_datetime > _diff_end_commit.committed_datetime:\n raise ValueError(\"Begin commit happened before the end commit\")\n\n # If we compare changes between the beginning and the end, we will not get\n # the files changed in the first commit itself.\n # The first commit has to be set one commit back to get all changed files.\n\n # If begin_commit is the first commit of the repository,\n # the git.Commit.parents ivar will be empty.\n # We hav to treat the first commit differently, since it has\n # no previous commit to compare changes from beginning to end.\n if not _diff_begin_commit.parents:\n # Since it is the first commit, files can only have been added.\n for file in _diff_begin_commit:\n _changed_files[os.path.abspath(file)] = 'A'\n # git.Commit.parents returns a tuple,\n # possibly to handle merges.\n # So we expect to work with a tuple from here.\n _diff_begin_commit = (_diff_begin_commit)\n else:\n # Set the beginning to a commit earlier.\n # (or commits {plural} if original commit is a merge)\n _diff_begin_commit = _diff_begin_commit.parents\n\n for commit in _diff_begin_commit:\n for file in commit.diff(_diff_end_commit):\n _changed_files[os.path.abspath(file.a_path)] = file.change_type\n\n return _changed_files", "def log_git_info():\n try:\n git_dir = Path('.git')\n head_file = git_dir / 'HEAD'\n with head_file.open() as f:\n head_contents = f.readline().strip()\n log.info(f'Contents of .git/HEAD: {head_contents}')\n if head_contents.split()[0] == 'ref:':\n hash_file = git_dir / head_contents.split()[1]\n with hash_file.open() as f:\n log.info(f'Current reference hash: {f.readline().strip()}')\n except FileNotFoundError:\n return", "def filename(self):\n fname1, fname2 = self.ad1.filename, self.ad2.filename\n if fname1 != fname2:\n return [f'{fname1} v {fname2}']", "def committees():\n os_committees = Committee()\n os_committees.query()\n os_committees.parse()\n wiki_functions.write_to_csv_file_for_DataTransfer(os_committees,\n os_committees.table)", "def changelog(self, branch, since=None):\n walker = Walker(self.repo, [self.latest_branch_revision(branch)])\n for entry in walker:\n if since is not None and entry.commit.id == since:\n break\n commit = entry.commit\n files = Command(\n 'git show --pretty=\"format:\" --name-only %s' % commit.id,\n cwd=self.path,\n ).out.split()\n yield Commit(\n commit.id,\n commit.committer,\n datetime.fromtimestamp(commit.commit_time),\n commit.message,\n files,\n )", "def file_stat(self, file_path):", "def ondisk_digest(self):\n with open(self.rename_phase_src) as f:\n return hasher(f.read()).hexdigest()" ]
[ "0.67635244", "0.63775945", "0.6034091", "0.5988995", "0.58283305", "0.5755764", "0.5654512", "0.56192213", "0.5432775", "0.5430367", "0.52749014", "0.5257821", "0.52496487", "0.5248489", "0.5241954", "0.5239385", "0.5227689", "0.5223898", "0.5222077", "0.52184683", "0.52133906", "0.52131665", "0.5185653", "0.51828104", "0.5182539", "0.515562", "0.5154641", "0.51522595", "0.5150393", "0.5134131", "0.51173", "0.5116726", "0.5110673", "0.50873935", "0.50862515", "0.50792754", "0.5056856", "0.5051165", "0.5048547", "0.5041562", "0.50365484", "0.5032214", "0.50179094", "0.50125027", "0.50098884", "0.5006838", "0.49981907", "0.49937695", "0.49805027", "0.4960274", "0.49482283", "0.4930222", "0.49248093", "0.4902537", "0.48981315", "0.48959488", "0.48800728", "0.48629013", "0.48625088", "0.48508257", "0.48496163", "0.484914", "0.48178154", "0.4805411", "0.48052117", "0.47998455", "0.47982374", "0.47969863", "0.4791386", "0.47856015", "0.47785097", "0.47756168", "0.47691435", "0.47667423", "0.47643498", "0.47597197", "0.4759666", "0.4756035", "0.47549307", "0.47520912", "0.47411078", "0.47410044", "0.47366467", "0.4736243", "0.4734251", "0.4730873", "0.4725528", "0.47232002", "0.47231027", "0.47224152", "0.4721438", "0.47202682", "0.4702779", "0.47025627", "0.47009805", "0.46998125", "0.46977657", "0.4692057", "0.46919858", "0.46915522" ]
0.7895987
0
Audit names in commit metadata. Names which do not include both a first name and a surname are extremely uncommon and, when they do appear, are generally invalid. As we want people to use their actual name when committing, we do some checks to make sure that what looks like an actual name is present.
def audit_names_in_metadata(self): # Iterate over commits.... for commit in self.repository.commits.values(): for name in [ commit.committer_name, commit.author_name ]: # Is the name whitelisted? if name in self.FullNameWhitelist: continue # As a special case, allow the name 'GitHub' for certain repositories if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist: self.__log_warning(commit.sha1, "Commit has username 'GitHub' (web merge of PR); allowing anyway") continue # Check to see if the name contains spaces - if not - it is probably misconfigured.... if " " not in name.strip(): self.__log_failure(commit.sha1, "Non-full name: " + name) continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audit_filename(self):\n\n for commit in self.repository.commits.values():\n for filename in commit.files_changed:\n if commit.files_changed[ filename ][\"change\"] not in [\"A\",\"R\",\"C\"]:\n continue\n for restriction in self.filename_limits:\n if re.search(restriction, filename):\n self.__log_failure(commit.sha1, \"Invalid filename: \" + filename)", "def set_name(self):\n if self.first_name and self.last_name:\n name_string = \"%s\" % self.first_name\n name_string += \" %s\" % self.last_name\n self.name = name_string\n\n if self.name:\n if not self.first_name and not self.last_name:\n n = HumanName(self.name)\n self.first_name = n.first\n if n.middle:\n self.first_name = n.first + \" \" + n.middle\n self.last_name = n.last\n if n.suffix:\n self.last_name = n.last + \" \" + n.suffix", "def _maybe_set_name(self) -> None:\n if not self.name:\n if isinstance(self.github, dict):\n if self.github.get(\"commit\"):\n self.name = f\"{self.reason}: {self.github['commit']}\"", "def test_first_last_name(self):\n formatted_name = get_formatted_name('jimi', 'hendrix')\n self.assertEqual(formatted_name, 'Jimi Hendrix')", "def test_author_many_lastnames(self):\n inv_search = 'author:\"alvarez gaume, j* r* r*\"'\n spi_search = 'find a alvarez gaume, j r r'\n self._compare_searches(inv_search, spi_search)", "def convert_name(self, human_name):\n\n human_name = HumanName(human_name)\n if human_name.suffix:\n self.metadata[\"gutenberg_name_suffix\"] = human_name.suffix\n human_name.suffix = \"\"\n if human_name.nickname:\n # LOGGER.debug(\"%s nickname: %s\", str(human_name), human_name.nickname)\n no_nickname = copy.copy(human_name)\n no_nickname.nickname = \"\"\n first_name_match = re.match(\n re.sub(r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\", human_name.first, re.UNICODE),\n human_name.nickname,\n re.UNICODE\n )\n # LOGGER.debug(\n # \"%s, %s\",\n # re.sub(\n # r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\",\n # human_name.first,\n # re.UNICODE\n # ),\n # human_name.nickname\n # )\n if first_name_match and len(first_name_match.group(0)) >= len(human_name.first):\n human_name.first = first_name_match.group(0)\n human_name.nickname = human_name.nickname[len(human_name.first):].strip()\n # LOGGER.debug(\"Adding %s to aliases\", str(no_nickname))\n self.metadata[\"aliases\"] = set([str(no_nickname)])\n middle_name_match = re.match(\n re.sub(r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\", human_name.middle, re.UNICODE),\n human_name.nickname,\n re.UNICODE\n )\n # LOGGER.debug(\n # \"%s, %s\",\n # re.sub(\n # r\"(([A-Z])[a-z]*[.])\", r\"\\2\\\\w+\",\n # human_name.middle, re.UNICODE\n # ),\n # human_name.nickname\n # )\n if middle_name_match and len(middle_name_match.group(0)) >= len(human_name.middle):\n human_name.middle = middle_name_match.group(0)\n human_name.nickname = human_name.nickname[len(human_name.middle):].strip()\n # LOGGER.debug(\"Adding %s to aliases\", str(no_nickname))\n self.metadata[\"aliases\"].add(str(no_nickname))\n return human_name", "def sanitize_author(name, email):\n # deal with inconsistent email addresses/names in commits.\n # feel free to fill this method out.\n return name", "def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n # Asserting that formatted_name equals 'Janis Joplin'\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_first_last_name(self):\n formatted_name = get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_first_last_name(self):\n formatted_name = 
get_formatted_name('janis', 'joplin')\n self.assertEqual(formatted_name, 'Janis Joplin')", "def test_first_last_name(self):\n\t\tformatted_name = get_formatted_name('janos', 'jk')\n\t\tself.assertEqual(formatted_name, 'Janos Jk')", "def format_name(self):\n\t\tself.full_name = self.first + \" \" + self.last", "def series_statement_added_entry_personal_name(self, key, value):\n indicator_map1 = {\"0\": \"Forename\", \"1\": \"Surname\", \"3\": \"Family name\"}\n indicator_map2 = {\n \"0\": \"Main entry not represented by pronoun\",\n \"1\": \"Main entry represented by pronoun\"}\n field_map = {\n 'p': 'name_of_part_section_of_a_work',\n '6': 'linkage',\n 'u': 'affiliation',\n 'b': 'numeration',\n '4': 'relator_code',\n 'x': 'international_standard_serial_number',\n 'n': 'number_of_part_section_of_a_work',\n 'a': 'personal_name',\n '8': 'field_link_and_sequence_number',\n 'k': 'form_subheading',\n 't': 'title_of_a_work',\n 'e': 'relator_term',\n 'l': 'language_of_a_work',\n 'c': 'titles_and_other_words_associated_with_a_name',\n 'g': 'miscellaneous_information',\n 'f': 'date_of_a_work',\n 'd': 'dates_associated_with_a_name',\n 'v': 'volume_sequential_designation',\n }\n\n order = utils.map_order(field_map, value)\n\n if key[3] in indicator_map1:\n order.append('type_of_personal_name_entry_element')\n\n if key[4] in indicator_map2:\n order.append('pronoun_represents_main_entry')\n\n return {\n '__order__': tuple(order) if len(order) else None,\n 'name_of_part_section_of_a_work': utils.force_list(\n value.get('p')\n ),\n 'linkage': value.get('6'),\n 'affiliation': value.get('u'),\n 'numeration': value.get('b'),\n 'relator_code': utils.force_list(\n value.get('4')\n ),\n 'international_standard_serial_number': value.get('x'),\n 'number_of_part_section_of_a_work': utils.force_list(\n value.get('n')\n ),\n 'personal_name': value.get('a'),\n 'field_link_and_sequence_number': utils.force_list(\n value.get('8')\n ),\n 'form_subheading': utils.force_list(\n value.get('k')\n ),\n 'title_of_a_work': value.get('t'),\n 'relator_term': utils.force_list(\n value.get('e')\n ),\n 'language_of_a_work': value.get('l'),\n 'titles_and_other_words_associated_with_a_name': utils.force_list(\n value.get('c')\n ),\n 'miscellaneous_information': value.get('g'),\n 'date_of_a_work': value.get('f'),\n 'dates_associated_with_a_name': value.get('d'),\n 'volume_sequential_designation': value.get('v'),\n 'type_of_personal_name_entry_element': indicator_map1.get(key[3]),\n 'pronoun_represents_main_entry': indicator_map2.get(key[4]),\n }", "def ValidateName(args):\n account = properties.VALUES.core.account.Get(required=True)\n if account.find('@') == -1:\n username = account\n else:\n username = account[0:account.find('@')]\n\n args.name = args.name or username", "def combine_name(self):\n if self.first_name.isalpha() and self.last_name.isalpha():\n username = self.first_name + \" \" + self.last_name\n return username\n return 'Names must be alphabets'", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')\n self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')\n self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')", "def test_first_last_middle_name(self):\n\t\tformatted_name = get_formatted_name('Wolfgang','mozart','amadues')\n\t\tself.assertEqual(formatted_name,'Wolfgang Amadues Mozart')", "def test_first_last_middle_name(self):\n 
formatted_name = get_formatted_name('marie', 'curie', 'francis')\n self.assertEqual(formatted_name, 'Marie Francis Curie')", "def test_last_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__last_name=unromanized,\n profile__romanized_last_name=romanized,\n )\n assert CDDWriter.last_name(profile) == expected", "def test_first_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__first_name=unromanized,\n profile__romanized_first_name=romanized,\n )\n assert CDDWriter.first_name(profile) == expected", "def test_contributor_name_no_last_name(self):\n user = User.objects.create(username='admin', first_name='Jordan')\n \n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline,\n author=user)\n self.assertEqual(story.contributor_name, 'Jordan')", "def test_contributor_name_no_last_name(self):\n user = User.objects.create(username='admin', first_name='Jordan')\n \n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. 
Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline,\n author=user)\n self.assertEqual(story.contributor_name, 'Jordan')", "def test_first_last_name(self):\n formatted_name = get_formatted_name('david', 'Malan')\n self.assertEqual(formatted_name, 'David Malan')", "def change_name(change_account):\n change_data(change_account, changed_data='name')", "def test_first_last_middle_name(self):\n formatted_name = get_formatted_name('john', 'smith', 'billy')\n self.assertEqual(formatted_name, 'John Billy Smith')", "def fullname(self, name):\n f, l = name.split(' ')\n self.first = f\n self.last = l", "def test_first_name(self) :\n\t\tformatted_name = get_formatted_name('janis','joplin')\n\t\tself.assertEqual(formatted_name,'Janis Joplin')", "def test_reformatted_full_name():\n assert reformatted_full_name(\"\") == \"\"\n assert reformatted_full_name(\"George\") == \"george\"\n assert reformatted_full_name(\"X Y Z A B\") == \"x b\"", "def test_super_short_author_name(self):\n spi_search = \"fin a er and cn cms\"\n inv_search = \"author:er collaboration:cms\"\n self._compare_searches(inv_search, spi_search)", "def fix_names(users):\n for user in users:\n id = user['id']\n first_name = user['first_name'].strip()\n last_name = user['last_name'].strip()\n if not first_name and not last_name:\n # Empty name: skip\n print (f'Skipping empty name in record {id}')\n continue\n elif first_name == last_name:\n full_name = first_name\n elif first_name.endswith(last_name):\n full_name = first_name\n elif not last_name:\n full_name = first_name\n elif not first_name:\n full_name = last_name\n else:\n # In this case, the user has most likely entered the name\n # correctly split, so skip\n full_name = first_name + last_name\n print (f'Skipping already split name: {first_name} / {last_name} ({id})')\n continue\n \n print (f'Working on \"{full_name}\" ({id})')\n\n # Handle email addresses\n if '@' in full_name:\n print (f' - fixing email address')\n # Remove domain part\n e_name = full_name[:full_name.find('@')]\n if '+' in e_name:\n # Remove alias\n e_name = e_name[:e_name.find('+')]\n # Try to split name parts\n e_name = e_name.replace('.', ' ')\n e_name = e_name.replace('_', ' ')\n e_name = e_name.strip()\n if len(e_name) < 4:\n # Probably just initials: leave email as is\n pass\n else:\n full_name = e_name\n \n # Parse name\n name = nameparser.HumanName(full_name)\n name.capitalize()\n first_name = name.first\n last_name = name.last\n print (f' - splitting name into: {first_name} / {last_name} ({id})')\n yield (first_name, last_name, id)", "def fullname(self, name):\n\n first, last = name.split(' ')\n self.first = first\n self.last = last", "def test_name_attribute_in_base_metadata(self):\n name = 'idsvc.basemeta'\n meta = { 'name': name }\n base_meta = BaseMetadata(api_client=self.IDS_SYS_CLIENT, meta=meta)\n self.assertEqual(base_meta.name, meta['name'])", "def _abbreviate_name(self, row: Series)->str:\n return row['first_name'][0]+'.'+row['last_name']", "def name_check(f_name):\r\n if len(f_name) == 0:\r\n print('The first name must be filled in.')\r\n if len(f_name) < 2:\r\n print(f_name + ' is not a valid name. 
Itis too short.')", "def test_first_name_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_first_name(input_val)\n self.assertEqual(output_val, self.line.first_name)", "def user_profile_setname(token, name_first, name_last):\n if (len(name_first) > 50 or name_first == \"\"):\n raise error.InputError(description=\"First name is not within 1-50 characters\")\n\n if (len(name_last) > 50 or name_last == \"\"):\n raise error.InputError(description=\"Last name is not within 1-50 characters\")\n\n u_id = database.get_current_user(token)\n user = database.get_user_data(u_id)\n user['name_first'] = name_first\n user['name_last'] = name_last\n database.set_user_data(user)", "def concantenation_of_names(self):\n for rec in self:\n if (rec.last_name and rec.first_name and rec.middle_name) and rec.name == False:\n rec.name= str(self.first_name) + \" \" + str(self.middle_name) + \" \" + str(self.last_name)", "def test_get_github_name_positive(self):\n self.assertIsNotNone(app.get_github_name(\"dhh\")[\"user\"])", "def _check_name(self):\n\t\tpass", "def sanitize_name(self):\n self._name = self.get_name().strip()", "def test_organization_valid_name(self):\n hufflepuffs = models.Organization(name='hufflepuffs', title='Huffle Puffs')\n self.assertFalse(hufflepuffs.is_valid_name('#$%#%___2836273untitled'))\n self.assertTrue(hufflepuffs.is_valid_name('hufflepuffs'))", "def full_name(self):\n \tif self.first_name and self.last_name:\n \t\treturn \"{} {}\".format(self.first_name, self.last_name)", "def verify_name_syntax(sv, name, here, argtext, last):\r\n if name.find(Equal)!=-1: # \"=\" is not allowed in names\r\n print(\"\\n\", Err_equal_in_name, \"\\n\", name) # *** Illegal character in name: \"+ Equal +\" *** \r\n raise ReferenceError\r\n\r\n if not name or here==0: # name may not start with a bracket\r\n print(\"\\n\", Err_empty_name) # *** Syntax error: empty name *** \r\n print(name)\r\n if num>2: # common source of empty name error\r\n print(Help_continuation+Mline+\"' ):\") # you may have meant (with continuation character '\"+Mline):\r\n print(lines[num-3].strip(Space)+Col, Mline, Crlf, name) # suggested correction\r\n raise ReferenceError\r\n\r\n if argtext: # name is a function or a dict\r\n fun=name[:here]\r\n if fun in Internal_Functions: \r\n print(\"\\n\", Err_redef_internal_func) # *** Error: You cannot define an internal function *** \r\n print(fun, \"in\", fun+Obr+argtext+Cbr)\r\n raise ReferenceError\r\n \r\n if name[last:]: # name must end with closing bracket after args\r\n print(\"\\n\", Err_text_after_args) # *** Syntax error: text found after arguments *** \r\n print(name)\r\n raise ReferenceError", "def name(self):\n return \"%s %s\" % (self.first_name, self.last_name)", "def clean_user_names(record):\n if 'first_name' in record and 'last_name' in record:\n #Remove all special characters from first_name/last name\n lower_first_name = record['first_name'].replace('-', '')\\\n .replace('_', '').replace('[', '')\\\n .replace(']', '').replace(' ', '')\\\n .lower()\n lower_last_name = record['last_name'].replace('-', '')\\\n .replace('_', '').replace('[', '')\\\n .replace(']', '').replace(' ', '')\\\n .lower()\n return lower_first_name, lower_last_name\n else:\n return None, None", "def test_last_name_first_name(self):\n current_resume = resume.objects.first()\n expected = 'Bielinski, Nicholas'\n case = current_resume.last_name_first_name()\n self.assertEqual(case, expected)", "def name_line_edit_changed(self, text):\n if re.findall(r\"[^a-zA-Z0-9\\-_ 
]+\", text):\n self.name_line_edit.set_invalid(\"Invalid character\")\n else:\n if text == \"\":\n self.name_line_edit.set_invalid(\"Enter a name\")\n else:\n self.name_line_edit.set_valid()", "def process(business: Business, filing: Dict, filing_meta: FilingMeta):\n logger.debug('processing Change of Name: %s', filing)\n\n if name_request_json := filing['changeOfName'].get('nameRequest'):\n new_name = name_request_json.get('legalName')\n else:\n new_name = filing['changeOfName'].get('legalName')\n\n filing_meta.change_of_name = {'fromLegalName': business.legal_name,\n 'toLegalName': new_name}\n\n business.legal_name = new_name", "def test_commit_author(repository: Repository) -> None:\n (repository.path / \"a\").touch()\n\n author = pygit2.Signature(\"Katherine\", \"[email protected]\")\n repository.commit(message=\"empty\", author=author)\n\n head = repository.head.commit\n assert author.name == head.author.name and author.email == head.author.email", "def last_name():\r\n\r\n return surnames()", "def test_named_entities(self) -> None:\n for named_entitity_rule in self.rules.named_entities:\n identity: str = named_entitity_rule[\"identity\"]\n type: Optional[str] = named_entitity_rule.get(\"type\")\n subtype: Optional[str] = named_entitity_rule.get(\"subtype\")\n invalid: Optional[str] = named_entitity_rule.get(\"invalid\")\n valid: Optional[str] = named_entitity_rule.get(\"valid\")\n\n for named_entity in self.report.get_named_entities(identity, type, subtype):\n text: str = \" \".join([w.text for w in named_entity.words])\n if valid and (not re.search(valid, text, re.I)):\n self.add_error(\n named_entitity_rule[\"message\"],\n self.report.get_words_position(named_entity.words),\n )\n elif invalid and re.search(invalid, text, re.I):\n self.add_error(\n named_entitity_rule[\"message\"],\n self.report.get_words_position(named_entity.words),\n )", "def process_name(name):\n def getnames_form3(a):\n \"\"\"\n Case with two commas: the name is of the format\n von Last, Jr, First\n like in: von Hicks, III, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[2].strip()\n junior = a[1].strip()\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def getnames_form2(a):\n \"\"\"\n Case with one comma: the name is of the format\n von Last, First\n like in: von Hicks, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[1].strip()\n junior = ''\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior]\n\n def getnames_form1(a):\n \"\"\"\n Case with NO commas: the name is of the format\n First von Last\n like in: Michael von Hicks\n \"\"\"\n last = a[0].split(' ')\n nfn = 0\n for l in last:\n if l != \"\" and not l[0].islower():\n nfn += 1\n else:\n break\n if nfn == len(last):\n nfn = -1\n\n full_first = ' '.join(last[:nfn])\n full_first = full_first.replace('.', ' ')\n full_last = ' '.join(last[nfn:])\n junior = \" \"\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def get_vonlast(full_last):\n von = \"\"\n last = \"\"\n\n for l in full_last.split(' '):\n if len(l) > 0 and l[0].islower():\n von += l.lower() + \" \"\n else:\n last += l + \" \"\n return von, last\n\n # Start the processing\n a = name.split(',')\n if len(a) == 3:\n fullname = getnames_form3(a)\n elif len(a) == 2:\n fullname = getnames_form2(a)\n elif len(a) == 1:\n fullname = getnames_form1(a)\n else:\n fullname = []\n\n return fullname", "def 
format_name(first_name, last_name, middle_name=\"\"):\r\n if middle_name:\r\n full_name = first_name + \" '\" + middle_name + \"' \" + last_name\r\n else:\r\n full_name = first_name + \" \" + last_name\r\n return full_name.title()", "def _name_changed ( self, name ):\n self.name_last = parse_name( name )[-1]\n self.inputs_changed()", "def Title(self, **kwargs):\n full_name = ''\n if self.getFirstname() == '' or self.getLastname() == '':\n if not self.getOrganization():\n return '...'\n else:\n return self.getOrganization()\n format = kwargs.get('format', None)\n if format == 'natural':\n full_name = '%s %s' % (self.getFirstname(), self.getLastname())\n else:\n full_name = '%s %s' % (self.getLastname(), self.getFirstname())\n return '%s' % full_name", "def formatted_name(first_name, last_name, middle_name = ''):\n full_name = first_name + ' ' + middle_name + ' ' + last_name\n return full_name.title()", "def test_last_name_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_last_name(input_val)\n self.assertEqual(output_val, self.line.last_name)", "def test_change_name_without_name(self):\r\n self.client.login(username=self.student.username, password='test')\r\n change_name_url = self.get_url()\r\n resp = self.client.post(change_name_url, {\r\n 'new_name': '',\r\n 'rationale': 'change identity'\r\n })\r\n response_data = json.loads(resp.content)\r\n self.assertFalse(response_data['success'])", "def confirm_name_change(user, pending_name_change):\n user_profile = _UserProfile.objects.get(user=user)\n\n # Store old name in profile metadata\n meta = user_profile.get_meta()\n if 'old_names' not in meta:\n meta['old_names'] = []\n meta['old_names'].append(\n [user_profile.name, pending_name_change.rationale, datetime.datetime.now(UTC).isoformat()]\n )\n user_profile.set_meta(meta)\n\n user_profile.name = pending_name_change.new_name\n user_profile.save()\n pending_name_change.delete()", "def test_contributor_name(self):\n user = User.objects.create(username='admin', first_name='Jordan',\n last_name='Wirfs-Brock')\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline,\n author=user)\n self.assertEqual(story.contributor_name, 'Jordan W.')", "def test_contributor_name(self):\n user = User.objects.create(username='admin', first_name='Jordan',\n last_name='Wirfs-Brock')\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. 
Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline,\n author=user)\n self.assertEqual(story.contributor_name, 'Jordan W.')", "def unify_profile_name(first_name: str, last_name: str):\n concat_title = first_name + \" \" + last_name\n # Strip leading and trailing spaces and then replace double white space two times\n # (3 -> 2 -> 1)\n concat_title = concat_title.strip().replace(\" \", \" \"). replace(\" \", \" \")\n\n # The unified title is again the lowercase version without spaces\n unified_title = concat_title.replace(\" \", \"\").lower()\n unified_title = re.sub('[-_.,:;\\|/\\{\\}\\(\\)\\[\\]\\'\\\"\\+]','', unified_title)\n trimmed_unified_title = unified_title[:150]\n return trimmed_unified_title, concat_title", "def _username_from_name(self, name):\r\n return name.replace(' ', '_')", "def audit_emails_in_metadata(self):\n\n # Iterate over commits....\n disallowed_domains = [\"localhost\", \"localhost.localdomain\", \"(none)\", \"bombardier.com\", \"rail.bombardier.com\"]\n for commit in self.repository.commits.values():\n for email_address in [ commit.committer_email, commit.author_email ]:\n # Extract the email address, and reject them if extraction fails....\n extraction = re.match(\"^(\\S+)@(\\S+)$\", email_address)\n if not extraction:\n self.__log_failure(commit.sha1, \"Seemingly invalid email address: \" + email_address)\n continue\n\n # Don't allow domains which are disallowed...\n domain = extraction.group(2)\n if domain in disallowed_domains:\n self.__log_failure(commit.sha1, \"Email address using a blocked domain: \" + email_address)\n continue\n\n # Ensure they have a valid MX/A entry in DNS....\n try:\n dns.resolver.query(domain, \"MX\")\n except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel):\n try:\n dns.resolver.query(domain, \"A\")\n except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel, dns.resolver.NXDOMAIN):\n self.__log_failure(commit.sha1, \"Email address has an invalid domain : \" + email_address)\n except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers):\n self.__log_failure(commit.sha1, \"Email address has an invalid domain : \" + email_address)", "def get_name(self):\r\n return ('%s %s' % ( self.first_name, self.last_name ))", "def last_name(self, name):\n self._last_name = name", "def get_formatted_name(first_name,last_name):\n\tfull_name=first_name+ ' ' +last_name\n\treturn full_name.title()", "def to_db2_submitting_name(name_json):\n if name_json.get('businessName'):\n return str(name_json.get('businessName')).strip().upper()[0:40]\n ind_name = name_json.get('personName')\n db2_name = str(ind_name['first']).strip().upper() + ' '\n last_name = str(ind_name['last']).strip().upper()\n if (len(db2_name) + len(last_name)) < 40 and ind_name.get('middle'):\n middle_name = str(ind_name['middle']).strip().upper()\n if (len(db2_name) + len(middle_name) + len(last_name)) < 40:\n db2_name += middle_name + ' '\n db2_name += last_name\n return db2_name[:40]", "def get_formatted_name(first, last):\n\tfull_name = first + ' ' + last\n\treturn full_name.title()", "def test_legal_names(self):\n adjectives = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\n nouns = ['Anvil', 'Catapult' 'Disguise' 'Mousetrap', '???']\n products = acme_report.generate_products()\n for prod in range(len(products)):\n prod_name = products[prod].name\n name_split = prod_name.split()\n self.assertIn(name_split[0], adjectives)\n 
self.assertIn(name_split[1], nouns)", "def full_name(first_name, last_name):\n\t\n\treturn first_name + \" \" + last_name", "def test_first_last(self):\n\n full_name = get_full_name(\"pony\", \"cat\")\n self.assertEqual(full_name, \"Pony Cat\")\n\n full_name = get_full_name(\"goat\", \"cat\")\n self.assertEqual(full_name, \"Goat Cat\")", "def get_formatted_name(first_name, last_name): \r\n full_name = f\"{first_name} {last_name}\"\r\n return full_name.title()", "def fix_name_wiki(artist):\n if \"Alonzo Cano\" in artist:\n return \"Alonso Cano\"\n if \"Michelangelo\" in artist:\n return \"Michelangelo Buonarroti\"\n return artist", "def full_name(self,first_name):\n full_name = self.first_name + ' ' + self.last_name\n return full_name", "async def name(self, ctx:utils.Context, *, username:str):\n\n if len(username) > 32:\n await ctx.send('That username is too long.')\n return\n await self.bot.user.edit(username=username)\n await ctx.send('Done.')", "def get_formatted_name(first_name,last_name):\n full_name= first_name + \" \"+last_name\n return full_name.title()", "def set_owner_name(self, data, **kwargs):\n try:\n git_url = GitURL.parse(data[\"git_url\"])\n except UnicodeError as e:\n raise ValidationError(\"`git_url` contains unsupported characters\") from e\n except ConfigurationError as e:\n raise ValidationError(\"Invalid `git_url`\") from e\n\n if git_url.owner is None:\n raise ValidationError(\"Invalid `git_url`\")\n data[\"owner\"] = git_url.owner\n\n if git_url.name is None:\n raise ValidationError(\"Invalid `git_url`\")\n data[\"name\"] = git_url.name\n data[\"slug\"] = normalize_to_ascii(data[\"name\"])\n\n return data", "async def name(self, ctx, *, name):\n # [p]set name <name>\n\n name = name.strip()\n if name != \"\":\n try:\n await self.bot.edit_profile(username=name)\n except:\n await self.bot.say(\"Failed to change name. Remember that you\"\n \" can only do it up to 2 times an hour.\"\n \"Use nicknames if you need frequent \"\n \"changes. {}set nickname\".format(ctx.prefix))\n else:\n await self.bot.say(\"Done.\")\n else:\n await send_command_help(ctx)", "def test_contributor_name_no_names(self):\n user = User.objects.create(username='admin')\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. 
Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline,\n author=user)\n self.assertEqual(story.contributor_name, 'admin')", "def format_rapp_resp_name(names):\n if names is not None:\n #remove trailing \"'\"\n if names[-1]==\"'\":\n names=names[:-1]\n #change name format: \"Firstname LASTNAME\" -> \"LASTNAME Firstname\"\n names=names.split()\n first_name=last_name=\"\"\n for name in names:\n #get last names\n if name.isupper():\n last_name+=name+\" \"\n #get first names\n else:\n first_name+=name+\" \"\n\n names=last_name+first_name[:-1]\n\n return names", "def full_name(self) -> str:\r\n\t\tname = f'{self.last_name} {self.first_name}'\r\n\t\tif self.middle_name:\r\n\t\t\tname += ' ' + self.middle_name\r\n\t\treturn name", "def account_name(self):\n\n name1 = self.business_trade_name\n name2 = self.business_name\n\n if not name1 and not name2:\n return 'NAME MISSING - ' + self.license_number\n elif name1 and not name2:\n return name1\n elif name2 and not name1:\n return name2\n else:\n return name1 + ' (' + name2 + ')'", "def parse_name(first_name, last_name):\n\n return first_name + \" \" + last_name", "def split_name(fullname):", "def update_firstname(state: UserCreate, firstname: str) -> None:\n state.name.first = firstname\n state.slug = slugify(f\"super-user: {state.name.first} {state.name.last}\")", "def initialled_name(obj):\n initials = ''.join([name[0] for name in obj.first_names.split(' ')])\n return \"{}, {}\".format(obj.last_names, initials)", "def extract_names_from_metadata_sheet(self):\n\n # parse all of the names (not orgs) and add them to a counter\n names_counter = Counter()\n with open(METADATA_CSV, encoding='utf-8') as file:\n csv_file = csv.DictReader(file)\n\n for line in csv_file:\n for element in ['author', 'recipients', 'cced']:\n for person_or_org in [p.strip() for p in line[element].split(';')]:\n # if at least a comma -> most likely a person\n if len(person_or_org.split(',')) > 1:\n names_counter[person_or_org] += 1\n\n # for each element in the counter, add them to the people set.\n for name in names_counter:\n self.people.add(Person(name_raw=name, count=names_counter[name], aliases=[name]))\n self.merge_all_duplicates()", "def expected_entity_names(emplacement,\n association,\n composition,\n violation,\n membership_organization,\n membership_person):\n return [\n emplacement[0].site.get_value().value.name,\n association[0].area.get_value().value.name,\n composition[0].child.get_value().value.name.get_value().value,\n truncatewords(violation.description.get_value(), 10),\n membership_organization.organization.get_value().value.name.get_value().value,\n membership_person[0].member.get_value().value.name.get_value().value\n ]", "def test_check_metadata_matches_nametable(self):\n contents = self.read_metadata_contents()\n fm = Metadata.get_family_metadata(contents)\n for font_metadata in fm.fonts:\n ttfont = Font.get_ttfont_from_metadata(self.operator.path, font_metadata)\n\n report = '%s: Family name was supposed to be \"%s\" but is \"%s\"'\n report = report % (font_metadata.name, fm.name,\n ttfont.familyname)\n self.assertEqual(ttfont.familyname, fm.name, report)\n self.assertEqual(ttfont.fullname, font_metadata.full_name)", "async def namegen(self, ctx):\n genname = names.get_full_name()\n await say(ctx, f\":abc: - Your generated name is ``{genname}``! 
They live at ``{fake.address()}``.\")", "def get_full_name(self):\n full_name = '{0} {1} {2}'.format(self.last_name, self.first_name, self.patronymic)\n return full_name.strip()", "def get_formatted_name(first, last):\n full_name = first + ' ' + last\n return full_name.title()", "def test_change_name_post_request(self):\r\n self.client.login(username=self.student.username, password='test')\r\n change_name_url = self.get_url()\r\n resp = self.client.post(change_name_url, {\r\n 'new_name': 'waqas',\r\n 'rationale': 'change identity'\r\n })\r\n response_data = json.loads(resp.content)\r\n user = UserProfile.objects.get(user=self.student.id)\r\n meta = json.loads(user.meta)\r\n self.assertEquals(user.name, 'waqas')\r\n self.assertEqual(meta['old_names'][0][1], 'change identity')\r\n self.assertTrue(response_data['success'])", "def _log_changed_names(changed_names: Iterable[Tuple[str, str]]) -> None:\n if not changed_names:\n return\n from .utils import logger\n\n logger.warning(\"New names:\")\n for orig_name, new_name in changed_names:\n logger.warning(\"* %r -> %r\", orig_name, new_name)", "def get_short_name(self):\n return f\"{self.first_name} {self.last_name[:1]}\" if self.first_name else self.username", "def get_name(self):\n return \"%s %s\" % (\n self.first_name,\n self.last_name\n )", "def test_duplicate_name_refs(renderer):\n assert renderer.name_ref(User.age) == renderer.name_ref(User.age) == \"#n0\"", "def commit_names(self, commit):\n return []" ]
[ "0.62963563", "0.6226209", "0.6223605", "0.6148646", "0.6140524", "0.61402285", "0.6099476", "0.60691816", "0.6056856", "0.6056856", "0.6024149", "0.6016685", "0.5969354", "0.5957472", "0.5951923", "0.5932206", "0.5932206", "0.5906392", "0.59056324", "0.589487", "0.58702075", "0.5865046", "0.5865046", "0.5857882", "0.5834754", "0.5789346", "0.57806265", "0.5774827", "0.57658744", "0.575959", "0.57590365", "0.5707801", "0.56984067", "0.56901383", "0.5689867", "0.56658316", "0.56619555", "0.56558216", "0.56163645", "0.55919147", "0.5589174", "0.55814195", "0.5577876", "0.5566218", "0.55657923", "0.5543422", "0.5528703", "0.55200076", "0.55077815", "0.5505791", "0.5505693", "0.5505358", "0.54988235", "0.5498595", "0.54897594", "0.5488915", "0.5482248", "0.5480699", "0.5480358", "0.54769206", "0.54680145", "0.54680145", "0.5460632", "0.54571223", "0.54565376", "0.5454895", "0.54482025", "0.5445054", "0.54425925", "0.5436634", "0.5429443", "0.54291666", "0.5421788", "0.5410421", "0.5407113", "0.5402416", "0.539863", "0.53981566", "0.5398058", "0.5387605", "0.5386874", "0.53836334", "0.5380802", "0.5375221", "0.5368565", "0.53605586", "0.535806", "0.53533536", "0.5346589", "0.53462976", "0.53455216", "0.5344478", "0.5341528", "0.5339894", "0.5334183", "0.5333517", "0.53331095", "0.5332314", "0.5328704", "0.5327812" ]
0.8450045
0
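The audit routines stored in the document fields of the neighbouring records are class methods that depend on an external `self.repository` object and a `self.__log_failure` helper, so they cannot run on their own. A minimal standalone sketch of the name and e-mail checks they describe might look like the following; the use of GitPython, the `audit_commit_metadata` name, and the exact blocked-domain list are assumptions made for illustration rather than part of either record:

import re

import git  # GitPython; assumed to be available for this sketch


def audit_commit_metadata(repo_path, full_name_whitelist=frozenset()):
    """Yield (sha, problem) pairs for suspicious author/committer metadata."""
    blocked_domains = {"localhost", "localhost.localdomain", "(none)"}
    repo = git.Repo(repo_path)
    for commit in repo.iter_commits():
        # Both the author and the committer identities are checked, mirroring
        # the two loops in the surrounding audit methods.
        for person in (commit.author, commit.committer):
            # Name check: anything without a space is unlikely to be "First Last".
            if person.name not in full_name_whitelist and " " not in person.name.strip():
                yield commit.hexsha, "non-full name: %s" % person.name
            # E-mail check: basic user@host shape plus a blocked-domain list
            # (the DNS MX/A lookups from the original method are omitted here).
            match = re.match(r"^(\S+)@(\S+)$", person.email or "")
            if not match:
                yield commit.hexsha, "invalid email address: %s" % person.email
            elif match.group(2) in blocked_domains:
                yield commit.hexsha, "blocked email domain: %s" % person.email


if __name__ == "__main__":
    # Run from (or point at) a repository root; prints one line per finding.
    for sha, problem in audit_commit_metadata("."):
        print(sha[:12], problem)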
Audit commit metadata. Invalid hostnames such as localhost or (none) will be caught by this auditor. This will ensure that invalid email addresses or users will not show up in commits.
def audit_emails_in_metadata(self): # Iterate over commits.... disallowed_domains = ["localhost", "localhost.localdomain", "(none)", "bombardier.com", "rail.bombardier.com"] for commit in self.repository.commits.values(): for email_address in [ commit.committer_email, commit.author_email ]: # Extract the email address, and reject them if extraction fails.... extraction = re.match("^(\S+)@(\S+)$", email_address) if not extraction: self.__log_failure(commit.sha1, "Seemingly invalid email address: " + email_address) continue # Don't allow domains which are disallowed... domain = extraction.group(2) if domain in disallowed_domains: self.__log_failure(commit.sha1, "Email address using a blocked domain: " + email_address) continue # Ensure they have a valid MX/A entry in DNS.... try: dns.resolver.query(domain, "MX") except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel): try: dns.resolver.query(domain, "A") except (dns.resolver.NoAnswer, dns.exception.Timeout, dns.name.EmptyLabel, dns.resolver.NXDOMAIN): self.__log_failure(commit.sha1, "Email address has an invalid domain : " + email_address) except (dns.resolver.NXDOMAIN, dns.resolver.NoNameservers): self.__log_failure(commit.sha1, "Email address has an invalid domain : " + email_address)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n continue\n\n # As a special case, allow the name 'GitHub' for certain repositories\n if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist:\n self.__log_warning(commit.sha1, \"Commit has username 'GitHub' (web merge of PR); allowing anyway\")\n continue\n\n # Check to see if the name contains spaces - if not - it is probably misconfigured....\n if \" \" not in name.strip():\n self.__log_failure(commit.sha1, \"Non-full name: \" + name)\n continue", "def lint_commit_author(commit):\n success = True\n if commit.author.email.endswith('users.noreply.github.com'):\n error(\n 'Commit author has no valid email address set: %s. '\n 'Use \"git config user.email [email protected]\" to '\n 'set a valid email address, then update the commit '\n 'with \"git rebase -i\" and/or '\n '\"git commit --amend --reset-author\". '\n 'Also check your GitHub settings at '\n 'https://github.com/settings/emails: your email address '\n 'must be verified, and the option \"Keep my email address '\n 'private\" must be disabled.' % (commit.author.email, ), commit)\n success = False\n\n if ' ' not in commit.author.name:\n warning(\n 'The commit author name \"%s\" contains no space. '\n 'Use \"git config user.name \\'Johnny English\\'\" to '\n 'set your real name, and update the commit with \"git rebase -i \" '\n 'and/or \"git commit --amend --reset-author\".' %\n (commit.author.name, ), commit)\n # A warning doesn't fail lint.\n\n return success", "def prepare_commit(self, commit):\n header = yaml.dump(commit.meta, default_flow_style=False)\n header += \"---\\n\"\n if commit.value is None:\n return bytes(header)\n else:\n return bytes(header) + bytes(commit.value)", "def get_commit_change_stats(self, commit_url='', full_name='', commit_sha=''):\n if commit_url == '' and (commit_sha == '' and full_name == ''):\n raise BaseException('commit url could not be generated. 
Commit url, commit sha and full name not set')\n return None\n url = commit_url\n if url == '':\n url = COMMIT_DETAILS.format(commit_sha=commit_sha, full_name=full_name)\n url = self.get_full_url(url)\n\n json_data = loads(self.get_from_net(url))\n stats = {'additions': 0, 'deletions': 0}\n if 'stats' in json_data:\n stats['additions'] = json_data['stats']['additions']\n stats['deletions'] = json_data['stats']['deletions']\n\n return stats", "def commit_detail(self, commit):\n\n files_changes = {\n diff.a_path for diff in commit.diff()\n }\n\n return {\n 'id': commit.hexsha,\n 'date': time.strftime(\n \"%a %b %d %H:%M:%S %Y\",\n time.gmtime(commit.committed_date)\n ),\n 'message': commit.message,\n 'author_name': commit.author.name,\n 'author_email': commit.author.email,\n 'files_change_number': len(files_changes)\n }", "def commit_names(self, commit):\n return []", "def audit_filename(self):\n\n for commit in self.repository.commits.values():\n for filename in commit.files_changed:\n if commit.files_changed[ filename ][\"change\"] not in [\"A\",\"R\",\"C\"]:\n continue\n for restriction in self.filename_limits:\n if re.search(restriction, filename):\n self.__log_failure(commit.sha1, \"Invalid filename: \" + filename)", "def _get_commit_info(commit: git.Commit, pretty_format: str) -> str:\n try:\n return commit.repo.git.show(commit.hexsha, pretty=f\"format:{pretty_format}\")\n except git.GitCommandError as error:\n raise PackitException(\n f\"Cannot find commit {commit.hexsha!r} to check its signature.\", error\n )", "def _trusted_commit(\n self, committer_id, commit_type, commit_message, commit_cmds):\n base_models.VersionedModel._trusted_commit( # pylint: disable=protected-access\n self, committer_id, commit_type, commit_message, commit_cmds)\n\n # Create and delete events will already be recorded in the\n # ExplorationModel.\n if commit_type not in ['create', 'delete']:\n exp_models.ExplorationCommitLogEntryModel(\n id=('rights-%s-%s' % (self.id, self.version)),\n user_id=committer_id,\n exploration_id=self.id,\n commit_type=commit_type,\n commit_message=commit_message,\n commit_cmds=commit_cmds,\n version=None,\n post_commit_status=self.status,\n post_commit_community_owned=self.community_owned,\n post_commit_is_private=(\n self.status == constants.ACTIVITY_STATUS_PRIVATE)\n ).put()", "def test_commit_author(repository: Repository) -> None:\n (repository.path / \"a\").touch()\n\n author = pygit2.Signature(\"Katherine\", \"[email protected]\")\n repository.commit(message=\"empty\", author=author)\n\n head = repository.head.commit\n assert author.name == head.author.name and author.email == head.author.email", "def format_commit_header(commit):\n\n result = {\n \"repository\": commit[0][0],\n \"published\": commit[0][1],\n \"author\": commit[0][2],\n \"description\": commit[0][5],\n \"commit\": commit[0][6],\n \"timestamp\": commit[0][7]\n }\n return result", "def make_log_entries(commits, git_repo):\n entries = []\n # Add header\n author = git_repo.get_author_info()\n entries.append(\"* %s %s <%s> %s\" % \\\n (datetime.datetime.now().strftime(\"%a %b %d %Y\"),\n author.name, author.email, get_version(git_repo,\n commits[0])))\n for commit in commits:\n commit_info = git_repo.get_commit_info(commit)\n entries.append(\"- %s\" % commit_info[\"subject\"])\n return entries", "def _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body):\n errors = []\n\n # List of words a commit title can start with\n commit_title_start_words = filter(\n lambda x: x, 
COMMIT_TITLE_START_WORDS.splitlines())\n\n author_errors = _validate_email(author, 'Author')\n committer_errors = _validate_email(committer, 'Committer')\n\n if author_errors:\n errors.extend(author_errors)\n if committer_errors:\n errors.extend(committer_errors)\n\n title_words = title.split(' ', 1)\n\n # Check if in imperative tense\n if re.search(r'(ed|ing|s)$', title_words[0]):\n errors.append((\n 'title-imperative-tense-check',\n 'Commit title is not in imperative tense'))\n\n # Check if first word is capitalized\n if re.match(r'^[^A-Z]', title_words[0]):\n errors.append((\n 'title-capitalization-check',\n 'Commit title is not capitalized'))\n\n # Check if title begins with known start word\n if title_words[0] not in commit_title_start_words:\n errors.append((\n 'title-verb-check',\n 'Commit title does not begin with a verb'))\n\n # Check if this is a fixup! commit\n if re.match(r'^fixup!', title_words[0]):\n errors.append((\n 'title-fixup-check',\n 'Commit title starts with fixup! '))\n\n # Check if this is a squash! commit\n if re.match(r'^squash!', title_words[0]):\n errors.append((\n 'title-squash-check',\n 'Commit title starts with squash! '))\n\n # Check if the commit title ends in whitespace or punctuation\n if len(title_words) > 1 and re.search(r'[\\s\\W]$', title_words[1]):\n errors.append((\n 'title-whitespace-punctuation-check',\n 'Commit title ends in whitespace or punctuation'))\n\n # Check if the title is greater than 50 characters in length\n if len(title) > 50:\n errors.append((\n 'title-length-check',\n 'Commit title longer than 50 characters'))\n\n # Check if separator line (between title and body) is empty\n if separator is not None and separator != '':\n errors.append((\n 'message-separator-check',\n 'Missing blank line between title and body'))\n\n # Check if the commit message has a body\n if body == []:\n errors.append((\n 'body-check',\n 'Missing commit message body'))\n\n # Check if any line in the body is greater than 72 characters in legnth\n for body_line in body:\n if len(body_line) <= 72:\n continue\n errors.append((\n 'body-length-check',\n 'Commit message body line > 72 characters'))\n break\n\n # Check if commit is a merge commit\n if merge is not None:\n errors.append((\n 'commit-merge-check',\n 'Commit is a merge commit'))\n\n # Check commit diff for whitespace errors\n git_diff_cmd = shlex.split(\n 'git show --check {commit_sha1}'.format(\n commit_sha1=commit_sha1))\n\n has_whitespace_issue = None\n f, _ = tempfile.mkstemp()\n has_whitespace_issue = subprocess.call(git_diff_cmd,\n stdout=f, stderr=f, close_fds=True)\n os.close(f)\n\n if has_whitespace_issue:\n errors.append((\n 'diff-whitespace-check',\n 'Commit diff has whitespace issues'))\n\n return errors", "def FakeCommitAsDict(commit_self):\n git_hash = commit_self.git_hash\n n = git_hash[len('git_hash_'):]\n return {\n 'repository': 'chromium',\n 'git_hash': git_hash,\n 'url': 'https://example.com/repository/+/' + git_hash,\n 'author': 'author%[email protected]' % (n,),\n 'subject': 'Subject.',\n 'message': 'Subject.\\n\\nCommit message.',\n }", "def commits() -> None:\n project = get_project(require=True)\n commits_data = request('get', f'/api/v0/projects/{project.id}/commits/').json()\n current_commit = None\n try:\n current_commit = get_current_commit(project.directory)\n except Exception:\n pass\n\n # Filter out ad-hoc executions (and remove the adhocness marker)\n commits_data = [commit for commit in commits_data if not commit.pop('adhoc', False)]\n\n # Mark the current commit\n for 
commit in commits_data:\n if commit['identifier'] == current_commit:\n commit['identifier'] += ' (current)'\n\n print_table(commits_data)", "def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, 
title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = 
bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n\n return commit_info", "def test_git_commits(self):\n event_id = dog.Event.create(title=\"Testing git commits\", text=\"\"\"$$$\n eac54655 * Merge pull request #2 from DataDog/alq-add-arg-validation ([email protected])\n |\\\n 760735ef | * origin/alq-add-arg-validation Simple typecheck between metric and metrics ([email protected])\n |/\n f7a5a23d * missed version number in docs ([email protected])\n $$$\"\"\", event_type=\"commit\", source_type_name=\"git\", event_object=\"0xdeadbeef\")['event']['id']\n event = self.get_event_with_retry(event_id)\n self.assertEqual(event['event']['title'], \"Testing git commits\")", "def commit(self, msg):\n self.runtime.logger.info('Commit config: {}'.format(msg))\n with Dir(self.runtime.metadata_dir):\n exectools.cmd_assert([\"git\", \"add\", \".\"])\n exectools.cmd_assert([\"git\", \"commit\", \"--allow-empty\", \"-m\", msg])", "def _trusted_commit(\n self, committer_id, commit_type, commit_message, commit_cmds):\n base_models.VersionedModel._trusted_commit( # pylint: disable=protected-access\n self, committer_id, commit_type, commit_message, commit_cmds)\n\n # Create and delete events will already be recorded in the\n # CollectionModel.\n if commit_type not in ['create', 'delete']:\n collection_models.CollectionCommitLogEntryModel(\n id=('rights-%s-%s' % (self.id, self.version)),\n user_id=committer_id,\n collection_id=self.id,\n commit_type=commit_type,\n commit_message=commit_message,\n commit_cmds=commit_cmds,\n version=None,\n post_commit_status=self.status,\n post_commit_community_owned=self.community_owned,\n post_commit_is_private=(\n self.status == constants.ACTIVITY_STATUS_PRIVATE)\n ).put()", "def test_host_file_audit(host):\n with host.sudo():\n host.run(\"touch /etc/hosts\")\n audit_log = host.run(\"journalctl -u auditd --since \\\"10 seconds ago\\\" | grep \\\"/etc/hosts\\\"\")\n assert audit_log.stdout", "def test_message_truncated_correctly_commit_log_entry(self):\n commit = collection_models.CollectionCommitLogEntryModel.create(\n 'b', 0, 'committer_id', 'a', 'a' * 400, [{}],\n constants.ACTIVITY_STATUS_PUBLIC, False)\n commit.collection_id = 'b'\n commit.update_timestamps()\n commit.put()\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)\n\n # Ensure nothing happens to messages of proper length.\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)", "def _helperAuditMetadata(syn,temp,metaDf,refCol,cols2Check,fileExts,\n entityMissMetadata,incorrectAnnotated,missingAnno):\n \n print \"Checking annotations against metadata...\"\n tempDict = temp.annotations\n tempId = temp.id\n exts = ')|('.join(fileExts)\n exts = r'(' + exts + ')'\n tempName = re.sub(exts,\"\",temp.name)\n \n if bool(tempDict):\n row = metaDf.loc[metaDf[refCol] == tempName]\n if row.empty:\n entityMissMetadata.append(tempId)\n print \"missing metadata\"\n else:\n for colName in cols2Check:\n print \">%s checking...\" % colName\n if colName in tempDict.keys():\n if map(str,row[colName])[0] != temp[colName][0]:\n if colName in incorrectAnnotated.keys():\n incorrectAnnotated[colName].append(tempId)\n else:\n incorrectAnnotated[colName] = [tempId]\n print \">>incorrect\"\n else:\n print 
\">>Passed!\"\n else:\n if colName in missingAnno.keys():\n missingAnno[colName].append(tempId)\n else:\n missingAnno[colName] = [tempId]\n print \">>missing\"\n print \"\"", "def _maybe_set_name(self) -> None:\n if not self.name:\n if isinstance(self.github, dict):\n if self.github.get(\"commit\"):\n self.name = f\"{self.reason}: {self.github['commit']}\"", "def test_commit_committer(repository: Repository) -> None:\n (repository.path / \"a\").touch()\n\n committer = pygit2.Signature(\"Katherine\", \"[email protected]\")\n repository.commit(message=\"empty\", committer=committer)\n\n head = repository.head.commit\n assert (\n committer.name == head.committer.name\n and committer.email == head.committer.email\n )", "def _clean_commit(self, line):\n cleaned_line = {\n 'repo': line['origin'],\n 'hash': line['data_commit'],\n 'author': line['data_Author'],\n 'category': \"commit\",\n 'created_date': utils.str_to_dt_data(line['data_AuthorDate']),\n 'commit': line['data_Commit'],\n 'commit_date': utils.str_to_dt_data(line['data_CommitDate']),\n 'files_no': len(line['data_files']),\n 'refs': line['data_refs'],\n 'parents': line['data_parents'],\n 'files': line['data_files']\n }\n\n actions = 0\n for file in line['data_files']:\n if 'action' in file:\n actions += 1\n cleaned_line['files_action'] = actions\n\n try:\n non_merge = math.isnan(line['data_Merge'])\n\n except (TypeError, KeyError):\n non_merge = False\n\n cleaned_line['merge'] = not non_merge\n return cleaned_line", "def svn_client_commit_info_t_author_set(svn_client_commit_info_t_self, char_author): # real signature unknown; restored from __doc__\n pass", "def sanitize_author(name, email):\n # deal with inconsistent email addresses/names in commits.\n # feel free to fill this method out.\n return name", "def _trusted_commit(\n self, committer_id, commit_type, commit_message, commit_cmds):\n base_models.VersionedModel._trusted_commit( # pylint: disable=protected-access\n self, committer_id, commit_type, commit_message, commit_cmds)\n\n topic_rights = MockTopicRightsModel.get_by_id(self.id)\n if topic_rights.topic_is_published:\n status = constants.ACTIVITY_STATUS_PUBLIC\n else:\n status = constants.ACTIVITY_STATUS_PRIVATE\n\n topic_models.TopicCommitLogEntryModel(\n id=('rights-%s-%s' % (self.id, self.version)),\n user_id=committer_id,\n topic_id=self.id,\n commit_type=commit_type,\n commit_message=commit_message,\n commit_cmds=commit_cmds,\n version=None,\n post_commit_status=status,\n post_commit_community_owned=False,\n post_commit_is_private=not topic_rights.topic_is_published\n ).put()", "def is_valid_commits(args):\n if args.commits is not None:\n return True\n return False", "def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? 
If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))", "def _generate_commit(\n self, msg: Optional[str] = None, author: Optional[str] = None\n ) -> dict:\n if author:\n mes_author = author\n else:\n mes_author = self._author\n if not msg:\n msg = f\"Commit via python client {__version__}\"\n ci = {\"commit_info\": {\"author\": mes_author, \"message\": msg}}\n return ci", "def commit_msg(self) -> Optional[List[str]]:\n return self._yaml[\"commit\"].get(\"message\")", "def test_blog_manual_commit():", "def check_commit_msg(commitish):\n\n hdr = CommitSubHeader()\n line_list = dump_raw_body(commitish)\n\n if COMMIT_MESSAGE_CHECK and line_list[1] != \"\":\n if line_list[1].find('REF: ') == -1:\n add_error(\"Summary field must have just one line in %s\" % commitish)\n else:\n add_error(\"No empty line after Summary field in %s\" % commitish)\n\n if COMMIT_MESSAGE_CHECK and len(line_list[0]) < 5 or len(line_list[0]) > 78:\n add_error(\"Wrong size (%d) of Summary field in %s\" % (len(line_list[0]), commitish))\n\n while len(line_list) != 0:\n line = line_list.pop(0)\n\n if line.find('REF: ') == 0:\n if hdr.ref == None:\n hdr.ref = 1 # Not None\n elif COMMIT_MESSAGE_CHECK:\n add_error(\"Field 'REF:' must be once in %s\" % commitish)\n continue\n\n if COMMIT_MESSAGE_CHECK and not Commit.rt_header_fields['REF: '].match(line[len('REF: '):]):\n add_error(\"Wrong field 'REF:' in %s\" % commitish)\n else:\n hdr.ref = line[len('REF: '):]\n\n elif line.find('Signed-off-by: ') == 0:\n if hdr.signed == None:\n hdr.signed = 1 # Not None\n elif COMMIT_MESSAGE_CHECK:\n add_error(\"Field 'Signed-off-by:' must be once in %s\" % commitish)\n continue\n\n if COMMIT_MESSAGE_CHECK and not Commit.rt_header_fields['Signed-off-by: '].match(line[len('Signed-off-by: '):]):\n add_error(\"Wrong field 'Signed-off-by:' in %s\" % commitish)\n else:\n hdr.signed = line[len('Signed-off-by: '):]\n\n elif len(line) != 0:\n hdr.desc = 1\n if COMMIT_MESSAGE_CHECK and len(line) > 78:\n add_error(\"Wrong size (%d) of field 'Description' in %s\" % (len(line), commitish))\n\n if COMMIT_MESSAGE_CHECK and hdr.ref == None:\n add_error(\"No field 'REF:' in %s\" % commitish)\n if COMMIT_MESSAGE_CHECK and hdr.desc == None:\n add_error(\"No field 'Description' in %s\" % commitish)\n if COMMIT_MESSAGE_CHECK and hdr.signed == None:\n add_error(\"No field 'Signed-off-by:' in %s\" % 
commitish)\n\n return hdr", "def add_audit(self, entity_name, object_name, operation,\n data, auth_ctx, session):", "def current_commit():\n prepare_metrics(lambda: Metric('robot_commit', get_current_git_sha(), {'region': REGION_NAME}))", "def lint_commit_message(commit):\n success = True\n lines = commit.message.splitlines()\n\n # Check length of summary line.\n summary_line_len = len(lines[0])\n if summary_line_len > COMMIT_MSG_MAX_SUMMARY_LEN:\n error(\n \"The summary line in the commit message is %d characters long; \"\n \"only %d characters are allowed.\" %\n (summary_line_len, COMMIT_MSG_MAX_SUMMARY_LEN), commit)\n success = False\n\n # Check that summary line does not end with a period\n if lines[0].endswith('.'):\n error(\"The summary line must not end with a period.\", commit)\n success = False\n\n # Check that we don't have any fixups.\n if lines[0].startswith('fixup!'):\n error(\"Fixup commits are not allowed. Please resolve by rebasing.\",\n commit)\n success = False\n\n # Try to determine whether we got an area prefix in the commit message:\n summary_line_split = lines[0].split(':')\n summary_line_split_len = len(summary_line_split)\n\n # We didn't get an area prefix, so just make sure the message started with a\n # capital letter.\n if summary_line_split_len == 1:\n if not re.match(r'[A-Z]', lines[0]):\n error(\"The summary line must start with a capital letter.\", commit)\n success = False\n # The user specified an area on which she worked.\n elif summary_line_split_len == 2:\n if not re.match(r'[a-z_A-Z\\-]*(/[a-z_A-Z\\-]+)*', summary_line_split[0]):\n error(\n 'The area specifier is mal-formed. Only letters,'\n 'underscores and hyphens are allowed. Different areas must be'\n 'separated by a slash.', commit)\n success = False\n # Check the second part of the commit message.\n if not summary_line_split[1].startswith(' '):\n error(\"The area must be separated by a single space.\", commit)\n success = False\n if not re.match(r'\\s[A-Z]', summary_line_split[1]):\n error(\n \"The summary line after the colon must start with a capital letter.\",\n commit)\n success = False\n # We do not allow more than one area i.e., colon.\n else:\n error(\"Only one colon is allowed to specify the area of changes.\",\n commit)\n success = False\n\n # Check for an empty line separating the summary line from the long\n # description.\n if len(lines) > 1 and lines[1] != \"\":\n error(\n \"The second line of a commit message must be empty, as it \"\n \"separates the summary from the long description.\", commit)\n success = False\n\n return success", "def test_commit(self):\n # TODO: Test errors while committing and recovery\n pass", "def author_committer_facts(model, date):\n return dict(\n author=model.developer,\n author_date=date,\n committer=model.developer,\n commit_date=date,\n )", "def test_git_ident(self):\r\n git_export_utils.export_to_git(\r\n self.course.id,\r\n 'file://{0}'.format(self.bare_repo_dir),\r\n 'enigma'\r\n )\r\n expect_string = '{0}|{1}\\n'.format(\r\n git_export_utils.GIT_EXPORT_DEFAULT_IDENT['name'],\r\n git_export_utils.GIT_EXPORT_DEFAULT_IDENT['email']\r\n )\r\n cwd = os.path.abspath(git_export_utils.GIT_REPO_EXPORT_DIR / 'test_bare')\r\n git_log = subprocess.check_output(['git', 'log', '-1',\r\n '--format=%an|%ae'], cwd=cwd)\r\n self.assertEqual(expect_string, git_log)\r\n\r\n # Make changes to course so there is something to commit\r\n self.populate_course()\r\n git_export_utils.export_to_git(\r\n self.course.id,\r\n 'file://{0}'.format(self.bare_repo_dir),\r\n 
self.user.username\r\n )\r\n expect_string = '{0}|{1}\\n'.format(\r\n self.user.username,\r\n self.user.email,\r\n )\r\n git_log = subprocess.check_output(\r\n ['git', 'log', '-1', '--format=%an|%ae'], cwd=cwd)\r\n self.assertEqual(expect_string, git_log)", "def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()", "def audit_eol(self):\n\n # Regex's....\n re_commit = re.compile(\"^\\xff(.+)\\xff$\")\n re_filename = re.compile(\"^diff --(cc |git a\\/.+ b\\/)(.+)$\")\n blocked_eol = re.compile(r\"(?:\\r\\n|\\n\\r|\\r)$\")\n\n # Bool to allow special files such as vcards to bypass the check\n eol_allowed = False\n\n\n # Do EOL audit!\n process = get_change_diff( self.repository, [\"-p\"] )\n for line in process.stdout:\n commit_change = re.match( re_commit, line )\n if commit_change:\n commit = commit_change.group(1)\n continue\n\n file_change = re.match( re_filename, line )\n if file_change:\n filename = file_change.group(2)\n eol_violation = False\n eol_allowed = False\n\n # Check if it's an allowed mimetype\n # First - check with the mimetypes system, to see if it can tell\n guessed_type, _ = mimetypes.guess_type(filename)\n if guessed_type in self.ALLOWED_EOL_MIMETYPES:\n eol_allowed = True\n continue\n\n # Second check: by file extension\n # NOTE: This uses the FIRST dot as extension\n splitted_filename = filename.split(os.extsep)\n # Check if there's an extension or not\n # NOTE This assumes that files use dots for extensions only!\n if len(splitted_filename) > 1:\n extension = splitted_filename[1]\n if extension in self.ALLOWED_EOL_EXTENSIONS:\n eol_allowed = True\n\n continue\n\n # Unless they added it, ignore it\n if not line.startswith(\"+\"):\n continue\n\n if re.search( blocked_eol, line ) and not eol_violation:\n # Is this an allowed filename?\n if eol_allowed:\n continue\n\n # Failure has been found... 
handle it\n eol_violation = True\n self.__log_failure(commit, \"End of Line Style (non-Unix): \" + filename);", "def svn_client_commit_info_t_author_get(svn_client_commit_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def test_parse_changesets_emails(self):\n \n self.assertEqual(bitchangesets.parse_changeset(self.changeset), {'timestamp': '2013-07-27 01:56:46', 'parsed_author': 'David Leonard'})", "def commit_hash(self):\n return self._commit_hash", "def test_parse_changesets_no_emails(self):\n \n self.assertEqual(bitchangesets.parse_changeset(self.changeset2), {'timestamp': '2013-07-27 01:56:46', 'parsed_author': 'David Leonard'})", "def lint(self, commit):\n LOG.debug(\"Linting commit %s\", commit.sha or \"[SHA UNKNOWN]\")\n LOG.debug(\"Commit Object\\n\" + str(commit))\n\n # Ensure the Deprecation class has a reference to the config currently being used\n Deprecation.config = self.config\n\n # Apply config rules\n for rule in self.configuration_rules:\n rule.apply(self.config, commit)\n\n # Skip linting if this is a special commit type that is configured to be ignored\n ignore_commit_types = [\"merge\", \"squash\", \"fixup\", \"fixup_amend\", \"revert\"]\n for commit_type in ignore_commit_types:\n if getattr(commit, f\"is_{commit_type}_commit\") and getattr(self.config, f\"ignore_{commit_type}_commits\"):\n return []\n\n violations = []\n # determine violations by applying all rules\n violations.extend(self._apply_line_rules([commit.message.title], commit, self.title_line_rules, 1))\n violations.extend(self._apply_line_rules(commit.message.body, commit, self.body_line_rules, 2))\n violations.extend(self._apply_commit_rules(self.commit_rules, commit))\n\n # Sort violations by line number and rule_id. If there's no line nr specified (=common certain commit rules),\n # we replace None with -1 so that it always get's placed first. Note that we need this to do this to support\n # python 3, as None is not allowed in a list that is being sorted.\n violations.sort(key=lambda v: (-1 if v.line_nr is None else v.line_nr, v.rule_id))\n return violations", "def commit_history(cli):\n result = []\n record = OrderedDict()\n for line in cli.splitlines():\n r = re.search(' ([A-Z][a-z]+(?: ID)?): (.*?) 
+([A-Z][a-z]+): (.*)', line)\n if not r:\n continue\n record[r.group(1)] = r.group(2)\n record[r.group(3)] = r.group(4)\n if r.group(3) == 'Comment':\n result.append(record)\n record = OrderedDict()\n return result", "def test_info(config):\n conventional_commits = ConventionalCommitsCz(config)\n info = conventional_commits.info()\n assert isinstance(info, str)", "def validateMetadata(self, cur, hist):\n raise NotImplementedError(\"missing validateMetadata() method\")", "def _is_commit_sha(commit):\n return len(commit) == 40 and all([\n ch.isdigit() or (ch >= \"a\" and ch <= \"f\")\n for ch in commit.elems()\n ])", "async def changelog(self, ctx: commands.Context):\n status, commits = GitHub().repos.harkonenbade.yutu.commits.get(per_page=10)\n if status == 200:\n await ctx.send(content=\"```Changelog:\\n{}```\".format(\"\\n\".join([\"- {}\".format(c['commit']['message'])\n for c in commits])))\n else:\n await ctx.send(content=\"Error: Cannot reach github\")", "def __create_audit_alerts():\n\n # Create a log-based metric to count all calls to SetIamPolicy:\n metric1_name = \"iam-policy-change\"\n run_command('gcloud logging metrics create {} --description=\"Count of IAM policy changes.\" --project={} --log-filter=\"\\\n resource.type=project AND \\\n protoPayload.serviceName=cloudresourcemanager.googleapis.com AND \\\n protoPayload.methodName=SetIamPolicy\"'.format(metric1_name, PROJECT_ID))\n\n # Create a log-based metric to count all calls to setIamPermissions or storage.objects.update on GCS buckets:\n metric2_name = \"bucket-permission-change\"\n run_command('gcloud logging metrics create {} --description=\"Count of GCS permission changes.\" --project={} --log-filter=\"\\\n resource.type=gcs_bucket AND \\\n protoPayload.serviceName=storage.googleapis.com AND \\\n (protoPayload.methodName=storage.setIamPermissions OR protoPayload.methodName=storage.objects.update)\"'\n .format(metric2_name, PROJECT_ID))\n\n # Create a log-based metric to count unexpected accesses to the data bucket:\n metric3_name = \"unexpected-bucket-access-{}\".format(DATA_BUCKET_ID)\n logFilter = 'resource.type=gcs_bucket AND \\\n logName=projects/{}/logs/cloudaudit.googleapis.com%2Fdata_access AND \\\n protoPayload.resourceName=projects/_/buckets/{} AND \\\n protoPayload.authenticationInfo.principalEmail!=({})'\\\n .format(PROJECT_ID, DATA_BUCKET_ID, WHITELIST_USERS)\n\n run_command('gcloud logging metrics create {} \\\n --description=\\\"Count of unexpected data access to {}.\\\" \\\n --project={} --log-filter=\\\"{}\\\"'.format(metric3_name, DATA_BUCKET_ID, PROJECT_ID, logFilter))\n\n # Create an email notification channel. 
Refer to https://cloud.google.com/monitoring/support/notification-options\n notification_channel_name = __create_notification_channel()\n\n # There is a lag between when log-based metrics are created and when they become available in Stackdriver.\n # 30 seconds should work, but you may have to adjust it.\n time.sleep(30)\n\n # Create an alert based on metric 1:\n __create_alert_policy (\"global\", metric1_name, notification_channel_name, \"IAM Policy Change Alert\",\n \"This policy ensures the designated user/group is notified when IAM policies are altered.\")\n\n # Create an alert based on metric 2:\n __create_alert_policy(\"gcs_bucket\", metric2_name, notification_channel_name, \"Bucket Permission Change Alert\",\n \"This policy ensures the designated user/group is notified when bucket/object permissions are altered.\")\n\n # Create an alert based on metric 3:\n __create_alert_policy (\"gcs_bucket\", metric3_name, notification_channel_name, \"Unexpected Bucket Access Alert\",\n \"This policy ensures the designated user/group is notified when data bucket is \\\n accessed by an unexpected user.\")", "async def on_message(self, message: Message) -> None:\n if str(message.channel.type) != \"text\":\n self.logger.debug(\"Not text channel\")\n return\n\n if str(message.author.id) not in self.allow_list:\n self.logger.debug(\"Not the mama\")\n return\n\n if not message.content.startswith(\"audit!\"):\n self.logger.debug(\"Not the magic words\")\n return\n\n audit_result: Optional[AuditResults] = None\n\n try:\n command = getattr(self, self.COMMAND_CONFIG[message.content.split()[0]])\n audit_result = await command(message)\n\n except (KeyError, AttributeError):\n pass\n\n if audit_result is not None:\n\n output_names = \"\\n\".join(audit_result.authors)\n output_top = f\"Audit: {audit_result.channel} ({audit_result.channel_id})\\n\"\n output_range = f\"Start: {audit_result.start} - End: {audit_result.end}\\n\"\n output_desc = f\"Of {audit_result.counter} messages the unique names are:\\n\"\n output_msg = f\"{output_top}{output_range}{output_desc}```{output_names}```\"\n\n await message.channel.send(output_msg)", "def summary(self, *, branch: str = '', commit: str = '') -> None:\n self.__verify_repo_initialized()\n try:\n ppbuf = summarize.summary(self._env, branch=branch, commit=commit)\n except ValueError:\n if commiting.number_commits_recorded(self._env.refenv) == 0:\n ppbuf = StringIO()\n ppbuf.write(f'No commits have been made in the repository. 
\\n')\n ppbuf.write(f'Please make a commit and try again.')\n else:\n raise\n print(ppbuf.getvalue())\n return None", "def test_commit_message_default(repository: Repository) -> None:\n (repository.path / \"a\").touch()\n\n repository.commit()\n\n head = repository.head.commit\n assert \"\" == head.message", "def test_invalid(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n with pytest.raises(AssertionError):\n key.audit(5, 1, 1, 1)", "def add_commit(self, commit):\n sha1 = commit.hex\n if sha1 in self._commits:\n return self._commits[sha1]\n title, separator, body = commit.message.partition(\"\\n\")\n commit = {\n 'explored': False,\n 'sha1': sha1,\n 'name': GitUtils.abbreviate_sha1(sha1),\n 'describe': GitUtils.describe(sha1),\n 'refs': GitUtils.refs_to(sha1, self.repo()),\n 'author_name': commit.author.name,\n 'author_mail': commit.author.email,\n 'author_time': commit.author.time,\n 'author_offset': commit.author.offset,\n 'committer_name': commit.committer.name,\n 'committer_mail': commit.committer.email,\n 'committer_time': commit.committer.time,\n 'committer_offset': commit.committer.offset,\n # 'message': commit.message,\n 'title': title,\n 'separator': separator,\n 'body': body.lstrip(\"\\n\"),\n }\n self._json['commits'].append(commit)\n self._commits[sha1] = len(self._json['commits']) - 1\n return self._commits[sha1]", "def _set_commit_hash_rules(self, data, gh_link):\n hashes_range = data['hashesRange']\n # If there is no range, it means all commits are vulnerable.\n if not hashes_range:\n data['commitRules'] = '*'\n return data\n gh = GithubUtils()\n # This is needed to get the org and name from the gh link.\n gh_splitted = gh_link.split(\"/\")\n length = len(gh_splitted)\n org = gh_splitted[length - 2]\n name = gh_splitted[length - 1]\n regex_vr = \"[<>=*]+\"\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n rules = \"\"\n for range in hashes_range:\n # Remove any blank spaces.\n range = range.replace(\" \", \"\")\n operands = re.split(regex_vr, range)\n operators = re.split(regex_op, range)\n if len(operators) == 2 and len(operands) == 2:\n # It means there is only 1 condition.\n date = gh._get_commit_date(org, name, self.__format_golang_version(operands[1]))\n if date:\n rules = rules + operators[0] + \"#\" + date + \",\"\n else:\n logger.error(\"No such data present on Github. Contact Snyk.\")\n elif len(operators) == 3 and len(operands) == 3:\n # It means there is a nesting. Ex >x & <y.\n date1 = gh._get_commit_date(org, name, self.__format_golang_version(operands[1]))\n date2 = gh._get_commit_date(org, name, self.__format_golang_version(operands[2]))\n if date1 and date2:\n rules = rules + operators[0] + \"#\" + date1 +\\\n \"&\" + operators[1] + \"#\" + date2 + \",\"\n else:\n logger.error(\"No such data present on Github. Contact Snyk.\")\n else:\n logger.error(\"Incorrect hashesRange data. 
Contact Snyk.\")\n # Remove extra , which is get appended.\n if rules:\n rules = rules[:-1]\n data['commitRules'] = rules\n return data", "def get_commit_title() -> str:\n git_commit_message = [\"git\", \"log\", \"-n\", \"1\", \"--pretty=format:%s\"]\n process = subprocess.run(git_commit_message, stdout=subprocess.PIPE)\n message = process.stdout.decode('utf-8')\n logging.debug(f\"Git commit title = {message}\")\n return message", "def _audit_cli_args(self):\n\n args = [\n \"--operation=audit\",\n \"--operation=status\",\n \"--logtostderr\",\n ]\n\n return args", "async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')", "def omit_invalid_hostname(event, hint):\n if 'exc_info' in hint:\n exc_type, exc_value, tb = hint['exc_info']\n if isinstance(exc_value, DisallowedHost):\n return None\n return event", "def set_owner_name(self, data, **kwargs):\n try:\n git_url = GitURL.parse(data[\"git_url\"])\n except UnicodeError as e:\n raise ValidationError(\"`git_url` contains unsupported characters\") from e\n except ConfigurationError as e:\n raise ValidationError(\"Invalid `git_url`\") from e\n\n if git_url.owner is None:\n raise ValidationError(\"Invalid `git_url`\")\n data[\"owner\"] = git_url.owner\n\n if git_url.name is None:\n raise ValidationError(\"Invalid `git_url`\")\n data[\"name\"] = git_url.name\n data[\"slug\"] = normalize_to_ascii(data[\"name\"])\n\n return data", "def commit(self) -> None:\n pass", "def commit_hash(self, commit_hash):\n\n self._commit_hash = commit_hash", "def on_commit_comment(self, payload):\n pass", "def _format_commits(self, client, repo, commit_list):\n return [\n {\n 'id': c['id'],\n 'repository': repo.name,\n 'author_email': c['author_email'],\n 'author_name': c['author_name'],\n 'message': c['title'],\n 'timestamp': self.format_date(c['created_at']),\n 'patch_set': self._get_patchset(client, repo, c['id'])\n } for c in commit_list\n ]", "def commit(self, commit_url, github_id):\r\n response = self._send_health_txn(\r\n txn_type='commit',\r\n txn_id=github_id,\r\n data=commit_url,\r\n state='new',\r\n url=self._base_url)\r\n\r\n return response", "def is_commit_signature_valid(self, commit: git.Commit) -> bool:\n commit_status = self.get_commit_signature_status(commit)\n if commit_status in VALID_SIGNATURE_STATUSES:\n logger.debug(f\"Commit {commit.hexsha!r} signature is valid.\")\n return True\n\n logger.warning(f\"Commit {commit.hexsha!r} signature is not valid.\")\n return False", "def get_all_commits(index=\"mms\", elasticHost=\"localhost\", excludeCommits=[]):\n query = {\n \"query\":{\n \"match_all\":{}\n },\n \"size\":\"30\"\n }\n res = requests.post(\"http://{}:9200/{}/commit/_search\".format(elasticHost,index), data=json.dumps(query))\n commitObjectList = {}\n for hit in res.json()[\"hits\"][\"hits\"]:\n if(hit['_source']['_elasticId']) not in excludeCommits:\n commitObjectList[hit['_source']['_created']] = hit['_source']['_elasticId']\n return commitObjectList", "def test_get_git_commit(self):\n git_commit = get_git_commit()\n # output format: ['fafdb957049917ede565cebc58b29899f597fb5a', 'Fri Mar 29 11:09:50 2019 -0400']\n self.assertEqual(len(git_commit[0]), 40)\n self.assertEqual(len(git_commit[1].split()), 6)", "def committers_changes(self) -> Iterator[CommitterChange]:\n for committer_change in self._yaml[\"committers\"]:\n # Start ignoring PyLintBear\n match action := CommitterActions(committer_change[\"action\"]):\n case CommitterActions.ADDITION:\n yield 
CommitterChange(\n name=committer_change[\"name\"],\n action=action,\n link=committer_change[\"link\"],\n email=committer_change[\"email\"],\n company=committer_change[\"company\"],\n committer_id=committer_change[\"id\"],\n timezone=committer_change[\"timezone\"],\n )\n case CommitterActions.DELETION:\n yield CommitterChange(\n name=committer_change[\"name\"],\n action=action,\n link=committer_change[\"link\"],\n )\n # Stop ignoring", "def _analyze_author(self, response, frontpage_author):\n author_location = response.get_url().get_domain_path().url_join(\n frontpage_author.group(1))\n\n # Check for anomalies in the location of author.exe\n if frontpage_author.group(1) != '_vti_bin/_vti_aut/author.exe':\n name = 'Customized frontpage configuration'\n\n desc = 'The FPAuthorScriptUrl is at: \"%s\" instead of the default'\\\n ' location: \"/_vti_bin/_vti_adm/author.exe\". This is very'\\\n ' uncommon.'\n desc = desc % author_location\n else:\n name = 'FrontPage FPAuthorScriptUrl'\n\n desc = 'The FPAuthorScriptUrl is at: \"%s\".'\n desc = desc % author_location\n\n i = Info(name, desc, response.id, self.get_name())\n i.set_url(author_location)\n i['FPAuthorScriptUrl'] = author_location\n \n kb.kb.append(self, 'frontpage_version', i)\n om.out.information(i.get_desc())", "def prepare_for_commit(self):", "def _parse_audit_entry(entry):\n try:\n integralstor_action_dict = {\n \"create_alert_notification\": \"Alert notification created.\",\n \"delete_alert_notification\": \"Alert notification deleted.\",\n \"create_audit_notification\": \"Audit notification created.\",\n \"delete_audit_notification\": \"Audit notification deleted.\",\n \"update_system_datetimezone\": \"Updated system date/time/timezone\",\n \"update_manifest\": \"System manifest updated\",\n \"update_ntp_servers\": \"Updated NTP server configuration\",\n \"ntp_sync\": \"Performed manual NTP time sync\",\n 'delete_remote_monitoring_server': 'Removed remote monitoring server',\n 'update_remote_monitoring_server': 'Created/updated remote monitoring server',\n \"factory_defaults_reset\": \"Factory defaults reset\",\n \"delete_certificate\": \"Deleted a SSL certificate\",\n \"edit_aces\": \"Access control entry modified\",\n \"add_aces\": \"Access control entry created\",\n \"delete_ace\": \"Access control entry removed\",\n \"create_dir\": \"Directory created\",\n \"create_self_signed_certificate\": \"Created a self signed SSL certificate\",\n \"upload_certificate\": \"Uploaded a SSL certificate\",\n \"add_zfs_spares\": \"Spare disk(s) added to pool\",\n \"schedule_zfs_snapshot\": \"Snapshot scheduling added/modified\",\n \"remove_zfs_spare\": \"Spare disk removed from pool\",\n \"remove_zfs_quota\": \"Removed ZFS quota\",\n \"set_zfs_quota\": \"Set ZFS quota\",\n \"create_vlan\": \"Created network VLAN\",\n \"remove_vlan\": \"Removed network VLAN\",\n \"modify_local_user_gid\": \"Local user's primary group set\",\n \"modify_local_user_grp_membership\": \"Local user's group membership modified\",\n \"create_local_user\": \"Local user created\",\n \"create_local_group\": \"Local group created\",\n \"delete_local_group\": \"Local group removed\",\n \"delete_local_user\": \"Local user removed\",\n \"change_local_user_password\": \"Local user password modified\",\n \"modify_dir_owner_permissions\": \"Directory ownership/permissions modified\",\n \"modify_dir_sticky_bit\": \"Directory sticky bit modified\",\n \"modify_cifs_share\": \"CIFS share modified\",\n \"delete_cifs_share\": \"CIFS share removed\",\n \"create_cifs_share\": \"CIFS 
share created\",\n \"modify_samba_settings\": \"CIFS authentication settings modified\",\n \"delete_nfs_share\": \"NFS share removed\",\n \"edit_nfs_share\": \"NFS share modified\",\n \"create_nfs_share\": \"NFS share created\",\n \"create_iscsi_target\": \"ISCSI target created\",\n \"delete_iscsi_target\": \"ISCSI target removed\",\n \"create_iscsi_lun\": \"ISCSI LUN created\",\n \"delete_iscsi_lun\": \"ISCSI LUN removed\",\n \"add_iscsi_target_authentication\": \"ISCSI target authentication added\",\n \"remove_iscsi_target_authentication\": \"ISCSI target authentication removed\",\n \"add_iscsi_acl\": \"ISCSI ACL added\",\n \"remove_iscsi_acl\": \"ISCSI ACL removed\",\n \"change_service_status\": \"Service status modified\",\n \"set_interface_state\": \"Network interface state modified\",\n \"edit_interface_address\": \"Network interface address modified\",\n \"create_bond\": \"Network interface bond created\",\n \"remove_bond\": \"Network interface bond removed\",\n \"edit_hostname\": \"System hostname modified\",\n \"set_dns_nameservers\": \"DNS nameservers modified\",\n \"modify_admin_password\": \"Administrator password modified\",\n \"create_zfs_pool\": \"ZFS pool created\",\n \"expand_zfs_pool\": \"ZFS pool expanded\",\n \"import_zfs_pool\": \"ZFS pool imported\",\n \"export_zfs_pool\": \"ZFS pool exported\",\n \"scrub_zfs_pool\": \"ZFS pool scrub initiated\",\n \"delete_zfs_pool\": \"ZFS pool removed\",\n \"edit_zfs_slog\": \"ZFS pool write cache modified\",\n \"remove_zfs_slog\": \"ZFS pool write cache removed\",\n \"edit_zfs_l2arc\": \"ZFS pool read cache modified\",\n \"remove_zfs_l2arc\": \"ZFS pool read cache removed\",\n \"edit_zfs_dataset\": \"ZFS dataset modified\",\n \"delete_zfs_dataset\": \"ZFS dataset removed\",\n \"create_zfs_zvol\": \"ZFS block device volume created\",\n \"delete_zfs_zvol\": \"ZFS block device volume removed\",\n \"create_zfs_dataset\": \"ZFS dataset created\",\n \"create_zfs_snapshot\": \"ZFS snapshot created\",\n \"delete_zfs_snapshot\": \"ZFS snapshot removed\",\n \"rollback_zfs_snapshot\": \"ZFS snapshot rolled back\",\n \"replace_disk_offline_disk\": \"Disk replacement - old disk offlined\",\n \"replace_disk_replaced_disk\": \"Disk replacement - disk replaced successfully\",\n \"rename_zfs_snapshot\": \"ZFS snapshot renamed\",\n \"create_rsync_share\": \"Created new RSync share \",\n \"edit_rsync_share\": \"Edited RSync share \",\n \"delete_rsync_share\": \"Deleted RSync share \",\n \"remove_background_task\": \"Removed background task \",\n \"create_remote_replication\": \"Created remote replication \",\n \"modify_remote_replication\": \"Modified remote replication \",\n \"remove_remote_replication\": \"Removed remote replication \",\n \"task_fail\": \"Task failed \",\n \"task_start\": \"Task started \",\n \"task_complete\": \"Task completed \",\n \"remove_ssh_user_key\": \"Removed ssh user key \",\n \"upload_ssh_user_key\": \"Uploaded ssh user key \",\n \"remove_ssh_host_key\": \"Removed ssh host key \",\n \"upload_ssh_host_key\": \"Uploaded ssh host key \",\n }\n\n action_dict = integralstor_action_dict\n\n d = {}\n\n d['time'], err = datetime_utils.convert_from_epoch(\n entry['audit_time'], return_format='str', str_format='%c', to='local')\n if err:\n raise Exception(err)\n\n d[\"ip\"] = entry['source_ip']\n d[\"username\"] = entry['username']\n action = entry['audit_code']\n if action in action_dict:\n d[\"action\"] = action_dict[action]\n else:\n d[\"action\"] = \"Unknown\"\n d[\"action_str\"] = entry['audit_str']\n d[\"audit_id\"] = 
entry['audit_id']\n\n except Exception, e:\n return None, 'Error decoding audit entry: %s' % (e)\n else:\n return d, None", "def _get_git_hash(self):\n try:\n with open(os.path.join(self._base_dir, '.git', 'HEAD'), 'r') as head_file:\n ref = head_file.read().strip()\n if ref[:5] == 'ref: ':\n with open(os.path.join(self._base_dir, '.git', ref[5:]), 'r') as commit_file:\n return commit_file.read().strip()\n else:\n return ref[5:]\n except Exception as err:\n self._logger.warning('Couldnt read the git commit hash: %s :: %s',\n err.__class__.__name__, err)\n return 'UNKNOWN'", "def svn_info_t_last_changed_author_set(svn_info_t_self, char_last_changed_author): # real signature unknown; restored from __doc__\n pass", "def _add_commit_sha1_to_lists(self):\n sha1_num_commits = \"-\" + self.commit_number\n sha1_args = [sha1_num_commits, \"--pretty=%h\"]\n # git log -[N] --pretty=%h ===> newline delimited list of SHA1 x N commit\n sha1_string = self.git.log(sha1_args)\n # do not modify to os.linesep, Win fails tests with this change\n self.commit_sha1_list = sha1_string.split(\"\\n\")", "def check_pr_details(self, pr_number):\n pr = self.repo.get_pull(pr_number)\n email_pattern = re.compile(r'^.*@suse\\.(com|cz|de)$')\n\n for commit in pr.get_commits():\n sha = commit.sha\n author = commit.author\n title = message = commit.commit.message\n # Not sure why we need to use the nested commit for the email\n email = commit.commit.author.email\n user_id = f'{author.login}({email})'\n body = ''\n\n # This could be probably smarter but commit contains something like the following\n # message=\"$commit_title\\n\\n$long_commit_message\" and as such maybe we can split it and\n # check for the following limits: title max 50 chars, body max 72 chars per line and at\n # least as long as the commit title to avoid commit message bodies full of whitespaces\n try:\n title, body = message.split('\\n\\n', 1)\n except ValueError:\n print('No commit body was detected')\n\n print(f'Checking commit \"{sha}: {title}\"')\n\n if not email_pattern.fullmatch(email):\n print(f'Checking if {user_id} is part of the SUSE organization...')\n\n if self.org.has_in_members(commit.author):\n print(f'{user_id} is part of SUSE organization but a SUSE e-mail address was not used for commit: {sha}')\n sys.exit(1)\n\n # replace case-insensitive \"(bsc#)\" (or []) and surrounding spaces\n # with a single space, then prune leading/trailing spaces\n title = re.sub(r'\\s*[([]\\s*(?i:bsc)#\\d+\\s*[)\\]]\\s*', ' ', title).strip()\n if len(title) > 50:\n print('Commit message title should be less than 50 characters (excluding the bsc# reference)')\n sys.exit(1)\n\n # No body detected. Nothing else to do here.\n if not body:\n continue\n\n if len(body) < len(title):\n print('Commit message body is too short')\n sys.exit(1)\n\n # strip multi-line '```code```' blocks & lines starting w\\ `code`\n code_pattern = re.compile(\n r'''\n ((?m:^)\\s*```) # multi-line beginning, 0-more whitespace, ```\n (?s:.*?) 
# non-greedy, zero or more chars, including \\n\n \\1 # whatever matched at the beginning\n | # or...\n (?m:^)\\s*` # start of line, optional whitespace, backtick\n [^`]+ # oneor more non-backtick chars\n `\\s*(?m:$) # and a backtick at the end of the line\n ''',\n re.VERBOSE\n )\n for body_line in re.sub(code_pattern, '', body).splitlines():\n if len(body_line) > 72:\n print('Each line in the commit body should be less than 72 characters')\n sys.exit(1)\n\n print(f'PR-{pr_number} commits verified.')", "def commit(self, revision: Dict, diff: Dict):\n self.repo.git.add('-A')\n diff.setdefault('authorName', 'unknown')\n diff.setdefault('authorEmail', 'unknown')\n author = git.Actor(name=diff['authorName'], email=diff['authorEmail'])\n message = (f\"{revision['title']}\\n\\n\"\n f\"Automated commit created by applying diff {self.diff_id}\\n\"\n f\"\\n\"\n f\"Phabricator-ID: {self.phid}\\n\"\n f\"Review-ID: {diff_to_str(revision['id'])}\\n\")\n self.repo.index.commit(message=message, author=author)", "def test_parse_test(self, sha, data):\n self.instance = Commit(sha)\n self.instance.load()\n\n msg = 'Test failed for commit with sha %s' % sha\n\n self.assertEqual(data['author'], self.instance.author.name, msg)\n self.assertEqual(data['commit'], self.instance.commit.name, msg)\n\n self.assertEqual(dp.parse(data['authorDate']), self.instance.author_date, msg)\n self.assertEqual(dp.parse(data['commitDate']), self.instance.commit_date, msg)\n\n self.assertEqual(data['title'], self.instance.title, msg)\n self.assertEqual(data['msg'], self.instance.message, msg)\n\n self.assertEqual(data['parents'], list(map(lambda x: x.sha, self.instance.parents)), msg)\n\n self.assertEqual(data['numFiles'], len(self.instance.changes().data))", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def maybe_commit(job):", "def get_commit_message():\n return shell_output('git log HEAD -1 --pretty=%B')", "def _warn_about_git_filters(files):\n repository = project_context.repository\n\n src_attrs = []\n dst_attrs = []\n\n for path, attrs in repository.get_attributes(*files).items():\n src = Path(path)\n dst = files[src].relative_to(project_context.path)\n src = src.relative_to(project_context.path)\n attrs_text = \"\"\n for name, value in attrs.items():\n if value == \"unset\":\n attrs_text += f\" -{name}\"\n elif value == \"set\":\n attrs_text += f\" {name}\"\n else:\n attrs_text += f\" {name}={value}\"\n\n src_attrs.append(f\"{str(src)}{attrs_text}\")\n dst_attrs.append(f\"{str(dst)}{attrs_text}\")\n\n if src_attrs:\n src_attrs_str = \"\\n\\t\".join(src_attrs)\n dst_attrs_str = \"\\n\\t\".join(dst_attrs)\n communication.warn(\n f\"There are custom git attributes for the following files:\\n\\t{src_attrs_str}\\n\"\n f\"You need to edit '.gitattributes' and add the following:\\n\\t{dst_attrs_str}\"\n )", "def meta():\n\n meta_version = lain_yaml(ignore_prepare=True).repo_meta_version()\n if meta_version is None:\n error(\"please git commit.\")\n else:\n info(\"meta version : %s\" % lain_yaml(ignore_prepare=True).repo_meta_version())", "def parse_git_log(cls, repo_path, commit=None, pkgs=False, verbosity=-1):\n cmd = shlex.split(cls._git_cmd)\n # custom git log format, see the \"PRETTY FORMATS\" section of the git\n # log man page for details\n format_lines = [\n '# BEGIN COMMIT',\n '%h', # abbreviated commit hash\n '%cd', # commit date\n '%an <%ae>', # Author Name <[email protected]>\n '%cn <%ce>', # Committer Name <[email 
protected]>\n '%B', # commit message\n '# END MESSAGE BODY',\n ]\n format_str = '%n'.join(format_lines)\n cmd.append(f'--pretty=tformat:{format_str}')\n\n if commit:\n if '..' in commit:\n cmd.append(commit)\n else:\n cmd.append(f'{commit}..origin/HEAD')\n else:\n cmd.append('origin/HEAD')\n\n git_log = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=repo_path)\n line = git_log.stdout.readline().decode().strip()\n if git_log.poll():\n error = git_log.stderr.read().decode().strip()\n logger.warning('skipping git checks: %s', error)\n return\n\n count = 1\n with base.ProgressManager(verbosity=verbosity) as progress:\n while line:\n hash = git_log.stdout.readline().decode().strip()\n commit_date = git_log.stdout.readline().decode().strip()\n author = git_log.stdout.readline().decode('utf-8', 'replace').strip()\n committer = git_log.stdout.readline().decode('utf-8', 'replace').strip()\n\n message = []\n while True:\n line = git_log.stdout.readline().decode('utf-8', 'replace').strip('\\n')\n if line == '# END MESSAGE BODY':\n # drop trailing newline if it exists\n if not message[-1]:\n message.pop()\n break\n message.append(line)\n\n # update progress output\n progress(f'{hash} commit #{count}, {commit_date}')\n count += 1\n\n commit = GitCommit(hash, commit_date, author, committer, message)\n if not pkgs:\n yield commit\n\n # file changes\n while True:\n line = git_log.stdout.readline().decode()\n if line == '# BEGIN COMMIT\\n' or not line:\n break\n if pkgs:\n parsed = cls._parse_file_line(line.strip())\n if parsed is not None:\n atom, status = parsed\n yield GitPkgChange(atom, status, commit)", "def audit(self, message):\n channel = self.config.get('AUDIT_CHANNEL', False)\n log_file = self.config.get('AUDIT_FILE', False)\n if channel: outputs.append([channel, message])\n if log_file:\n with open(log_file, 'a') as f: f.write(message)\n logging.warning('AUDIT: ' + message)", "def add_committer(self, commiter_change: CommitterChange) -> None:\n self._info[\"committers\"].append(\n {\n key: SingleQuotedScalarString(value)\n for key, value in commiter_change.addition_change.items()\n }\n )", "def commit(self):\n # PEP 249\n pass", "def audit(self, database=None):\n listOfErrors = []\n listOfWarnings = []\n if database is None:\n raise excep.biogemeError(\n 'The database must be provided to audit the variable.'\n )\n if self.name not in database.data.columns:\n theError = f'Variable {self.name} not found in the database.'\n listOfErrors.append(theError)\n return listOfErrors, listOfWarnings", "def load_authors():\n\n ret = {}\n for token in util.git('log', '--format=%aE:::%aN').split('\\n'):\n email, name = token.split(':::')\n ret[email] = name\n return ret", "def deal_lines(self, lines, conf):\n if lines == ['']:\n print \"NO new %s commit!\" % conf\n else:\n for line in lines:\n if re.search('\\d+ files? 
changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n parts = line.split(' ', 2)\n commit_id = parts[0]\n self.current_commit = commit_id\n stamp = int(parts[1])\n ti = datetime.datetime.fromtimestamp(float(stamp))\n s_time = datetime.datetime.fromtimestamp(float(0))\n if self.start_date == s_time:\n self.start_date = ti\n elif self.start_date > ti:\n self.start_date = ti\n author, mail = parts[2].split('<', 1)\n message = mail.split('> ', 1)[1]\n mail = mail.split('>', 1)[0]\n if re.search(': ', message) is not None:\n messagetype = message.split(': ', 1)[0]\n if messagetype not in CLASSIFICATION:\n messagetype = 'OTR'\n else:\n messagetype = 'OTR'\n if commit_id not in self.commit_dictionary:\n self.commit_dictionary[commit_id]\\\n = [commit_id, mail,\n stamp, messagetype,\n messagetype, 0, 0, 0, 0]\n # [files, inserted, deleted, total_lines]\n if mail not in self.author_dictionary:\n self.author_dictionary[mail] = [author,\n mail, 0, 0,\n 0, 0, 1,\n stamp]\n # [files,inserted,deleted,total_lines,commit,stamp]\n else:\n self.author_dictionary[mail][6] += 1\n if stamp > self.author_dictionary[mail][7]:\n self.author_dictionary[mail][7] = stamp\n self.total_patches += 1\n except:\n print 'Warning: unexpected line \"%s\"' % line\n else:\n if conf == 'no_merges':\n try:\n commit_id = self.current_commit\n numbers = self.getstatsummarycounts(line)\n if len(numbers) == 3:\n (files, inserted, deleted) = \\\n map(lambda el: int(el), numbers)\n total_lines = inserted - deleted\n self.commit_dictionary[commit_id][5] = files\n self.commit_dictionary[commit_id][6] = inserted\n self.commit_dictionary[commit_id][7] = deleted\n self.commit_dictionary[commit_id][8] = total_lines\n self.author_dictionary[mail][2] += files\n self.author_dictionary[mail][3] += inserted\n self.author_dictionary[mail][4] += deleted\n self.author_dictionary[mail][5] += total_lines\n self.total_lines_inserted += inserted\n self.total_lines_deleted += deleted\n self.total_lines += total_lines\n self.current_commit = None\n except:\n print 'Warning: unexpected line \"%s\"' % line", "def dump_commit_diff(commit):\n\n for file in commit:\n if file[4] == \"\" or \".\" not in file[4]:\n sys.stdout.flush()\n print((\"Index: \" + file[3] + \" deleted\\r\"))\n sys.stdout.flush()\n else:\n subprocess.call([\n \"cvs\",\n \"-d\",\n file[8],\n \"rdiff\",\n \"-u\",\n \"-r\",\n PostsaiCommitViewer.calculate_previous_cvs_revision(file[4]),\n \"-r\",\n file[4],\n file[3]])", "def get_commit_stats(self):\n return self.commit_stats" ]
[ "0.6614517", "0.5485047", "0.5467171", "0.54115015", "0.5338054", "0.5191764", "0.5172768", "0.5139828", "0.5138755", "0.5125162", "0.50731736", "0.5002091", "0.49640554", "0.49508968", "0.49492618", "0.49048898", "0.49002182", "0.4897812", "0.4882828", "0.4845588", "0.48252293", "0.48036435", "0.4801142", "0.47736818", "0.47500736", "0.4743845", "0.4734618", "0.4719037", "0.4700205", "0.46714467", "0.46611005", "0.4651414", "0.4650502", "0.46406245", "0.46322665", "0.4630139", "0.4627179", "0.46221608", "0.4582499", "0.4582135", "0.45813206", "0.4559628", "0.45554274", "0.4543507", "0.45387906", "0.45324", "0.45259455", "0.45255405", "0.45242912", "0.45098326", "0.45083493", "0.45049077", "0.45045936", "0.4499848", "0.4493963", "0.4490183", "0.4476797", "0.4474233", "0.44725135", "0.44672537", "0.44553974", "0.44519627", "0.4449658", "0.4446525", "0.44416475", "0.44397223", "0.44158456", "0.44045708", "0.4387253", "0.43861324", "0.43808338", "0.43777755", "0.43768188", "0.43760923", "0.4363423", "0.4362265", "0.43600547", "0.43582472", "0.43577674", "0.43543133", "0.43535262", "0.43522516", "0.434712", "0.434712", "0.434712", "0.434712", "0.434712", "0.43464652", "0.4332764", "0.43300062", "0.43280396", "0.43263397", "0.43257886", "0.43167666", "0.43139467", "0.43121693", "0.4304245", "0.43028027", "0.43026847", "0.42974553" ]
0.7368635
0
Helper function to construct an address header for emails as Python stuffs it up
def address_header(self, name, email):
    fixed_name = Header( name ).encode()
    return unicode("{0} <{1}>").format(fixed_name, email)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode_rfc2822_address_header(header_text):\n def encode_addr(addr):\n name, email = addr\n # If s is a <text string>, then charset is a hint specifying the\n # character set of the characters in the string. The Unicode string\n # will be encoded using the following charsets in order: us-ascii,\n # the charset hint, utf-8. The first character set to not provoke a\n # UnicodeError is used.\n # -> always pass a text string to Header\n\n # also Header.__str__ in Python 3 \"Returns an approximation of the\n # Header as a string, using an unlimited line length.\", the old one\n # was \"A synonym for Header.encode().\" so call encode() directly?\n name = Header(pycompat.to_text(name)).encode()\n # if the from does not follow the (name <addr>),* convention, we might\n # try to encode meaningless strings as address, as getaddresses is naive\n # note it would also fail on real addresses with non-ascii characters\n try:\n return formataddr((name, email))\n except UnicodeEncodeError:\n _logger.warning(_('Failed to encode the address %s\\n'\n 'from mail header:\\n%s') % (addr, header_text))\n return \"\"\n\n addresses = getaddresses([pycompat.to_text(ustr(header_text))])\n return COMMASPACE.join(a for a in (encode_addr(addr) for addr in addresses) if a)", "def add_header(self, header, value):\n if not (header and value):\n raise ValueError('Header not provided!')\n if header.lower() == 'date':\n return False\n recipients_headers = ['to', 'cc', 'bcc']\n if header.lower() in recipients_headers or header.lower() == 'from':\n if not isinstance(value, list):\n value = [value]\n header_value = []\n for addr in value:\n # For each address in the recipients headers\n # Do the Header Object\n # PY3 works fine with Header(values, charset='utf-8')\n # PY2:\n # - Does not escape correctly the unicode values\n # - Must encode the display name as a HEADER\n # so the item is encoded properly\n # - The encoded display name and the address are joined\n # into the Header of the email\n mail_addr = address.parse(addr)\n display_name = Header(\n mail_addr.display_name, charset='utf-8').encode()\n if display_name:\n # decode_header method in PY2 does not look for closed items\n # so a ' ' separator is required between items of a Header\n if PY2:\n base_addr = '{} <{}>'\n else:\n base_addr = '{}<{}>'\n header_value.append(\n base_addr.format(\n display_name,\n mail_addr.address\n ).strip()\n )\n else:\n header_value.append(mail_addr.address)\n header_value = ','.join(header_value)\n else:\n header_value = Header(value, charset='utf-8').encode()\n # Get correct header name or add the one provided if custom header key\n header = Email.fix_header_name(header) or header\n if header.lower() == 'bcc':\n result = []\n for part in decode_header(header_value):\n if part[1]:\n encoded = part[0].decode(part[1])\n elif isinstance(part[0], bytes):\n encoded = part[0].decode('utf-8')\n else:\n encoded = part[0]\n result.append(encoded.strip())\n header_value = ' '.join(result)\n self.bccs = header_value\n else:\n self.email[header] = header_value\n return header_value", "def header_email(strg):\n\taddr = email.utils.parseaddr(strg)\n\tif not addr[1]:\n\t\traise EmailMissed(strg)\n\treturn addr[1]", "def formataddr( pair, charset=None ):\n name, address = pair\n name = name and name.strip()\n address = address and address.strip()\n\n if not name:\n return address\n\n if _is8bitstring( name ):\n header = Header( '\"%s\"' % name, charset )\n header.append( ' <%s>' % address, '8bit' )\n return header\n\n quotes = ''\n if 
specialsre.search( name ):\n quotes = '\"'\n name = escapesre.sub( r'\\\\\\g<0>', name )\n\n return '%s%s%s <%s>' % ( quotes, name, quotes, address )", "def format_address(value):\n if type(value) in (tuple, list):\n return ', '.join([format_address(v) for v in value])\n name, addr = parseaddr(value)\n return formataddr((encode_header(name), addr.encode('ascii')))", "def build_address(record):\n pass", "def format_header(self, text: str, anchor: Optional[str] = None) -> str:", "def convert_address(self, addr_obj):\n return addr_obj.mailbox.decode() + '@' + addr_obj.host.decode()", "def format_address(**args):\n #Begin with the organisation and PO Box number, if applicable.\n address = ''.join([args[entry] + '\\n' \n for entry in ['organisation', 'PO box']\n if args.get(entry)])\n #Format building name/number components.\n address += format_building_components(*[args.get(x) for x in \n ['sub-building name', \n 'building name', \n 'building number',\n 'concatenation indicator']])\n #Add thoroughfare (if present), locality/town and postcode.\n address += ''.join([args[entry] + '\\n' \n for entry in ['dependent thoroughfare', \n 'thoroughfare',\n 'double dependent locality',\n 'dependent locality',\n 'town',\n 'postcode']\n if args.get(entry)])\n return address.strip()", "def header_format(header, value, form = DEFAULT_FORMAT):\n\tif header in HEADER_ADDRESS_FIELDS:\n\t\treturn header_email(value)\n\telif header == \"Date\":\n\t\tparsed = email.utils.parsedate(value)\n\t\tif parsed:\n\t\t\treturn time.strftime(form, parsed)\n\t\treturn \"\"\n\tif header == \"Message-ID\":\n\t\treturn email.utils.unquote(value)\n\treturn value[:DEFAULT_MAXLEN]", "def headers_add_host(headers, address):\n\n headers.setdefault('Host', address)\n\n return headers", "def get_email_details(header: str) -> dict:\n # this is one way to solve the exercise\n # result_keys = [\"from\", \"to\", \"subject\", \"date\"]\n # search_strings = [\n # r\"From\\:\\s(.*)\",\n # r\"To\\:\\s(.*)\",\n # r\"Subject\\:\\s(.*)\",\n # r\"Date\\:\\s(.*)\\s[+-]\",\n # ]\n # result_values = [re.search(s, EMAIL_HEADER).group(1) for s in search_strings]\n # print(dict(zip(result_keys, result_values)))\n\n # or we could use groupdict as suggested\n m = re.search(\n r\"From\\:\\s(?P<from>.*)\\n.*To\\:\\s(?P<to>.*)\\n.*Subject\\:\\s(?P<subject>.+?)\\n.*Date\\:\\s(?P<date>.*)\\s[+-]\",\n header,\n re.MULTILINE | re.DOTALL,\n )\n return m.groupdict() if m else None", "def _get_address(self, address1, address2):\n return f'{address1}\\n{address2}' if address2 else address1", "def other_mail_address(self):\n return (self.mail_address_2 + ' ' + \n self.mail_address_3 + ' ' +\n self.mail_address_4)", "def header_values(header, mail):\n\tif header not in mail.keys():\n\t\traise HeaderMissed(header)\n\tvalues = [header_decode(mail[header])]\n\tif header in HEADER_ADDRESS_FIELDS:\n\t\treturn [email.utils.formataddr(x) for x in email.utils.getaddresses(values)]\n\treturn values", "def get_contact_email():\n from shotglass2.shotglass import get_site_config\n \n site_config = get_site_config()\n \n to = None\n to_name = None\n to_addr = None\n \n \n rec = Pref(g.db).get(\"Contact Name\",user_name=site_config.get(\"HOST_NAME\"),default=site_config.get(\"CONTACT_NAME\",site_config.get(\"MAIL_DEFAULT_SENDER\",\"Site Contact\")))\n if rec:\n to_name = rec.value\n \n if site_config['TESTING']:\n rec = Pref(g.db).select_one(where=\"name='Contact Email Address' and user_name='test'\")\n else:\n rec = Pref(g.db).get(\"Contact Email 
Address\",user_name=site_config.get(\"HOST_NAME\"),\n default=site_config.get(\"CONTACT_EMAIL_ADDR\",\n site_config.get(\"MAIL_DEFAULT_ADDR\",\"info@{}\".format(site_config.get(\"HOST_NAME\",\"example.com\")))))\n if rec:\n to_addr = rec.value\n # split the addresses into a list if there are commas\n temp_addr_list = to_addr.split(',')\n if len(temp_addr_list) > 1:\n to = []\n for index, val in enumerate(temp_addr_list):\n if index == 0:\n to.append((to_name,val,))\n else:\n to.append((None,val,)) \n else:\n to = (to_name,to_addr,)\n \n return to", "def generate_email_address(self):\n return \"%s.%s@%s\" % (uuid.uuid4(), self.mailbox, \"mailosaur.io\")", "def get_address_string(self):\n output = ''\n if self.address_line_1:\n output += '{}'.format(self.address_line_1)\n if self.address_line_2:\n output += ', {}'.format(self.address_line_2)\n if self.city:\n output += ', {}'.format(self.city)\n if self.state:\n output += ', {}'.format(self.state)\n if self.zipcode:\n output += ' {}'.format(self.zipcode)\n return output", "def rfc822_escape(header):\n lines = header.split('\\n')\n sep = '\\n' + 8 * ' '\n return sep.join(lines)", "def get_email_details(header: str) -> dict:\r\n try:\r\n m = re.match(\r\n r\"\"\"\r\n ([\\w\\W]* # remove lines \r\n (\r\n ^Date: \\s*(?P<date>[\\w\\W]{25}) # obtain date (\"date\")\r\n |^From: \\s*(?P<from>[\\w\\W]*?$) # obtain sender (\"from\")\r\n |^To: \\s*(?P<to>[\\w\\W]*?$) # obtain receiver (\"to\")\r\n |^Subject: \\s*(?P<subject>[\\w\\W]*?$) # obtain subject (\"subject\")\r\n )){4}\r\n \"\"\",\r\n header,\r\n re.VERBOSE | re.MULTILINE,\r\n )\r\n\r\n return m.groupdict()\r\n\r\n except:\r\n return None", "def email_f(x: Text) -> Tuple[Text, Text]:\n return \"uri\", \"email://{}\".format(x.lower())", "def header(self, header, default=None):\n result = []\n header_value = self.email.get(header, default)\n if header_value:\n for part in decode_header(header_value):\n if part[1]:\n encoded = part[0].decode(part[1])\n elif isinstance(part[0], bytes):\n encoded = part[0].decode('utf-8')\n else:\n encoded = part[0]\n result.append(encoded.strip())\n header_value = ' '.join(result)\n\n return header_value", "def BuildHeaderString (text):\r\n\r\n return t.BuildHeaderString (text)", "def street_address(self):\n\t\tif self.address2:\n\t\t\treturn '{}, {}'.format(self.address, self.address2)\n\t\treturn self.address", "def address_line_1(self):\n return \"{} {} {}\".format(\n self.fake.randomize_nb_elements(1000),\n self.fake.last_name(),\n self.fake.random_element(elements=STREET_SUFFIX)\n )", "def __addheader(self, msg, headername, headervalue):\n if self.__contains_nonascii_characters(headervalue):\n h = Header(headervalue, 'utf-8')\n msg[headername] = h\n else:\n msg[headername] = headervalue\n return msg", "def get_address(self):\n\n return \"{}\\n{}\\n{},\\n{},\\n{}\".format(\n self.address_line_1, self.city, self.state, self.postal_code, self.country\n )", "def generateSMSEmail(profile):\n if profile['carrier'] is None or not profile['phone_number']:\n return None\n\n return str(profile['phone_number']) + \"@\" + profile['carrier']", "def _get_source_address(course_id, course_title):\r\n course_title_no_quotes = re.sub(r'\"', '', course_title)\r\n\r\n # For the email address, get the course. 
Then make sure that it can be used\r\n # in an email address, by substituting a '_' anywhere a non-(ascii, period, or dash)\r\n # character appears.\r\n from_addr = u'\"{0}\" Course Staff <{1}-{2}>'.format(\r\n course_title_no_quotes,\r\n re.sub(r\"[^\\w.-]\", '_', course_id.course),\r\n settings.BULK_EMAIL_DEFAULT_FROM_EMAIL\r\n )\r\n return from_addr", "def decode_email_address(address, charset=\"utf8\"):\r\n name = decode_email_header(address[0])\r\n addr = address[1]\r\n addr = \"<\" + addr + \">\"\r\n if not name:\r\n return addr\r\n return name + \" \" + addr", "def format_address(line1, line2, city, state, zipcode):\n\t\n\tstreetlines = line1\n\tcityline = city\n\t\n\tif len(streetlines) > 0 and len(line2) > 0:\n\t\tstreetlines += \"\\n\"\n\t\n\tif len(cityline) > 0 and len(state) > 0:\n\t\tcityline += \", \"\n\t\n\tstreetlines += line2\n\tcityline += state\n\t\n\treturn \"\\n\".join([streetlines, cityline, zipcode])", "def generate_header(value, params):\n parts = [quote(value)]\n for key in params:\n parts.append('%s=\"%s\"' % (key, quote(params[key])))\n return '; '.join(parts)", "def header( self ):\n\t\treturn '; '.join( [ '='.join(i) for i in self.items() ] )", "def get_contact_info(self):\n return f\"Contact {self} at {self.email}\"", "def concat_address_full(**kwargs):\r\n result = \"{concat_address} {city_name}, {state_code}\".format(**kwargs)\r\n if kwargs[\"five_digit_zip_code\"]:\r\n result += \" {five_digit_zip_code}\".format(**kwargs)\r\n if kwargs[\"four_digit_zip_code\"]:\r\n result += \"-{four_digit_zip_code}\".format(**kwargs)\r\n return result", "def add_headers():\n response.set_header('X-Contact', '[email protected]')", "def format_single_address(address: Address | str) -> str:\n address = coerce_address(address)\n name = address.display_name\n if not name:\n return address.addr_spec\n\n if not needs_qp_encode(name):\n if specials_regex.search(name):\n # simple quoting works here, since we disallow\n # backslash escaping double quotes.\n name = f'\"{name}\"'\n return f'{name} <{address.addr_spec}>'\n\n name = qp_encode_display_name(name)\n return f'{name} <{address.addr_spec}>'", "def address():\n # We start with generating the street name. For this we choose\n # between the most common prefixes and our own prefixes\n prefix = dice.randint(1, 100)\n if prefix <= 10: # 10%\n prefix = \"Haupt\"\n elif prefix <= 18: # 8%\n prefix = \"Schul\"\n elif prefix <= 25: # 7%\n prefix = \"Garten\"\n elif prefix <= 32: # 7%\n prefix = \"Dorf\"\n elif prefix <= 39: # 7%\n prefix = \"Bahnhof\"\n elif prefix <= 46: # 7%\n prefix = \"Wiesen\"\n elif prefix <= 52: # 6%\n prefix = \"Berg\"\n elif prefix <= 56: # 4%\n prefix = \"Kirch\"\n elif prefix <= 60: # 4%\n prefix = \"Wald\"\n elif prefix <= 64: # 4%\n prefix = \"Ring\"\n else:\n prefix = dice.choice(names.prefix)\n\n # Now we can add the suffix\n suffix = dice.randint(1, 100)\n if suffix <= 78:\n suffix = \"straße\"\n elif suffix <= 96:\n suffix = \"weg\"\n elif suffix <= 98:\n suffix = \"allee\"\n elif suffix == 99:\n suffix = \"ring\"\n elif suffix == 100:\n suffix = \"platz\"\n\n # When we have a city name as prefix, we need to capitalize the\n # suffix since it will be two words\n if prefix[-1] == \" \":\n suffix = suffix.capitalize()\n\n # Now we can add them together\n street = prefix + suffix\n\n # We need a house number as well. In Germany most numbers have\n # between one and four digits, so we will use this as base. 
Lower\n # numbers are more common, so we'll give it a 10% probability of\n # using 3 digits and 1% of using 4 digits\n digits = dice.randint(1, 100)\n if digits == 100:\n house_number = str(dice.randint(1000, 9999))\n elif digits >= 90:\n house_number = str(dice.randint(100, 999))\n else:\n house_number = str(dice.randint(1, 99))\n address_full = street + \" \" + house_number\n return address_full", "def _header_string( self, title='title' ): \n return_str = ''\n return_str += '{}\\n\\n'.format( title )\n return_str += '{} atoms\\n'.format( len(self.atoms) )\n if len(self.bonds) != 0:\n return_str += '{} bonds\\n\\n'.format( len(self.bonds) )\n return_str += '{} atom types\\n'.format( len(self.atom_types ) )\n if len(self.bond_types) != 0:\n return_str += '{} bond types\\n\\n'.format( len(self.bond_types ) )\n return_str += '\\n'\n return return_str", "def header():\n record = cfg.get_current_site_record()\n header = \"{0} ({1})\".format(record['url'], record['id'])\n size = len(header) + 2 + 2\n return \"\"\"{sep}\n# {header} #\n{sep}\"\"\".format(sep='#'*size, header=header)", "def headerFA(block_size,extended=True):\n if(extended):\n header =[\"Address\",\"Tag\",\"Real Address\"]\n else:\n header =[\"Address\"]\n for x in range(0,block_size):\n header.append(\"W%i\"%(x))\n header.append(\"Result\")\n return header", "def _extract_email_address(self, from_email):\n res = email.utils.parseaddr(from_email)\n if len(res[1]) != 0:\n return res[1].lower()\n else:\n print(res, from_email)\n return \"\"", "def street_address():\r\n\r\n return _random.choice(\r\n [\r\n '%d-%d %s' % (\r\n _random.randrange(999),\r\n _random.randrange(999),\r\n street_name()\r\n ),\r\n '%d %s' % (\r\n _random.randrange(999),\r\n street_name()\r\n ),\r\n '%s %d, %s' % (\r\n 'P.O. 
Box',\r\n _random.randrange(999),\r\n street_name()\r\n )\r\n ]\r\n )", "def email(self):\n return \"{}.{}@company.com\".format(self.first, self.last)", "def build_canonical_ext_headers_str(headers: dict) -> str:\n\n # Return ``''`` instead of ``None`` so that it can be properly concatenated\n if not headers:\n return ''\n\n if len(headers) != 1:\n raise WaterButlerError('The limited provider only supports one canonical extension header.')\n\n headers_str = ''\n for key, value in headers.items():\n headers_str += '{}:{}\\n'.format(key.strip().lower(), value.strip())\n\n return headers_str", "def header_string(headers_dict):\r\n header_list = []\r\n\r\n if 'Content-Type' in headers_dict:\r\n header_list.append(headers_dict['Content-Type'] + \"\\n\")\r\n if 'Date' in headers_dict:\r\n header_list.append(headers_dict['Date'] + \"\\n\")\r\n if 'Content-MD5' in headers_dict:\r\n header_list.append(headers_dict['Content-MD5'] + \"\\n\")\r\n\r\n return \"\".join(header_list) # Note that trailing \\n's are important\r", "def add_header( name, value ):", "def generate_email(name: str, email: str, archive_long_name: str):\n if email:\n return email.strip()\n else:\n user = name.title() + archive_long_name.title()\n return re.sub(r'\\W+', '', unidecode(user)) + '[email protected]'", "def build_hello_email():\n from_email = Email(\"[email protected]\")\n subject = \"Hello World from the SendGrid Python Library\"\n to_email = Email(\"[email protected]\")\n content = Content(\"text/plain\", \"some text here\")\n mail = Mail(from_email, subject, to_email, content)\n mail.personalizations[0].add_to(Email(\"[email protected]\"))\n\n return mail.get()", "def _get_address(self, address_tag, hdr):\n\n # try to find all the span tags in the address tag, the span tags\n # include all the address information we need \n try:\n elements = address_tag.find_all('span')\n\n # scrape the text out of the span tags and remove\n # all the whitespaces and punctuation marks\n address = elements[0].get_text()\\\n .replace(',','')\\\n .strip()\n city = elements[1].get_text().strip()\n state = elements[2].get_text().strip()\n zipcode = elements[3].get_text().strip()\n return address, city, state, zipcode\n # however, sometimes the address tag does not include the street\n # info, in this case, use the text in the header tag, which serves\n # as a replacement for the address \n except:\n address = hdr.get_text()\n elements = address_tag.find_all('span')\n city = elements[0].get_text()\\\n .replace(',','')\\\n .strip()\n state = elements[1].get_text().strip()\n zipcode = elements[2].get_text().strip()\n return address, city, state, zipcode", "def construct_email_content(self):\n # Construct header of the message\n content = MAIL_HEAD_CONTENT.replace(\"TITLE_HOLDER\", self.title).replace('FAIL_JOB_HOLDER',\n self.fail_job_content).replace(\n \"TIME_HOLDER\", os.getenv(\"START_TIME\")).replace(\"GRAPH_HOLDER\", os.getenv(\"BENCHMARK_GRAPH\")).replace(\n \"JOB_HOLDER\", os.getenv(\"BENCHMARK_TYPE\")).replace(\"DEVICE_HOLDER\", os.getenv(\"DEVICE_TYPE\")).replace(\"CUDA_HOLDER\", os.getenv(\"VERSION_CUDA\")).replace('DISPLAY', self.job_display)\n\n if not self.alarm_info:\n return\n # Construct alarm content\n content += self.alarm_info\n # Construct the tail of the message\n content += MAIL_TAIL_CONTENT.replace(\"BENCHMARK_WEBSITE1\", os.getenv(\"BENCHMARK_WEBSITE1\", \"\")).strip().replace(\n 'RUN_ENV_HOLDER', self.env_content).replace(\"BENCHMARK_WEBSITE2\", os.getenv(\"BENCHMARK_WEBSITE2\"))\n\n with 
open(os.path.join(self.log_path, \"mail.html\"), \"w\") as f_object:\n f_object.write(content)", "def __str__(self):\n if self._street_name != self.DEFAULT_STREET_NAME and \\\n self._house_num != self.DEFAULT_HOUSE_NUM and \\\n self._apt_num != self.DEFAULT_APT_NUM:\n address = f\"\\n{self._house_num} {self._street_name} Street, \" \\\n f\"#{self._apt_num}\"\n return address\n else:\n return \"<None>\"", "def getmailheader(header_text, default=\"ascii\"):\n try:\n headers = decode_header(header_text)\n except email.Errors.HeaderParseError:\n # This already append in email.base64mime.decode()\n # instead return a sanitized ascii string\n return header_text.encode('ascii', 'replace').decode('ascii')\n else:\n for i, (text, charset) in enumerate(headers):\n try:\n headers[i] = unicode(text, charset or default, errors='replace')\n except LookupError:\n # if the charset is unknown, force default\n headers[i] = unicode(text, default, errors='replace')\n return u\"\".join(headers)", "def request_add_host(request, address):\n\n request.setdefault('headers', {})\n request['headers'].setdefault('Host', address)\n\n return request", "def get_address(self):\n return self.address.line[0]+\", \"+self.address.city+\", \"+self.address.state+\", \"+self.address.country", "def build_headers(self):\n\n # User-agent is always sent\n headers = {'user-agent': self.useragent}\n for hdr in self.config.client_standard_headers:\n val = getattr(self.config, 'client_' + hdr.lower().replace('-','_'))\n headers[hdr] = val\n\n return headers", "def coerce_address(address: Address | str) -> Address:\n if isinstance(address, str):\n header = SMTP.header_factory('sender', address)\n assert isinstance(header, SingleAddressHeader)\n return header.address\n\n assert isinstance(address, Address)\n return address", "def get_str_address(address):\n return \\\n get_ob_value_primitive(address, 'AddrLine1', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine2', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine3', exception_return_value='') + ', ' + \\\n get_ob_value_primitive(address, 'City', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'County', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'StateProvince', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'ZipPostalCode', exception_return_value='')", "def get_host_string(addr: AddressTupleVXType) -> str:\n if len(addr) >= 3:\n addr = cast(AddressTupleV6Type, addr)\n if addr[3]:\n return \"{}%{}\".format(addr[0], addr[3])\n return addr[0]", "def _formatting_address_fields(self):\n return self._address_fields()", "def mail_header(self):\n return self._hdr", "def format_url_address(address):\n try:\n addr = netaddr.IPAddress(address)\n if addr.version == constants.IPV6_FAMILY:\n return \"[%s]\" % address\n else:\n return str(address)\n except netaddr.AddrFormatError:\n return address", "def construct_header(self): \n \n # create the individual labels\n hdr_bits = [hb.format(hdr) for hb, hdr in zip(self.row_base, self.headers)]\n \n # stick it all together and return with hdr_sep underneath\n hdr_str = f\"|{'|'.join(hdr_bits)}|\\n\"\n return hdr_str + self.hdr_sep * (len(hdr_str)-1) + \"\\n\"", "def make_header(text, size=80, symbol=\"-\"):\n header = symbol * size + \"\\n\"\n header += \"%s\\n\" % text\n header += symbol * size + \"\\n\"\n return header", "def user2Link(user): \n # could also look up mail addrs via a table lookup, etc\n return '<a 
href=\"mailto:%(user)[email protected]\">%(user)s</a>' % {\"user\": user}", "def __str__(self):\n return format_address(**self._get_elements())", "def get_footer_email(object, use_string_1=False, class_link='navLink'):\n if smart_unicode(str(type(object))) == \"<class 'dms.models.DmsItem'>\":\n item = object\n else:\n item = object.item\n if use_string_1:\n return encode_email(item.string_2, item.string_1, 'navLink')\n else:\n name = item.owner.get_full_name()\n if name == u'Unbekannte Person' and item.string_1 != '':\n return encode_email(item.string_2, item.string_1, class_link)\n else:\n return encode_email(item.owner.email, name, class_link)", "def to_string(self):\n return \"Address: {city} {state} {country}\".format(\n city=self.city,\n state=self.state,\n country=self.country\n )", "def send_new_lincom_address_message():\r\n keys = [\"city_name\", \"concat_address\", \"geofeat_id\", \"initial_create_date\"]\r\n addresses = sorted(\r\n addr\r\n for addr in arcetl.attributes.as_iters(\r\n dataset.SITE_ADDRESS.path(\"pub\"),\r\n field_names=keys,\r\n dataset_where_sql=\"psap_code = 'LI' \",\r\n )\r\n if (datetime.datetime.now() - addr[-1]).days < 15\r\n )\r\n table_header = \"<tr>{}</tr>\".format(\r\n \"\".join(\"<th>{}</th>\".format(key) for key in keys)\r\n )\r\n row_template = \"<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr>\"\r\n if addresses:\r\n LOG.warning(\"Found new addresses in Lincoln PSAP area: sending email.\")\r\n table_rows = \"\".join(row_template.format(*addr) for addr in addresses)\r\n KWARGS_NEW_LINCOLN_MESSAGE[\"body\"] = KWARGS_NEW_LINCOLN_MESSAGE[\"body\"].format(\r\n table_header, table_rows\r\n )\r\n send_email(**KWARGS_NEW_LINCOLN_MESSAGE)\r\n else:\r\n LOG.info(\"No new addresses in Lincoln PSAP area found. Not sending email.\")", "def build_http_header(k: bytes, v: bytes) -> bytes:\n return k + COLON + WHITESPACE + v", "def build_http_header(k: bytes, v: bytes) -> bytes:\n return k + COLON + WHITESPACE + v", "def extract_email_address(logpart):\n # print \"Parsing for email address: {}\".format(logpart)\n return(logpart.split('<')[1].split('>')[0])", "def address(self):\n return f'Address = {self._peer.address}/{self._peer.subnet.prefixlen}'", "def getHeader(self):\n length = self.getInt()\n dest = self._getStr(definition.ADDRESS_LENGTH)\n origin = self._getStr(definition.ADDRESS_LENGTH)\n msgType = self._getStr(definition.MSG_TYPE_LENGTH)\n msgNr = self.getInt()\n return (length, dest, origin, msgType, msgNr)", "def build_header(self, app_name, host_name, message_id, priority,\n process_id, version, timestamp, sd):\n head = SyslogMessageHead()\n head.appname = app_name or '-'\n head.hostname = host_name or '-'\n head.messageid = message_id or '-'\n head.priority = priority or '-'\n head.processid = process_id or '-'\n head.timestamp = timestamp or '-'\n head.version = version or '-'\n head.sd = sd or {}\n return head", "def ad_rep_email(obj):\n return '%s' % obj.ad_rep.email", "def aprs_msg(src,dst,via,addr,msgtext):\n\n to = addr.ljust(9)[:9]\n msg = src + '>' + dst\n if via:\n msg += ',' + via\n msg += '::' + to + ':' + msgtext\n return msg", "def _generate_email(email, name=None):\n result = {'email': email}\n if name:\n result['name'] = name\n return result", "def _format_senders_correspondent_53D(self, val):\n account = val.get('ACCOUNT')\n name = val.get('NAME')\n address = val.get('ADDRESS')\n if name and address:\n name = FSwiftWriterUtils.split_text_and_prefix(name, 35)\n address = FSwiftWriterUtils.split_text_and_prefix(address, 35)\n val = 
FSwiftWriterUtils.allocate_space_for_name_address_without_constraint(name, address)\n if account:\n val = \"/\" + str(account) + \"\\n\" + str(val)\n return val", "def address(self):\n return str(self.street) + str(self.city) + str(self.state) + str(self.zipcode)", "def getAddress(user):", "def _encode_header(self):\n\t\theader = self.config.get('header')\n\t\tif header is not None:\n\t\t\treturn self._encode_tuple(header)", "def make_email(email: str) -> str:\n name, domain = email.split('@')\n\n empty_and_dot = ('', '.')\n new_email = [char + choice(empty_and_dot) for char in name[:-1]]\n new_email.extend([name[-1], '@', domain])\n\n return ''.join(new_email)", "def get_label(self):\r\n return _(\"Address:\")", "def get_author_email(author, email):\n return encode_email(email, author, 'nav')", "def spamHeaders(self) -> Tuple[List[str], Dict[str, str]]:\n sections = [\"STATUS\", \"TITLE\", \"PROJECT\", \"FILE\", \"SITE\", \"CHANNAME\", \"DATA\"]\n sectionHeaders = {}\n sectionHeaders[\"STATUS\"] = [\"STATUS\"]\n sectionHeaders[\"TITLE\"] = [\"AUTHOR\", \"VERSION\", \"DATE\", \"COMMENT\"]\n sectionHeaders[\"FILE\"] = [\"NAME\", \"FREQBAND\", \"DATE\"]\n sectionHeaders[\"CHANNAME\"] = [\"ITEMS\", \"NAME\"]\n sectionHeaders[\"DATA\"] = [\"ITEMS\", \"CHAN\"]\n return sections, sectionHeaders", "def format_msg(msg):\n if type(msg) == str:\n msg = msg.encode()\n header = str(len(msg))\n header = header.zfill(HEADER_SIZE)\n return header.encode(), msg", "def header_huffington(self):\n head = '\\n ^^Polls ^^fetched ^^from ^^[http://elections.huffingtonpost.com/](http://elections.huffingtonpost.com/).\\n\\n'\n head += '***{}***\\n\\n'.format(self.get_greeting())\n head += '.\\n\\n'\n head += '.\\n\\n'\n return head", "def format_header(self, header):\n raise NotImplementedError()", "def email(self):\n return '{}.{}@email.com'.format(self.fname,self.lname)", "def get_address(query):\n address = \"Dis-moi, quel endroit tu cherches ?\"\n data = get_data(query)\n try:\n address_data = data[\"results\"][0][\"formatted_address\"]\n address = (\"Si je ne me trompe pas,\"\n \" l'adresse que tu cherche, c'est ... \" + address_data + \". 
Sinon\"\n \", dis-moi le nom de lieu exact\")\n except IndexError:\n address = \"Désolé, je n'ai pas compris quel endroit tu cherches ?\"\n finally:\n return address", "def get_invitation_email(address, key):\n\n EMAIL = '[email protected]'\n SUBJECT = 'Your Foojal Invitation'\n URL = 'http://app.foojal.com/invites/'\n EMAIL_CONTENT = \"\"\"\nYou have been invited to Foojal.com!\n\nTo accept this invitation, click the following link,\nor copy and paste the URL into your browser's address\nbar:\n\n%s\"\"\"\n\n message = EmailMessage()\n message.sender = EMAIL\n message.to = address\n message.subject = SUBJECT\n message.body = EMAIL_CONTENT % URL + key\n return message", "def append_contacts(self, lines, lang):\n if lang==\"en\":\n lines.append(\"section Contacts\")\n elif lang==\"it\":\n lines.append(\"section Contatti\")\n lines.append(\"mailto://%s e-mail\" % flags['MAIL'])\n lines.append(\"verbatim %s\" % SKYPE)\n lines.append(\"verbatim &nbsp;\")\n return lines", "def getheader(header_text, default=\"ascii\"):\n # Borrowed from: http://ginstrom.com/scribbles/2007/11/19/parsing-multilingual-email-with-python/\n\n headers = email.Header.decode_header(header_text)\n header_sections = [unicode(text, charset or default)\n for text, charset in headers]\n return u\" \".join(header_sections)", "def _addressitem_from_line(line):\n sline = line.split(\"\\t\")\n if len(sline) < 2:\n raise IOError(\"Error parsing address from line. Malformed data.\")\n address = sline[0]\n name = sline[1]\n\n if len(sline) > 2:\n otherinfo = sline[2]\n else:\n otherinfo = \"\"\n if len(sline) > 3:\n extrainfo = sline[3]\n else:\n extrainfo = \"\"\n if len(sline) > 4:\n raw_misc = sline[4:]\n misc = _raw_misc_to_dict(raw_misc)\n else:\n misc = {}\n\n return AddressItem(\n _email_address=address,\n _name=name,\n _otherinfo=otherinfo,\n _extrainfo=extrainfo,\n **misc\n )", "def headerDA(blocks,block_size,extended):\n if(extended):\n header =[\"Address\",\"Tag\",\"Real Address\",\"Index\",\"WordOffset\",\"ByteOffset\"]\n else:\n header =[\"Address\"]\n for i in range(0,blocks):\n for x in range(0,block_size):\n header.append(\"B%i W%i\"%(i,x))\n header.append(\"Result\")\n return header", "def _write_header(self, head_msg=None):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n header = \"\\n%s\\nDateTime: %s \\nMessage: %s \\n\" % (\"*\" * 100, now, head_msg)\n\n return header", "def generate_header(name: str) -> str:\n return MARKDOWN_HEADER.format(name.capitalize(), date.today())", "def mailing_address(self):\n if \"mailingAddress\" in self._prop_dict:\n if isinstance(self._prop_dict[\"mailingAddress\"], OneDriveObjectBase):\n return self._prop_dict[\"mailingAddress\"]\n else :\n self._prop_dict[\"mailingAddress\"] = PhysicalAddress(self._prop_dict[\"mailingAddress\"])\n return self._prop_dict[\"mailingAddress\"]\n\n return None", "def header(self, field, value):\n if field.lower() == 'from':\n logger.debug(f\"({self.id}) \\\"From:\\\" raw: '{value}'\")\n value = normalizeRawFromHeader(value)\n logger.info(f\"({self.id}) \\\"From:\\\" cleaned: '{value}'\")\n if value == '':\n logger.warning(f\"\\\"From:\\\" header empty! WTF, but nothing to do. 
OK for now.\")\n self.set_suspicious_headers(False, \"EMPTY FROM HEADER - WTF\")\n else:\n decoded_from = get_decoded_header(value)\n logger.debug(f\"({self.id}) \\\"From:\\\" decoded raw: '{value}'\")\n decoded_from = normalizeRawFromHeader(decoded_from)\n logger.info(f\"({self.id}) \\\"From:\\\" decoded cleaned: '{decoded_from}'\")\n all_domains = address_domain_regex.findall(decoded_from)\n all_domains = [a.lower() for a in all_domains]\n if len(all_domains) == 0:\n logger.warning(f\"({self.id}) No domain in decoded \\\"From:\\\" - WTF! OK, though\")\n self.set_suspicious_headers(False, \"No domains in decoded FROM\")\n elif len(all_domains) == 1:\n logger.debug(f\"({self.id}) Only one domain in decoded \\\"From:\\\": '{all_domains[0]}' - OK\")\n self.set_suspicious_headers(False, \"Only one domain in decoded FROM\")\n else:\n logger.info(f\"({self.id}) Raw decoded from header contains multiple domains: '{all_domains}' - Checking\")\n if len(set(all_domains)) > 1:\n logger.info(f\"({self.id}) Multiple different domains in decoded \\\"From:\\\". - NOT OK\")\n self.set_suspicious_headers(True, \"Multiple domains in decoded FROM are different\")\n else:\n logger.info(f\"({self.id}) All domains in decoded \\\"From:\\\" are identical - OK\")\n self.set_suspicious_headers(False, \"Multiple domains in decoded FROM match properly\")\n # CONTINUE so we reach eom hook.\n # TODO: Log and react if multiple From-headers are found?\n return Milter.CONTINUE" ]
[ "0.69923276", "0.67900985", "0.67680126", "0.6404634", "0.6254018", "0.62328947", "0.615882", "0.61046", "0.6023781", "0.6022702", "0.5986854", "0.5960331", "0.59242094", "0.5905854", "0.5892792", "0.58512026", "0.58358437", "0.5810467", "0.57352096", "0.5722051", "0.57130855", "0.5709188", "0.56994635", "0.5692479", "0.56684", "0.5637921", "0.56051266", "0.56034654", "0.55964833", "0.55856055", "0.5554957", "0.5503713", "0.5492971", "0.54877263", "0.54873943", "0.5486609", "0.54858917", "0.5485803", "0.5462644", "0.54441965", "0.5443279", "0.544053", "0.5416912", "0.53901136", "0.53862476", "0.53830546", "0.5376178", "0.53703874", "0.5365459", "0.5358886", "0.5353857", "0.5352142", "0.5337042", "0.53308403", "0.53286433", "0.53173554", "0.5316431", "0.5313129", "0.5304512", "0.52871734", "0.5284381", "0.5273755", "0.52638185", "0.5262176", "0.5261503", "0.5250822", "0.52400315", "0.5235595", "0.52181256", "0.52156883", "0.52156883", "0.5214577", "0.5213344", "0.5212571", "0.521013", "0.5209756", "0.52096313", "0.52038676", "0.5199788", "0.51979923", "0.51940715", "0.519139", "0.51831424", "0.51738393", "0.51733667", "0.5168288", "0.5166792", "0.51541674", "0.5151274", "0.5141778", "0.5124493", "0.51222855", "0.5119623", "0.5114922", "0.51137346", "0.51098716", "0.5106372", "0.5104972", "0.5099926", "0.5097699" ]
0.7850054
0
Parse special keywords in commits to determine further postcommit actions.
def determine_keywords(self): split = dict() split['email_cc'] = re.compile("^\s*CC[-_]?MAIL[:=]\s*(.*)") split['email_cc2'] = re.compile("^\s*C[Cc][:=]\s*(.*)") split['fixed_in'] = re.compile("^\s*FIXED[-_]?IN[:=]\s*(.*)") numeric = dict() numeric['bug_fixed'] = re.compile("^\s*(?:BUGS?|FEATURE)[:=]\s*(.+)") numeric['bug_cc'] = re.compile("^\s*CCBUGS?[:=]\s*(.+)") presence = dict() presence['email_gui'] = re.compile("^\s*GUI:") presence['silent'] = re.compile("(?:CVS|SVN|GIT|SCM).?SILENT") presence['notes'] = re.compile("(?:Notes added by 'git notes add'|Notes removed by 'git notes remove')") results = defaultdict(list) for line in self.commit.message.split("\n"): # If our line starts with Summary: (as it does when using Arcanist's default template) then strip this off # This allows for people to fill keywords in the Differential Summary and have this work smoothly for them line = re.sub("^Summary: (.+)", "\g<1>", line) # Start processing our keywords... for (name, regex) in split.iteritems(): match = re.match( regex, line ) if match: results[name] += [result.strip() for result in match.group(1).split(",")] for (name, regex) in numeric.iteritems(): match = re.match( regex, line ) if match: results[name] += re.findall("(\d{1,10})", match.group(1)) for (name, regex) in presence.iteritems(): if re.match( regex, line ): results[name] = True self.keywords = results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_commit_message(message):\n # ['closes', 'close', 'fix', ...]\n keywords = []\n [keywords.extend(val) for val in KEYWORDS.values()]\n # we need to sort to match longuest command possible\n keywords.sort(lambda x, y: cmp(len(y), len(x)))\n # 'closes|close|fix...'\n keywords_re = '|'.join(keywords)\n\n # [('refs', 'affinitic', '#1'), ('refs', 'affinitic', '#2')]\n refs = re.findall('(%s)[ ]*([a-z]+)[ ]*([# \\d]*)' % keywords_re,\n message,\n re.IGNORECASE)\n\n parseds = []\n for ref in refs:\n if len(ref) != 3:\n # XXX envoi de mail si 1 < ref < 3 ?\n continue\n\n command = _word_to_command(ref[0])\n trac = ref[1].lower()\n tickets = ref[2]\n\n tickets_split = re.findall('\\d+', tickets)\n for ticket in tickets_split:\n parsed = {}\n parsed[\"command\"] = command\n parsed[\"ticket\"] = ticket\n parsed[\"trac\"] = trac\n parseds.append(parsed)\n\n return parseds", "def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if 
check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = 
_check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n\n return commit_info", "def commits_parsing(query):\n logging.info(\"GET request commit parsing is working\")\n results = {}\n list_of_commits = []\n clear_list_message = []\n clear_list_committer = []\n json_commits = {}\n json_all = {}\n for single_query in query:\n list_of_commits += {single_query[:-6]}\n\n try:\n results = requests.get(single_query[:-6])\n except requests.ConnectionError as exception:\n return f'{exception}'\n\n json_all = results.json()[0]\n\n json_commits = json_all['commit']\n clear_list_message += {json_commits['message']}\n clear_list_committer += {json_commits['committer']['name']}\n\n return clear_list_message, clear_list_committer", "def test_unrecognized_actions_rejected(self):\n # Unexpected whitespace.\n with self.assertRaises(BisectLog.ParserBug):\n _ = BisectLog(\" git bisect skip c123\")\n with self.assertRaises(BisectLog.ParserBug):\n _ = BisectLog(\"git bisect skip c123\")\n with self.assertRaises(BisectLog.ParserBug):\n _ = BisectLog(\"git bisect skip c123\")\n with self.assertRaises(BisectLog.ParserBug):\n _ = BisectLog(\" # git bisect skip c123\")\n # Unrecognized action with commit.\n with self.assertRaises(BisectLog.ParserBug):\n _ = BisectLog(\"git bisect foo c123\")\n # Unrecognized action without commit.\n with self.assertRaises(BisectLog.ParserBug):\n _ = BisectLog(\"git bisect bar\")", "def parse(self, text):\n \n self.clear()\n lines = text.split(\"\\n\")\n self.logger.info(\"Parsing Git history\")\n \n for line in lines:\n if len(line) == 0:\n # Line is a spacer\n pass\n \n elif line[0] == ' ':\n # Line is part of a commit message\n pass\n \n else:\n # Line is part of a commit header\n spaceIdx = line.find(' ')\n if spaceIdx == -1:\n self.logger.warn(\"Skipping unrecognizable history line: \" + line)\n continue\n \n keyword = line[:spaceIdx]\n content = line[spaceIdx+1:]\n self.logger.debug(\"Found key-value pair: {0} {1}\".format(keyword, content))\n \n self._handleKeyValue(keyword, content)\n \n # Grab the last commit\n self._commits[self._currentCommit.hashKey] = self._currentCommit\n self._currentCommit = None\n \n # Finalize the commit tree\n self._resolveCommits()", "def commit_names(self, commit):\n return []", "def process_event(self):\n if self.event['text'][0] == \"!\":\n self.parse_bang_command()\n\n elif self.event['text'][-2:] in self.valid_suffixes:\n self.parse_suffix_command()", "def test_blog_manual_commit():", "def _post_argument_parsing(self):\n pass", "def _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body):\n errors = []\n\n # List of words a commit title can start with\n 
commit_title_start_words = filter(\n lambda x: x, COMMIT_TITLE_START_WORDS.splitlines())\n\n author_errors = _validate_email(author, 'Author')\n committer_errors = _validate_email(committer, 'Committer')\n\n if author_errors:\n errors.extend(author_errors)\n if committer_errors:\n errors.extend(committer_errors)\n\n title_words = title.split(' ', 1)\n\n # Check if in imperative tense\n if re.search(r'(ed|ing|s)$', title_words[0]):\n errors.append((\n 'title-imperative-tense-check',\n 'Commit title is not in imperative tense'))\n\n # Check if first word is capitalized\n if re.match(r'^[^A-Z]', title_words[0]):\n errors.append((\n 'title-capitalization-check',\n 'Commit title is not capitalized'))\n\n # Check if title begins with known start word\n if title_words[0] not in commit_title_start_words:\n errors.append((\n 'title-verb-check',\n 'Commit title does not begin with a verb'))\n\n # Check if this is a fixup! commit\n if re.match(r'^fixup!', title_words[0]):\n errors.append((\n 'title-fixup-check',\n 'Commit title starts with fixup! '))\n\n # Check if this is a squash! commit\n if re.match(r'^squash!', title_words[0]):\n errors.append((\n 'title-squash-check',\n 'Commit title starts with squash! '))\n\n # Check if the commit title ends in whitespace or punctuation\n if len(title_words) > 1 and re.search(r'[\\s\\W]$', title_words[1]):\n errors.append((\n 'title-whitespace-punctuation-check',\n 'Commit title ends in whitespace or punctuation'))\n\n # Check if the title is greater than 50 characters in length\n if len(title) > 50:\n errors.append((\n 'title-length-check',\n 'Commit title longer than 50 characters'))\n\n # Check if separator line (between title and body) is empty\n if separator is not None and separator != '':\n errors.append((\n 'message-separator-check',\n 'Missing blank line between title and body'))\n\n # Check if the commit message has a body\n if body == []:\n errors.append((\n 'body-check',\n 'Missing commit message body'))\n\n # Check if any line in the body is greater than 72 characters in legnth\n for body_line in body:\n if len(body_line) <= 72:\n continue\n errors.append((\n 'body-length-check',\n 'Commit message body line > 72 characters'))\n break\n\n # Check if commit is a merge commit\n if merge is not None:\n errors.append((\n 'commit-merge-check',\n 'Commit is a merge commit'))\n\n # Check commit diff for whitespace errors\n git_diff_cmd = shlex.split(\n 'git show --check {commit_sha1}'.format(\n commit_sha1=commit_sha1))\n\n has_whitespace_issue = None\n f, _ = tempfile.mkstemp()\n has_whitespace_issue = subprocess.call(git_diff_cmd,\n stdout=f, stderr=f, close_fds=True)\n os.close(f)\n\n if has_whitespace_issue:\n errors.append((\n 'diff-whitespace-check',\n 'Commit diff has whitespace issues'))\n\n return errors", "def postparsing_postcmd(self, stop):\n return stop", "def preprocess_post(self, post):\n # tokenize, clean, & tag part-of-speech for all words\n if self.document_level == 'postwise':\n\n doc_text = all_comments_from_post(post)\n # leave early if there's nothing there\n if doc_text == '':\n return []\n\n tokens = nltk.word_tokenize(doc_text)\n # TODO: skip this if there's no POS filtering args!\n tagged = nltk.pos_tag(tokens)\n\n # filter out most invalid words with valid_word()\n processed_document = []\n for word, pos_tag in tagged:\n if self.valid_word(word, pos_tag):\n cleaned_word = self.clean_word(word)\n # things like digits and other junk become empty string,\n # so exclude them from final document\n if cleaned_word:\n 
processed_document.append(cleaned_word)\n # finally, update the post\n post['postwise'] = {'tokens': processed_document, 'text': doc_text}\n self.postman.posts_write.update_one({'_id':post['_id']}, {'$set':post}, upsert=True)\n else:\n raise NotImplementedError('document_level: \"%s\"' % self.document_level)\n\n return processed_document", "def test_git_commits(self):\n event_id = dog.Event.create(title=\"Testing git commits\", text=\"\"\"$$$\n eac54655 * Merge pull request #2 from DataDog/alq-add-arg-validation ([email protected])\n |\\\n 760735ef | * origin/alq-add-arg-validation Simple typecheck between metric and metrics ([email protected])\n |/\n f7a5a23d * missed version number in docs ([email protected])\n $$$\"\"\", event_type=\"commit\", source_type_name=\"git\", event_object=\"0xdeadbeef\")['event']['id']\n event = self.get_event_with_retry(event_id)\n self.assertEqual(event['event']['title'], \"Testing git commits\")", "def handle_commits_published(extension=None, **kwargs):\n review_request = kwargs.get('review_request')\n\n if review_request is None:\n return\n\n commit_data = fetch_commit_data(review_request)\n\n if (not is_pushed(review_request, commit_data) or\n not is_parent(review_request, commit_data)):\n return\n\n # Check the change description and only continue if it contains a change\n # to the commit information. Currently change descriptions won't include\n # information about our extra data field, so we'll look for a change to\n # the diff which is mandatory if the commits changed. TODO: Properly use\n # the commit information once we start populating the change description\n # with it.\n #\n # A change description will not exist if this is the first publish of the\n # review request. In that case we know there must be commits since this\n # is a pushed request.\n cd = kwargs.get('changedesc')\n if (cd is not None and ('diff' not in cd.fields_changed or\n 'added' not in cd.fields_changed['diff'])):\n return\n\n # We publish both the review repository url as well as the landing\n # (\"inbound\") repository url. This gives consumers which perform hg\n # operations the option to avoid cloning the review repository, which may\n # be large.\n repo = review_request.repository\n repo_url = repo.path\n landing_repo_url = repo.extra_data.get('landing_repository_url')\n\n child_rrids = []\n commits = []\n ext_commits = json.loads(commit_data.extra_data.get(COMMITS_KEY, '[]'))\n\n for rev, rrid in ext_commits:\n child_rrids.append(int(rrid))\n commits.append({\n 'rev': rev,\n 'review_request_id': int(rrid),\n 'diffset_revision': None\n })\n\n # In order to retrieve the diff revision for each commit we need to fetch\n # their correpsonding child review request.\n review_requests = dict(\n (obj.id, obj) for obj in\n ReviewRequest.objects.filter(pk__in=child_rrids))\n\n for commit_info in commits:\n # TODO: Every call to get_latest_diffset() makes its own query to the\n # database. 
It is probably possible to retrieve the diffsets we care\n # about using a single query through Django's ORM, but it's not trivial.\n commit_info['diffset_revision'] = review_requests[\n commit_info['review_request_id']\n ].get_latest_diffset().revision\n\n msg = base.GenericMessage()\n msg.routing_parts.append('mozreview.commits.published')\n msg.data['parent_review_request_id'] = review_request.id\n msg.data['parent_diffset_revision'] = review_request.get_latest_diffset().revision\n msg.data['commits'] = commits\n msg.data['repository_url'] = repo_url\n msg.data['landing_repository_url'] = landing_repo_url\n\n # TODO: Make work with RB localsites.\n msg.data['review_board_url'] = get_server_url()\n\n publish_message(extension, msg)", "def calc_lang_features(commits, author):\n\tlang_features = ['/\\*\\*', '\\\\\"\\\\\"\\\\\"', '///', # documentation\n\t\t\t'^\\s*@', 'def.+:.+->', 'using\\s+System\\.ComponentModel\\.DataAnnotations', # assertion\n\t\t\t'assert', 'TODO', 'lambda']\n\n\t# delete contents\n\topen('lang_features.csv', 'w').close()\n\t\n\tfor count, commit in enumerate(commits):\n\t\t# status update\n\t\tif (count + 1) % 5 == 0:\n\t\t\tprint commit, '.. ..', count + 1, ' / ', len(commits)\n\n\n\t\t\t# for each blob modified\n\t\tquery = (\"for x in $(echo \" + commit + \" | ssh da4 ~/lookup/cmputeDiff2.perl); do \" +\n\t\t\t\t# get the chold and parent blob\n\t\t\t\t\"diff_blobs=$(echo $x | awk -v RS=';' 1 | sed -n '3,4 p');\" +\n\t\t\t\t# if a parent blob does not exist, the author authored all of the content of the file\n\t\t\t\t\"if [ $(echo $diff_blobs|wc -w) -eq 1 ]; then \" +\n\t\t\t\t\t\"echo $diff_blobs | ~/lookup/showCnt blob 2> /dev/null; \" +\n\t\t\t\t# if a parent blob exists, find the diff, in order to search only the modified lines\n\t\t\t\t\"elif [ $(echo $diff_blobs|wc -w) -eq 2 ]; then \" +\n\t\t\t\t\t\"vars=( $diff_blobs );\" +\n\t\t\t\t\t# using bash instead of sh in order to use the process substitution,\n\t\t\t\t\t# to get the modified lines\n\t\t\t\t\t\"/bin/bash -c \\\"diff <(echo ${vars[0]} | ~/lookup/showCnt blob)\" +\n\t\t\t\t\t\t\t\t\" <(echo ${vars[1]} | ~/lookup/showCnt blob)\\\";\" +\n\t\t\t\t\"fi;\" +\n\t\t\t# grep the above practices and discard the lines that were deleted from the parent blob\n\t\t\t# (they start with \">\" in diff)\n\t\t\t\"done | egrep \\\"\" + \"|\".join(lang_features) + \"\\\" | grep -v '^>' | wc -l \")\n\t\tcount_uses = int(bash(query).strip())\n\t\tif count_uses > 0: # good practice feature is used\n\t\t\tout = bash('echo ' + commit + ' | ~/lookup/getValues c2P')\n\t\t\tmain_proj = out.strip().split(';')[1]\n\t\t\ttime = search(commit, 'commit')[2]\n\n\t\t\tf = open(\"lang_features.csv\", \"a\")\n\t\t\tprint 'lang_f'\n\t\t\tf.write(author + ', ' + 'LANG_F' + ', ' + str(time) + ', ' + main_proj + ', ' + str(count_uses) + '\\n')\n\t\t\tf.close()\n\t\t\tprint 'wrote: -->', commit", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def parse(self, tokens, pred_tags):\n entities = []\n entity = None\n tag = ''\n for idx, st in enumerate(pred_tags):\n if entity is None:\n if st.startswith('B'):\n entity = {}\n entity['start'] = idx\n tag = st[2:]\n else:\n continue\n else:\n if st == 'O':\n entity['end'] = idx\n name = ''.join(tokens[entity['start']: entity['end']])\n entities.append((name, tag))\n entity = None\n tag = ''\n elif st.startswith('B'):\n entity['end'] = idx\n name = ''.join(tokens[entity['start']: 
entity['end']])\n entities.append((name, tag))\n entity = {}\n entity['start'] = idx\n tag = st[2:]\n else:\n continue\n return entities", "def parse(self, parser, tokens):\n self.parser = parser\n self.bits = tokens.split_contents()\n self.tagname = self.bits.pop(0)\n self.kwargs = {}\n self.blocks = {}\n self.arguments = self.options.get_arguments()\n self.current_argument = None\n self.todo = list(self.bits)\n for bit in self.bits:\n self.handle_bit(bit)\n self.finish()\n self.parse_blocks()\n return self.kwargs, self.blocks", "def on_commit_comment(self, payload):\n pass", "def _postprocess(self, tags: List[str], words: List[str], pos: List[str]):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)", "def process(self):\n\n form = cgi.FieldStorage()\n commit = self.read_commit(form)\n\n print(\"Content-Type: text/plain; charset='utf-8'\\r\")\n print(\"Cache-Control: max-age=60\\r\")\n if form.getfirst(\"download\", \"false\") == \"true\":\n print(\"Content-Disposition: attachment; filename=\\\"patch.txt\\\"\\r\")\n\n print(\"\\r\")\n\n print((\"#\" + json.dumps(PostsaiCommitViewer.format_commit_header(commit), default=convert_to_builtin_type)))\n sys.stdout.flush()\n PostsaiCommitViewer.dump_commit_diff(commit)", "def _parse_tags(self):\n tokens = self.tags_str[1:].split(\";\")\n self._tags = {\n k.strip(): v\n for token in tokens\n for k, v in [token.split(\"=\")]\n }", "def _validate_commits(pull_request):\n commits = github.get_commits(pull_request[\"commits_url\"])\n analyzed = []\n\n for commit_wrapper in commits:\n commit = {\n \"sha\": commit_wrapper[\"sha\"],\n \"message\": commit_wrapper[\"commit\"][\"message\"],\n }\n\n commit[\"standard\"] = _validate_title(commit[\"message\"])\n analyzed.append(commit)\n\n result = all(commit[\"standard\"] for commit in analyzed)\n return analyzed, result", "def parse_bang_command(self):\n valid_commands = {\n 'help': help.HelpPlugin,\n 'karma': karma.KarmaPlugin,\n 'karma_newest': karma.KarmaNewestPlugin,\n 'karma_top': karma.KarmaTopPlugin,\n 'karma_bottom': karma.KarmaBottomPlugin,\n 'roll': roll.RollPlugin,\n 'quest': quest.QuestPlugin,\n 'log': highlights.HighlightPlugin,\n 'attr': attribute.AttrPlugin,\n }\n\n evt_string = self.event['text']\n cmd_string = evt_string[1:]\n\n try:\n command, arg_string = cmd_string.split(' ', 1)\n except ValueError:\n command, arg_string = cmd_string, \"\"\n\n if command in self.valid_commands.keys():\n plugin = self.valid_commands[command](\n self.event,\n arg_string,\n )\n plugin.run()\n\n else:\n message = \"Sorry, '!{}' is not a valid command.\".format(command)\n self.bot.make_post(self.event, message)", "def parseCommit() -> str:\n cmd_tag = f\"git --no-pager diff --diff-filter=ACMR --name-only HEAD~1 HEAD\"\n print(f\"COMMAND: {cmd_tag}\")\n print(\"\", flush=True)\n fileList = subprocess.check_output(cmd_tag, shell=True)\n return fileList.decode('utf-8').splitlines()", "def _clean_commit(self, line):\n cleaned_line = {\n 'repo': line['origin'],\n 'hash': line['data_commit'],\n 'author': line['data_Author'],\n 'category': \"commit\",\n 'created_date': utils.str_to_dt_data(line['data_AuthorDate']),\n 'commit': line['data_Commit'],\n 'commit_date': utils.str_to_dt_data(line['data_CommitDate']),\n 'files_no': len(line['data_files']),\n 'refs': line['data_refs'],\n 'parents': line['data_parents'],\n 
'files': line['data_files']\n }\n\n actions = 0\n for file in line['data_files']:\n if 'action' in file:\n actions += 1\n cleaned_line['files_action'] = actions\n\n try:\n non_merge = math.isnan(line['data_Merge'])\n\n except (TypeError, KeyError):\n non_merge = False\n\n cleaned_line['merge'] = not non_merge\n return cleaned_line", "def _get_postproc_token(self):\n if self.config[\"postprocessing\"] == \"gatk_post_bam\":\n do_realignment = self.config[\"gatk_post_bam\"][\"do_realignment\"]\n do_recalibration = self.config[\"gatk_post_bam\"][\"do_recalibration\"]\n else:\n do_realignment, do_recalibration = False, False\n realigned_infix = self.config[\"gatk_post_bam\"][\"realigned_infix\"]\n recalibrated_infix = self.config[\"gatk_post_bam\"][\"recalibrated_infix\"]\n return {\n (False, False): \"\",\n (False, True): \".\" + recalibrated_infix,\n (True, False): \".\" + realigned_infix,\n (True, True): \".\" + realigned_infix + \".\" + recalibrated_infix,\n }[(do_realignment, do_recalibration)]", "def parse_cmd(cmd):\n begin_pat = re.compile(r'BEGIN\\s*\\{(.+?)\\}\\s*;?', re.X | re.S)\n end_pat = re.compile(r'END\\s*\\{(.+?)\\}\\s*;?', re.X | re.S)\n normal_pat = re.compile(r'([^{]*)(\\{(.+?)\\})?\\s*;?', re.X | re.S)\n\n # get BEGIN part\n begin = u''\n m = begin_pat.search(cmd)\n if m:\n begin = m.group(1).strip()\n cmd = cmd.replace(m.group(0), u'')\n\n # get END part\n end = u''\n m = end_pat.search(cmd)\n if m:\n end = m.group(1).strip()\n cmd = cmd.replace(m.group(0), u'')\n\n # get NORMAL part\n normal = (u'', u'')\n m = normal_pat.search(cmd)\n if m:\n pattern = m.group(1) or u'' # get u'' if \\1 is None\n action = m.group(3) or u'' # get u'' if \\3 is None\n normal = (pattern.strip(), action.strip())\n\n return (begin, normal, end)", "def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? 
If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))", "def at_post_cmd(self):\n pass", "def _postprocess(\n self,\n tags: List[str],\n words: List[str],\n pos: bool = False,\n ):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)", "def process(self, tweet):\n\n #identify the applicable event keywords for this text\n text = self.cleanup_data(tweet.text)\n tokens = [str(t.lower()).translate(None, string.punctuation) for t in tweet.text.split()]\n applicable_tokens = []\n for phrase in self.match_event_tree.root.keywords:\n if phrase in \" \".join(tokens):\n applicable_tokens.append(phrase)\n\n self.match_event_tree.propogate_tweet(applicable_tokens, tweet)", "def parse_text(self, text):\n m = re.match(' *([^:]*)(?:[:] *([^ ]*))?', text)\n keyword = m.group(1)\n argument = m.group(2)\n if argument:\n parameters_count = 1\n arguments = [argument]\n else:\n parameters_count = 0\n arguments = []\n if keyword in self.commands[parameters_count]:\n try:\n self.commands[parameters_count][keyword]['callback'](*arguments)\n except:\n self.slack.reply(\n 'I crashed on your command:' + '\\n```\\n{}\\n```'.format(traceback.format_exc()),\n True)\n else:\n self.slack.reply((\"Unknown command. Say \\\"<@{}> help\\\" for \"+\n \"list of known commands\")\\\n .format(self.slack.my_username))", "def parse_post_values(self): \n self.parse_values(sys.stdin.read())", "def postcmd(self, stop, line):\n\n if line and shlex.split(line)[0] == 'commit':\n # for the moment, self.target is the indication of a successful creation\n if self.target:\n return True\n else:\n return False\n\n return AttributeEditor.postcmd(self, stop, line)", "def parse(self, text):\n chars = list(text)\n chars.reverse()\n result = self.parse_closure(chars)\n if chars:\n raise Exception(\"Extra | or } in the script. Remember, | cannot \"\n \"appear outside of a function call. 
You can escape it \"\n \"with a backslash if you need an actual | in the text.\")\n return result", "def _parse_commands(self, message):\n command, params = (None,None)\n\n if message.get('content',{}).get('@type','') == 'messageText':\n text = message.get('content',{}).get('text',{}).get('text','')\n entities = message.get('content',{}).get('text',{}).get('entities',[])\n \n for e in entities:\n if e.get('type',{}).get('@type','') == 'textEntityTypeBotCommand':\n offset = e.get('offset',0)\n start = offset + 1\n end = offset + e.get('length',0)\n command = text[start:end]\n # parse command params\n params = (text[end:]).split(' ')\n params = list(filter(bool,params))\n break\n return command, params", "def postparse_hook_emptystatement(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:\n self.called_postparsing += 1\n raise exceptions.EmptyStatement", "def parse_post_category(advertise: Dict[str, Any]) -> Optional[List[str]]:\n\n noise_terms: List[str] = [\"voltar\"]\n\n if \"post_category\" in advertise.keys():\n tmp: List[str] = re.split('[ \\t]{2,}', advertise[\"post_category\"])\n tmp = Parser.remove_noise_terms(tmp, noise_terms)\n return tmp", "def _parse_committee_links(self, links):\n committees = {}\n\n for link in links:\n url = link.attrs[\"href\"]\n id_ = re.sub(\"/index.shtml$\", \"\", url)\n id_ = id_[id_.rfind(\"/\") + 1 :]\n name, date = link.text.rsplit(\"(\", maxsplit=1)\n if not date.endswith(\"–)\"):\n raise StopIteration()\n date = date.rstrip(\"–)\")\n committees[id_] = {\"url\": url, \"name\": name.strip(), \"since\": date.strip()}\n\n return committees", "def refactor_post(self,post_name):\n for name in list(self.rules):\n related_post = \"{}.post.{}\".format(name,post_name)\n if related_post in self.rules:\n parts = [self.MakeSymbolName(x) for x in [post_name, related_post]]\n self.rules[name] = self.MakeChoice([self.MakeSeq(parts)])", "def register_post_parser(self, fct, cfg, ctx):\n self.post_parsers.append((fct, cfg, ctx))", "def extract_entities_from_dependency_parse(dtrees, postag):\n sents = []\n for x in range(0,len(dtrees)):\n tok_list = []\n for node_index in dtrees[x].nodes:\n if node_index != 0:\n node = dtrees[x].nodes[node_index]\n if node['ctag'] == postag:\n tok_list.append((node['word'],postag))\n else:\n tok_list.append((node['word'],'O'))\n sents.append(tok_list)\n return sents", "def commits() -> None:\n project = get_project(require=True)\n commits_data = request('get', f'/api/v0/projects/{project.id}/commits/').json()\n current_commit = None\n try:\n current_commit = get_current_commit(project.directory)\n except Exception:\n pass\n\n # Filter out ad-hoc executions (and remove the adhocness marker)\n commits_data = [commit for commit in commits_data if not commit.pop('adhoc', False)]\n\n # Mark the current commit\n for commit in commits_data:\n if commit['identifier'] == current_commit:\n commit['identifier'] += ' (current)'\n\n print_table(commits_data)", "def getmetakeywords(allcontent, corpus):\n for i in range(0, len(allcontent)):\n words = re.split(\"[, ]+\", allcontent[i])\n if words[0] == \"Meta\":\n for j in range(3, len(words)):\n if len(processword(words[j])) > 0:\n corpus.append(processword(words[j]))", "def _pre_commit_has_hallmark(pre_commit_file):\n with open(pre_commit_file) as fh:\n script = fh.read()\n if u'from jig' in script or u'jig init' in script:\n return True\n return False", "def critic_parse(self, m):\n accept = self.config[\"mode\"] == 'accept'\n if m.group('ins_open'):\n return 
m.group('ins_text') if accept else ''\n elif m.group('del_open'):\n return '' if accept else m.group('del_text')\n elif m.group('mark_open'):\n return m.group('mark_text')\n elif m.group('com_open'):\n return ''\n elif m.group('sub_open'):\n return m.group('sub_ins_text') if accept else m.group('sub_del_text')", "def process_git_tag(regex, inputtag):\n\ttry: \n\t\tgitre = re.compile(regex)\n\t\tmatch = gitre.search(inputtag)\n\t\tgroups = match.groupdict()\n\t\tversion = groups.get('version', '.unknown')\n\t\tdate = groups.get('date', '')\n\t\tgitmeta = groups.get('gitmeta', '')\n\t\tif date:\n\t\t\tversion = '.'.join([version, ''.join(date.split('-'))])\n\texcept (AttributeError, EnvironmentError, OSError):\n\t\tversion, gitmeta = '.unknown', ''\n\n\treturn version, gitmeta", "def post_process(text):\n # XXX update to spit out HTML - no need for requests GDocs can take html\n verbose = False\n request_list = []\n chars = iter(text)\n normal_text = []\n knownsigils = {\"end\":('',\"NONE\"),\n \"^\": (\"0123456789+-\",\"SUPERSCRIPT\"),\n \"_\": (\"0123456789\",\"SUBSCRIPT\")\n }\n c = next(chars, \"end\")\n while (True):\n if (c in knownsigils.keys()):\n if len(normal_text): request_list.append((''.join(normal_text), \"NORMAL\"))\n normal_text.clear()\n (c,token) = _gettoken(c,chars,knownsigils)\n if (token is not None): request_list.append(token)\n if (c==\"end\"):\n break\n else:\n continue\n else:\n normal_text.append(c)\n c = next(chars, \"end\")\n return request_list", "def commit():\n query = {\"type\": \"commit\", \"cmd\": \"<commit></commit>\"}\n\n return _get_job_results(query)", "def _is_commit_sha(commit):\n return len(commit) == 40 and all([\n ch.isdigit() or (ch >= \"a\" and ch <= \"f\")\n for ch in commit.elems()\n ])", "def _postprocess(\n self,\n result: List[str],\n eojeols: List[str],\n poses: List[str],\n ):\n token_indices = []\n temp_group = []\n for i, res in enumerate(result):\n if (\"<\" in res) or (\">\" in res):\n continue\n if not temp_group:\n temp_group.append(i)\n else:\n if i == (temp_group[-1] + 1):\n temp_group.append(i)\n else:\n token_indices.append(temp_group)\n temp_group = [i]\n token_indices.append(temp_group)\n\n lucrative = 0\n for i, li_index in enumerate(token_indices):\n if poses:\n eojeol = eojeols[i].split(\"+\")\n pos = poses[i].split(\"+\")\n tagged = []\n for e, p in zip(eojeol, pos):\n tagged.append(f\"{e}/{p}\")\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [\"+\".join(tagged)]\n else:\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [eojeols[i]]\n lucrative += len(li_index) - 1\n\n return result", "def parse_post_metadata(post_text):\n result = {}\n \n header_end = 0\n \n promed_date_match = re.search(\n r\"Published Date:\\s(?P<date>.*)\", post_text)\n result[\"promedDate\"] = parse_promed_pub_datetime(\n promed_date_match.group(\"date\"))\n \n archive_match = re.search(r\"Archive Number: (?P<num>.*)\", post_text)\n result[\"archiveNumber\"] = archive_match.group(\"num\")\n header_end = archive_match.end()\n \n subject = re.search(r\"Subject:\\s(?P<subject>.*)\", post_text).group(\"subject\")\n result[\"subject\"] = parse_subject_line(subject)\n result[\"subject\"][\"raw\"] = subject\n \n # This will not find all linked reports.\n # Some older posts refrence posts using different indexes I do not know\n # how to interpret.\n # Example: http://promedmail.org/direct.php?id=2194235\n result[\"linkedReports\"] = [\n report_id for report_id in re.findall(r\"\\d{8}\\.\\d+\", post_text)]\n \n # Most links 
will be article source urls or links to promed.\n result[\"links\"] = list(set(\n re.findall(r\"http\\S+[^(\\.\\])(\\.\\)>\\s]\", post_text)))\n result[\"links\"].sort()\n \n communicated_match = re.search(communicated_by_regex, post_text, re.M)\n if communicated_match:\n result[\"communicatedBy\"] = communicated_match.group(\"communicated_by\")\n return result, header_end", "def post_data_parser(post_data):\n post_data_json = {}\n for parameter in post_data.rsplit(\"&\"):\n post_data_json[parameter.rsplit(\"=\")[0]] = parameter.rsplit(\"=\")[1]\n return post_data_json", "def _do_commit(self):", "def test_parse_test(self, sha, data):\n self.instance = Commit(sha)\n self.instance.load()\n\n msg = 'Test failed for commit with sha %s' % sha\n\n self.assertEqual(data['author'], self.instance.author.name, msg)\n self.assertEqual(data['commit'], self.instance.commit.name, msg)\n\n self.assertEqual(dp.parse(data['authorDate']), self.instance.author_date, msg)\n self.assertEqual(dp.parse(data['commitDate']), self.instance.commit_date, msg)\n\n self.assertEqual(data['title'], self.instance.title, msg)\n self.assertEqual(data['msg'], self.instance.message, msg)\n\n self.assertEqual(data['parents'], list(map(lambda x: x.sha, self.instance.parents)), msg)\n\n self.assertEqual(data['numFiles'], len(self.instance.changes().data))", "def _hgcs(self, *commandlines):\n lastresult = None\n for cmd in commandlines:\n result = self.gc.handle_git_command(cmd)\n self.assertIsNone(lastresult)\n lastresult = result\n return lastresult", "def commit(self, _context: 'IconScoreContext', precommit_data: 'PrecommitData'):\n # Updated every block\n self.preps = precommit_data.preps\n\n # Updated every term\n if precommit_data.term is not None:\n self.term: 'Term' = precommit_data.term", "def is_valid_commits(args):\n if args.commits is not None:\n return True\n return False", "def clean_for_commit(self):", "def check_composite_tokens(self, name, tokens):\n assert len(tokens) >= 2\n key = tokens[0]\n\n assert key.value.lower() == name\n assert tokens[-1].value.lower() == \"end\"\n\n if len(tokens) == 2:\n body = [] # empty TYPE..END block\n else:\n body = tokens[1:-1]\n\n body_tokens = []\n\n for t in body:\n if isinstance(t, dict):\n body_tokens.append(t[\"__tokens__\"])\n else:\n body_tokens.append(t)\n return key, body_tokens", "def parse_rule(text: str) -> Tuple[str, Dict[str, int]]:\n kind, contents_text = text.split(\" bags contain \")\n contents: Dict[str, int] = defaultdict(int)\n if contents_text != \"no other bags.\":\n for subbag_text in contents_text[:-1].split(\", \"):\n count, subkind = parse_bagexpr(subbag_text)\n contents[subkind] += count\n return kind, contents", "def analyze_args(self, args):\n args_obj = args.__dict__\n if args_obj.get('add'):\n action = 'bc'\n params = args_obj['add']\n\n elif args_obj.get('prime'):\n action = 'prime'\n params = args_obj['prime']\n else:\n print \"Invalid Arguments...\"\n sys.exit(0)\n\n params_check = self.validate_params(params)\n if params_check is True:\n parsed_args = dict(action=action,\n params=map(int, params))\n return parsed_args", "def prepare_for_commit(self):", "def keywords(self):\n return {\n \"unary\": {\n k: v[0] for k, v in self.unary_commands.items()\n },\n \"terminal\": {\n k: v[0] for k, v in self.terminal_commands.items()\n },\n \"binary\": {\n k: v[0] for k, v in self.binary_commands.items()\n },\n }", "def _parse_tags (tag, multi_kind_dataset ='bagoue'): \r\n tag = str(tag); t = tag.strip().split() \r\n \r\n if len(t) ==1 : \r\n if 
t[0].lower() not in _DTAGS: \r\n tag = multi_kind_dataset +' ' + t[0]\r\n \r\n warn(f\"Fetching {multi_kind_dataset.title()!r} data without\"\r\n \" explicitly prefixing the kind of data with the area\"\r\n \" name will raise an error. In future, the argument\"\r\n f\" should be '{tag}' instead.\", FutureWarning \r\n )\r\n elif len(t) >1 : \r\n # only the multi kind dataset is allowed \r\n # to contain two words for fetching data \r\n if t[0].lower() !=multi_kind_dataset: \r\n tag = t[0].lower() # skip the second word \r\n return tag", "def check_msg_release_on_commit(self, broker, ftd_msgs):\n hits = self._get_hits(broker, re.compile(\"debug Message id=\\\"[0-9a-f-]{36}\\\"; pid=0x[0-9a-f]+: \"\n \"Content released on commit$\", re.MULTILINE))\n self._reconsile_hits(broker, ftd_msgs, hits)", "def load_commits(commit_list):\n commit_insert = \"INSERT OR REPLACE INTO github_commit VALUES \" \\\n \"(?, ? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,?)\"\n dbutils.load_list(commit_insert, commit_list, DATABASE_FILE)", "def deal_lines(self, lines, conf):\n if lines == ['']:\n print \"NO new %s commit!\" % conf\n else:\n for line in lines:\n if re.search('\\d+ files? changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n parts = line.split(' ', 2)\n commit_id = parts[0]\n self.current_commit = commit_id\n stamp = int(parts[1])\n ti = datetime.datetime.fromtimestamp(float(stamp))\n s_time = datetime.datetime.fromtimestamp(float(0))\n if self.start_date == s_time:\n self.start_date = ti\n elif self.start_date > ti:\n self.start_date = ti\n author, mail = parts[2].split('<', 1)\n message = mail.split('> ', 1)[1]\n mail = mail.split('>', 1)[0]\n if re.search(': ', message) is not None:\n messagetype = message.split(': ', 1)[0]\n if messagetype not in CLASSIFICATION:\n messagetype = 'OTR'\n else:\n messagetype = 'OTR'\n if commit_id not in self.commit_dictionary:\n self.commit_dictionary[commit_id]\\\n = [commit_id, mail,\n stamp, messagetype,\n messagetype, 0, 0, 0, 0]\n # [files, inserted, deleted, total_lines]\n if mail not in self.author_dictionary:\n self.author_dictionary[mail] = [author,\n mail, 0, 0,\n 0, 0, 1,\n stamp]\n # [files,inserted,deleted,total_lines,commit,stamp]\n else:\n self.author_dictionary[mail][6] += 1\n if stamp > self.author_dictionary[mail][7]:\n self.author_dictionary[mail][7] = stamp\n self.total_patches += 1\n except:\n print 'Warning: unexpected line \"%s\"' % line\n else:\n if conf == 'no_merges':\n try:\n commit_id = self.current_commit\n numbers = self.getstatsummarycounts(line)\n if len(numbers) == 3:\n (files, inserted, deleted) = \\\n map(lambda el: int(el), numbers)\n total_lines = inserted - deleted\n self.commit_dictionary[commit_id][5] = files\n self.commit_dictionary[commit_id][6] = inserted\n self.commit_dictionary[commit_id][7] = deleted\n self.commit_dictionary[commit_id][8] = total_lines\n self.author_dictionary[mail][2] += files\n self.author_dictionary[mail][3] += inserted\n self.author_dictionary[mail][4] += deleted\n self.author_dictionary[mail][5] += total_lines\n self.total_lines_inserted += inserted\n self.total_lines_deleted += deleted\n self.total_lines += total_lines\n self.current_commit = None\n except:\n print 'Warning: unexpected line \"%s\"' % line", "def test_parse_quotes(self):\n quote = api.parse_quote(\" This is a quote. 
| Author | Publication | tag1, tag2 , tag3 \",\n simple_format=False)\n self.assertEqual(\"This is a quote.\", quote.quote)\n self.assertEqual(\"Author\", quote.author)\n self.assertEqual(\"Publication\", quote.publication)\n self.assertEqual(3, len(quote.tags))", "def audit_names_in_metadata(self):\n\n # Iterate over commits....\n for commit in self.repository.commits.values():\n for name in [ commit.committer_name, commit.author_name ]:\n # Is the name whitelisted?\n if name in self.FullNameWhitelist:\n continue\n\n # As a special case, allow the name 'GitHub' for certain repositories\n if name == 'GitHub' and self.repository.path in self.GitHubPRWhitelist:\n self.__log_warning(commit.sha1, \"Commit has username 'GitHub' (web merge of PR); allowing anyway\")\n continue\n\n # Check to see if the name contains spaces - if not - it is probably misconfigured....\n if \" \" not in name.strip():\n self.__log_failure(commit.sha1, \"Non-full name: \" + name)\n continue", "def _filter_post(post):\n\n return True", "def do_post_parse_json(self, *args, **kwargs): # real signature unknown\n pass", "def parse_post_text(formatted_content):\n post = {}\n # Parse Mod comments and remove them from the text.\n potential_comments = re.finditer(\"\\[.+?\\]\", formatted_content, re.DOTALL)\n comments = []\n for comment_match in potential_comments:\n comment = comment_match.group()\n mod = re.search(r\"\\-\\s?Mod\\.\\s?(?P<mod>\\w+\\b)\", comment)\n if mod:\n comments.append({\n \"comment\" : comment,\n \"mod\" : mod.group(\"mod\")\n })\n post[\"modComments\"] = comments\n \n # Comments are removed from the post test so that\n # links, reports, etc. mentioned by mods are not extracted.\n no_comment_txt = formatted_content\n for comment in comments:\n no_comment_txt = no_comment_txt.replace(comment[\"comment\"], \"\")\n \n metadata, header_end = parse_post_metadata(no_comment_txt)\n post.update(metadata)\n \n sections = re.split(r\"^[\\*#]{3,}\\s*$\", no_comment_txt[header_end:], flags=re.M)\n articles = []\n \n # Some posts have articles which are parsed into multiple sections:\n # Ex: http://www.promedmail.org/direct.php?id=2194235\n # The section parsing code tries to recombine these by concatenating\n # unrecognized sections onto the previous sections if they form an article.\n # article_start_idx keeps track of the first section in the article.\n article_start_idx = None\n \n for idx, section in enumerate(sections):\n section = section.strip()\n article = parse_article_text(section, post_date=post['promedDate'])\n # Check if the section contains an actual article by seeing which\n # properties could be parsed.\n if article.get('source') or article.get('date'):\n articles.append(article)\n article_start_idx = idx\n else:\n # When a section cannot be parsed as an article the following code\n # tries to determine what it is. If the type cannot be determined\n # an error or warning is thrown.\n # These warnings can be used to find sections which are not being\n # correctly parsed.\n # Posts with known issues:\n # http://www.promedmail.org/direct.php?id=19990512.0773\n if re.search(r\"Visit ProMED-mail\\'s web site at|\"\n r\"Please support (the \\d{4}\\s)?ProMED\\-mail|\"\n r\"Donate to ProMED\\-mail. 
Details available at|\"\n r\"ProMED\\-mail makes every effort to verify the reports|\"\n r\"PROMED\\-MAIL FREQUENTLY ASKED QUESTIONS|\"\n r\"Become a ProMED\\-mail Premium Subscriber|\"\n r\"A ProMED\\-mail post\",\n section, re.I):\n # boilerplate promed notice section\n pass\n elif re.search(r\"In this (update|post(ing)?)\", section):\n # table of contents section\n pass\n elif re.search(r\"Cases in various countries\", section):\n # This type of post typically has links to several articles\n # with single sentence summaries.\n # Ex: http://www.promedmail.org/direct.php?id=20131125.2073661\n pass\n elif section == \"\":\n # empty section\n pass\n elif idx == 0 and section.count(\"\\n\") < 2:\n # probably the article title\n pass\n else:\n if article_start_idx != None:\n article = parse_article_text(\n \"\\n#####\\n\".join(\n sections[article_start_idx:idx]).strip(),\n post_date=post['promedDate'])\n assert article.get('source') or article.get('date')\n articles[-1] = article\n continue\n else:\n print \"Unexpected Section (%s):\" % post['archiveNumber'], [section[0:50] + \"...\"]\n article_start_idx = None\n post['articles'] = articles\n return post", "def parse(raw_query, EXPAND_SET = False):\r\n\t\r\n\t# tokenize and tag the query using nltk tools, use .lower() to standardize the input\r\n\ttokenized_query = nltk.word_tokenize(raw_query.lower())\r\n\ttagged_query = nltk.pos_tag(tokenized_query)\r\n\t\r\n\t#master_chunk = r\"Chunk: {(<VB\\w?>|<JJ>*|<RB\\w?>)<DT>?(<NN\\w?>+)}\" \r\n\t\r\n\t\r\n\t# master_chunk now captures prepositional phrase, as they are typically part of one thought.\r\n\t\r\n\tmaster_chunk = r\"Chunk: {((<JJ\\w?>*|<RB\\w?>*)<DT>?(<NN\\w?>+))(<IN>((<JJ>*|<RB\\w?>*)<DT>?(<NN\\w?>+)))*}\" # Regex to identify chunks that may be useful \r\n\t#\t\t\t\t\tmaster_chunk breakdown\r\n\t#\r\n\t#\tFirst half : ((<JJ>*|<RB\\w?>*)<DT>?(<NN\\w?>+))\r\n\t#\t<JJ\\w?>* | <RB\\w>?>* allows an arbitrary number of adjectives to precede the noun\r\n\t# \t\"\\w\" is \"any character\" and allows the capture of all JJ and RB tags, which include JJ, JJR, JJS, RB, RBR, and RBS\r\n\t#\t<DT>? 
allows for exactly one (1) or zero (0) determiner, often this will capture things like \"no\" and then a noun\r\n\t# \t(<NN\\w>+) captures one (1) or arbitrarily more nouns\r\n\t#\t\r\n\t#\tSecond half: (<IN>((<JJ>*|<RB\\w?>*)<DT>?(<NN\\w?>+)))*\r\n\t#\t<IN> captures prepostions \"of\", \"with\", and so on.\r\n\t# \tThe rest of the expression is the same as the first half \r\n\t# \tThe final * (kleene star) allows zero (0) or more prepositional phrases to be captured\r\n\t\r\n\t\r\n\tmaster_parser = nltk.RegexpParser(master_chunk) # Create the parser from the Regex\r\n\tmaster = master_parser.parse(tagged_query) # Parse the query previously tagged\r\n\t\r\n\tchunk_list = []\r\n\tkeywords = []\r\n\tfor phrase in master:\r\n\t\tif (not isinstance(phrase, tuple)): # all non-chunks are tuples, a chunk is a nltk.tree type\r\n\t\t\tchunk_list.append(phrase)\r\n\t\t\ttmp = \"\"\r\n\t\t\tfor word in phrase: # generate keyword phrases from the chunks\r\n\t\t\t\ttmp += word[0] + \" \"\r\n\t\t\t\r\n\t\t\ttmp = tmp[:-1] # Remove final space\r\n\t\t\tkeywords.append(tmp)\r\n\t\t\t\r\n\tif EXPAND_SET: # defualt is not to expand\r\n\t\t# combine the two lists, using set() to remove any repeated phrases\r\n\t\treturn list(set(generate_keywords(chunk_list) + keywords))\r\n\telse:\r\n\t\treturn keywords", "def process_special_sign(self):\r\n # 首先把全部是英文的句子找出来,没有特殊符号,没有其他东西,只有字母和数字。\r\n # 思路大概是用正则表达式确定结尾,用函数判断中间全部都是英文的句子,不允许特殊符号。\r\n # 用上面的check_sents函数,解决这个问题。\r\n all_sents = list()\r\n for i in self.set_of_result[0][\"question_text\"]:\r\n if DataCleanCheckTool.check_sents(i):\r\n all_sents.append(i)\r\n\r\n # 有些特殊情况的数据,直接抛弃掉,数量不大\r\n # 然后有一些描述词性特殊的单词的其实没有意义,直接抛掉\r\n # 还有一些带括号的,那些需要把括号中的内容抛掉\r\n # 但是因为用的是pop,每次pop之后index都变化,所以会跳着pop,因此在数据量大的情况下需要重复执行\r\n for k, v in enumerate(all_sents):\r\n if \". . 
.\" in v:\r\n all_sents.pop(k)\r\n elif \"...\" in v:\r\n all_sents.pop(k)\r\n elif \"adj.\" in v:\r\n all_sents.pop(k)\r\n elif \"adv.\" in v:\r\n all_sents.pop(k)\r\n elif \"n.\" in v:\r\n all_sents.pop(k)\r\n elif \"v.\" in v:\r\n all_sents.pop(k)\r\n elif \"prep.\" in v:\r\n all_sents.pop(k)\r\n elif \"sth.\" in v:\r\n all_sents.pop(k)\r\n elif \"sb.\" in v:\r\n all_sents.pop(k)\r\n\r\n # 小写开头的都可以全部抛弃掉了,不是完整的真正的句子,只是一段不完整的话。\r\n pattern = re.compile(\"^[a-z].+\")\r\n for k, v in enumerate(all_sents):\r\n try:\r\n pattern.search(v).group()\r\n all_sents.pop(k)\r\n except Exception as e:\r\n logging.exception(e)\r\n\r\n return all_sents", "def flag_all_commit(self):\n\t\tfor k in self.data.keys():\n\t\t\tindex = 0\n\t\t\tfor item in self[k]:\n\t\t\t\tself.data[k][index]['meta']['needs_commit'] = True\n\t\t\t\tindex += 1", "def postprocess(self, target):\n if self.params.get('commit'):\n commit()\n\n return target", "def parse(text):\n # Remove hashtag and trailing whitespaces\n clean_text = reHASH.sub(\"\", text).strip()\n # We discard those entries with URLs inside\n if reURL.search(clean_text):\n raise\n # Also discard those that contain @user forms\n if '@' in clean_text:\n raise\n parts = clean_text.split(':')\n if len(parts) < 2:\n raise\n term = parts[0].strip().lower()\n meaning = \":\".join(parts[1:]).strip()\n return term, meaning", "def dump_commit_diff(commit):\n\n for file in commit:\n if file[4] == \"\" or \".\" not in file[4]:\n sys.stdout.flush()\n print((\"Index: \" + file[3] + \" deleted\\r\"))\n sys.stdout.flush()\n else:\n subprocess.call([\n \"cvs\",\n \"-d\",\n file[8],\n \"rdiff\",\n \"-u\",\n \"-r\",\n PostsaiCommitViewer.calculate_previous_cvs_revision(file[4]),\n \"-r\",\n file[4],\n file[3]])", "def check_commit(self, commit):\n # pylint: disable=too-many-branches\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.debug('check_commit() Checking mark={} sha1={} file-ct={} -- {}'\n .format( commit['mark']\n , p4gf_util.abbrev(commit['sha1'])\n , len(commit['files'])\n , repr(commit['data'])[:20].splitlines()[0]))\n\n if not commit['author_p4user']:\n raise PreflightException(_(\"User '{user}' not permitted to commit\")\n .format(user=commit['author']['email'].strip('<>')))\n\n if 'merge' in commit:\n ref_is_review = (self.gsreview_coll and\n self.gsreview_coll.ref_in_review_list(self._current_prt.ref))\n if not ref_is_review and not self.ctx.merge_commits:\n raise PreflightException(_('Merge commits are not enabled for this repo.'))\n if (not ref_is_review and\n not self.ctx.branch_creation and self.assigner.have_anonymous_branches):\n msg = _('Git branch creation is prohibited for this repo.')\n p4_branch_names_non_lw = [b.git_branch_name for b in self.ctx.branch_dict().values()\n if b.git_branch_name and not b.is_lightweight]\n if len(p4_branch_names_non_lw) > 1:\n msg += _('\\nThis repo has more than one named branch.'\n '\\nTry altering the push order - '\n 'pushing branches with merge ancestors first.')\n raise PreflightException(msg)\n if LOG.isEnabledFor(logging.DEBUG):\n for parent_mark in commit['merge']:\n parent_sha1 = self.fast_export_marks.get_commit(parent_mark)[:7]\n LOG.debug(\"check_commit() merge mark={} sha1={}\"\n .format(parent_mark, parent_sha1))\n\n if not self.ctx.submodules and 'files' in commit:\n for f in commit['files']:\n if f.get('mode') == '160000':\n if 'first_commit' in commit and not self._path_added(f.get('path'), commit):\n LOG.debug2('check_commit() passed {} in {}'.format(\n f.get('path'), p4gf_util.abbrev(commit['sha1'])))\n 
continue\n raise PreflightException(\n _('Git submodules not permitted: path={path} commit={commit_sha1}')\n .format(path=f.get('path'), commit_sha1=p4gf_util.abbrev(commit['sha1'])))\n\n for f in commit['files']:\n LOG.debug3(\"check_commit : commit files: \" + _log_fe_file(f))\n err = check_valid_filename(f['path'], self.ctx)\n if err:\n raise PreflightException(err)\n if self.ctx.is_lfs_enabled:\n self._check_lfs(commit, f)\n\n # Warn user about any jobs that appear to not exist\n jobs = G2PJob.lookup_jobs(self.ctx, G2PJob.extract_jobs(commit['data']))\n if jobs:\n for job_id in jobs:\n r = self.ctx.p4run('jobs', '-e', 'job={}'.format(job_id))\n if not r:\n _print_error(_(\"Job '{job_id}' doesn't exist\").format(job_id=job_id))\n # Create pending changes for any Git-Swarm reviews", "def extract_commands(self):\n # import pdb; pdb.set_trace()\n left_i = 0\n right_i = 1\n commands = {}\n cmd = self.cmd\n\n if not cmd:\n return\n while left_i < len(cmd):\n sub_cmd = cmd[left_i:right_i]\n if sub_cmd in self.action_list:\n arg_len, arguments = self.extract_command_arguments(right_i)\n commands[sub_cmd] = arguments\n left_i = right_i + arg_len\n right_i = left_i + 1\n else:\n left_i, right_i = self.update_i(left_i, right_i)\n return commands", "def _parse(cls, tokens, *, get_params=False):\n\n\t\tif get_params:\n\t\t\tresult = []\n\t\telse:\n\t\t\tresult = None\n\n\t\tfor t in tokens:\n\t\t\tnew = None\n\t\t\tdone = False\n\n\t\t\tif t.kind == 'OPEN':\n\t\t\t\tnew = cls._parse(tokens)\n\t\t\telif t.kind in {'CLOSE', 'DOT'}:\n\t\t\t\tdone = True\n\t\t\telif t.kind == 'LAMBDA':\n\t\t\t\tparams = cls._parse(tokens, get_params=True)\n\n\t\t\t\tif not params:\n\t\t\t\t\traise LambdaError('No parameters in lambda', t.line, t.pos)\n\n\t\t\t\tbody = cls._parse(tokens)\n\n\t\t\t\tif not body:\n\t\t\t\t\traise LambdaError('No body in lambda', t.line, t.pos)\n\n\t\t\t\tnew = Abs(params[-1], body, line=t.line, pos=t.pos)\n\n\t\t\t\tfor param in params[-2::-1]:\n\t\t\t\t\tnew = Abs(param, new, line=t.line, pos=t.pos)\n\n\t\t\t\tdone = True\n\t\t\telif t.kind == 'EQUAL':\n\t\t\t\tvar = cls._parse(tokens)\n\n\t\t\t\tif not var:\n\t\t\t\t\traise LambdaError('No variable to assign to', t.line, t.pos)\n\n\t\t\t\tvalue = cls._parse(tokens)\n\n\t\t\t\tif not value:\n\t\t\t\t\traise LambdaError('No value to assign: ' + var.name, t.line, t.pos)\n\n\t\t\t\tnew = Ass(var, value, line=t.line, pos=t.pos)\n\n\t\t\t\tdone = True\n\t\t\telif t.kind == 'QUERY':\n\t\t\t\tvalue = cls._parse(tokens)\n\n\t\t\t\tif not value:\n\t\t\t\t\traise LambdaError('No value to query', t.line, t.pos)\n\n\t\t\t\tnew = Que(value, line=t.line, pos=t.pos)\n\n\t\t\t\tdone = True\n\t\t\telif t.kind == 'SYMBOL':\n\t\t\t\tnew = Var(t.value, line=t.line, pos=t.pos)\n\n\t\t\tif new is not None:\n\t\t\t\tif get_params:\n\t\t\t\t\tresult.append(new)\n\t\t\t\telif result is None:\n\t\t\t\t\tresult = new\n\t\t\t\telse:\n\t\t\t\t\t# Ensure that when the function and argument are output,\n\t\t\t\t\t# they are correctly parenthesized.\n\t\t\t\t\tif isinstance(result, (Abs, Ass, Que)):\n\t\t\t\t\t\tresult.surround_on_str = True\n\n\t\t\t\t\tif isinstance(new, App):\n\t\t\t\t\t\tnew.surround_on_str = True\n\n\t\t\t\t\tresult = App(result, new, line=new.line, pos=new.pos)\n\n\t\t\tif done:\n\t\t\t\tbreak\n\n\t\treturn result", "def get_extra_keywords(self, code_root, repository, dataset, work_dir,\n flavour, extra_env):\n return({})", "def FakeCommitAsDict(commit_self):\n git_hash = commit_self.git_hash\n n = git_hash[len('git_hash_'):]\n return {\n 
'repository': 'chromium',\n 'git_hash': git_hash,\n 'url': 'https://example.com/repository/+/' + git_hash,\n 'author': 'author%[email protected]' % (n,),\n 'subject': 'Subject.',\n 'message': 'Subject.\\n\\nCommit message.',\n }", "def _binary_command_regexes(self):\n patterns = {}\n for intent, keys in self.keywords.get(\"binary\").items():\n if keys:\n patterns[intent] = re.compile(r'\\b' + r'\\b|\\b'.join(keys) + r'\\b')\n return patterns", "def do_post_action_processing(self, i_state, low_level_actions):\n pass", "def testApplyVisitor(self):\n\t\tfor key in inputparse.keys():\n\t\t\tinparse = inputparse[key]\n\t\t\texpected = expectedoutput[key]\n\t\t\tself.subtest = key\n\t\t\tself.assertEqual(cmakemodifier.apply_all_cleanup_visitors(inparse), expected)", "def parse_unknown_args(args):\n retval = {}\n preceded_by_key = False\n for arg in args:\n if arg.startswith('--'):\n if '=' in arg:\n key = arg.split('=')[0][2:]\n value = arg.split('=')[1]\n retval[key] = value\n else:\n key = arg[2:]\n preceded_by_key = True\n elif preceded_by_key:\n retval[key] = arg\n preceded_by_key = False\n\n return retval", "def process(self):\n assert self.valid, 'cannot apply invalid op'\n from hive.indexer.cached_post import CachedPost\n\n action = self.action\n params = dict(\n date=self.date,\n community=self.community,\n community_id=self.community_id,\n actor=self.actor,\n actor_id=self.actor_id,\n account=self.account,\n account_id=self.account_id,\n post_id=self.post_id,\n role_id=self.role_id,\n notes=self.notes,\n title=self.title,\n )\n\n # Community-level commands\n if action == 'updateProps':\n bind = ', '.join([k+\" = :\"+k for k in list(self.props.keys())])\n DB.query(\"UPDATE hive_communities SET %s WHERE id = :id\" % bind,\n id=self.community_id, **self.props)\n self._notify('set_props', payload=json.dumps(read_key_dict(self.op, 'props')))\n\n elif action == 'subscribe':\n DB.query(\"\"\"INSERT INTO hive_subscriptions\n (account_id, community_id, created_at)\n VALUES (:actor_id, :community_id, :date)\"\"\", **params)\n DB.query(\"\"\"UPDATE hive_communities\n SET subscribers = subscribers + 1\n WHERE id = :community_id\"\"\", **params)\n self._notify('subscribe')\n elif action == 'unsubscribe':\n DB.query(\"\"\"DELETE FROM hive_subscriptions\n WHERE account_id = :actor_id\n AND community_id = :community_id\"\"\", **params)\n DB.query(\"\"\"UPDATE hive_communities\n SET subscribers = subscribers - 1\n WHERE id = :community_id\"\"\", **params)\n\n # Account-level actions\n elif action == 'setRole':\n DB.query(\"\"\"INSERT INTO hive_roles\n (account_id, community_id, role_id, created_at)\n VALUES (:account_id, :community_id, :role_id, :date)\n ON CONFLICT (account_id, community_id)\n DO UPDATE SET role_id = :role_id\"\"\", **params)\n self._notify('set_role', payload=Role(self.role_id).name)\n elif action == 'setUserTitle':\n DB.query(\"\"\"INSERT INTO hive_roles\n (account_id, community_id, title, created_at)\n VALUES (:account_id, :community_id, :title, :date)\n ON CONFLICT (account_id, community_id)\n DO UPDATE SET title = :title\"\"\", **params)\n self._notify('set_label', payload=self.title)\n\n # Post-level actions\n elif action == 'mutePost':\n DB.query(\"\"\"UPDATE hive_posts SET is_muted = '1'\n WHERE id = :post_id\"\"\", **params)\n self._notify('mute_post', payload=self.notes)\n if not DbState.is_initial_sync():\n CachedPost.update(self.account, self.permlink, self.post_id)\n\n elif action == 'unmutePost':\n DB.query(\"\"\"UPDATE hive_posts SET is_muted = '0'\n WHERE id = 
:post_id\"\"\", **params)\n self._notify('unmute_post', payload=self.notes)\n if not DbState.is_initial_sync():\n CachedPost.update(self.account, self.permlink, self.post_id)\n\n elif action == 'pinPost':\n DB.query(\"\"\"UPDATE hive_posts SET is_pinned = '1'\n WHERE id = :post_id\"\"\", **params)\n self._notify('pin_post', payload=self.notes)\n elif action == 'unpinPost':\n DB.query(\"\"\"UPDATE hive_posts SET is_pinned = '0'\n WHERE id = :post_id\"\"\", **params)\n self._notify('unpin_post', payload=self.notes)\n elif action == 'flagPost':\n self._notify('flag_post', payload=self.notes)\n\n return True", "def test_backup_logs_for_keywords(self):\n # Populate the default bucket on self.master with documents\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n\n # Create backup archive and repository.\n self.backup_create()\n\n # Perform backup.\n self.backup_cluster()\n\n # Keywords to fail on (Keyword: str, at_start: bool, lines_before: int, lines_after: int)\n bad_keywords = [\n (\"cbbackupmgr version Unknown\", False, 0, 0), # Checks cbbackupmgr build version/hash set correctly at build time\n ( \"panic\", True, 0, 12) # Checks for the panic keyword at start of sentence\n ]\n\n # Scan logs for keywords in bad_keywords\n for keyword, at_start, lines_before, lines_after in bad_keywords:\n\n found, output, error = \\\n self._check_output_in_backup_logs(keyword, at_start = at_start, lines_before = lines_before, lines_after = lines_after)\n\n if found:\n self.fail(f\"Found bad keyword(s) '{keyword}' in backup logs:\\n\" + \"\\n\".join(output))", "def token_kwargs(bits, parser):\r\n if not bits:\r\n return {}\r\n kwargs = SortedDict()\r\n while bits:\r\n match = kwarg_re.match(bits[0])\r\n if not match or not match.group(1):\r\n return kwargs\r\n key, value = match.groups()\r\n del bits[:1]\r\n kwargs[parser.compile_filter(key)] = parser.compile_filter(value)\r\n return kwargs", "def valid_retag_params(self) -> bool:\n if not (self.action[0] == Actions.RETAG.value):\n return False\n pairs = self.action[1].split(\",\")\n for pair in pairs:\n if not self.correct_retag_pair(pair):\n return False\n return True", "def parse_pkgsubmit(self):\n parser = pkgsubmitParser()\n with self.opener.open(PKGSUBMIT_URL) as f:\n parser.feed(f.read().decode())\n if parser.token:\n self.token = parser.token\n self.categories = parser.categories", "def postparse_hook_too_many_parameters(self, data1, data2) -> cmd2.plugin.PostparsingData:\n pass", "def parse_body(body):\n for line in body.lower().split(\"\\n\"):\n words = line.split()\n try:\n idx = words.index(\"re-run\")\n except ValueError:\n continue\n if words[idx + 1] == \"full\":\n yield words[idx : idx + 3]\n else:\n yield words[idx : idx + 2]", "def parse(self, commands):\n raise NotImplementedError()", "def parse(args, query):\n\n global query_type\n\n # Deal first with requests for definition or pronunciation\n # 1. Make the code easier to read\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n fourth_word = args[3] if len(args) > 3 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? 
this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # 2. Check for keywords in the list of arguments\n # Example: nostrum defined\n # Example: pronunciation of otolaryngology\n if first_word == \"define\":\n # e.g. if the first word is \"define\" we'll add the second word to the query\n query = {\"sp\": second_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the query is a dictionary of GET parameters for the http request, eg\n # https://api.datamuse.com/words?max=1&sp=SECOND_WORD_HERE&qe=sp&md=d&ipa=1\n elif second_word == \"defined\" or second_word == \"definition\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses string interpolation (the f\"\" stuff)\n elif f\"{second_word} {third_word}\" == \"means what\":\n query = {\"sp\": first_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n elif f\"{second_word} {third_word} {fourth_word}\" == \"is said how\":\n query = {\"sp\": first_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # this one uses regular expressions -- i.e. if the second_word is \"of\" or \"for\"\n elif first_word == \"definition\" and re.match(r'(of)|(for)',second_word):\n query = {\"sp\": third_word, \"md\": \"d\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the is_pronounced function returns true if first_word is a (mis)spelling of pronounced\n elif re.match(r'(of)|(for)',second_word) and is_pronounced(first_word):\n query = {\"sp\": third_word, \"md\": \"r\", \"max\": \"1\", \"qe\": \"sp\", \"ipa\": \"1\"}\n # the ordering in the above list is not entirely random\n # since an if-elif-else statement won't keep evaluating after it finds a match\n # it makes sense to put the most computationally complex clauses at the end\n # >>> import timeit\n # >>> timeit.timeit('from word_helpers import is_pronounced; is_pronounced(\"pronounced\")', number=10000)\n # 0.022870146989589557\n # >>> timeit.timeit('args = [\"defined\"]; args[0] == \"defined\"', number=10000)\n # 0.002359684993280098\n # it takes 2 milliseconds to compare a string in a list 10,000 times\n # -- versus 2 centiseconds to run is_pronounced 10,000 times\n # (on my Intel Core i5 2.67GHz CPU -- obviously speed depends on the processor)\n # it's also worth noting that readability counts more than speed optimization (most of the time!)\n\n # Quick way to check if any of the above if statements matched\n if \"sp\" in query:\n # if so, we are done in this function\n if query[\"md\"] == \"r\": query_type = \"PRO\"\n if query[\"md\"] == \"d\": query_type = \"DEF\"\n return query\n\n # these will be useful later\n STOP_WORDS = (\"and\", \"meaning\", \"means\", \"max\", \"about\", \"which\", \"that\")\n\n # Parse more complicated requests for synonyms, etc\n # 0 is false in python, so this loop will run until we've removed all the args\n while len(args):\n # we must reset these vars each time the loop starts\n # in case we've deleted items from the args list\n first_word = args[0]\n second_word = args[1] if len(args) > 1 else \"\"\n third_word = args[2] if len(args) > 2 else \"\"\n # we use the teranary operator (this if ____ else that) to avoid an IndexError\n # IndexError would be raised if we tried to access the second element (args[1])\n # in a list which contained only one item (eg args == [\"lonely\"])\n # the teranary operator (in most languages it looks like \"____ ? 
this : that\")\n # returns \"this\" when the if is true and \"that\" when the if is false\n # meaning, if len(args) is NOT greater than 1, second_word == \"\"\n\n # Disambiguate homonym requests from spelling correction requests\n # Example: sounding like tung\n # Example: sounds like doe but spelled differently\n if re.match(r'sound((s)|(ing)) like',f\"{first_word} {second_word}\"):\n\n # again, use len(args) to avoid an IndexError\n if len(args) >= 6 and \\\n re.match(r'((but)|(except)) spelled different(ly)?',f\"{args[3]} {args[4]} {args[5]}\"):\n # but instead of teranary operator,\n # use \"short circuit logic\" -- when python sees \"if __A__ and __B__ \",\n # it knows that if A is false, the whole thing will be false\n # (you can't have \"ice cream and potatoes\" for dinner if you don't have ice cream)\n # and it won't waste time evaluating B, so re.match won't run and args[4]\n # won't be accessed and no IndexError will be raised, yay!\n # regex explained: ? means the prior thing matched zero or one times\n # different(ly)? matches \"different\" and \"differently\"\n query[\"rel_hom\"] = third_word\n # now, delete 6 items from args, starting at item 0\n del args[0:6]\n else:\n query[\"sl\"] = third_word\n del args[0:3]\n\n # Example: spelled like 'cens?r'\n elif re.match(r'spell((ed)|(ing)) like',f\"{first_word} {second_word}\"):\n # two stars (**) means \"unpack\" a dictionary\n # just like unpacking a suitcase, we've dumped the old contents of query\n # into a new dictionary (which we are saving with the same variable name!)\n query = {**query,\"sp\": third_word}\n # query[\"sp\"] = third_word also works fine\n # just showing off how to combine two dictionaries :)\n del args[0:3]\n\n # Example: rhymes with culminate\n elif len(args) > 2 and second_word == \"with\" and is_rhymes(first_word):\n query[\"rel_rhy\"] = third_word\n del args[0:3]\n\n # Example: almost rhymes with culminate\n elif len(args) > 3 and \\\n f\"{first_word} {third_word}\" == \"almost with\" and \\\n is_rhymes(second_word):\n query[\"rel_nry\"] = args[3] # fourth_word\n del args[0:4]\n\n # Example: comes after sea\n elif f\"{first_word} {second_word}\" == \"comes after\":\n query[\"lc\"] = third_word\n del args[0:3]\n elif first_word == \"follows\":\n query[\"lc\"] = second_word\n del args[0:2]\n elif f\"{first_word} {second_word}\" == \"comes before\":\n query[\"rc\"] = third_word\n del args[0:3]\n elif first_word == \"preceeds\":\n query[\"rc\"] = second_word\n del args[0:2]\n\n # Example: describes paint\n elif first_word == \"describes\":\n query[\"rel_jjb\"] = second_word\n del args[0:2]\n\n # Example: associated with feet\n elif f\"{first_word} {second_word}\" == \"associated with\" or \\\n f\"{first_word} {second_word}\" == \"triggered by\":\n query[\"rel_trg\"] = third_word\n del args[0:3]\n\n # Example: meaning feeling tired\n elif first_word in [\"means\",\"meaning\",\"like\"]:\n # get rid of first_word\n del args[0]\n # now short circuit logic again, plus using the tuple from ealier\n # b/c if we have \"meaning deer and sounds like roe\" we don't want\n # query[\"ml\"] == \"deer and sounds like roe\" -- it should be just \"deer\"\n while len(args) and args[0] not in STOP_WORDS:\n # teranary operator prevents KeyError if \"ml\" not already in query dictionary\n query[\"ml\"] = f\"{query['ml']} {args[0]}\" if \"ml\" in query else args[0]\n del args[0]\n # an example with the previous code to make things clearer\n # say args == [\"means\", \"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # first_word IS 
in [\"means\",\"meaning\",\"like\"]\n # del first_word, args is now [\"egg\", \"beater\", \"and\", \"max\", \"35\"]\n # len(args) == 5, args[0] is NOT in STOP_WORDS\n # \"ml\" is NOT in query, so teranary returns args[0] (\"egg\")\n # args[0] is copied to query[\"ml\"] (query is now {ml: \"egg\"})\n # del args[0], args is now [\"beater\", \"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 4, args[0] is NOT in STOP_WORDS\n # \"ml\" IS in query, so teranary returns f\"{query['ml']} {args[0]}\" (\"egg beater\") \n # f\"{query['ml']} {args[0]}\" is copied to query[\"ml\"]\n # (query is now {ml: \"egg beater\"})\n # del args[0], args is now [\"and\", \"max\", \"35\"]\n # return to top of while loop, len(args) == 3,\n # args[0] IS in STOP_WORDS (args[0] == \"and\")\n # DO NOT enter the while loop, continue past this code block\n\n # Discover the topic of our query\n elif first_word == \"about\":\n del args[0]\n count = 0\n # Datamuse allows a max of five topic words\n while len(args) and args[0] not in STOP_WORDS and count <= 5:\n query[\"topics\"] = f\"{query['topics']} {args[0]}\" if \"topics\" in query else args[0]\n del args[0]\n # count += 1 is the same as count = count + 1\n count += 1\n\n # How many results to return (max 1000)\n elif first_word in [\"max\", \"maximum\", \"only\"]:\n user_max = convert_num(second_word)\n if user_max and int(user_max) <= 1000:\n query[\"max\"] = user_max\n del args[0:2]\n\n # Remove filler words if they weren't parsed out above\n elif first_word in [\"that\",\"which\",\"and\",\"like\",\"is\"]:\n del args[0]\n\n # Add anything not otherwise parsable to the ml parameter\n else:\n query[\"ml\"] = f\"{query['ml']} {first_word}\" if \"ml\" in query else first_word\n del args[0]\n\n # this is the bottom of that massive while loop\n # if args is not empty by now, we'll start over from the top ^\n\n return query\n # and this is the end of the \"def parse(args, query)\" function\n # whew!", "def _parse_kwargs(self):\n re_kwargs = r'^[\\w_][\\w\\d_]*=.+$'\n kwargs = [a.split('=') for a in self.args if re.findall(re_kwargs, a)]\n self.kwargs = {k: self._load_json(v) for k, v in kwargs}\n self.args = [a for a in self.args if not re.findall(re_kwargs, a)]" ]
[ "0.599186", "0.520402", "0.5114426", "0.5038884", "0.5008906", "0.49850717", "0.49810693", "0.48139718", "0.47817907", "0.46998245", "0.4676912", "0.46327248", "0.46078002", "0.46047723", "0.4595017", "0.45620742", "0.45620742", "0.45582268", "0.45472842", "0.45239753", "0.45174563", "0.44828975", "0.44813994", "0.44729525", "0.44670293", "0.44607785", "0.44483322", "0.44404364", "0.44357005", "0.44285548", "0.44157493", "0.44015017", "0.44010508", "0.43980965", "0.43952322", "0.43833455", "0.4379336", "0.43713665", "0.43682972", "0.43570825", "0.43478358", "0.43463805", "0.43416977", "0.433929", "0.4330289", "0.4324915", "0.4319049", "0.43170616", "0.43092483", "0.43046793", "0.42996708", "0.42929474", "0.42906192", "0.4287134", "0.42866504", "0.42854813", "0.4284755", "0.4284172", "0.42778888", "0.42764282", "0.4271151", "0.42661154", "0.42572013", "0.42496872", "0.42269397", "0.4219741", "0.42197257", "0.42156234", "0.42136362", "0.42129558", "0.42116863", "0.42051104", "0.41790175", "0.41748562", "0.41743648", "0.4172645", "0.41674513", "0.4165251", "0.41621032", "0.41526777", "0.41430348", "0.41409254", "0.41283038", "0.4125435", "0.41176185", "0.41163322", "0.41154212", "0.41056308", "0.41044453", "0.4103479", "0.41034412", "0.410334", "0.40952438", "0.4093048", "0.40923887", "0.4085325", "0.40825406", "0.40799588", "0.40708297", "0.406901" ]
0.56862926
1